{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.8720930232558137,
"eval_steps": 500,
"global_step": 112,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03488372093023256,
"grad_norm": 43.200069427490234,
"learning_rate": 5.0000000000000004e-08,
"loss": 2.5149,
"step": 1
},
{
"epoch": 0.06976744186046512,
"grad_norm": 43.373931884765625,
"learning_rate": 1.0000000000000001e-07,
"loss": 2.5625,
"step": 2
},
{
"epoch": 0.10465116279069768,
"grad_norm": 42.566246032714844,
"learning_rate": 1.5000000000000002e-07,
"loss": 2.4724,
"step": 3
},
{
"epoch": 0.13953488372093023,
"grad_norm": 44.33814239501953,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.5837,
"step": 4
},
{
"epoch": 0.1744186046511628,
"grad_norm": 42.93879318237305,
"learning_rate": 2.5000000000000004e-07,
"loss": 2.5022,
"step": 5
},
{
"epoch": 0.20930232558139536,
"grad_norm": 43.14997863769531,
"learning_rate": 3.0000000000000004e-07,
"loss": 2.4925,
"step": 6
},
{
"epoch": 0.2441860465116279,
"grad_norm": 43.04749298095703,
"learning_rate": 3.5000000000000004e-07,
"loss": 2.5079,
"step": 7
},
{
"epoch": 0.27906976744186046,
"grad_norm": 43.447654724121094,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.4661,
"step": 8
},
{
"epoch": 0.313953488372093,
"grad_norm": 43.50425338745117,
"learning_rate": 4.5000000000000003e-07,
"loss": 2.458,
"step": 9
},
{
"epoch": 0.3488372093023256,
"grad_norm": 42.86153030395508,
"learning_rate": 5.000000000000001e-07,
"loss": 2.4361,
"step": 10
},
{
"epoch": 0.38372093023255816,
"grad_norm": 42.33100509643555,
"learning_rate": 5.5e-07,
"loss": 2.343,
"step": 11
},
{
"epoch": 0.4186046511627907,
"grad_norm": 41.95058059692383,
"learning_rate": 6.000000000000001e-07,
"loss": 2.3186,
"step": 12
},
{
"epoch": 0.45348837209302323,
"grad_norm": 40.86145782470703,
"learning_rate": 6.5e-07,
"loss": 2.193,
"step": 13
},
{
"epoch": 0.4883720930232558,
"grad_norm": 41.81694793701172,
"learning_rate": 7.000000000000001e-07,
"loss": 2.1867,
"step": 14
},
{
"epoch": 0.5232558139534884,
"grad_norm": 40.10181427001953,
"learning_rate": 7.5e-07,
"loss": 2.0906,
"step": 15
},
{
"epoch": 0.5581395348837209,
"grad_norm": 37.55855178833008,
"learning_rate": 8.000000000000001e-07,
"loss": 1.956,
"step": 16
},
{
"epoch": 0.5930232558139535,
"grad_norm": 35.37375259399414,
"learning_rate": 8.500000000000001e-07,
"loss": 1.8531,
"step": 17
},
{
"epoch": 0.627906976744186,
"grad_norm": 34.46567916870117,
"learning_rate": 9.000000000000001e-07,
"loss": 1.7425,
"step": 18
},
{
"epoch": 0.6627906976744186,
"grad_norm": 32.87102127075195,
"learning_rate": 9.500000000000001e-07,
"loss": 1.5349,
"step": 19
},
{
"epoch": 0.6976744186046512,
"grad_norm": 34.433929443359375,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.3663,
"step": 20
},
{
"epoch": 0.7325581395348837,
"grad_norm": 30.009357452392578,
"learning_rate": 1.0500000000000001e-06,
"loss": 1.1505,
"step": 21
},
{
"epoch": 0.7674418604651163,
"grad_norm": 26.409732818603516,
"learning_rate": 1.1e-06,
"loss": 1.0162,
"step": 22
},
{
"epoch": 0.8023255813953488,
"grad_norm": 25.024803161621094,
"learning_rate": 1.1500000000000002e-06,
"loss": 0.8526,
"step": 23
},
{
"epoch": 0.8372093023255814,
"grad_norm": 24.836118698120117,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.7043,
"step": 24
},
{
"epoch": 0.872093023255814,
"grad_norm": 21.178863525390625,
"learning_rate": 1.25e-06,
"loss": 0.5793,
"step": 25
},
{
"epoch": 0.9069767441860465,
"grad_norm": 16.293062210083008,
"learning_rate": 1.3e-06,
"loss": 0.4429,
"step": 26
},
{
"epoch": 0.9418604651162791,
"grad_norm": 11.611193656921387,
"learning_rate": 1.3500000000000002e-06,
"loss": 0.3535,
"step": 27
},
{
"epoch": 0.9767441860465116,
"grad_norm": 7.844841003417969,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.2852,
"step": 28
},
{
"epoch": 1.0,
"grad_norm": 7.844841003417969,
"learning_rate": 1.45e-06,
"loss": 0.2078,
"step": 29
},
{
"epoch": 1.0348837209302326,
"grad_norm": 10.63458251953125,
"learning_rate": 1.5e-06,
"loss": 0.1961,
"step": 30
},
{
"epoch": 1.069767441860465,
"grad_norm": 3.821873188018799,
"learning_rate": 1.5500000000000002e-06,
"loss": 0.1604,
"step": 31
},
{
"epoch": 1.1046511627906976,
"grad_norm": 3.1125831604003906,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.1301,
"step": 32
},
{
"epoch": 1.1395348837209303,
"grad_norm": 2.536491870880127,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.1212,
"step": 33
},
{
"epoch": 1.1744186046511629,
"grad_norm": 2.1026816368103027,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.099,
"step": 34
},
{
"epoch": 1.2093023255813953,
"grad_norm": 1.6219357252120972,
"learning_rate": 1.75e-06,
"loss": 0.0922,
"step": 35
},
{
"epoch": 1.244186046511628,
"grad_norm": 1.2093520164489746,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.0825,
"step": 36
},
{
"epoch": 1.2790697674418605,
"grad_norm": 1.2154234647750854,
"learning_rate": 1.85e-06,
"loss": 0.0793,
"step": 37
},
{
"epoch": 1.3139534883720931,
"grad_norm": 1.2502310276031494,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.076,
"step": 38
},
{
"epoch": 1.3488372093023255,
"grad_norm": 1.066636085510254,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.0707,
"step": 39
},
{
"epoch": 1.3837209302325582,
"grad_norm": 0.8278390169143677,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0693,
"step": 40
},
{
"epoch": 1.4186046511627908,
"grad_norm": 0.9979249835014343,
"learning_rate": 2.05e-06,
"loss": 0.0628,
"step": 41
},
{
"epoch": 1.4534883720930232,
"grad_norm": 0.8339601755142212,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.0603,
"step": 42
},
{
"epoch": 1.4883720930232558,
"grad_norm": 0.5147156715393066,
"learning_rate": 2.15e-06,
"loss": 0.0568,
"step": 43
},
{
"epoch": 1.5232558139534884,
"grad_norm": 0.5488396883010864,
"learning_rate": 2.2e-06,
"loss": 0.0599,
"step": 44
},
{
"epoch": 1.558139534883721,
"grad_norm": 0.4702109694480896,
"learning_rate": 2.25e-06,
"loss": 0.0598,
"step": 45
},
{
"epoch": 1.5930232558139537,
"grad_norm": 0.7634468078613281,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.0569,
"step": 46
},
{
"epoch": 1.627906976744186,
"grad_norm": 0.6564306020736694,
"learning_rate": 2.35e-06,
"loss": 0.0634,
"step": 47
},
{
"epoch": 1.6627906976744184,
"grad_norm": 0.4530823230743408,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.0511,
"step": 48
},
{
"epoch": 1.697674418604651,
"grad_norm": 0.48178645968437195,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.0531,
"step": 49
},
{
"epoch": 1.7325581395348837,
"grad_norm": 0.4841754138469696,
"learning_rate": 2.5e-06,
"loss": 0.053,
"step": 50
},
{
"epoch": 1.7674418604651163,
"grad_norm": 0.46948689222335815,
"learning_rate": 2.55e-06,
"loss": 0.0479,
"step": 51
},
{
"epoch": 1.802325581395349,
"grad_norm": 0.4315750300884247,
"learning_rate": 2.6e-06,
"loss": 0.0499,
"step": 52
},
{
"epoch": 1.8372093023255816,
"grad_norm": 0.5043461322784424,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0511,
"step": 53
},
{
"epoch": 1.872093023255814,
"grad_norm": 0.43375664949417114,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0483,
"step": 54
},
{
"epoch": 1.9069767441860463,
"grad_norm": 0.3950367867946625,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0506,
"step": 55
},
{
"epoch": 1.941860465116279,
"grad_norm": 0.27337637543678284,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0538,
"step": 56
},
{
"epoch": 1.9767441860465116,
"grad_norm": 0.4384249448776245,
"learning_rate": 2.85e-06,
"loss": 0.048,
"step": 57
},
{
"epoch": 2.0,
"grad_norm": 0.6116827130317688,
"learning_rate": 2.9e-06,
"loss": 0.0447,
"step": 58
},
{
"epoch": 2.0348837209302326,
"grad_norm": 0.4055020213127136,
"learning_rate": 2.95e-06,
"loss": 0.0465,
"step": 59
},
{
"epoch": 2.0697674418604652,
"grad_norm": 0.40701112151145935,
"learning_rate": 3e-06,
"loss": 0.0488,
"step": 60
},
{
"epoch": 2.104651162790698,
"grad_norm": 0.48277875781059265,
"learning_rate": 3.05e-06,
"loss": 0.0469,
"step": 61
},
{
"epoch": 2.13953488372093,
"grad_norm": 0.48686444759368896,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.049,
"step": 62
},
{
"epoch": 2.1744186046511627,
"grad_norm": 0.4019283652305603,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0432,
"step": 63
},
{
"epoch": 2.2093023255813953,
"grad_norm": 0.3991710841655731,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0491,
"step": 64
},
{
"epoch": 2.244186046511628,
"grad_norm": 0.3730027377605438,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0425,
"step": 65
},
{
"epoch": 2.2790697674418605,
"grad_norm": 0.3493938744068146,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.044,
"step": 66
},
{
"epoch": 2.313953488372093,
"grad_norm": 0.25905123353004456,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0409,
"step": 67
},
{
"epoch": 2.3488372093023258,
"grad_norm": 0.3177049458026886,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0398,
"step": 68
},
{
"epoch": 2.383720930232558,
"grad_norm": 0.5045494437217712,
"learning_rate": 3.45e-06,
"loss": 0.0397,
"step": 69
},
{
"epoch": 2.4186046511627906,
"grad_norm": 0.36232468485832214,
"learning_rate": 3.5e-06,
"loss": 0.0441,
"step": 70
},
{
"epoch": 2.453488372093023,
"grad_norm": 0.396090567111969,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0417,
"step": 71
},
{
"epoch": 2.488372093023256,
"grad_norm": 0.4202136695384979,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0424,
"step": 72
},
{
"epoch": 2.5232558139534884,
"grad_norm": 0.3984088599681854,
"learning_rate": 3.65e-06,
"loss": 0.0428,
"step": 73
},
{
"epoch": 2.558139534883721,
"grad_norm": 0.4163932800292969,
"learning_rate": 3.7e-06,
"loss": 0.0384,
"step": 74
},
{
"epoch": 2.5930232558139537,
"grad_norm": 0.47908157110214233,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0366,
"step": 75
},
{
"epoch": 2.6279069767441863,
"grad_norm": 0.44930940866470337,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0371,
"step": 76
},
{
"epoch": 2.6627906976744184,
"grad_norm": 0.47626495361328125,
"learning_rate": 3.85e-06,
"loss": 0.0372,
"step": 77
},
{
"epoch": 2.697674418604651,
"grad_norm": 0.47038382291793823,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0402,
"step": 78
},
{
"epoch": 2.7325581395348837,
"grad_norm": 0.26434236764907837,
"learning_rate": 3.95e-06,
"loss": 0.0366,
"step": 79
},
{
"epoch": 2.7674418604651163,
"grad_norm": 0.234429270029068,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0364,
"step": 80
},
{
"epoch": 2.802325581395349,
"grad_norm": 0.3855961859226227,
"learning_rate": 4.05e-06,
"loss": 0.0406,
"step": 81
},
{
"epoch": 2.8372093023255816,
"grad_norm": 0.4205980598926544,
"learning_rate": 4.1e-06,
"loss": 0.0372,
"step": 82
},
{
"epoch": 2.8720930232558137,
"grad_norm": 0.3006373643875122,
"learning_rate": 4.15e-06,
"loss": 0.0388,
"step": 83
},
{
"epoch": 2.9069767441860463,
"grad_norm": 0.34969958662986755,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0323,
"step": 84
},
{
"epoch": 2.941860465116279,
"grad_norm": 0.4388487935066223,
"learning_rate": 4.25e-06,
"loss": 0.0319,
"step": 85
},
{
"epoch": 2.9767441860465116,
"grad_norm": 0.30945926904678345,
"learning_rate": 4.3e-06,
"loss": 0.031,
"step": 86
},
{
"epoch": 3.0,
"grad_norm": 0.4770514667034149,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0353,
"step": 87
},
{
"epoch": 3.0348837209302326,
"grad_norm": 0.35560059547424316,
"learning_rate": 4.4e-06,
"loss": 0.0296,
"step": 88
},
{
"epoch": 3.0697674418604652,
"grad_norm": 0.3195272386074066,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0331,
"step": 89
},
{
"epoch": 3.104651162790698,
"grad_norm": 0.2900415360927582,
"learning_rate": 4.5e-06,
"loss": 0.0266,
"step": 90
},
{
"epoch": 3.13953488372093,
"grad_norm": 0.28650081157684326,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0264,
"step": 91
},
{
"epoch": 3.1744186046511627,
"grad_norm": 0.41960638761520386,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0206,
"step": 92
},
{
"epoch": 3.2093023255813953,
"grad_norm": 0.3138435184955597,
"learning_rate": 4.65e-06,
"loss": 0.025,
"step": 93
},
{
"epoch": 3.244186046511628,
"grad_norm": 0.28306272625923157,
"learning_rate": 4.7e-06,
"loss": 0.0254,
"step": 94
},
{
"epoch": 3.2790697674418605,
"grad_norm": 0.31455615162849426,
"learning_rate": 4.75e-06,
"loss": 0.0278,
"step": 95
},
{
"epoch": 3.313953488372093,
"grad_norm": 0.3875473141670227,
"learning_rate": 4.800000000000001e-06,
"loss": 0.0216,
"step": 96
},
{
"epoch": 3.3488372093023258,
"grad_norm": 0.31162887811660767,
"learning_rate": 4.85e-06,
"loss": 0.0222,
"step": 97
},
{
"epoch": 3.383720930232558,
"grad_norm": 0.325513631105423,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0207,
"step": 98
},
{
"epoch": 3.4186046511627906,
"grad_norm": 0.27081894874572754,
"learning_rate": 4.95e-06,
"loss": 0.022,
"step": 99
},
{
"epoch": 3.453488372093023,
"grad_norm": 0.22119346261024475,
"learning_rate": 5e-06,
"loss": 0.0199,
"step": 100
},
{
"epoch": 3.488372093023256,
"grad_norm": 0.25160327553749084,
"learning_rate": 4.997332437005932e-06,
"loss": 0.0203,
"step": 101
},
{
"epoch": 3.5232558139534884,
"grad_norm": 0.2517716884613037,
"learning_rate": 4.989335440737587e-06,
"loss": 0.0187,
"step": 102
},
{
"epoch": 3.558139534883721,
"grad_norm": 0.2127666473388672,
"learning_rate": 4.976026077188013e-06,
"loss": 0.0159,
"step": 103
},
{
"epoch": 3.5930232558139537,
"grad_norm": 0.3233763873577118,
"learning_rate": 4.957432749209755e-06,
"loss": 0.0163,
"step": 104
},
{
"epoch": 3.6279069767441863,
"grad_norm": 0.28826045989990234,
"learning_rate": 4.933595135901733e-06,
"loss": 0.017,
"step": 105
},
{
"epoch": 3.6627906976744184,
"grad_norm": 0.24980659782886505,
"learning_rate": 4.904564107932048e-06,
"loss": 0.0142,
"step": 106
},
{
"epoch": 3.697674418604651,
"grad_norm": 0.2520740330219269,
"learning_rate": 4.870401618977415e-06,
"loss": 0.0163,
"step": 107
},
{
"epoch": 3.7325581395348837,
"grad_norm": 0.23068122565746307,
"learning_rate": 4.83118057351089e-06,
"loss": 0.0132,
"step": 108
},
{
"epoch": 3.7674418604651163,
"grad_norm": 0.3244348168373108,
"learning_rate": 4.786984671220053e-06,
"loss": 0.0177,
"step": 109
},
{
"epoch": 3.802325581395349,
"grad_norm": 0.1820863038301468,
"learning_rate": 4.737908228387656e-06,
"loss": 0.0108,
"step": 110
},
{
"epoch": 3.8372093023255816,
"grad_norm": 0.25291499495506287,
"learning_rate": 4.684055976615924e-06,
"loss": 0.0115,
"step": 111
},
{
"epoch": 3.8720930232558137,
"grad_norm": 0.23526118695735931,
"learning_rate": 4.625542839324036e-06,
"loss": 0.011,
"step": 112
}
],
"logging_steps": 1,
"max_steps": 168,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 28,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.521111556499177e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}