{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.173964149280047,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023508668821627974,
"grad_norm": 84.37700653076172,
"learning_rate": 7.031250000000001e-06,
"loss": 115.85,
"step": 10
},
{
"epoch": 0.04701733764325595,
"grad_norm": 106.4520034790039,
"learning_rate": 1.484375e-05,
"loss": 116.5781,
"step": 20
},
{
"epoch": 0.07052600646488393,
"grad_norm": 130.89254760742188,
"learning_rate": 2.2656250000000002e-05,
"loss": 103.3281,
"step": 30
},
{
"epoch": 0.0940346752865119,
"grad_norm": 37.089256286621094,
"learning_rate": 3.0468750000000002e-05,
"loss": 60.0406,
"step": 40
},
{
"epoch": 0.11754334410813988,
"grad_norm": 44.051109313964844,
"learning_rate": 3.828125e-05,
"loss": 39.75,
"step": 50
},
{
"epoch": 0.14105201292976785,
"grad_norm": 43.561981201171875,
"learning_rate": 4.609375e-05,
"loss": 24.7984,
"step": 60
},
{
"epoch": 0.16456068175139582,
"grad_norm": 34.622520446777344,
"learning_rate": 5.3906250000000006e-05,
"loss": 11.9723,
"step": 70
},
{
"epoch": 0.1880693505730238,
"grad_norm": 22.26849937438965,
"learning_rate": 6.171875e-05,
"loss": 5.4578,
"step": 80
},
{
"epoch": 0.2115780193946518,
"grad_norm": 9.059814453125,
"learning_rate": 6.953125e-05,
"loss": 3.992,
"step": 90
},
{
"epoch": 0.23508668821627976,
"grad_norm": 4.332883834838867,
"learning_rate": 7.734375e-05,
"loss": 3.1058,
"step": 100
},
{
"epoch": 0.2585953570379077,
"grad_norm": 6.844908237457275,
"learning_rate": 8.515625e-05,
"loss": 2.6654,
"step": 110
},
{
"epoch": 0.2821040258595357,
"grad_norm": 4.539117813110352,
"learning_rate": 9.296875e-05,
"loss": 2.422,
"step": 120
},
{
"epoch": 0.3056126946811637,
"grad_norm": 3.098025321960449,
"learning_rate": 9.999981342914437e-05,
"loss": 2.1623,
"step": 130
},
{
"epoch": 0.32912136350279164,
"grad_norm": 1.3452321290969849,
"learning_rate": 9.997742661115932e-05,
"loss": 2.1429,
"step": 140
},
{
"epoch": 0.35263003232441964,
"grad_norm": 0.37226417660713196,
"learning_rate": 9.991774476447404e-05,
"loss": 2.1347,
"step": 150
},
{
"epoch": 0.3761387011460476,
"grad_norm": 3.523218870162964,
"learning_rate": 9.982081242591919e-05,
"loss": 2.0512,
"step": 160
},
{
"epoch": 0.3996473699676756,
"grad_norm": 0.33414289355278015,
"learning_rate": 9.968670193003843e-05,
"loss": 2.047,
"step": 170
},
{
"epoch": 0.4231560387893036,
"grad_norm": 0.24106919765472412,
"learning_rate": 9.951551335510978e-05,
"loss": 2.1046,
"step": 180
},
{
"epoch": 0.4466647076109315,
"grad_norm": 0.328752726316452,
"learning_rate": 9.930737444846331e-05,
"loss": 2.1397,
"step": 190
},
{
"epoch": 0.4701733764325595,
"grad_norm": 1.1409285068511963,
"learning_rate": 9.906244053115143e-05,
"loss": 2.1377,
"step": 200
},
{
"epoch": 0.49368204525418746,
"grad_norm": 0.0690290704369545,
"learning_rate": 9.87808943820424e-05,
"loss": 2.0992,
"step": 210
},
{
"epoch": 0.5171907140758154,
"grad_norm": 0.20527280867099762,
"learning_rate": 9.846294610142398e-05,
"loss": 2.1339,
"step": 220
},
{
"epoch": 0.5406993828974435,
"grad_norm": 0.3991909623146057,
"learning_rate": 9.810883295421864e-05,
"loss": 2.1176,
"step": 230
},
{
"epoch": 0.5642080517190714,
"grad_norm": 0.08950258791446686,
"learning_rate": 9.771881919292765e-05,
"loss": 2.0895,
"step": 240
},
{
"epoch": 0.5877167205406993,
"grad_norm": 0.17848969995975494,
"learning_rate": 9.729319586043591e-05,
"loss": 2.0877,
"step": 250
},
{
"epoch": 0.6112253893623274,
"grad_norm": 0.30160897970199585,
"learning_rate": 9.683228057282483e-05,
"loss": 2.0648,
"step": 260
},
{
"epoch": 0.6347340581839553,
"grad_norm": 0.11883804202079773,
"learning_rate": 9.63364172823554e-05,
"loss": 2.0591,
"step": 270
},
{
"epoch": 0.6582427270055833,
"grad_norm": 0.1619461327791214,
"learning_rate": 9.580597602079802e-05,
"loss": 2.1386,
"step": 280
},
{
"epoch": 0.6817513958272113,
"grad_norm": 0.11473017930984497,
"learning_rate": 9.524135262330098e-05,
"loss": 2.1028,
"step": 290
},
{
"epoch": 0.7052600646488393,
"grad_norm": 0.059143248945474625,
"learning_rate": 9.464296843300342e-05,
"loss": 2.0881,
"step": 300
},
{
"epoch": 0.7287687334704672,
"grad_norm": 0.1624162495136261,
"learning_rate": 9.401126998661328e-05,
"loss": 2.0971,
"step": 310
},
{
"epoch": 0.7522774022920952,
"grad_norm": 0.15460653603076935,
"learning_rate": 9.334672868118491e-05,
"loss": 2.1154,
"step": 320
},
{
"epoch": 0.7757860711137232,
"grad_norm": 0.4213317334651947,
"learning_rate": 9.26498404223449e-05,
"loss": 2.1113,
"step": 330
},
{
"epoch": 0.7992947399353512,
"grad_norm": 0.14529550075531006,
"learning_rate": 9.192112525422868e-05,
"loss": 2.0867,
"step": 340
},
{
"epoch": 0.8228034087569791,
"grad_norm": 0.06652400642633438,
"learning_rate": 9.116112697140418e-05,
"loss": 2.0807,
"step": 350
},
{
"epoch": 0.8463120775786072,
"grad_norm": 0.12574820220470428,
"learning_rate": 9.037041271307188e-05,
"loss": 2.1711,
"step": 360
},
{
"epoch": 0.8698207464002351,
"grad_norm": 0.10869150608778,
"learning_rate": 8.954957253984426e-05,
"loss": 2.1252,
"step": 370
},
{
"epoch": 0.893329415221863,
"grad_norm": 0.143524169921875,
"learning_rate": 8.869921899342056e-05,
"loss": 2.0468,
"step": 380
},
{
"epoch": 0.916838084043491,
"grad_norm": 0.27682727575302124,
"learning_rate": 8.781998663948513e-05,
"loss": 2.1023,
"step": 390
},
{
"epoch": 0.940346752865119,
"grad_norm": 0.08671136200428009,
"learning_rate": 8.691253159417074e-05,
"loss": 2.0991,
"step": 400
},
{
"epoch": 0.963855421686747,
"grad_norm": 0.14743109047412872,
"learning_rate": 8.597753103444016e-05,
"loss": 2.0858,
"step": 410
},
{
"epoch": 0.9873640905083749,
"grad_norm": 0.14369799196720123,
"learning_rate": 8.501568269275126e-05,
"loss": 2.1057,
"step": 420
},
{
"epoch": 1.0094034675286512,
"grad_norm": 0.09220755100250244,
"learning_rate": 8.40277043363831e-05,
"loss": 1.9405,
"step": 430
},
{
"epoch": 1.0329121363502791,
"grad_norm": 0.25697803497314453,
"learning_rate": 8.301433323181076e-05,
"loss": 2.1221,
"step": 440
},
{
"epoch": 1.056420805171907,
"grad_norm": 0.1321459412574768,
"learning_rate": 8.19763255945298e-05,
"loss": 2.1576,
"step": 450
},
{
"epoch": 1.079929473993535,
"grad_norm": 0.11310122162103653,
"learning_rate": 8.091445602473972e-05,
"loss": 2.0483,
"step": 460
},
{
"epoch": 1.1034381428151632,
"grad_norm": 0.08170254528522491,
"learning_rate": 7.982951692930829e-05,
"loss": 2.1367,
"step": 470
},
{
"epoch": 1.1269468116367911,
"grad_norm": 0.022492246702313423,
"learning_rate": 7.87223179304479e-05,
"loss": 2.1435,
"step": 480
},
{
"epoch": 1.150455480458419,
"grad_norm": 0.015893638134002686,
"learning_rate": 7.759368526154509e-05,
"loss": 2.0666,
"step": 490
},
{
"epoch": 1.173964149280047,
"grad_norm": 0.053727954626083374,
"learning_rate": 7.644446115059425e-05,
"loss": 2.0886,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 1278,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.002988656862822e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}