{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.994385527136619,
"eval_steps": 500,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.024953212726138492,
"grad_norm": 8.501412658362785,
"learning_rate": 5e-06,
"loss": 0.9043,
"step": 10
},
{
"epoch": 0.049906425452276984,
"grad_norm": 4.686127489377169,
"learning_rate": 5e-06,
"loss": 0.7713,
"step": 20
},
{
"epoch": 0.07485963817841547,
"grad_norm": 1.065750712888582,
"learning_rate": 5e-06,
"loss": 0.7261,
"step": 30
},
{
"epoch": 0.09981285090455397,
"grad_norm": 1.1573725910946995,
"learning_rate": 5e-06,
"loss": 0.6939,
"step": 40
},
{
"epoch": 0.12476606363069245,
"grad_norm": 1.0825665225776888,
"learning_rate": 5e-06,
"loss": 0.6773,
"step": 50
},
{
"epoch": 0.14971927635683094,
"grad_norm": 0.8282989762012689,
"learning_rate": 5e-06,
"loss": 0.6551,
"step": 60
},
{
"epoch": 0.17467248908296942,
"grad_norm": 0.8431312859314775,
"learning_rate": 5e-06,
"loss": 0.6458,
"step": 70
},
{
"epoch": 0.19962570180910794,
"grad_norm": 0.7090142603822943,
"learning_rate": 5e-06,
"loss": 0.6379,
"step": 80
},
{
"epoch": 0.22457891453524642,
"grad_norm": 0.6113443247263295,
"learning_rate": 5e-06,
"loss": 0.6258,
"step": 90
},
{
"epoch": 0.2495321272613849,
"grad_norm": 0.5723628494735549,
"learning_rate": 5e-06,
"loss": 0.6294,
"step": 100
},
{
"epoch": 0.2744853399875234,
"grad_norm": 0.5986798385585294,
"learning_rate": 5e-06,
"loss": 0.616,
"step": 110
},
{
"epoch": 0.2994385527136619,
"grad_norm": 0.6703694452116971,
"learning_rate": 5e-06,
"loss": 0.61,
"step": 120
},
{
"epoch": 0.32439176543980036,
"grad_norm": 0.48278079529549894,
"learning_rate": 5e-06,
"loss": 0.6088,
"step": 130
},
{
"epoch": 0.34934497816593885,
"grad_norm": 0.5383252054302604,
"learning_rate": 5e-06,
"loss": 0.6097,
"step": 140
},
{
"epoch": 0.37429819089207733,
"grad_norm": 0.5904470071991698,
"learning_rate": 5e-06,
"loss": 0.6053,
"step": 150
},
{
"epoch": 0.39925140361821587,
"grad_norm": 0.5818363085286041,
"learning_rate": 5e-06,
"loss": 0.6014,
"step": 160
},
{
"epoch": 0.42420461634435436,
"grad_norm": 0.6764973062341755,
"learning_rate": 5e-06,
"loss": 0.5993,
"step": 170
},
{
"epoch": 0.44915782907049284,
"grad_norm": 0.6999555821582358,
"learning_rate": 5e-06,
"loss": 0.5991,
"step": 180
},
{
"epoch": 0.4741110417966313,
"grad_norm": 0.7880847284498282,
"learning_rate": 5e-06,
"loss": 0.5961,
"step": 190
},
{
"epoch": 0.4990642545227698,
"grad_norm": 0.7979122334833642,
"learning_rate": 5e-06,
"loss": 0.5916,
"step": 200
},
{
"epoch": 0.5240174672489083,
"grad_norm": 0.5793813415501323,
"learning_rate": 5e-06,
"loss": 0.5921,
"step": 210
},
{
"epoch": 0.5489706799750468,
"grad_norm": 0.5540868891147879,
"learning_rate": 5e-06,
"loss": 0.5806,
"step": 220
},
{
"epoch": 0.5739238927011853,
"grad_norm": 0.5338207990715906,
"learning_rate": 5e-06,
"loss": 0.5851,
"step": 230
},
{
"epoch": 0.5988771054273238,
"grad_norm": 0.5622641527094587,
"learning_rate": 5e-06,
"loss": 0.5848,
"step": 240
},
{
"epoch": 0.6238303181534622,
"grad_norm": 0.5384076976816572,
"learning_rate": 5e-06,
"loss": 0.5791,
"step": 250
},
{
"epoch": 0.6487835308796007,
"grad_norm": 0.6033419963307188,
"learning_rate": 5e-06,
"loss": 0.5842,
"step": 260
},
{
"epoch": 0.6737367436057392,
"grad_norm": 0.5342190421176904,
"learning_rate": 5e-06,
"loss": 0.5802,
"step": 270
},
{
"epoch": 0.6986899563318777,
"grad_norm": 0.5282395994026974,
"learning_rate": 5e-06,
"loss": 0.5769,
"step": 280
},
{
"epoch": 0.7236431690580162,
"grad_norm": 0.6039215573643518,
"learning_rate": 5e-06,
"loss": 0.5811,
"step": 290
},
{
"epoch": 0.7485963817841547,
"grad_norm": 0.5625470869148316,
"learning_rate": 5e-06,
"loss": 0.5748,
"step": 300
},
{
"epoch": 0.7735495945102931,
"grad_norm": 0.6938197396793522,
"learning_rate": 5e-06,
"loss": 0.576,
"step": 310
},
{
"epoch": 0.7985028072364317,
"grad_norm": 0.5199860087817598,
"learning_rate": 5e-06,
"loss": 0.5733,
"step": 320
},
{
"epoch": 0.8234560199625702,
"grad_norm": 0.5347102211468582,
"learning_rate": 5e-06,
"loss": 0.5713,
"step": 330
},
{
"epoch": 0.8484092326887087,
"grad_norm": 0.49920752644836863,
"learning_rate": 5e-06,
"loss": 0.5747,
"step": 340
},
{
"epoch": 0.8733624454148472,
"grad_norm": 0.5742360043218735,
"learning_rate": 5e-06,
"loss": 0.5672,
"step": 350
},
{
"epoch": 0.8983156581409857,
"grad_norm": 0.6082105330996729,
"learning_rate": 5e-06,
"loss": 0.5648,
"step": 360
},
{
"epoch": 0.9232688708671242,
"grad_norm": 0.5296253264006645,
"learning_rate": 5e-06,
"loss": 0.5701,
"step": 370
},
{
"epoch": 0.9482220835932627,
"grad_norm": 0.5862019120409135,
"learning_rate": 5e-06,
"loss": 0.5693,
"step": 380
},
{
"epoch": 0.9731752963194011,
"grad_norm": 0.5004204006192343,
"learning_rate": 5e-06,
"loss": 0.5667,
"step": 390
},
{
"epoch": 0.9981285090455396,
"grad_norm": 0.5111639493787322,
"learning_rate": 5e-06,
"loss": 0.5574,
"step": 400
},
{
"epoch": 0.9981285090455396,
"eval_loss": 0.5630170702934265,
"eval_runtime": 136.6299,
"eval_samples_per_second": 79.002,
"eval_steps_per_second": 0.622,
"step": 400
},
{
"epoch": 1.023081721771678,
"grad_norm": 0.5280851311810567,
"learning_rate": 5e-06,
"loss": 0.5296,
"step": 410
},
{
"epoch": 1.0480349344978166,
"grad_norm": 0.8751970650854215,
"learning_rate": 5e-06,
"loss": 0.5324,
"step": 420
},
{
"epoch": 1.072988147223955,
"grad_norm": 0.5756582314387694,
"learning_rate": 5e-06,
"loss": 0.5245,
"step": 430
},
{
"epoch": 1.0979413599500936,
"grad_norm": 0.5619488808417233,
"learning_rate": 5e-06,
"loss": 0.5277,
"step": 440
},
{
"epoch": 1.122894572676232,
"grad_norm": 0.8057161437263283,
"learning_rate": 5e-06,
"loss": 0.5255,
"step": 450
},
{
"epoch": 1.1478477854023705,
"grad_norm": 0.581447349352516,
"learning_rate": 5e-06,
"loss": 0.5246,
"step": 460
},
{
"epoch": 1.172800998128509,
"grad_norm": 0.5907089920281934,
"learning_rate": 5e-06,
"loss": 0.5308,
"step": 470
},
{
"epoch": 1.1977542108546475,
"grad_norm": 0.527607335995899,
"learning_rate": 5e-06,
"loss": 0.5314,
"step": 480
},
{
"epoch": 1.222707423580786,
"grad_norm": 0.6625045440230309,
"learning_rate": 5e-06,
"loss": 0.5271,
"step": 490
},
{
"epoch": 1.2476606363069245,
"grad_norm": 0.49276396345156864,
"learning_rate": 5e-06,
"loss": 0.5307,
"step": 500
},
{
"epoch": 1.272613849033063,
"grad_norm": 0.47964009848881595,
"learning_rate": 5e-06,
"loss": 0.525,
"step": 510
},
{
"epoch": 1.2975670617592014,
"grad_norm": 0.6903279013904727,
"learning_rate": 5e-06,
"loss": 0.5173,
"step": 520
},
{
"epoch": 1.32252027448534,
"grad_norm": 0.6235250926674746,
"learning_rate": 5e-06,
"loss": 0.5241,
"step": 530
},
{
"epoch": 1.3474734872114784,
"grad_norm": 0.7115742016246728,
"learning_rate": 5e-06,
"loss": 0.5209,
"step": 540
},
{
"epoch": 1.372426699937617,
"grad_norm": 0.5305573388431144,
"learning_rate": 5e-06,
"loss": 0.527,
"step": 550
},
{
"epoch": 1.3973799126637554,
"grad_norm": 0.5804195692188839,
"learning_rate": 5e-06,
"loss": 0.5253,
"step": 560
},
{
"epoch": 1.4223331253898939,
"grad_norm": 0.49733338644989056,
"learning_rate": 5e-06,
"loss": 0.523,
"step": 570
},
{
"epoch": 1.4472863381160324,
"grad_norm": 0.5635813062598766,
"learning_rate": 5e-06,
"loss": 0.518,
"step": 580
},
{
"epoch": 1.472239550842171,
"grad_norm": 0.5233795326173047,
"learning_rate": 5e-06,
"loss": 0.5184,
"step": 590
},
{
"epoch": 1.4971927635683095,
"grad_norm": 0.5340519491855418,
"learning_rate": 5e-06,
"loss": 0.5199,
"step": 600
},
{
"epoch": 1.522145976294448,
"grad_norm": 0.6382502305796666,
"learning_rate": 5e-06,
"loss": 0.5152,
"step": 610
},
{
"epoch": 1.5470991890205865,
"grad_norm": 0.6114232456070381,
"learning_rate": 5e-06,
"loss": 0.5202,
"step": 620
},
{
"epoch": 1.572052401746725,
"grad_norm": 0.5267522341441786,
"learning_rate": 5e-06,
"loss": 0.5194,
"step": 630
},
{
"epoch": 1.5970056144728635,
"grad_norm": 0.49682168460425896,
"learning_rate": 5e-06,
"loss": 0.5235,
"step": 640
},
{
"epoch": 1.621958827199002,
"grad_norm": 0.5492682660691606,
"learning_rate": 5e-06,
"loss": 0.5224,
"step": 650
},
{
"epoch": 1.6469120399251405,
"grad_norm": 0.5160022677697064,
"learning_rate": 5e-06,
"loss": 0.5136,
"step": 660
},
{
"epoch": 1.671865252651279,
"grad_norm": 0.5108996597287156,
"learning_rate": 5e-06,
"loss": 0.5143,
"step": 670
},
{
"epoch": 1.6968184653774174,
"grad_norm": 0.5398052585347178,
"learning_rate": 5e-06,
"loss": 0.5142,
"step": 680
},
{
"epoch": 1.721771678103556,
"grad_norm": 0.5061450862678053,
"learning_rate": 5e-06,
"loss": 0.5131,
"step": 690
},
{
"epoch": 1.7467248908296944,
"grad_norm": 0.5157055149054185,
"learning_rate": 5e-06,
"loss": 0.508,
"step": 700
},
{
"epoch": 1.7716781035558329,
"grad_norm": 0.5204430984353897,
"learning_rate": 5e-06,
"loss": 0.5175,
"step": 710
},
{
"epoch": 1.7966313162819714,
"grad_norm": 0.5357849253293278,
"learning_rate": 5e-06,
"loss": 0.5168,
"step": 720
},
{
"epoch": 1.8215845290081099,
"grad_norm": 0.7132216372119276,
"learning_rate": 5e-06,
"loss": 0.5192,
"step": 730
},
{
"epoch": 1.8465377417342483,
"grad_norm": 0.6769653937286553,
"learning_rate": 5e-06,
"loss": 0.515,
"step": 740
},
{
"epoch": 1.8714909544603868,
"grad_norm": 0.47758034322098186,
"learning_rate": 5e-06,
"loss": 0.5113,
"step": 750
},
{
"epoch": 1.8964441671865253,
"grad_norm": 0.45410954217292865,
"learning_rate": 5e-06,
"loss": 0.508,
"step": 760
},
{
"epoch": 1.9213973799126638,
"grad_norm": 0.5514628147384041,
"learning_rate": 5e-06,
"loss": 0.5111,
"step": 770
},
{
"epoch": 1.9463505926388023,
"grad_norm": 0.5437873076115038,
"learning_rate": 5e-06,
"loss": 0.515,
"step": 780
},
{
"epoch": 1.9713038053649408,
"grad_norm": 0.5047758942757364,
"learning_rate": 5e-06,
"loss": 0.509,
"step": 790
},
{
"epoch": 1.9962570180910792,
"grad_norm": 0.4712219343056804,
"learning_rate": 5e-06,
"loss": 0.5138,
"step": 800
},
{
"epoch": 1.9987523393636932,
"eval_loss": 0.5372687578201294,
"eval_runtime": 138.155,
"eval_samples_per_second": 78.13,
"eval_steps_per_second": 0.615,
"step": 801
},
{
"epoch": 2.0212102308172177,
"grad_norm": 0.5647461852845252,
"learning_rate": 5e-06,
"loss": 0.478,
"step": 810
},
{
"epoch": 2.046163443543356,
"grad_norm": 0.6651732696313954,
"learning_rate": 5e-06,
"loss": 0.4739,
"step": 820
},
{
"epoch": 2.0711166562694947,
"grad_norm": 0.6344890617063265,
"learning_rate": 5e-06,
"loss": 0.4742,
"step": 830
},
{
"epoch": 2.096069868995633,
"grad_norm": 0.5551443463065495,
"learning_rate": 5e-06,
"loss": 0.4757,
"step": 840
},
{
"epoch": 2.1210230817217717,
"grad_norm": 0.5821542029735364,
"learning_rate": 5e-06,
"loss": 0.4745,
"step": 850
},
{
"epoch": 2.14597629444791,
"grad_norm": 0.5036890967140374,
"learning_rate": 5e-06,
"loss": 0.4741,
"step": 860
},
{
"epoch": 2.1709295071740486,
"grad_norm": 0.526252717008255,
"learning_rate": 5e-06,
"loss": 0.4752,
"step": 870
},
{
"epoch": 2.195882719900187,
"grad_norm": 0.5161295092624307,
"learning_rate": 5e-06,
"loss": 0.4708,
"step": 880
},
{
"epoch": 2.2208359326263256,
"grad_norm": 0.5979999195074699,
"learning_rate": 5e-06,
"loss": 0.474,
"step": 890
},
{
"epoch": 2.245789145352464,
"grad_norm": 0.548385025393527,
"learning_rate": 5e-06,
"loss": 0.4725,
"step": 900
},
{
"epoch": 2.2707423580786026,
"grad_norm": 0.5786454120324753,
"learning_rate": 5e-06,
"loss": 0.4721,
"step": 910
},
{
"epoch": 2.295695570804741,
"grad_norm": 0.49033248942630286,
"learning_rate": 5e-06,
"loss": 0.4754,
"step": 920
},
{
"epoch": 2.3206487835308796,
"grad_norm": 0.578944877860593,
"learning_rate": 5e-06,
"loss": 0.4771,
"step": 930
},
{
"epoch": 2.345601996257018,
"grad_norm": 0.5444827802066028,
"learning_rate": 5e-06,
"loss": 0.478,
"step": 940
},
{
"epoch": 2.3705552089831565,
"grad_norm": 0.5738362287944658,
"learning_rate": 5e-06,
"loss": 0.4706,
"step": 950
},
{
"epoch": 2.395508421709295,
"grad_norm": 0.5497412052181665,
"learning_rate": 5e-06,
"loss": 0.4742,
"step": 960
},
{
"epoch": 2.4204616344354335,
"grad_norm": 0.6321664132907767,
"learning_rate": 5e-06,
"loss": 0.4745,
"step": 970
},
{
"epoch": 2.445414847161572,
"grad_norm": 0.5155397917374155,
"learning_rate": 5e-06,
"loss": 0.4714,
"step": 980
},
{
"epoch": 2.4703680598877105,
"grad_norm": 0.48000015580082134,
"learning_rate": 5e-06,
"loss": 0.4737,
"step": 990
},
{
"epoch": 2.495321272613849,
"grad_norm": 0.5646370042066121,
"learning_rate": 5e-06,
"loss": 0.4737,
"step": 1000
},
{
"epoch": 2.5202744853399874,
"grad_norm": 0.5488929805385689,
"learning_rate": 5e-06,
"loss": 0.479,
"step": 1010
},
{
"epoch": 2.545227698066126,
"grad_norm": 0.5690240798527287,
"learning_rate": 5e-06,
"loss": 0.4735,
"step": 1020
},
{
"epoch": 2.5701809107922644,
"grad_norm": 0.5241263467703002,
"learning_rate": 5e-06,
"loss": 0.4759,
"step": 1030
},
{
"epoch": 2.595134123518403,
"grad_norm": 0.4966621491972494,
"learning_rate": 5e-06,
"loss": 0.4741,
"step": 1040
},
{
"epoch": 2.6200873362445414,
"grad_norm": 0.6751968467864414,
"learning_rate": 5e-06,
"loss": 0.4798,
"step": 1050
},
{
"epoch": 2.64504054897068,
"grad_norm": 0.6383918524471734,
"learning_rate": 5e-06,
"loss": 0.4754,
"step": 1060
},
{
"epoch": 2.6699937616968183,
"grad_norm": 0.5256722819070684,
"learning_rate": 5e-06,
"loss": 0.4726,
"step": 1070
},
{
"epoch": 2.694946974422957,
"grad_norm": 0.5129557694221921,
"learning_rate": 5e-06,
"loss": 0.4738,
"step": 1080
},
{
"epoch": 2.7199001871490953,
"grad_norm": 0.49845285895513675,
"learning_rate": 5e-06,
"loss": 0.4788,
"step": 1090
},
{
"epoch": 2.744853399875234,
"grad_norm": 0.6463893459607853,
"learning_rate": 5e-06,
"loss": 0.4772,
"step": 1100
},
{
"epoch": 2.7698066126013723,
"grad_norm": 0.5326072861407999,
"learning_rate": 5e-06,
"loss": 0.4756,
"step": 1110
},
{
"epoch": 2.7947598253275108,
"grad_norm": 0.5085270847721544,
"learning_rate": 5e-06,
"loss": 0.4741,
"step": 1120
},
{
"epoch": 2.8197130380536493,
"grad_norm": 0.5495621538149744,
"learning_rate": 5e-06,
"loss": 0.4745,
"step": 1130
},
{
"epoch": 2.8446662507797877,
"grad_norm": 0.48958337112517436,
"learning_rate": 5e-06,
"loss": 0.4715,
"step": 1140
},
{
"epoch": 2.8696194635059262,
"grad_norm": 0.5857338163975798,
"learning_rate": 5e-06,
"loss": 0.4747,
"step": 1150
},
{
"epoch": 2.8945726762320647,
"grad_norm": 0.48260180409447406,
"learning_rate": 5e-06,
"loss": 0.4715,
"step": 1160
},
{
"epoch": 2.919525888958203,
"grad_norm": 0.5058713259660398,
"learning_rate": 5e-06,
"loss": 0.4722,
"step": 1170
},
{
"epoch": 2.944479101684342,
"grad_norm": 0.544953115514363,
"learning_rate": 5e-06,
"loss": 0.4765,
"step": 1180
},
{
"epoch": 2.96943231441048,
"grad_norm": 0.4680283080397261,
"learning_rate": 5e-06,
"loss": 0.4756,
"step": 1190
},
{
"epoch": 2.994385527136619,
"grad_norm": 0.52273100015014,
"learning_rate": 5e-06,
"loss": 0.4712,
"step": 1200
},
{
"epoch": 2.994385527136619,
"eval_loss": 0.5305783748626709,
"eval_runtime": 137.7646,
"eval_samples_per_second": 78.351,
"eval_steps_per_second": 0.617,
"step": 1200
},
{
"epoch": 2.994385527136619,
"step": 1200,
"total_flos": 2009625935216640.0,
"train_loss": 0.5358249799410502,
"train_runtime": 20182.0753,
"train_samples_per_second": 30.484,
"train_steps_per_second": 0.059
}
],
"logging_steps": 10,
"max_steps": 1200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2009625935216640.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}