{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.670148318362231,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016711928138709004,
"grad_norm": 0.5531014800071716,
"learning_rate": 5e-06,
"loss": 0.9993,
"step": 10
},
{
"epoch": 0.03342385627741801,
"grad_norm": 0.5198462009429932,
"learning_rate": 1.0555555555555555e-05,
"loss": 0.9973,
"step": 20
},
{
"epoch": 0.05013578441612701,
"grad_norm": 0.9699999094009399,
"learning_rate": 1.6111111111111115e-05,
"loss": 0.9474,
"step": 30
},
{
"epoch": 0.06684771255483601,
"grad_norm": 0.3459213078022003,
"learning_rate": 2.1666666666666667e-05,
"loss": 0.7622,
"step": 40
},
{
"epoch": 0.08355964069354502,
"grad_norm": 0.24512118101119995,
"learning_rate": 2.7222222222222223e-05,
"loss": 0.7086,
"step": 50
},
{
"epoch": 0.10027156883225402,
"grad_norm": 0.21762403845787048,
"learning_rate": 3.277777777777778e-05,
"loss": 0.6895,
"step": 60
},
{
"epoch": 0.11698349697096302,
"grad_norm": 0.22329938411712646,
"learning_rate": 3.8333333333333334e-05,
"loss": 0.6491,
"step": 70
},
{
"epoch": 0.13369542510967203,
"grad_norm": 0.19780907034873962,
"learning_rate": 4.388888888888889e-05,
"loss": 0.6225,
"step": 80
},
{
"epoch": 0.15040735324838103,
"grad_norm": 0.17309053242206573,
"learning_rate": 4.9444444444444446e-05,
"loss": 0.6026,
"step": 90
},
{
"epoch": 0.16711928138709004,
"grad_norm": 0.20399844646453857,
"learning_rate": 5.500000000000001e-05,
"loss": 0.5435,
"step": 100
},
{
"epoch": 0.18383120952579904,
"grad_norm": 0.21713724732398987,
"learning_rate": 6.055555555555555e-05,
"loss": 0.581,
"step": 110
},
{
"epoch": 0.20054313766450804,
"grad_norm": 0.24566805362701416,
"learning_rate": 6.611111111111111e-05,
"loss": 0.5203,
"step": 120
},
{
"epoch": 0.21725506580321705,
"grad_norm": 0.26538822054862976,
"learning_rate": 7.166666666666667e-05,
"loss": 0.5975,
"step": 130
},
{
"epoch": 0.23396699394192605,
"grad_norm": 0.24092447757720947,
"learning_rate": 7.722222222222223e-05,
"loss": 0.5627,
"step": 140
},
{
"epoch": 0.2506789220806351,
"grad_norm": 0.24146470427513123,
"learning_rate": 8.277777777777778e-05,
"loss": 0.5289,
"step": 150
},
{
"epoch": 0.26739085021934406,
"grad_norm": 0.8219459652900696,
"learning_rate": 8.833333333333333e-05,
"loss": 0.5347,
"step": 160
},
{
"epoch": 0.2841027783580531,
"grad_norm": 0.2518085241317749,
"learning_rate": 9.388888888888889e-05,
"loss": 0.5491,
"step": 170
},
{
"epoch": 0.30081470649676206,
"grad_norm": 0.2775290012359619,
"learning_rate": 9.944444444444446e-05,
"loss": 0.5701,
"step": 180
},
{
"epoch": 0.3175266346354711,
"grad_norm": 0.2761514186859131,
"learning_rate": 9.999235647539953e-05,
"loss": 0.5489,
"step": 190
},
{
"epoch": 0.33423856277418007,
"grad_norm": 0.32875457406044006,
"learning_rate": 9.996593741531468e-05,
"loss": 0.5374,
"step": 200
},
{
"epoch": 0.3509504909128891,
"grad_norm": 0.2761303186416626,
"learning_rate": 9.992065842489567e-05,
"loss": 0.4916,
"step": 210
},
{
"epoch": 0.3676624190515981,
"grad_norm": 0.2781451940536499,
"learning_rate": 9.985653659495773e-05,
"loss": 0.5826,
"step": 220
},
{
"epoch": 0.3843743471903071,
"grad_norm": 0.32615798711776733,
"learning_rate": 9.977359612865423e-05,
"loss": 0.532,
"step": 230
},
{
"epoch": 0.4010862753290161,
"grad_norm": 0.2824324667453766,
"learning_rate": 9.967186833234101e-05,
"loss": 0.5482,
"step": 240
},
{
"epoch": 0.4177982034677251,
"grad_norm": 0.2630137503147125,
"learning_rate": 9.955139160375959e-05,
"loss": 0.4917,
"step": 250
},
{
"epoch": 0.4345101316064341,
"grad_norm": 0.3076798915863037,
"learning_rate": 9.941221141754385e-05,
"loss": 0.5285,
"step": 260
},
{
"epoch": 0.4512220597451431,
"grad_norm": 0.3244406282901764,
"learning_rate": 9.925438030805518e-05,
"loss": 0.5178,
"step": 270
},
{
"epoch": 0.4679339878838521,
"grad_norm": 0.3224153220653534,
"learning_rate": 9.907795784955327e-05,
"loss": 0.5184,
"step": 280
},
{
"epoch": 0.48464591602256113,
"grad_norm": 0.3541305959224701,
"learning_rate": 9.888301063370934e-05,
"loss": 0.5327,
"step": 290
},
{
"epoch": 0.5013578441612702,
"grad_norm": 0.28691530227661133,
"learning_rate": 9.866961224447075e-05,
"loss": 0.5461,
"step": 300
},
{
"epoch": 0.5180697722999791,
"grad_norm": 0.4132674038410187,
"learning_rate": 9.843784323028638e-05,
"loss": 0.5242,
"step": 310
},
{
"epoch": 0.5347817004386881,
"grad_norm": 0.33508822321891785,
"learning_rate": 9.818779107370309e-05,
"loss": 0.5285,
"step": 320
},
{
"epoch": 0.5514936285773971,
"grad_norm": 0.2662738561630249,
"learning_rate": 9.791955015834492e-05,
"loss": 0.5287,
"step": 330
},
{
"epoch": 0.5682055567161062,
"grad_norm": 0.30024951696395874,
"learning_rate": 9.763322173328753e-05,
"loss": 0.5329,
"step": 340
},
{
"epoch": 0.5849174848548151,
"grad_norm": 0.27861520648002625,
"learning_rate": 9.732891387484104e-05,
"loss": 0.4656,
"step": 350
},
{
"epoch": 0.6016294129935241,
"grad_norm": 0.33017635345458984,
"learning_rate": 9.700674144575614e-05,
"loss": 0.5334,
"step": 360
},
{
"epoch": 0.6183413411322332,
"grad_norm": 0.3784632384777069,
"learning_rate": 9.666682605186835e-05,
"loss": 0.4669,
"step": 370
},
{
"epoch": 0.6350532692709422,
"grad_norm": 0.3362145721912384,
"learning_rate": 9.63092959961973e-05,
"loss": 0.4938,
"step": 380
},
{
"epoch": 0.6517651974096511,
"grad_norm": 0.2863125801086426,
"learning_rate": 9.593428623051792e-05,
"loss": 0.4815,
"step": 390
},
{
"epoch": 0.6684771255483601,
"grad_norm": 0.23034977912902832,
"learning_rate": 9.554193830442229e-05,
"loss": 0.5249,
"step": 400
},
{
"epoch": 0.6851890536870692,
"grad_norm": 0.29409608244895935,
"learning_rate": 9.513240031189067e-05,
"loss": 0.4899,
"step": 410
},
{
"epoch": 0.7019009818257782,
"grad_norm": 0.3073919415473938,
"learning_rate": 9.470582683539285e-05,
"loss": 0.5115,
"step": 420
},
{
"epoch": 0.7186129099644871,
"grad_norm": 0.33221715688705444,
"learning_rate": 9.42623788875399e-05,
"loss": 0.5021,
"step": 430
},
{
"epoch": 0.7353248381031962,
"grad_norm": 0.24137817323207855,
"learning_rate": 9.380222385030915e-05,
"loss": 0.4757,
"step": 440
},
{
"epoch": 0.7520367662419052,
"grad_norm": 0.28623566031455994,
"learning_rate": 9.332553541186485e-05,
"loss": 0.515,
"step": 450
},
{
"epoch": 0.7687486943806142,
"grad_norm": 0.30202344059944153,
"learning_rate": 9.283249350099859e-05,
"loss": 0.5259,
"step": 460
},
{
"epoch": 0.7854606225193231,
"grad_norm": 0.264213502407074,
"learning_rate": 9.23232842192142e-05,
"loss": 0.5224,
"step": 470
},
{
"epoch": 0.8021725506580322,
"grad_norm": 0.2509612739086151,
"learning_rate": 9.179809977048248e-05,
"loss": 0.5318,
"step": 480
},
{
"epoch": 0.8188844787967412,
"grad_norm": 0.28840652108192444,
"learning_rate": 9.125713838869299e-05,
"loss": 0.5287,
"step": 490
},
{
"epoch": 0.8355964069354502,
"grad_norm": 0.30437350273132324,
"learning_rate": 9.070060426282925e-05,
"loss": 0.5256,
"step": 500
},
{
"epoch": 0.8523083350741592,
"grad_norm": 0.3300555646419525,
"learning_rate": 9.012870745989663e-05,
"loss": 0.4876,
"step": 510
},
{
"epoch": 0.8690202632128682,
"grad_norm": 0.27293577790260315,
"learning_rate": 8.954166384563127e-05,
"loss": 0.5136,
"step": 520
},
{
"epoch": 0.8857321913515772,
"grad_norm": 0.29159626364707947,
"learning_rate": 8.893969500302031e-05,
"loss": 0.5224,
"step": 530
},
{
"epoch": 0.9024441194902862,
"grad_norm": 0.34646981954574585,
"learning_rate": 8.832302814866416e-05,
"loss": 0.5117,
"step": 540
},
{
"epoch": 0.9191560476289952,
"grad_norm": 0.2947717607021332,
"learning_rate": 8.76918960470122e-05,
"loss": 0.534,
"step": 550
},
{
"epoch": 0.9358679757677042,
"grad_norm": 0.2506949305534363,
"learning_rate": 8.704653692250466e-05,
"loss": 0.5013,
"step": 560
},
{
"epoch": 0.9525799039064132,
"grad_norm": 0.28196585178375244,
"learning_rate": 8.638719436965325e-05,
"loss": 0.5089,
"step": 570
},
{
"epoch": 0.9692918320451223,
"grad_norm": 0.26513898372650146,
"learning_rate": 8.571411726109519e-05,
"loss": 0.5346,
"step": 580
},
{
"epoch": 0.9860037601838312,
"grad_norm": 0.3039480149745941,
"learning_rate": 8.50275596536546e-05,
"loss": 0.5097,
"step": 590
},
{
"epoch": 1.0016711928138708,
"grad_norm": 0.2783941328525543,
"learning_rate": 8.432778069244749e-05,
"loss": 0.5139,
"step": 600
},
{
"epoch": 1.01838312095258,
"grad_norm": 0.2535961866378784,
"learning_rate": 8.361504451306585e-05,
"loss": 0.4471,
"step": 610
},
{
"epoch": 1.035095049091289,
"grad_norm": 0.32639509439468384,
"learning_rate": 8.288962014187811e-05,
"loss": 0.4725,
"step": 620
},
{
"epoch": 1.0518069772299978,
"grad_norm": 0.3780384361743927,
"learning_rate": 8.21517813944837e-05,
"loss": 0.4459,
"step": 630
},
{
"epoch": 1.068518905368707,
"grad_norm": 0.297475129365921,
"learning_rate": 8.14018067723597e-05,
"loss": 0.4515,
"step": 640
},
{
"epoch": 1.085230833507416,
"grad_norm": 0.32445859909057617,
"learning_rate": 8.063997935773885e-05,
"loss": 0.4648,
"step": 650
},
{
"epoch": 1.101942761646125,
"grad_norm": 0.333881676197052,
"learning_rate": 7.986658670675861e-05,
"loss": 0.4549,
"step": 660
},
{
"epoch": 1.118654689784834,
"grad_norm": 0.3279685974121094,
"learning_rate": 7.908192074092136e-05,
"loss": 0.4768,
"step": 670
},
{
"epoch": 1.1353666179235429,
"grad_norm": 0.36196866631507874,
"learning_rate": 7.828627763690697e-05,
"loss": 0.5039,
"step": 680
},
{
"epoch": 1.152078546062252,
"grad_norm": 0.3395329713821411,
"learning_rate": 7.747995771477928e-05,
"loss": 0.4628,
"step": 690
},
{
"epoch": 1.168790474200961,
"grad_norm": 0.3666079342365265,
"learning_rate": 7.666326532462842e-05,
"loss": 0.444,
"step": 700
},
{
"epoch": 1.1855024023396699,
"grad_norm": 0.3585106432437897,
"learning_rate": 7.583650873169232e-05,
"loss": 0.5077,
"step": 710
},
{
"epoch": 1.202214330478379,
"grad_norm": 0.3411233127117157,
"learning_rate": 7.500000000000001e-05,
"loss": 0.4689,
"step": 720
},
{
"epoch": 1.218926258617088,
"grad_norm": 0.36491307616233826,
"learning_rate": 7.41540548745814e-05,
"loss": 0.4509,
"step": 730
},
{
"epoch": 1.2356381867557968,
"grad_norm": 0.3584769070148468,
"learning_rate": 7.329899266228748e-05,
"loss": 0.4782,
"step": 740
},
{
"epoch": 1.252350114894506,
"grad_norm": 0.3567698299884796,
"learning_rate": 7.243513611126608e-05,
"loss": 0.4287,
"step": 750
},
{
"epoch": 1.269062043033215,
"grad_norm": 0.36464792490005493,
"learning_rate": 7.156281128913871e-05,
"loss": 0.4642,
"step": 760
},
{
"epoch": 1.285773971171924,
"grad_norm": 0.41288235783576965,
"learning_rate": 7.068234745992456e-05,
"loss": 0.4659,
"step": 770
},
{
"epoch": 1.302485899310633,
"grad_norm": 0.8390935063362122,
"learning_rate": 6.979407695975776e-05,
"loss": 0.4932,
"step": 780
},
{
"epoch": 1.3191978274493419,
"grad_norm": 0.3990253806114197,
"learning_rate": 6.889833507144532e-05,
"loss": 0.494,
"step": 790
},
{
"epoch": 1.335909755588051,
"grad_norm": 0.39918258786201477,
"learning_rate": 6.799545989791268e-05,
"loss": 0.4307,
"step": 800
},
{
"epoch": 1.35262168372676,
"grad_norm": 0.3962526321411133,
"learning_rate": 6.708579223458475e-05,
"loss": 0.4515,
"step": 810
},
{
"epoch": 1.369333611865469,
"grad_norm": 0.366760790348053,
"learning_rate": 6.616967544075077e-05,
"loss": 0.4783,
"step": 820
},
{
"epoch": 1.386045540004178,
"grad_norm": 0.3852802515029907,
"learning_rate": 6.524745530996137e-05,
"loss": 0.4933,
"step": 830
},
{
"epoch": 1.402757468142887,
"grad_norm": 0.3308377265930176,
"learning_rate": 6.431947993950682e-05,
"loss": 0.525,
"step": 840
},
{
"epoch": 1.4194693962815959,
"grad_norm": 0.3748877942562103,
"learning_rate": 6.338609959902569e-05,
"loss": 0.4752,
"step": 850
},
{
"epoch": 1.436181324420305,
"grad_norm": 0.37608233094215393,
"learning_rate": 6.244766659829351e-05,
"loss": 0.4818,
"step": 860
},
{
"epoch": 1.452893252559014,
"grad_norm": 0.36020001769065857,
"learning_rate": 6.150453515424153e-05,
"loss": 0.4888,
"step": 870
},
{
"epoch": 1.469605180697723,
"grad_norm": 0.40768539905548096,
"learning_rate": 6.055706125725542e-05,
"loss": 0.5247,
"step": 880
},
{
"epoch": 1.486317108836432,
"grad_norm": 0.397224098443985,
"learning_rate": 5.9605602536804673e-05,
"loss": 0.4356,
"step": 890
},
{
"epoch": 1.503029036975141,
"grad_norm": 0.38998743891716003,
"learning_rate": 5.865051812645329e-05,
"loss": 0.473,
"step": 900
},
{
"epoch": 1.51974096511385,
"grad_norm": 0.3835464417934418,
"learning_rate": 5.7692168528302807e-05,
"loss": 0.4716,
"step": 910
},
{
"epoch": 1.536452893252559,
"grad_norm": 0.3747701644897461,
"learning_rate": 5.673091547691866e-05,
"loss": 0.432,
"step": 920
},
{
"epoch": 1.553164821391268,
"grad_norm": 0.3856564462184906,
"learning_rate": 5.576712180279133e-05,
"loss": 0.5033,
"step": 930
},
{
"epoch": 1.569876749529977,
"grad_norm": 0.3968600332736969,
"learning_rate": 5.480115129538409e-05,
"loss": 0.5051,
"step": 940
},
{
"epoch": 1.586588677668686,
"grad_norm": 0.4307195842266083,
"learning_rate": 5.383336856581833e-05,
"loss": 0.4576,
"step": 950
},
{
"epoch": 1.6033006058073949,
"grad_norm": 0.401885449886322,
"learning_rate": 5.2864138909249176e-05,
"loss": 0.4866,
"step": 960
},
{
"epoch": 1.620012533946104,
"grad_norm": 0.3600904941558838,
"learning_rate": 5.189382816698263e-05,
"loss": 0.4844,
"step": 970
},
{
"epoch": 1.6367244620848131,
"grad_norm": 0.35338348150253296,
"learning_rate": 5.0922802588386766e-05,
"loss": 0.4991,
"step": 980
},
{
"epoch": 1.653436390223522,
"grad_norm": 0.37117472290992737,
"learning_rate": 4.9951428692648664e-05,
"loss": 0.4709,
"step": 990
},
{
"epoch": 1.670148318362231,
"grad_norm": 0.3381313383579254,
"learning_rate": 4.898007313042975e-05,
"loss": 0.4474,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 1797,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.76025652115882e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}