{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.5257790368271955,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11331444759206799,
"grad_norm": 0.16634757973627667,
"learning_rate": 1.977324263038549e-05,
"loss": 0.2784,
"step": 50
},
{
"epoch": 0.22662889518413598,
"grad_norm": 0.11085298565985235,
"learning_rate": 1.9546485260770977e-05,
"loss": 0.0401,
"step": 100
},
{
"epoch": 0.33994334277620397,
"grad_norm": 0.09305945886557498,
"learning_rate": 1.9319727891156463e-05,
"loss": 0.0367,
"step": 150
},
{
"epoch": 0.45325779036827196,
"grad_norm": 0.12071603098099416,
"learning_rate": 1.9092970521541953e-05,
"loss": 0.0349,
"step": 200
},
{
"epoch": 0.56657223796034,
"grad_norm": 0.09522148816808138,
"learning_rate": 1.886621315192744e-05,
"loss": 0.0341,
"step": 250
},
{
"epoch": 0.6798866855524079,
"grad_norm": 0.09713610169325913,
"learning_rate": 1.863945578231293e-05,
"loss": 0.0328,
"step": 300
},
{
"epoch": 0.7932011331444759,
"grad_norm": 0.0923060147808418,
"learning_rate": 1.8412698412698415e-05,
"loss": 0.0321,
"step": 350
},
{
"epoch": 0.9065155807365439,
"grad_norm": 0.09728099867584644,
"learning_rate": 1.81859410430839e-05,
"loss": 0.0313,
"step": 400
},
{
"epoch": 1.0181303116147309,
"grad_norm": 0.0803018017927487,
"learning_rate": 1.795918367346939e-05,
"loss": 0.0302,
"step": 450
},
{
"epoch": 1.1314447592067989,
"grad_norm": 0.06951950783054574,
"learning_rate": 1.7732426303854877e-05,
"loss": 0.0299,
"step": 500
},
{
"epoch": 1.2447592067988669,
"grad_norm": 0.07485661872923234,
"learning_rate": 1.7505668934240366e-05,
"loss": 0.0294,
"step": 550
},
{
"epoch": 1.3580736543909349,
"grad_norm": 0.09460207950673731,
"learning_rate": 1.7278911564625852e-05,
"loss": 0.0289,
"step": 600
},
{
"epoch": 1.4713881019830028,
"grad_norm": 0.07328832674899002,
"learning_rate": 1.705215419501134e-05,
"loss": 0.0284,
"step": 650
},
{
"epoch": 1.5847025495750708,
"grad_norm": 0.08250947529934735,
"learning_rate": 1.6825396825396828e-05,
"loss": 0.028,
"step": 700
},
{
"epoch": 1.6980169971671388,
"grad_norm": 0.20845099776138606,
"learning_rate": 1.6598639455782314e-05,
"loss": 0.0416,
"step": 750
},
{
"epoch": 1.8113314447592068,
"grad_norm": 0.0696205675426991,
"learning_rate": 1.63718820861678e-05,
"loss": 0.0298,
"step": 800
},
{
"epoch": 1.9246458923512748,
"grad_norm": 0.06790871962351955,
"learning_rate": 1.614512471655329e-05,
"loss": 0.0269,
"step": 850
},
{
"epoch": 2.0362606232294618,
"grad_norm": 0.07065541323706591,
"learning_rate": 1.5918367346938776e-05,
"loss": 0.0262,
"step": 900
},
{
"epoch": 2.1495750708215295,
"grad_norm": 0.06744948277964836,
"learning_rate": 1.5691609977324265e-05,
"loss": 0.0264,
"step": 950
},
{
"epoch": 2.2628895184135978,
"grad_norm": 0.06160642122161638,
"learning_rate": 1.546485260770975e-05,
"loss": 0.0261,
"step": 1000
},
{
"epoch": 2.376203966005666,
"grad_norm": 0.05385845438103807,
"learning_rate": 1.523809523809524e-05,
"loss": 0.0256,
"step": 1050
},
{
"epoch": 2.4895184135977337,
"grad_norm": 0.050240201594038615,
"learning_rate": 1.5011337868480727e-05,
"loss": 0.0255,
"step": 1100
},
{
"epoch": 2.6028328611898015,
"grad_norm": 0.05166678846851462,
"learning_rate": 1.4784580498866215e-05,
"loss": 0.025,
"step": 1150
},
{
"epoch": 2.7161473087818697,
"grad_norm": 0.04683960740904564,
"learning_rate": 1.4557823129251703e-05,
"loss": 0.0247,
"step": 1200
},
{
"epoch": 2.829461756373938,
"grad_norm": 0.06029107098535805,
"learning_rate": 1.433106575963719e-05,
"loss": 0.0244,
"step": 1250
},
{
"epoch": 2.9427762039660057,
"grad_norm": 0.05693111402459813,
"learning_rate": 1.4104308390022677e-05,
"loss": 0.0243,
"step": 1300
},
{
"epoch": 3.0543909348441924,
"grad_norm": 0.0564549475935376,
"learning_rate": 1.3877551020408165e-05,
"loss": 0.0237,
"step": 1350
},
{
"epoch": 3.1677053824362607,
"grad_norm": 0.053035034218339376,
"learning_rate": 1.3650793650793652e-05,
"loss": 0.0237,
"step": 1400
},
{
"epoch": 3.2810198300283284,
"grad_norm": 0.05993864052557364,
"learning_rate": 1.342403628117914e-05,
"loss": 0.0238,
"step": 1450
},
{
"epoch": 3.3943342776203966,
"grad_norm": 0.05520610967439923,
"learning_rate": 1.3197278911564626e-05,
"loss": 0.0236,
"step": 1500
},
{
"epoch": 3.507648725212465,
"grad_norm": 0.03900040916919487,
"learning_rate": 1.2970521541950114e-05,
"loss": 0.0234,
"step": 1550
},
{
"epoch": 3.6209631728045326,
"grad_norm": 0.049610256443639804,
"learning_rate": 1.2743764172335602e-05,
"loss": 0.0234,
"step": 1600
},
{
"epoch": 3.7342776203966004,
"grad_norm": 0.11734157984934857,
"learning_rate": 1.251700680272109e-05,
"loss": 0.0256,
"step": 1650
},
{
"epoch": 3.8475920679886686,
"grad_norm": 0.06914093614568675,
"learning_rate": 1.2290249433106578e-05,
"loss": 0.025,
"step": 1700
},
{
"epoch": 3.960906515580737,
"grad_norm": 0.05085197256090967,
"learning_rate": 1.2063492063492064e-05,
"loss": 0.0235,
"step": 1750
},
{
"epoch": 4.0725212464589235,
"grad_norm": 0.05471296120488471,
"learning_rate": 1.1836734693877552e-05,
"loss": 0.0225,
"step": 1800
},
{
"epoch": 4.185835694050992,
"grad_norm": 0.048226801279643884,
"learning_rate": 1.160997732426304e-05,
"loss": 0.0227,
"step": 1850
},
{
"epoch": 4.299150141643059,
"grad_norm": 0.06890445383362698,
"learning_rate": 1.1383219954648527e-05,
"loss": 0.0226,
"step": 1900
},
{
"epoch": 4.412464589235127,
"grad_norm": 0.04493545467616693,
"learning_rate": 1.1156462585034013e-05,
"loss": 0.0226,
"step": 1950
},
{
"epoch": 4.5257790368271955,
"grad_norm": 0.04998755369871855,
"learning_rate": 1.0929705215419501e-05,
"loss": 0.0222,
"step": 2000
}
],
"logging_steps": 50,
"max_steps": 4410,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1134043823276032e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}