{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8355964069354502,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016711928138709004,
      "grad_norm": 1.12109375,
      "learning_rate": 5e-06,
      "loss": 1.1894,
      "step": 10
    },
    {
      "epoch": 0.03342385627741801,
      "grad_norm": 0.7248284816741943,
      "learning_rate": 1.0555555555555555e-05,
      "loss": 1.1506,
      "step": 20
    },
    {
      "epoch": 0.05013578441612701,
      "grad_norm": 1.1556353569030762,
      "learning_rate": 1.6111111111111115e-05,
      "loss": 1.1033,
      "step": 30
    },
    {
      "epoch": 0.06684771255483601,
      "grad_norm": 0.4772647023200989,
      "learning_rate": 2.1666666666666667e-05,
      "loss": 0.9371,
      "step": 40
    },
    {
      "epoch": 0.08355964069354502,
      "grad_norm": 0.27529847621917725,
      "learning_rate": 2.7222222222222223e-05,
      "loss": 0.8313,
      "step": 50
    },
    {
      "epoch": 0.10027156883225402,
      "grad_norm": 0.22090303897857666,
      "learning_rate": 3.277777777777778e-05,
      "loss": 0.7724,
      "step": 60
    },
    {
      "epoch": 0.11698349697096302,
      "grad_norm": 0.21275630593299866,
      "learning_rate": 3.8333333333333334e-05,
      "loss": 0.733,
      "step": 70
    },
    {
      "epoch": 0.13369542510967203,
      "grad_norm": 0.2134302407503128,
      "learning_rate": 4.388888888888889e-05,
      "loss": 0.7134,
      "step": 80
    },
    {
      "epoch": 0.15040735324838103,
      "grad_norm": 0.18984167277812958,
      "learning_rate": 4.9444444444444446e-05,
      "loss": 0.6959,
      "step": 90
    },
    {
      "epoch": 0.16711928138709004,
      "grad_norm": 0.2290167659521103,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.6518,
      "step": 100
    },
    {
      "epoch": 0.18383120952579904,
      "grad_norm": 0.2181967943906784,
      "learning_rate": 6.055555555555555e-05,
      "loss": 0.6313,
      "step": 110
    },
    {
      "epoch": 0.20054313766450804,
      "grad_norm": 0.26380836963653564,
      "learning_rate": 6.611111111111111e-05,
      "loss": 0.6059,
      "step": 120
    },
    {
      "epoch": 0.21725506580321705,
      "grad_norm": 0.25219714641571045,
      "learning_rate": 7.166666666666667e-05,
      "loss": 0.6613,
      "step": 130
    },
    {
      "epoch": 0.23396699394192605,
      "grad_norm": 0.24282532930374146,
      "learning_rate": 7.722222222222223e-05,
      "loss": 0.6676,
      "step": 140
    },
    {
      "epoch": 0.2506789220806351,
      "grad_norm": 0.28735119104385376,
      "learning_rate": 8.277777777777778e-05,
      "loss": 0.647,
      "step": 150
    },
    {
      "epoch": 0.26739085021934406,
      "grad_norm": 0.3064824938774109,
      "learning_rate": 8.833333333333333e-05,
      "loss": 0.6222,
      "step": 160
    },
    {
      "epoch": 0.2841027783580531,
      "grad_norm": 0.33395013213157654,
      "learning_rate": 9.388888888888889e-05,
      "loss": 0.6109,
      "step": 170
    },
    {
      "epoch": 0.30081470649676206,
      "grad_norm": 0.32029104232788086,
      "learning_rate": 9.944444444444446e-05,
      "loss": 0.6942,
      "step": 180
    },
    {
      "epoch": 0.3175266346354711,
      "grad_norm": 0.29911231994628906,
      "learning_rate": 9.999235647539953e-05,
      "loss": 0.6312,
      "step": 190
    },
    {
      "epoch": 0.33423856277418007,
      "grad_norm": 0.3333311676979065,
      "learning_rate": 9.996593741531468e-05,
      "loss": 0.6118,
      "step": 200
    },
    {
      "epoch": 0.3509504909128891,
      "grad_norm": 0.31720009446144104,
      "learning_rate": 9.992065842489567e-05,
      "loss": 0.5505,
      "step": 210
    },
    {
      "epoch": 0.3676624190515981,
      "grad_norm": 0.30165988206863403,
      "learning_rate": 9.985653659495773e-05,
      "loss": 0.6274,
      "step": 220
    },
    {
      "epoch": 0.3843743471903071,
      "grad_norm": 0.31748878955841064,
      "learning_rate": 9.977359612865423e-05,
      "loss": 0.5831,
      "step": 230
    },
    {
      "epoch": 0.4010862753290161,
      "grad_norm": 0.5018420815467834,
      "learning_rate": 9.967186833234101e-05,
      "loss": 0.6331,
      "step": 240
    },
    {
      "epoch": 0.4177982034677251,
      "grad_norm": 0.3082675635814667,
      "learning_rate": 9.955139160375959e-05,
      "loss": 0.5884,
      "step": 250
    },
    {
      "epoch": 0.4345101316064341,
      "grad_norm": 0.36464694142341614,
      "learning_rate": 9.941221141754385e-05,
      "loss": 0.5993,
      "step": 260
    },
    {
      "epoch": 0.4512220597451431,
      "grad_norm": 0.36485177278518677,
      "learning_rate": 9.925438030805518e-05,
      "loss": 0.5846,
      "step": 270
    },
    {
      "epoch": 0.4679339878838521,
      "grad_norm": 0.3401179015636444,
      "learning_rate": 9.907795784955327e-05,
      "loss": 0.61,
      "step": 280
    },
    {
      "epoch": 0.48464591602256113,
      "grad_norm": 0.31191331148147583,
      "learning_rate": 9.888301063370934e-05,
      "loss": 0.5872,
      "step": 290
    },
    {
      "epoch": 0.5013578441612702,
      "grad_norm": 0.2694801688194275,
      "learning_rate": 9.866961224447075e-05,
      "loss": 0.5905,
      "step": 300
    },
    {
      "epoch": 0.5180697722999791,
      "grad_norm": 0.323143869638443,
      "learning_rate": 9.843784323028638e-05,
      "loss": 0.6213,
      "step": 310
    },
    {
      "epoch": 0.5347817004386881,
      "grad_norm": 0.30978456139564514,
      "learning_rate": 9.818779107370309e-05,
      "loss": 0.5602,
      "step": 320
    },
    {
      "epoch": 0.5514936285773971,
      "grad_norm": 0.36156347393989563,
      "learning_rate": 9.791955015834492e-05,
      "loss": 0.617,
      "step": 330
    },
    {
      "epoch": 0.5682055567161062,
      "grad_norm": 0.3188885450363159,
      "learning_rate": 9.763322173328753e-05,
      "loss": 0.6133,
      "step": 340
    },
    {
      "epoch": 0.5849174848548151,
      "grad_norm": 3.012507915496826,
      "learning_rate": 9.732891387484104e-05,
      "loss": 0.5401,
      "step": 350
    },
    {
      "epoch": 0.6016294129935241,
      "grad_norm": 0.3500335216522217,
      "learning_rate": 9.700674144575614e-05,
      "loss": 0.5994,
      "step": 360
    },
    {
      "epoch": 0.6183413411322332,
      "grad_norm": 0.38718223571777344,
      "learning_rate": 9.666682605186835e-05,
      "loss": 0.5362,
      "step": 370
    },
    {
      "epoch": 0.6350532692709422,
      "grad_norm": 0.32986128330230713,
      "learning_rate": 9.63092959961973e-05,
      "loss": 0.596,
      "step": 380
    },
    {
      "epoch": 0.6517651974096511,
      "grad_norm": 0.29063621163368225,
      "learning_rate": 9.593428623051792e-05,
      "loss": 0.5578,
      "step": 390
    },
    {
      "epoch": 0.6684771255483601,
      "grad_norm": 0.3483268916606903,
      "learning_rate": 9.554193830442229e-05,
      "loss": 0.6073,
      "step": 400
    },
    {
      "epoch": 0.6851890536870692,
      "grad_norm": 0.33267414569854736,
      "learning_rate": 9.513240031189067e-05,
      "loss": 0.5327,
      "step": 410
    },
    {
      "epoch": 0.7019009818257782,
      "grad_norm": 0.3286801874637604,
      "learning_rate": 9.470582683539285e-05,
      "loss": 0.5884,
      "step": 420
    },
    {
      "epoch": 0.7186129099644871,
      "grad_norm": 0.359754353761673,
      "learning_rate": 9.42623788875399e-05,
      "loss": 0.6042,
      "step": 430
    },
    {
      "epoch": 0.7353248381031962,
      "grad_norm": 0.36169129610061646,
      "learning_rate": 9.380222385030915e-05,
      "loss": 0.4902,
      "step": 440
    },
    {
      "epoch": 0.7520367662419052,
      "grad_norm": 0.31606003642082214,
      "learning_rate": 9.332553541186485e-05,
      "loss": 0.5816,
      "step": 450
    },
    {
      "epoch": 0.7687486943806142,
      "grad_norm": 0.3151916265487671,
      "learning_rate": 9.283249350099859e-05,
      "loss": 0.6368,
      "step": 460
    },
    {
      "epoch": 0.7854606225193231,
      "grad_norm": 0.2840172350406647,
      "learning_rate": 9.23232842192142e-05,
      "loss": 0.5906,
      "step": 470
    },
    {
      "epoch": 0.8021725506580322,
      "grad_norm": 0.30779144167900085,
      "learning_rate": 9.179809977048248e-05,
      "loss": 0.5955,
      "step": 480
    },
    {
      "epoch": 0.8188844787967412,
      "grad_norm": 0.31077197194099426,
      "learning_rate": 9.125713838869299e-05,
      "loss": 0.5831,
      "step": 490
    },
    {
      "epoch": 0.8355964069354502,
      "grad_norm": 0.33284345269203186,
      "learning_rate": 9.070060426282925e-05,
      "loss": 0.6071,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1797,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8837442456711987e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}