{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 10000,
  "global_step": 60130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04157658406785299,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.08315316813570597,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 0.12472975220355896,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 0.16630633627141195,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 0.20788292033926492,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 0.24945950440711792,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 0.2910360884749709,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 0.3326126725428239,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 0.37418925661067687,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 4500
    },
    {
      "epoch": 0.41576584067852984,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 0.4573424247463828,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 0.49891900881423584,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 0.5404955928820888,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 0.5820721769499418,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 0.6236487610177948,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 0.6652253450856478,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 0.7068019291535007,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 8500
    },
    {
      "epoch": 0.7483785132213537,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 9000
    },
    {
      "epoch": 0.7899550972892068,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 9500
    },
    {
      "epoch": 0.8315316813570597,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 10000
    },
    {
      "epoch": 0.8315316813570597,
      "eval_loss": null,
      "eval_runtime": 2641.4469,
      "eval_samples_per_second": 145.687,
      "eval_steps_per_second": 4.553,
      "step": 10000
    },
    {
      "epoch": 0.8731082654249127,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 10500
    },
    {
      "epoch": 0.9146848494927656,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 11000
    },
    {
      "epoch": 0.9562614335606187,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 11500
    },
    {
      "epoch": 0.9978380176284717,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 12000
    },
    {
      "epoch": 1.0394146016963246,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 12500
    },
    {
      "epoch": 1.0809911857641776,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 13000
    },
    {
      "epoch": 1.1225677698320307,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 13500
    },
    {
      "epoch": 1.1641443538998835,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 14000
    },
    {
      "epoch": 1.2057209379677365,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 14500
    },
    {
      "epoch": 1.2472975220355895,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 15000
    },
    {
      "epoch": 1.2888741061034426,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 15500
    },
    {
      "epoch": 1.3304506901712956,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 16000
    },
    {
      "epoch": 1.3720272742391484,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 16500
    },
    {
      "epoch": 1.4136038583070014,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 17000
    },
    {
      "epoch": 1.4551804423748544,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 17500
    },
    {
      "epoch": 1.4967570264427075,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 18000
    },
    {
      "epoch": 1.5383336105105605,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 18500
    },
    {
      "epoch": 1.5799101945784133,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 19000
    },
    {
      "epoch": 1.6214867786462666,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 19500
    },
    {
      "epoch": 1.6630633627141194,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 20000
    },
    {
      "epoch": 1.6630633627141194,
      "eval_loss": null,
      "eval_runtime": 2644.954,
      "eval_samples_per_second": 145.494,
      "eval_steps_per_second": 4.547,
      "step": 20000
    },
    {
      "epoch": 1.7046399467819724,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 20500
    },
    {
      "epoch": 1.7462165308498254,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 21000
    },
    {
      "epoch": 1.7877931149176782,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 21500
    },
    {
      "epoch": 1.8293696989855315,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 22000
    },
    {
      "epoch": 1.8709462830533843,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 22500
    },
    {
      "epoch": 1.9125228671212373,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 23000
    },
    {
      "epoch": 1.9540994511890903,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 23500
    },
    {
      "epoch": 1.9956760352569431,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 24000
    },
    {
      "epoch": 2.0372526193247964,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 24500
    },
    {
      "epoch": 2.078829203392649,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 25000
    },
    {
      "epoch": 2.1204057874605025,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 25500
    },
    {
      "epoch": 2.1619823715283553,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 26000
    },
    {
      "epoch": 2.203558955596208,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 26500
    },
    {
      "epoch": 2.2451355396640613,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 27000
    },
    {
      "epoch": 2.286712123731914,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 27500
    },
    {
      "epoch": 2.328288707799767,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 28000
    },
    {
      "epoch": 2.36986529186762,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 28500
    },
    {
      "epoch": 2.411441875935473,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 29000
    },
    {
      "epoch": 2.4530184600033262,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 29500
    },
    {
      "epoch": 2.494595044071179,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 30000
    },
    {
      "epoch": 2.494595044071179,
      "eval_loss": null,
      "eval_runtime": 2679.1955,
      "eval_samples_per_second": 143.635,
      "eval_steps_per_second": 4.489,
      "step": 30000
    },
    {
      "epoch": 2.536171628139032,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 30500
    },
    {
      "epoch": 2.577748212206885,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 31000
    },
    {
      "epoch": 2.619324796274738,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 31500
    },
    {
      "epoch": 2.660901380342591,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 32000
    },
    {
      "epoch": 2.702477964410444,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 32500
    },
    {
      "epoch": 2.7440545484782968,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 33000
    },
    {
      "epoch": 2.78563113254615,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 33500
    },
    {
      "epoch": 2.827207716614003,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 34000
    },
    {
      "epoch": 2.868784300681856,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 34500
    },
    {
      "epoch": 2.910360884749709,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 35000
    },
    {
      "epoch": 2.9519374688175617,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 35500
    },
    {
      "epoch": 2.993514052885415,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 36000
    },
    {
      "epoch": 3.0350906369532678,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 36500
    },
    {
      "epoch": 3.076667221021121,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 37000
    },
    {
      "epoch": 3.118243805088974,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 37500
    },
    {
      "epoch": 3.159820389156827,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 38000
    },
    {
      "epoch": 3.20139697322468,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 38500
    },
    {
      "epoch": 3.2429735572925327,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 39000
    },
    {
      "epoch": 3.284550141360386,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 39500
    },
    {
      "epoch": 3.3261267254282387,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 40000
    },
    {
      "epoch": 3.3261267254282387,
      "eval_loss": null,
      "eval_runtime": 2656.6227,
      "eval_samples_per_second": 144.855,
      "eval_steps_per_second": 4.527,
      "step": 40000
    },
    {
      "epoch": 3.367703309496092,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 40500
    },
    {
      "epoch": 3.409279893563945,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 41000
    },
    {
      "epoch": 3.4508564776317976,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 41500
    },
    {
      "epoch": 3.492433061699651,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 42000
    },
    {
      "epoch": 3.5340096457675036,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 42500
    },
    {
      "epoch": 3.5755862298353565,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 43000
    },
    {
      "epoch": 3.6171628139032097,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 43500
    },
    {
      "epoch": 3.658739397971063,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 44000
    },
    {
      "epoch": 3.7003159820389158,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 44500
    },
    {
      "epoch": 3.7418925661067686,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 45000
    },
    {
      "epoch": 3.7834691501746214,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 45500
    },
    {
      "epoch": 3.8250457342424746,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 46000
    },
    {
      "epoch": 3.866622318310328,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 46500
    },
    {
      "epoch": 3.9081989023781807,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 47000
    },
    {
      "epoch": 3.9497754864460335,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 47500
    },
    {
      "epoch": 3.9913520705138867,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 48000
    },
    {
      "epoch": 4.03292865458174,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 48500
    },
    {
      "epoch": 4.074505238649593,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 49000
    },
    {
      "epoch": 4.116081822717446,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 49500
    },
    {
      "epoch": 4.157658406785298,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 50000
    },
    {
      "epoch": 4.157658406785298,
      "eval_loss": null,
      "eval_runtime": 2666.9169,
      "eval_samples_per_second": 144.296,
      "eval_steps_per_second": 4.509,
      "step": 50000
    },
    {
      "epoch": 4.199234990853151,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 50500
    },
    {
      "epoch": 4.240811574921005,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 51000
    },
    {
      "epoch": 4.282388158988858,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 51500
    },
    {
      "epoch": 4.3239647430567105,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 52000
    },
    {
      "epoch": 4.365541327124563,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 52500
    },
    {
      "epoch": 4.407117911192416,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 53000
    },
    {
      "epoch": 4.44869449526027,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 53500
    },
    {
      "epoch": 4.490271079328123,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 54000
    },
    {
      "epoch": 4.5318476633959754,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 54500
    },
    {
      "epoch": 4.573424247463828,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 55000
    },
    {
      "epoch": 4.615000831531681,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 55500
    },
    {
      "epoch": 4.656577415599534,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 56000
    },
    {
      "epoch": 4.698153999667388,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 56500
    },
    {
      "epoch": 4.73973058373524,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 57000
    },
    {
      "epoch": 4.781307167803093,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 57500
    },
    {
      "epoch": 4.822883751870946,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 58000
    },
    {
      "epoch": 4.8644603359388,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 58500
    },
    {
      "epoch": 4.9060369200066525,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 59000
    },
    {
      "epoch": 4.947613504074505,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 59500
    },
    {
      "epoch": 4.989190088142358,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 60000
    },
    {
      "epoch": 4.989190088142358,
      "eval_loss": null,
      "eval_runtime": 2669.6986,
      "eval_samples_per_second": 144.145,
      "eval_steps_per_second": 4.505,
      "step": 60000
    }
  ],
  "logging_steps": 500,
  "max_steps": 60130,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.7410158852963e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}