{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 11110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0450070323488045,
      "grad_norm": 57.50090789794922,
      "learning_rate": 4.7754275427542756e-05,
      "loss": 4.3526,
      "step": 500
    },
    {
      "epoch": 0.090014064697609,
      "grad_norm": 33.234798431396484,
      "learning_rate": 4.55040504050405e-05,
      "loss": 1.6131,
      "step": 1000
    },
    {
      "epoch": 0.1350210970464135,
      "grad_norm": 40.48593521118164,
      "learning_rate": 4.325382538253826e-05,
      "loss": 1.3157,
      "step": 1500
    },
    {
      "epoch": 0.180028129395218,
      "grad_norm": 34.0600700378418,
      "learning_rate": 4.1003600360036004e-05,
      "loss": 1.1683,
      "step": 2000
    },
    {
      "epoch": 0.2250351617440225,
      "grad_norm": 11.524826049804688,
      "learning_rate": 3.875337533753375e-05,
      "loss": 1.0973,
      "step": 2500
    },
    {
      "epoch": 0.270042194092827,
      "grad_norm": 35.38045120239258,
      "learning_rate": 3.6503150315031505e-05,
      "loss": 1.0251,
      "step": 3000
    },
    {
      "epoch": 0.3150492264416315,
      "grad_norm": 13.695218086242676,
      "learning_rate": 3.425292529252925e-05,
      "loss": 0.958,
      "step": 3500
    },
    {
      "epoch": 0.360056258790436,
      "grad_norm": 14.775146484375,
      "learning_rate": 3.2002700270027e-05,
      "loss": 0.9002,
      "step": 4000
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 9.160333633422852,
      "learning_rate": 2.9752475247524754e-05,
      "loss": 0.8585,
      "step": 4500
    },
    {
      "epoch": 0.450070323488045,
      "grad_norm": 7.049114227294922,
      "learning_rate": 2.75022502250225e-05,
      "loss": 0.8319,
      "step": 5000
    },
    {
      "epoch": 0.49507735583684953,
      "grad_norm": 7.8238115310668945,
      "learning_rate": 2.525202520252025e-05,
      "loss": 0.8034,
      "step": 5500
    },
    {
      "epoch": 0.540084388185654,
      "grad_norm": 5.348775863647461,
      "learning_rate": 2.3001800180018002e-05,
      "loss": 0.7809,
      "step": 6000
    },
    {
      "epoch": 0.5850914205344585,
      "grad_norm": 8.474061012268066,
      "learning_rate": 2.0751575157515752e-05,
      "loss": 0.763,
      "step": 6500
    },
    {
      "epoch": 0.630098452883263,
      "grad_norm": 7.072192192077637,
      "learning_rate": 1.85013501350135e-05,
      "loss": 0.7535,
      "step": 7000
    },
    {
      "epoch": 0.6751054852320675,
      "grad_norm": 7.099096775054932,
      "learning_rate": 1.625112511251125e-05,
      "loss": 0.7386,
      "step": 7500
    },
    {
      "epoch": 0.720112517580872,
      "grad_norm": 6.223616600036621,
      "learning_rate": 1.4000900090009e-05,
      "loss": 0.73,
      "step": 8000
    },
    {
      "epoch": 0.7651195499296765,
      "grad_norm": 4.832330703735352,
      "learning_rate": 1.1750675067506751e-05,
      "loss": 0.7213,
      "step": 8500
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 3.9546172618865967,
      "learning_rate": 9.500450045004502e-06,
      "loss": 0.7076,
      "step": 9000
    },
    {
      "epoch": 0.8551336146272855,
      "grad_norm": 3.895390510559082,
      "learning_rate": 7.250225022502251e-06,
      "loss": 0.7049,
      "step": 9500
    },
    {
      "epoch": 0.90014064697609,
      "grad_norm": 4.706455230712891,
      "learning_rate": 5e-06,
      "loss": 0.705,
      "step": 10000
    },
    {
      "epoch": 0.9451476793248945,
      "grad_norm": 3.554635524749756,
      "learning_rate": 2.7497749774977497e-06,
      "loss": 0.7032,
      "step": 10500
    },
    {
      "epoch": 0.9901547116736991,
      "grad_norm": 3.6398956775665283,
      "learning_rate": 4.995499549954995e-07,
      "loss": 0.6962,
      "step": 11000
    }
  ],
  "logging_steps": 500,
  "max_steps": 11110,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 99999999,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}