{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1893491124260355,
  "eval_steps": 20,
  "global_step": 160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001183431952662722,
      "eval_loss": 11.76406478881836,
      "eval_runtime": 3.448,
      "eval_samples_per_second": 435.611,
      "eval_steps_per_second": 27.262,
      "step": 1
    },
    {
      "epoch": 0.011834319526627219,
      "grad_norm": 0.275390625,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 11.764,
      "step": 10
    },
    {
      "epoch": 0.023668639053254437,
      "grad_norm": 0.1640625,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 11.7612,
      "step": 20
    },
    {
      "epoch": 0.023668639053254437,
      "eval_loss": 11.763768196105957,
      "eval_runtime": 3.4066,
      "eval_samples_per_second": 440.914,
      "eval_steps_per_second": 27.594,
      "step": 20
    },
    {
      "epoch": 0.03550295857988166,
      "grad_norm": 0.353515625,
      "learning_rate": 4.8e-05,
      "loss": 11.7612,
      "step": 30
    },
    {
      "epoch": 0.047337278106508875,
      "grad_norm": 0.197265625,
      "learning_rate": 6.400000000000001e-05,
      "loss": 11.7593,
      "step": 40
    },
    {
      "epoch": 0.047337278106508875,
      "eval_loss": 11.76196575164795,
      "eval_runtime": 3.4379,
      "eval_samples_per_second": 436.894,
      "eval_steps_per_second": 27.342,
      "step": 40
    },
    {
      "epoch": 0.05917159763313609,
      "grad_norm": 0.55078125,
      "learning_rate": 8e-05,
      "loss": 11.7623,
      "step": 50
    },
    {
      "epoch": 0.07100591715976332,
      "grad_norm": 0.259765625,
      "learning_rate": 9.6e-05,
      "loss": 11.758,
      "step": 60
    },
    {
      "epoch": 0.07100591715976332,
      "eval_loss": 11.75584888458252,
      "eval_runtime": 3.6091,
      "eval_samples_per_second": 416.17,
      "eval_steps_per_second": 26.045,
      "step": 60
    },
    {
      "epoch": 0.08284023668639054,
      "grad_norm": 0.2314453125,
      "learning_rate": 0.00011200000000000001,
      "loss": 11.7513,
      "step": 70
    },
    {
      "epoch": 0.09467455621301775,
      "grad_norm": 0.404296875,
      "learning_rate": 0.00012800000000000002,
      "loss": 11.7397,
      "step": 80
    },
    {
      "epoch": 0.09467455621301775,
      "eval_loss": 11.732006072998047,
      "eval_runtime": 3.4157,
      "eval_samples_per_second": 439.736,
      "eval_steps_per_second": 27.52,
      "step": 80
    },
    {
      "epoch": 0.10650887573964497,
      "grad_norm": 0.5,
      "learning_rate": 0.000144,
      "loss": 11.7175,
      "step": 90
    },
    {
      "epoch": 0.11834319526627218,
      "grad_norm": 0.9140625,
      "learning_rate": 0.00016,
      "loss": 11.6553,
      "step": 100
    },
    {
      "epoch": 0.11834319526627218,
      "eval_loss": 11.619158744812012,
      "eval_runtime": 3.4756,
      "eval_samples_per_second": 432.159,
      "eval_steps_per_second": 27.046,
      "step": 100
    },
    {
      "epoch": 0.1301775147928994,
      "grad_norm": 0.453125,
      "learning_rate": 0.00017600000000000002,
      "loss": 11.5808,
      "step": 110
    },
    {
      "epoch": 0.14201183431952663,
      "grad_norm": 0.36328125,
      "learning_rate": 0.000192,
      "loss": 11.4893,
      "step": 120
    },
    {
      "epoch": 0.14201183431952663,
      "eval_loss": 11.44636344909668,
      "eval_runtime": 3.4158,
      "eval_samples_per_second": 439.727,
      "eval_steps_per_second": 27.52,
      "step": 120
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 0.51171875,
      "learning_rate": 0.0001999978128380225,
      "loss": 11.404,
      "step": 130
    },
    {
      "epoch": 0.16568047337278108,
      "grad_norm": 0.41015625,
      "learning_rate": 0.0001999803161162393,
      "loss": 11.3215,
      "step": 140
    },
    {
      "epoch": 0.16568047337278108,
      "eval_loss": 11.274709701538086,
      "eval_runtime": 3.4634,
      "eval_samples_per_second": 433.675,
      "eval_steps_per_second": 27.141,
      "step": 140
    },
    {
      "epoch": 0.17751479289940827,
      "grad_norm": 0.6953125,
      "learning_rate": 0.00019994532573409262,
      "loss": 11.2396,
      "step": 150
    },
    {
      "epoch": 0.1893491124260355,
      "grad_norm": 0.4296875,
      "learning_rate": 0.00019989284781388617,
      "loss": 11.1631,
      "step": 160
    },
    {
      "epoch": 0.1893491124260355,
      "eval_loss": 11.122227668762207,
      "eval_runtime": 3.3962,
      "eval_samples_per_second": 442.264,
      "eval_steps_per_second": 27.678,
      "step": 160
    }
  ],
  "logging_steps": 10,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 64864208814080.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}