{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9846153846153847,
  "eval_steps": 500,
  "global_step": 56,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0001,
      "loss": 0.1065,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002,
      "loss": 0.1299,
      "step": 2
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019983081582712685,
      "loss": 0.145,
      "step": 3
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019932383577419432,
      "loss": 0.1167,
      "step": 4
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019848077530122083,
      "loss": 0.1202,
      "step": 5
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019730448705798239,
      "loss": 0.1051,
      "step": 6
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001957989512315489,
      "loss": 0.1127,
      "step": 7
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.0987,
      "step": 8
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019182161068802741,
      "loss": 0.0992,
      "step": 9
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018936326403234125,
      "loss": 0.129,
      "step": 10
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.0898,
      "step": 11
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018354878114129367,
      "loss": 0.1101,
      "step": 12
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001802123192755044,
      "loss": 0.0951,
      "step": 13
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.0916,
      "step": 14
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00017273736415730488,
      "loss": 0.0969,
      "step": 15
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001686241637868734,
      "loss": 0.082,
      "step": 16
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00016427876096865394,
      "loss": 0.0842,
      "step": 17
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00015971585917027862,
      "loss": 0.0923,
      "step": 18
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001549508978070806,
      "loss": 0.0867,
      "step": 19
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.1179,
      "step": 20
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00014487991802004623,
      "loss": 0.1062,
      "step": 21
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001396079766039157,
      "loss": 0.1287,
      "step": 22
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00013420201433256689,
      "loss": 0.0968,
      "step": 23
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00012868032327110904,
      "loss": 0.0906,
      "step": 24
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00012306158707424403,
      "loss": 0.0819,
      "step": 25
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.0878,
      "step": 26
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00011160929141252303,
      "loss": 0.0793,
      "step": 27
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010581448289104758,
      "loss": 0.099,
      "step": 28
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0001,
      "loss": 0.1027,
      "step": 29
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.418551710895243e-05,
      "loss": 0.093,
      "step": 30
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.839070858747697e-05,
      "loss": 0.0949,
      "step": 31
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.263518223330697e-05,
      "loss": 0.0938,
      "step": 32
    },
    {
      "epoch": 0.58,
      "learning_rate": 7.693841292575598e-05,
      "loss": 0.0996,
      "step": 33
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.131967672889101e-05,
      "loss": 0.0976,
      "step": 34
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.579798566743314e-05,
      "loss": 0.1112,
      "step": 35
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.039202339608432e-05,
      "loss": 0.0932,
      "step": 36
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.5120081979953785e-05,
      "loss": 0.1011,
      "step": 37
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0911,
      "step": 38
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.50491021929194e-05,
      "loss": 0.0997,
      "step": 39
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.028414082972141e-05,
      "loss": 0.0987,
      "step": 40
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.0935,
      "step": 41
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.137583621312665e-05,
      "loss": 0.1054,
      "step": 42
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.7262635842695127e-05,
      "loss": 0.0869,
      "step": 43
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.095,
      "step": 44
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.9787680724495617e-05,
      "loss": 0.1,
      "step": 45
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.6451218858706374e-05,
      "loss": 0.0985,
      "step": 46
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.088,
      "step": 47
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.0636735967658784e-05,
      "loss": 0.0777,
      "step": 48
    },
    {
      "epoch": 0.86,
      "learning_rate": 8.178389311972612e-06,
      "loss": 0.0844,
      "step": 49
    },
    {
      "epoch": 0.88,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.0971,
      "step": 50
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.20104876845111e-06,
      "loss": 0.0946,
      "step": 51
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.6955129420176196e-06,
      "loss": 0.0853,
      "step": 52
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 0.0889,
      "step": 53
    },
    {
      "epoch": 0.95,
      "learning_rate": 6.761642258056978e-07,
      "loss": 0.0927,
      "step": 54
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.6918417287318245e-07,
      "loss": 0.0971,
      "step": 55
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0,
      "loss": 0.1002,
      "step": 56
    },
    {
      "epoch": 0.98,
      "step": 56,
      "total_flos": 1401921699840.0,
      "train_loss": 0.0989542007446289,
      "train_runtime": 4591.4133,
      "train_samples_per_second": 0.791,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 56,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 800,
  "total_flos": 1401921699840.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}