{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.192307692307692,
  "eval_steps": 500,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 4.947939872741699,
      "learning_rate": 2e-05,
      "loss": 2.7425,
      "step": 1
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 5.395760536193848,
      "learning_rate": 4e-05,
      "loss": 3.0917,
      "step": 2
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 4.972382068634033,
      "learning_rate": 6e-05,
      "loss": 2.5889,
      "step": 3
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 4.1436309814453125,
      "learning_rate": 8e-05,
      "loss": 2.6749,
      "step": 4
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 3.4529764652252197,
      "learning_rate": 0.0001,
      "loss": 3.0256,
      "step": 5
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 3.0711050033569336,
      "learning_rate": 0.00012,
      "loss": 2.5193,
      "step": 6
    },
    {
      "epoch": 1.0769230769230769,
      "grad_norm": 6.622050762176514,
      "learning_rate": 0.00014,
      "loss": 3.7169,
      "step": 7
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": 1.689590334892273,
      "learning_rate": 0.00016,
      "loss": 1.8293,
      "step": 8
    },
    {
      "epoch": 1.3846153846153846,
      "grad_norm": 1.1921875476837158,
      "learning_rate": 0.00018,
      "loss": 2.1506,
      "step": 9
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 1.3741947412490845,
      "learning_rate": 0.0002,
      "loss": 1.7855,
      "step": 10
    },
    {
      "epoch": 1.6923076923076923,
      "grad_norm": 1.1680189371109009,
      "learning_rate": 0.00019990989662046818,
      "loss": 1.8602,
      "step": 11
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": 2.420065402984619,
      "learning_rate": 0.00019963974885425266,
      "loss": 1.6481,
      "step": 12
    },
    {
      "epoch": 2.0,
      "grad_norm": 5.954460144042969,
      "learning_rate": 0.00019919004352588767,
      "loss": 2.3852,
      "step": 13
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": 1.9217047691345215,
      "learning_rate": 0.00019856159103477086,
      "loss": 1.3094,
      "step": 14
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 2.4744794368743896,
      "learning_rate": 0.00019775552389476864,
      "loss": 1.4755,
      "step": 15
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": 1.807592511177063,
      "learning_rate": 0.0001967732946933499,
      "loss": 1.2384,
      "step": 16
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": 1.5733166933059692,
      "learning_rate": 0.00019561667347392508,
      "loss": 1.36,
      "step": 17
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": 0.5816245079040527,
      "learning_rate": 0.00019428774454610843,
      "loss": 1.1345,
      "step": 18
    },
    {
      "epoch": 2.9230769230769234,
      "grad_norm": 0.3857601583003998,
      "learning_rate": 0.00019278890272965096,
      "loss": 1.0873,
      "step": 19
    },
    {
      "epoch": 3.1153846153846154,
      "grad_norm": 1.011568307876587,
      "learning_rate": 0.0001911228490388136,
      "loss": 1.8665,
      "step": 20
    },
    {
      "epoch": 3.269230769230769,
      "grad_norm": 2.7151846885681152,
      "learning_rate": 0.00018929258581495685,
      "loss": 0.8705,
      "step": 21
    },
    {
      "epoch": 3.423076923076923,
      "grad_norm": 0.3418419361114502,
      "learning_rate": 0.00018730141131611882,
      "loss": 0.6694,
      "step": 22
    },
    {
      "epoch": 3.5769230769230766,
      "grad_norm": 0.581399142742157,
      "learning_rate": 0.00018515291377333112,
      "loss": 1.1754,
      "step": 23
    },
    {
      "epoch": 3.730769230769231,
      "grad_norm": 0.3654610514640808,
      "learning_rate": 0.00018285096492438424,
      "loss": 0.6974,
      "step": 24
    },
    {
      "epoch": 3.8846153846153846,
      "grad_norm": 0.3354414701461792,
      "learning_rate": 0.00018039971303669407,
      "loss": 0.7878,
      "step": 25
    },
    {
      "epoch": 4.076923076923077,
      "grad_norm": 1.0409477949142456,
      "learning_rate": 0.00017780357543184397,
      "loss": 1.7312,
      "step": 26
    },
    {
      "epoch": 4.230769230769231,
      "grad_norm": 0.2807375192642212,
      "learning_rate": 0.00017506723052527242,
      "loss": 0.6799,
      "step": 27
    },
    {
      "epoch": 4.384615384615385,
      "grad_norm": 0.3576838970184326,
      "learning_rate": 0.00017219560939545246,
      "loss": 0.8706,
      "step": 28
    },
    {
      "epoch": 4.538461538461538,
      "grad_norm": 0.28981831669807434,
      "learning_rate": 0.00016919388689775464,
      "loss": 0.7189,
      "step": 29
    },
    {
      "epoch": 4.6923076923076925,
      "grad_norm": 0.258354514837265,
      "learning_rate": 0.00016606747233900815,
      "loss": 0.6547,
      "step": 30
    },
    {
      "epoch": 4.846153846153846,
      "grad_norm": 0.5264655351638794,
      "learning_rate": 0.00016282199972956425,
      "loss": 0.7603,
      "step": 31
    },
    {
      "epoch": 5.038461538461538,
      "grad_norm": 0.6122850179672241,
      "learning_rate": 0.00015946331763042867,
      "loss": 1.2304,
      "step": 32
    },
    {
      "epoch": 5.1923076923076925,
      "grad_norm": 0.32399341464042664,
      "learning_rate": 0.00015599747861375955,
      "loss": 0.5769,
      "step": 33
    },
    {
      "epoch": 5.346153846153846,
      "grad_norm": 0.36697638034820557,
      "learning_rate": 0.00015243072835572318,
      "loss": 0.5996,
      "step": 34
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.2546577751636505,
      "learning_rate": 0.00014876949438136347,
      "loss": 0.6029,
      "step": 35
    },
    {
      "epoch": 5.653846153846154,
      "grad_norm": 0.275869756937027,
      "learning_rate": 0.00014502037448176734,
      "loss": 0.7286,
      "step": 36
    },
    {
      "epoch": 5.8076923076923075,
      "grad_norm": 0.2754996418952942,
      "learning_rate": 0.0001411901248243993,
      "loss": 0.5506,
      "step": 37
    },
    {
      "epoch": 5.961538461538462,
      "grad_norm": 0.4771808683872223,
      "learning_rate": 0.00013728564777803088,
      "loss": 0.868,
      "step": 38
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 0.29702961444854736,
      "learning_rate": 0.00013331397947420576,
      "loss": 0.5534,
      "step": 39
    },
    {
      "epoch": 6.3076923076923075,
      "grad_norm": 0.24644634127616882,
      "learning_rate": 0.00012928227712765504,
      "loss": 0.5194,
      "step": 40
    },
    {
      "epoch": 6.461538461538462,
      "grad_norm": 0.2727232277393341,
      "learning_rate": 0.00012519780613851254,
      "loss": 0.373,
      "step": 41
    },
    {
      "epoch": 6.615384615384615,
      "grad_norm": 0.22836743295192719,
      "learning_rate": 0.00012106792699957263,
      "loss": 0.4847,
      "step": 42
    },
    {
      "epoch": 6.769230769230769,
      "grad_norm": 0.2520367205142975,
      "learning_rate": 0.00011690008203218493,
      "loss": 0.4785,
      "step": 43
    },
    {
      "epoch": 6.923076923076923,
      "grad_norm": 0.2718031406402588,
      "learning_rate": 0.00011270178197468789,
      "loss": 0.5619,
      "step": 44
    },
    {
      "epoch": 7.076923076923077,
      "grad_norm": 0.3759967088699341,
      "learning_rate": 0.00010848059244755093,
      "loss": 0.6291,
      "step": 45
    },
    {
      "epoch": 7.230769230769231,
      "grad_norm": 0.20126377046108246,
      "learning_rate": 0.00010424412031961484,
      "loss": 0.3597,
      "step": 46
    },
    {
      "epoch": 7.384615384615385,
      "grad_norm": 0.2589998245239258,
      "learning_rate": 0.0001,
      "loss": 0.467,
      "step": 47
    },
    {
      "epoch": 7.538461538461538,
      "grad_norm": 0.2349349856376648,
      "learning_rate": 9.57558796803852e-05,
      "loss": 0.4365,
      "step": 48
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 0.22222279012203217,
      "learning_rate": 9.151940755244912e-05,
      "loss": 0.3549,
      "step": 49
    },
    {
      "epoch": 7.846153846153846,
      "grad_norm": 0.222075954079628,
      "learning_rate": 8.729821802531212e-05,
      "loss": 0.3699,
      "step": 50
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.3602772653102875,
      "learning_rate": 8.309991796781511e-05,
      "loss": 0.5873,
      "step": 51
    },
    {
      "epoch": 8.153846153846153,
      "grad_norm": 0.22580599784851074,
      "learning_rate": 7.89320730004274e-05,
      "loss": 0.3826,
      "step": 52
    },
    {
      "epoch": 8.307692307692308,
      "grad_norm": 0.25292614102363586,
      "learning_rate": 7.48021938614875e-05,
      "loss": 0.3367,
      "step": 53
    },
    {
      "epoch": 8.461538461538462,
      "grad_norm": 0.24913984537124634,
      "learning_rate": 7.071772287234497e-05,
      "loss": 0.3329,
      "step": 54
    },
    {
      "epoch": 8.615384615384615,
      "grad_norm": 0.217300683259964,
      "learning_rate": 6.668602052579424e-05,
      "loss": 0.3753,
      "step": 55
    },
    {
      "epoch": 8.76923076923077,
      "grad_norm": 0.20688268542289734,
      "learning_rate": 6.271435222196916e-05,
      "loss": 0.3123,
      "step": 56
    },
    {
      "epoch": 8.923076923076923,
      "grad_norm": 0.2173534780740738,
      "learning_rate": 5.880987517560075e-05,
      "loss": 0.2578,
      "step": 57
    },
    {
      "epoch": 9.076923076923077,
      "grad_norm": 0.5606672167778015,
      "learning_rate": 5.497962551823266e-05,
      "loss": 0.5143,
      "step": 58
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 0.1941177397966385,
      "learning_rate": 5.123050561863657e-05,
      "loss": 0.2446,
      "step": 59
    },
    {
      "epoch": 9.384615384615385,
      "grad_norm": 0.21033766865730286,
      "learning_rate": 4.756927164427685e-05,
      "loss": 0.313,
      "step": 60
    },
    {
      "epoch": 9.538461538461538,
      "grad_norm": 0.24171344935894012,
      "learning_rate": 4.4002521386240466e-05,
      "loss": 0.3002,
      "step": 61
    },
    {
      "epoch": 9.692307692307692,
      "grad_norm": 0.19194771349430084,
      "learning_rate": 4.053668236957134e-05,
      "loss": 0.2588,
      "step": 62
    },
    {
      "epoch": 9.846153846153847,
      "grad_norm": 0.22882544994354248,
      "learning_rate": 3.717800027043576e-05,
      "loss": 0.2999,
      "step": 63
    },
    {
      "epoch": 10.038461538461538,
      "grad_norm": 0.5228483080863953,
      "learning_rate": 3.393252766099187e-05,
      "loss": 0.5175,
      "step": 64
    },
    {
      "epoch": 10.192307692307692,
      "grad_norm": 0.188337504863739,
      "learning_rate": 3.080611310224539e-05,
      "loss": 0.2212,
      "step": 65
    },
    {
      "epoch": 10.346153846153847,
      "grad_norm": 0.22668048739433289,
      "learning_rate": 2.7804390604547557e-05,
      "loss": 0.2561,
      "step": 66
    },
    {
      "epoch": 10.5,
      "grad_norm": 0.2146831899881363,
      "learning_rate": 2.493276947472756e-05,
      "loss": 0.259,
      "step": 67
    },
    {
      "epoch": 10.653846153846153,
      "grad_norm": 0.20940867066383362,
      "learning_rate": 2.2196424568156073e-05,
      "loss": 0.2535,
      "step": 68
    },
    {
      "epoch": 10.807692307692308,
      "grad_norm": 0.19297531247138977,
      "learning_rate": 1.9600286963305957e-05,
      "loss": 0.2409,
      "step": 69
    },
    {
      "epoch": 10.961538461538462,
      "grad_norm": 0.4436163604259491,
      "learning_rate": 1.7149035075615794e-05,
      "loss": 0.4615,
      "step": 70
    },
    {
      "epoch": 11.115384615384615,
      "grad_norm": 0.2383939027786255,
      "learning_rate": 1.4847086226668872e-05,
      "loss": 0.2431,
      "step": 71
    },
    {
      "epoch": 11.26923076923077,
      "grad_norm": 0.2192695140838623,
      "learning_rate": 1.2698588683881186e-05,
      "loss": 0.2613,
      "step": 72
    },
    {
      "epoch": 11.423076923076923,
      "grad_norm": 0.19706690311431885,
      "learning_rate": 1.0707414185043163e-05,
      "loss": 0.2265,
      "step": 73
    },
    {
      "epoch": 11.576923076923077,
      "grad_norm": 0.2416417896747589,
      "learning_rate": 8.87715096118642e-06,
      "loss": 0.2464,
      "step": 74
    },
    {
      "epoch": 11.73076923076923,
      "grad_norm": 0.18953856825828552,
      "learning_rate": 7.211097270349066e-06,
      "loss": 0.1902,
      "step": 75
    },
    {
      "epoch": 11.884615384615385,
      "grad_norm": 0.24142511188983917,
      "learning_rate": 5.71225545389158e-06,
      "loss": 0.2684,
      "step": 76
    },
    {
      "epoch": 12.038461538461538,
      "grad_norm": 0.38491952419281006,
      "learning_rate": 4.383326526074916e-06,
      "loss": 0.417,
      "step": 77
    },
    {
      "epoch": 12.192307692307692,
      "grad_norm": 0.22990219295024872,
      "learning_rate": 3.226705306650113e-06,
      "loss": 0.273,
      "step": 78
    }
  ],
  "logging_steps": 1,
  "max_steps": 84,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 6,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 160914305187840.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|