{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 63,
  "global_step": 63,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 0.4443244934082031,
      "learning_rate": 0.0,
      "loss": 1.7686,
      "step": 1
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 0.40687671303749084,
      "learning_rate": 5e-05,
      "loss": 1.6323,
      "step": 2
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.30237045884132385,
      "learning_rate": 0.0001,
      "loss": 1.5591,
      "step": 3
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 0.15168103575706482,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.2163,
      "step": 4
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 0.12413862347602844,
      "learning_rate": 0.0002,
      "loss": 1.0686,
      "step": 5
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.10256382077932358,
      "learning_rate": 0.0001998724426109086,
      "loss": 0.9393,
      "step": 6
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.12325788289308548,
      "learning_rate": 0.00019949013201891235,
      "loss": 1.0493,
      "step": 7
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.10104131698608398,
      "learning_rate": 0.00019885415192492104,
      "loss": 0.8018,
      "step": 8
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.08828374743461609,
      "learning_rate": 0.000197966305083608,
      "loss": 0.8639,
      "step": 9
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 0.07189645618200302,
      "learning_rate": 0.00019682910819330646,
      "loss": 0.9037,
      "step": 10
    },
    {
      "epoch": 0.5238095238095238,
      "grad_norm": 0.07045362889766693,
      "learning_rate": 0.0001954457847621543,
      "loss": 0.8889,
      "step": 11
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.05801837518811226,
      "learning_rate": 0.00019382025597071049,
      "loss": 0.7699,
      "step": 12
    },
    {
      "epoch": 0.6190476190476191,
      "grad_norm": 0.06970051676034927,
      "learning_rate": 0.0001919571295569422,
      "loss": 0.9115,
      "step": 13
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.0628025084733963,
      "learning_rate": 0.00018986168675509145,
      "loss": 0.7743,
      "step": 14
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.06034975126385689,
      "learning_rate": 0.00018753986732544255,
      "loss": 0.8773,
      "step": 15
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.06134900078177452,
      "learning_rate": 0.00018499825271742635,
      "loss": 0.8964,
      "step": 16
    },
    {
      "epoch": 0.8095238095238095,
      "grad_norm": 0.0681927427649498,
      "learning_rate": 0.00018224404741378674,
      "loss": 0.7794,
      "step": 17
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.04857668653130531,
      "learning_rate": 0.00017928505850869157,
      "loss": 0.8812,
      "step": 18
    },
    {
      "epoch": 0.9047619047619048,
      "grad_norm": 0.09804921597242355,
      "learning_rate": 0.00017612967357767605,
      "loss": 0.6323,
      "step": 19
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 0.100680872797966,
      "learning_rate": 0.00017278683690214865,
      "loss": 0.4672,
      "step": 20
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.06439036875963211,
      "learning_rate": 0.0001692660241158535,
      "loss": 0.6914,
      "step": 21
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 0.09892687946557999,
      "learning_rate": 0.0001655772153451573,
      "loss": 0.7493,
      "step": 22
    },
    {
      "epoch": 1.0952380952380953,
      "grad_norm": 0.06118744984269142,
      "learning_rate": 0.00016173086691929664,
      "loss": 0.6816,
      "step": 23
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.05294615402817726,
      "learning_rate": 0.00015773788173077682,
      "loss": 0.778,
      "step": 24
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 0.05229621380567551,
      "learning_rate": 0.00015360957832993852,
      "loss": 0.7626,
      "step": 25
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 0.05765673518180847,
      "learning_rate": 0.000149357658841297,
      "loss": 0.6942,
      "step": 26
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.05352270230650902,
      "learning_rate": 0.0001449941757925989,
      "loss": 0.6517,
      "step": 27
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.05008169636130333,
      "learning_rate": 0.00014053149795062274,
      "loss": 0.8101,
      "step": 28
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 0.06564240157604218,
      "learning_rate": 0.0001359822752605659,
      "loss": 0.5362,
      "step": 29
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.05252636596560478,
      "learning_rate": 0.00013135940298840035,
      "loss": 0.6523,
      "step": 30
    },
    {
      "epoch": 1.4761904761904763,
      "grad_norm": 0.057543374598026276,
      "learning_rate": 0.0001266759851678403,
      "loss": 0.6996,
      "step": 31
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 0.05740825831890106,
      "learning_rate": 0.00012194529745553497,
      "loss": 0.6906,
      "step": 32
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.06167572736740112,
      "learning_rate": 0.00011718074949977748,
      "loss": 0.5829,
      "step": 33
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.0648028552532196,
      "learning_rate": 0.00011239584692939975,
      "loss": 0.7389,
      "step": 34
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.06073867157101631,
      "learning_rate": 0.00010760415307060027,
      "loss": 0.6082,
      "step": 35
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.06266023218631744,
      "learning_rate": 0.00010281925050022251,
      "loss": 0.7021,
      "step": 36
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 0.059252478182315826,
      "learning_rate": 9.805470254446503e-05,
      "loss": 0.7452,
      "step": 37
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.07034861296415329,
      "learning_rate": 9.332401483215973e-05,
      "loss": 0.5951,
      "step": 38
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.06802237033843994,
      "learning_rate": 8.864059701159965e-05,
      "loss": 0.7478,
      "step": 39
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.06658069789409637,
      "learning_rate": 8.401772473943415e-05,
      "loss": 0.4647,
      "step": 40
    },
    {
      "epoch": 1.9523809523809523,
      "grad_norm": 0.06517181545495987,
      "learning_rate": 7.946850204937728e-05,
      "loss": 0.3225,
      "step": 41
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.07028491795063019,
      "learning_rate": 7.500582420740115e-05,
      "loss": 0.5495,
      "step": 42
    },
    {
      "epoch": 2.0476190476190474,
      "grad_norm": 0.0754581019282341,
      "learning_rate": 7.064234115870303e-05,
      "loss": 0.569,
      "step": 43
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 0.07950273901224136,
      "learning_rate": 6.63904216700615e-05,
      "loss": 0.5266,
      "step": 44
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.06878385692834854,
      "learning_rate": 6.226211826922319e-05,
      "loss": 0.636,
      "step": 45
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.06523990631103516,
      "learning_rate": 5.8269133080703386e-05,
      "loss": 0.633,
      "step": 46
    },
    {
      "epoch": 2.238095238095238,
      "grad_norm": 0.07029258459806442,
      "learning_rate": 5.442278465484274e-05,
      "loss": 0.5476,
      "step": 47
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.06552315503358841,
      "learning_rate": 5.0733975884146533e-05,
      "loss": 0.5169,
      "step": 48
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.06902378797531128,
      "learning_rate": 4.721316309785139e-05,
      "loss": 0.6818,
      "step": 49
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.07481586933135986,
      "learning_rate": 4.387032642232397e-05,
      "loss": 0.3834,
      "step": 50
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.0717826560139656,
      "learning_rate": 4.071494149130843e-05,
      "loss": 0.5264,
      "step": 51
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.07843846827745438,
      "learning_rate": 3.775595258621329e-05,
      "loss": 0.5632,
      "step": 52
    },
    {
      "epoch": 2.5238095238095237,
      "grad_norm": 0.07552829384803772,
      "learning_rate": 3.500174728257365e-05,
      "loss": 0.5551,
      "step": 53
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.07394373416900635,
      "learning_rate": 3.246013267455745e-05,
      "loss": 0.458,
      "step": 54
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.07493584603071213,
      "learning_rate": 3.0138313244908573e-05,
      "loss": 0.6155,
      "step": 55
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.07051674276590347,
      "learning_rate": 2.8042870443057812e-05,
      "loss": 0.4904,
      "step": 56
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 0.08204561471939087,
      "learning_rate": 2.6179744029289542e-05,
      "loss": 0.5739,
      "step": 57
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.07661724835634232,
      "learning_rate": 2.4554215237845687e-05,
      "loss": 0.6318,
      "step": 58
    },
    {
      "epoch": 2.8095238095238093,
      "grad_norm": 0.08298637717962265,
      "learning_rate": 2.3170891806693566e-05,
      "loss": 0.4678,
      "step": 59
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.07713375240564346,
      "learning_rate": 2.2033694916392002e-05,
      "loss": 0.6443,
      "step": 60
    },
    {
      "epoch": 2.9047619047619047,
      "grad_norm": 0.08164560794830322,
      "learning_rate": 2.1145848075078993e-05,
      "loss": 0.3579,
      "step": 61
    },
    {
      "epoch": 2.9523809523809526,
      "grad_norm": 0.07253335416316986,
      "learning_rate": 2.0509867981087663e-05,
      "loss": 0.2406,
      "step": 62
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0779012143611908,
      "learning_rate": 2.0127557389091428e-05,
      "loss": 0.4525,
      "step": 63
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.4695841372013092,
      "eval_runtime": 15.6701,
      "eval_samples_per_second": 1.468,
      "eval_steps_per_second": 0.191,
      "step": 63
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 63,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.4360679349905e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|