{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.967741935483871,
  "eval_steps": 500,
  "global_step": 82,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024193548387096774,
      "grad_norm": 33.12635803222656,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 2.4997,
      "step": 1
    },
    {
      "epoch": 0.04838709677419355,
      "grad_norm": 32.004058837890625,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 2.4277,
      "step": 2
    },
    {
      "epoch": 0.07258064516129033,
      "grad_norm": 34.234554290771484,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 2.6112,
      "step": 3
    },
    {
      "epoch": 0.0967741935483871,
      "grad_norm": 32.96908187866211,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 2.5017,
      "step": 4
    },
    {
      "epoch": 0.12096774193548387,
      "grad_norm": 35.06013870239258,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 2.6115,
      "step": 5
    },
    {
      "epoch": 0.14516129032258066,
      "grad_norm": 33.552955627441406,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 2.5234,
      "step": 6
    },
    {
      "epoch": 0.1693548387096774,
      "grad_norm": 32.13972091674805,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 2.4724,
      "step": 7
    },
    {
      "epoch": 0.1935483870967742,
      "grad_norm": 32.68510055541992,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 2.4925,
      "step": 8
    },
    {
      "epoch": 0.21774193548387097,
      "grad_norm": 32.32320785522461,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 2.4983,
      "step": 9
    },
    {
      "epoch": 0.24193548387096775,
      "grad_norm": 32.311553955078125,
      "learning_rate": 5.000000000000001e-07,
      "loss": 2.4833,
      "step": 10
    },
    {
      "epoch": 0.2661290322580645,
      "grad_norm": 31.869163513183594,
      "learning_rate": 5.5e-07,
      "loss": 2.4362,
      "step": 11
    },
    {
      "epoch": 0.2903225806451613,
      "grad_norm": 31.329313278198242,
      "learning_rate": 6.000000000000001e-07,
      "loss": 2.4228,
      "step": 12
    },
    {
      "epoch": 0.31451612903225806,
      "grad_norm": 29.42159652709961,
      "learning_rate": 6.5e-07,
      "loss": 2.2499,
      "step": 13
    },
    {
      "epoch": 0.3387096774193548,
      "grad_norm": 31.27863311767578,
      "learning_rate": 7.000000000000001e-07,
      "loss": 2.3354,
      "step": 14
    },
    {
      "epoch": 0.3629032258064516,
      "grad_norm": 31.095605850219727,
      "learning_rate": 7.5e-07,
      "loss": 2.2723,
      "step": 15
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": 30.90537452697754,
      "learning_rate": 8.000000000000001e-07,
      "loss": 2.2003,
      "step": 16
    },
    {
      "epoch": 0.4112903225806452,
      "grad_norm": 30.878215789794922,
      "learning_rate": 8.500000000000001e-07,
      "loss": 2.0797,
      "step": 17
    },
    {
      "epoch": 0.43548387096774194,
      "grad_norm": 32.37583541870117,
      "learning_rate": 9.000000000000001e-07,
      "loss": 1.9855,
      "step": 18
    },
    {
      "epoch": 0.4596774193548387,
      "grad_norm": 32.957889556884766,
      "learning_rate": 9.500000000000001e-07,
      "loss": 1.8497,
      "step": 19
    },
    {
      "epoch": 0.4838709677419355,
      "grad_norm": 33.7425537109375,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.7037,
      "step": 20
    },
    {
      "epoch": 0.5080645161290323,
      "grad_norm": 35.177791595458984,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 1.645,
      "step": 21
    },
    {
      "epoch": 0.532258064516129,
      "grad_norm": 34.37784957885742,
      "learning_rate": 1.1e-06,
      "loss": 1.4705,
      "step": 22
    },
    {
      "epoch": 0.5564516129032258,
      "grad_norm": 32.561283111572266,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 1.3819,
      "step": 23
    },
    {
      "epoch": 0.5806451612903226,
      "grad_norm": 28.166706085205078,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 1.1496,
      "step": 24
    },
    {
      "epoch": 0.6048387096774194,
      "grad_norm": 30.428386688232422,
      "learning_rate": 1.25e-06,
      "loss": 1.0998,
      "step": 25
    },
    {
      "epoch": 0.6290322580645161,
      "grad_norm": 34.153076171875,
      "learning_rate": 1.3e-06,
      "loss": 0.9278,
      "step": 26
    },
    {
      "epoch": 0.6532258064516129,
      "grad_norm": 39.16960906982422,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.7463,
      "step": 27
    },
    {
      "epoch": 0.6774193548387096,
      "grad_norm": 39.09505081176758,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.5676,
      "step": 28
    },
    {
      "epoch": 0.7016129032258065,
      "grad_norm": 38.89931869506836,
      "learning_rate": 1.45e-06,
      "loss": 0.4015,
      "step": 29
    },
    {
      "epoch": 0.7258064516129032,
      "grad_norm": 23.554725646972656,
      "learning_rate": 1.5e-06,
      "loss": 0.2364,
      "step": 30
    },
    {
      "epoch": 0.75,
      "grad_norm": 11.884359359741211,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 0.1429,
      "step": 31
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 5.657749176025391,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.136,
      "step": 32
    },
    {
      "epoch": 0.7983870967741935,
      "grad_norm": 3.42618465423584,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.0964,
      "step": 33
    },
    {
      "epoch": 0.8225806451612904,
      "grad_norm": 2.808098554611206,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 0.0826,
      "step": 34
    },
    {
      "epoch": 0.8467741935483871,
      "grad_norm": 2.475355625152588,
      "learning_rate": 1.75e-06,
      "loss": 0.0781,
      "step": 35
    },
    {
      "epoch": 0.8709677419354839,
      "grad_norm": 1.9219006299972534,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.0637,
      "step": 36
    },
    {
      "epoch": 0.8951612903225806,
      "grad_norm": 1.8285995721817017,
      "learning_rate": 1.85e-06,
      "loss": 0.0708,
      "step": 37
    },
    {
      "epoch": 0.9193548387096774,
      "grad_norm": 1.9102882146835327,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.0865,
      "step": 38
    },
    {
      "epoch": 0.9435483870967742,
      "grad_norm": 2.190868854522705,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.0732,
      "step": 39
    },
    {
      "epoch": 0.967741935483871,
      "grad_norm": 1.923084020614624,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0767,
      "step": 40
    },
    {
      "epoch": 0.9919354838709677,
      "grad_norm": 1.8931093215942383,
      "learning_rate": 2.05e-06,
      "loss": 0.0868,
      "step": 41
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.8931093215942383,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.138,
      "step": 42
    },
    {
      "epoch": 1.0241935483870968,
      "grad_norm": 6.719915866851807,
      "learning_rate": 2.15e-06,
      "loss": 0.0736,
      "step": 43
    },
    {
      "epoch": 1.0483870967741935,
      "grad_norm": 1.6687527894973755,
      "learning_rate": 2.2e-06,
      "loss": 0.0675,
      "step": 44
    },
    {
      "epoch": 1.0725806451612903,
      "grad_norm": 1.6767126321792603,
      "learning_rate": 2.25e-06,
      "loss": 0.0629,
      "step": 45
    },
    {
      "epoch": 1.096774193548387,
      "grad_norm": 1.3529062271118164,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.0614,
      "step": 46
    },
    {
      "epoch": 1.120967741935484,
      "grad_norm": 2.1146080493927,
      "learning_rate": 2.35e-06,
      "loss": 0.0602,
      "step": 47
    },
    {
      "epoch": 1.1451612903225807,
      "grad_norm": 1.1904520988464355,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.0639,
      "step": 48
    },
    {
      "epoch": 1.1693548387096775,
      "grad_norm": 1.1737815141677856,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.0478,
      "step": 49
    },
    {
      "epoch": 1.1935483870967742,
      "grad_norm": 1.3181250095367432,
      "learning_rate": 2.5e-06,
      "loss": 0.0564,
      "step": 50
    },
    {
      "epoch": 1.217741935483871,
      "grad_norm": 2.057123899459839,
      "learning_rate": 2.55e-06,
      "loss": 0.0432,
      "step": 51
    },
    {
      "epoch": 1.2419354838709677,
      "grad_norm": 1.1123464107513428,
      "learning_rate": 2.6e-06,
      "loss": 0.051,
      "step": 52
    },
    {
      "epoch": 1.2661290322580645,
      "grad_norm": 1.5854344367980957,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.0494,
      "step": 53
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 1.2793511152267456,
      "learning_rate": 2.7000000000000004e-06,
      "loss": 0.0464,
      "step": 54
    },
    {
      "epoch": 1.314516129032258,
      "grad_norm": 1.3535298109054565,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.0477,
      "step": 55
    },
    {
      "epoch": 1.3387096774193548,
      "grad_norm": 1.186978816986084,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.0375,
      "step": 56
    },
    {
      "epoch": 1.3629032258064515,
      "grad_norm": 1.4667912721633911,
      "learning_rate": 2.85e-06,
      "loss": 0.0424,
      "step": 57
    },
    {
      "epoch": 1.3870967741935485,
      "grad_norm": 1.0930287837982178,
      "learning_rate": 2.9e-06,
      "loss": 0.0417,
      "step": 58
    },
    {
      "epoch": 1.4112903225806452,
      "grad_norm": 1.5085082054138184,
      "learning_rate": 2.95e-06,
      "loss": 0.0479,
      "step": 59
    },
    {
      "epoch": 1.435483870967742,
      "grad_norm": 1.4030777215957642,
      "learning_rate": 3e-06,
      "loss": 0.0413,
      "step": 60
    },
    {
      "epoch": 1.4596774193548387,
      "grad_norm": 1.6423301696777344,
      "learning_rate": 3.05e-06,
      "loss": 0.0349,
      "step": 61
    },
    {
      "epoch": 1.4838709677419355,
      "grad_norm": 1.3811825513839722,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.0482,
      "step": 62
    },
    {
      "epoch": 1.5080645161290323,
      "grad_norm": 1.2499895095825195,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.0308,
      "step": 63
    },
    {
      "epoch": 1.532258064516129,
      "grad_norm": 1.1597909927368164,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.0293,
      "step": 64
    },
    {
      "epoch": 1.5564516129032258,
      "grad_norm": 1.1042351722717285,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.035,
      "step": 65
    },
    {
      "epoch": 1.5806451612903225,
      "grad_norm": 1.13418710231781,
      "learning_rate": 3.3000000000000006e-06,
      "loss": 0.0254,
      "step": 66
    },
    {
      "epoch": 1.6048387096774195,
      "grad_norm": 0.934019148349762,
      "learning_rate": 3.3500000000000005e-06,
      "loss": 0.0283,
      "step": 67
    },
    {
      "epoch": 1.629032258064516,
      "grad_norm": 1.468568205833435,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.0325,
      "step": 68
    },
    {
      "epoch": 1.653225806451613,
      "grad_norm": 1.3268495798110962,
      "learning_rate": 3.45e-06,
      "loss": 0.027,
      "step": 69
    },
    {
      "epoch": 1.6774193548387095,
      "grad_norm": 0.8941407203674316,
      "learning_rate": 3.5e-06,
      "loss": 0.0244,
      "step": 70
    },
    {
      "epoch": 1.7016129032258065,
      "grad_norm": 1.0857181549072266,
      "learning_rate": 3.5500000000000003e-06,
      "loss": 0.0225,
      "step": 71
    },
    {
      "epoch": 1.7258064516129032,
      "grad_norm": 1.1653308868408203,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.029,
      "step": 72
    },
    {
      "epoch": 1.75,
      "grad_norm": 1.0501737594604492,
      "learning_rate": 3.65e-06,
      "loss": 0.0199,
      "step": 73
    },
    {
      "epoch": 1.7741935483870968,
      "grad_norm": 0.8470718264579773,
      "learning_rate": 3.7e-06,
      "loss": 0.0219,
      "step": 74
    },
    {
      "epoch": 1.7983870967741935,
      "grad_norm": 0.8664724826812744,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.0161,
      "step": 75
    },
    {
      "epoch": 1.8225806451612905,
      "grad_norm": 1.5050084590911865,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.0246,
      "step": 76
    },
    {
      "epoch": 1.846774193548387,
      "grad_norm": 1.6326985359191895,
      "learning_rate": 3.85e-06,
      "loss": 0.0253,
      "step": 77
    },
    {
      "epoch": 1.870967741935484,
      "grad_norm": 1.5506129264831543,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.0133,
      "step": 78
    },
    {
      "epoch": 1.8951612903225805,
      "grad_norm": 0.7956012487411499,
      "learning_rate": 3.95e-06,
      "loss": 0.0093,
      "step": 79
    },
    {
      "epoch": 1.9193548387096775,
      "grad_norm": 1.898987054824829,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0142,
      "step": 80
    },
    {
      "epoch": 1.9435483870967742,
      "grad_norm": 0.832822859287262,
      "learning_rate": 4.05e-06,
      "loss": 0.0177,
      "step": 81
    },
    {
      "epoch": 1.967741935483871,
      "grad_norm": 0.9572640657424927,
      "learning_rate": 4.1e-06,
      "loss": 0.0077,
      "step": 82
    }
  ],
  "logging_steps": 1,
  "max_steps": 246,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 41,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.0481947549564928e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}