{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.06129704548240775,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001225940909648155,
      "grad_norm": 4.383765697479248,
      "learning_rate": 4e-05,
      "loss": 0.6008,
      "step": 10
    },
    {
      "epoch": 0.00245188181929631,
      "grad_norm": 3.269385576248169,
      "learning_rate": 8e-05,
      "loss": 0.3123,
      "step": 20
    },
    {
      "epoch": 0.003677822728944465,
      "grad_norm": 3.815711259841919,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.3293,
      "step": 30
    },
    {
      "epoch": 0.00490376363859262,
      "grad_norm": 2.635485887527466,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.2607,
      "step": 40
    },
    {
      "epoch": 0.0061297045482407745,
      "grad_norm": 2.7631585597991943,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.2159,
      "step": 50
    },
    {
      "epoch": 0.00735564545788893,
      "grad_norm": 3.8087055683135986,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.2098,
      "step": 60
    },
    {
      "epoch": 0.008581586367537085,
      "grad_norm": 1.4436062574386597,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.1725,
      "step": 70
    },
    {
      "epoch": 0.00980752727718524,
      "grad_norm": 1.69550621509552,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.1963,
      "step": 80
    },
    {
      "epoch": 0.011033468186833395,
      "grad_norm": 2.8857767581939697,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.1729,
      "step": 90
    },
    {
      "epoch": 0.012259409096481549,
      "grad_norm": 1.027714490890503,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.191,
      "step": 100
    },
    {
      "epoch": 0.013485350006129704,
      "grad_norm": 1.3657031059265137,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.1567,
      "step": 110
    },
    {
      "epoch": 0.01471129091577786,
      "grad_norm": 3.904578924179077,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.1534,
      "step": 120
    },
    {
      "epoch": 0.015937231825426015,
      "grad_norm": 2.95263934135437,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.1863,
      "step": 130
    },
    {
      "epoch": 0.01716317273507417,
      "grad_norm": 2.19970703125,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.1809,
      "step": 140
    },
    {
      "epoch": 0.018389113644722323,
      "grad_norm": 1.7940118312835693,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.1367,
      "step": 150
    },
    {
      "epoch": 0.01961505455437048,
      "grad_norm": 1.772947072982788,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.1194,
      "step": 160
    },
    {
      "epoch": 0.020840995464018634,
      "grad_norm": 1.1403783559799194,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.1509,
      "step": 170
    },
    {
      "epoch": 0.02206693637366679,
      "grad_norm": 2.2660560607910156,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.1438,
      "step": 180
    },
    {
      "epoch": 0.023292877283314944,
      "grad_norm": 1.812446117401123,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.1534,
      "step": 190
    },
    {
      "epoch": 0.024518818192963098,
      "grad_norm": 2.2660441398620605,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.1517,
      "step": 200
    },
    {
      "epoch": 0.025744759102611255,
      "grad_norm": 1.7808526754379272,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.1363,
      "step": 210
    },
    {
      "epoch": 0.02697070001225941,
      "grad_norm": 1.7594811916351318,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.1666,
      "step": 220
    },
    {
      "epoch": 0.028196640921907563,
      "grad_norm": 1.3563287258148193,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.1141,
      "step": 230
    },
    {
      "epoch": 0.02942258183155572,
      "grad_norm": 1.7707332372665405,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.1896,
      "step": 240
    },
    {
      "epoch": 0.030648522741203874,
      "grad_norm": 1.8754723072052002,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.0991,
      "step": 250
    },
    {
      "epoch": 0.03187446365085203,
      "grad_norm": 1.3069236278533936,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 0.1073,
      "step": 260
    },
    {
      "epoch": 0.03310040456050018,
      "grad_norm": 1.3609791994094849,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 0.1249,
      "step": 270
    },
    {
      "epoch": 0.03432634547014834,
      "grad_norm": 1.7567753791809082,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 0.0932,
      "step": 280
    },
    {
      "epoch": 0.035552286379796495,
      "grad_norm": 1.649938702583313,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.1208,
      "step": 290
    },
    {
      "epoch": 0.036778227289444645,
      "grad_norm": 1.7568031549453735,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.1066,
      "step": 300
    },
    {
      "epoch": 0.0380041681990928,
      "grad_norm": 2.218881130218506,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.1219,
      "step": 310
    },
    {
      "epoch": 0.03923010910874096,
      "grad_norm": 1.6613621711730957,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.1022,
      "step": 320
    },
    {
      "epoch": 0.04045605001838912,
      "grad_norm": 0.924254298210144,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 0.117,
      "step": 330
    },
    {
      "epoch": 0.04168199092803727,
      "grad_norm": 0.7207609415054321,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.0914,
      "step": 340
    },
    {
      "epoch": 0.042907931837685424,
      "grad_norm": 1.2665833234786987,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.1334,
      "step": 350
    },
    {
      "epoch": 0.04413387274733358,
      "grad_norm": 1.1735299825668335,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.083,
      "step": 360
    },
    {
      "epoch": 0.04535981365698173,
      "grad_norm": 1.6450732946395874,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.1008,
      "step": 370
    },
    {
      "epoch": 0.04658575456662989,
      "grad_norm": 1.1201252937316895,
      "learning_rate": 1.4938160786375572e-05,
      "loss": 0.1057,
      "step": 380
    },
    {
      "epoch": 0.047811695476278046,
      "grad_norm": 0.8063156604766846,
      "learning_rate": 1.2658926150792322e-05,
      "loss": 0.086,
      "step": 390
    },
    {
      "epoch": 0.049037636385926196,
      "grad_norm": 1.0388742685317993,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.1075,
      "step": 400
    },
    {
      "epoch": 0.05026357729557435,
      "grad_norm": 1.5300003290176392,
      "learning_rate": 8.599558442598998e-06,
      "loss": 0.0809,
      "step": 410
    },
    {
      "epoch": 0.05148951820522251,
      "grad_norm": 1.5584441423416138,
      "learning_rate": 6.837175952121306e-06,
      "loss": 0.1027,
      "step": 420
    },
    {
      "epoch": 0.05271545911487066,
      "grad_norm": 0.9144107103347778,
      "learning_rate": 5.263533508961827e-06,
      "loss": 0.0921,
      "step": 430
    },
    {
      "epoch": 0.05394140002451882,
      "grad_norm": 1.0252676010131836,
      "learning_rate": 3.885512251130763e-06,
      "loss": 0.103,
      "step": 440
    },
    {
      "epoch": 0.055167340934166975,
      "grad_norm": 1.020387053489685,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 0.0946,
      "step": 450
    },
    {
      "epoch": 0.056393281843815125,
      "grad_norm": 1.2870961427688599,
      "learning_rate": 1.7395544861325718e-06,
      "loss": 0.1043,
      "step": 460
    },
    {
      "epoch": 0.05761922275346328,
      "grad_norm": 1.3732898235321045,
      "learning_rate": 9.810017062595322e-07,
      "loss": 0.096,
      "step": 470
    },
    {
      "epoch": 0.05884516366311144,
      "grad_norm": 1.1968400478363037,
      "learning_rate": 4.367965336512403e-07,
      "loss": 0.1426,
      "step": 480
    },
    {
      "epoch": 0.06007110457275959,
      "grad_norm": 1.2050400972366333,
      "learning_rate": 1.0931863906127327e-07,
      "loss": 0.0755,
      "step": 490
    },
    {
      "epoch": 0.06129704548240775,
      "grad_norm": 0.7326511740684509,
      "learning_rate": 0.0,
      "loss": 0.07,
      "step": 500
    },
    {
      "epoch": 0.06129704548240775,
      "step": 500,
      "total_flos": 2454854258304000.0,
      "train_loss": 0.14927043783664704,
      "train_runtime": 139.6822,
      "train_samples_per_second": 14.318,
      "train_steps_per_second": 3.58
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2454854258304000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}