{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5,
  "eval_steps": 500,
  "global_step": 66,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007575757575757576,
      "grad_norm": 0.09537188708782196,
      "learning_rate": 5e-05,
      "loss": 2.6905,
      "step": 1
    },
    {
      "epoch": 0.015151515151515152,
      "grad_norm": 0.11260437220335007,
      "learning_rate": 4.9242424242424245e-05,
      "loss": 2.7568,
      "step": 2
    },
    {
      "epoch": 0.022727272727272728,
      "grad_norm": 0.10646557807922363,
      "learning_rate": 4.848484848484849e-05,
      "loss": 2.6741,
      "step": 3
    },
    {
      "epoch": 0.030303030303030304,
      "grad_norm": 0.13714808225631714,
      "learning_rate": 4.772727272727273e-05,
      "loss": 2.7554,
      "step": 4
    },
    {
      "epoch": 0.03787878787878788,
      "grad_norm": 0.10070829093456268,
      "learning_rate": 4.696969696969697e-05,
      "loss": 2.5577,
      "step": 5
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 0.1274138242006302,
      "learning_rate": 4.621212121212121e-05,
      "loss": 2.511,
      "step": 6
    },
    {
      "epoch": 0.05303030303030303,
      "grad_norm": 0.12040433287620544,
      "learning_rate": 4.545454545454546e-05,
      "loss": 2.6901,
      "step": 7
    },
    {
      "epoch": 0.06060606060606061,
      "grad_norm": 0.12598344683647156,
      "learning_rate": 4.46969696969697e-05,
      "loss": 2.7296,
      "step": 8
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 0.11921223253011703,
      "learning_rate": 4.3939393939393944e-05,
      "loss": 2.4968,
      "step": 9
    },
    {
      "epoch": 0.07575757575757576,
      "grad_norm": 0.15551121532917023,
      "learning_rate": 4.318181818181819e-05,
      "loss": 2.8515,
      "step": 10
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 0.14377211034297943,
      "learning_rate": 4.242424242424243e-05,
      "loss": 2.8905,
      "step": 11
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 0.10209552943706512,
      "learning_rate": 4.166666666666667e-05,
      "loss": 2.6593,
      "step": 12
    },
    {
      "epoch": 0.09848484848484848,
      "grad_norm": 0.13147519528865814,
      "learning_rate": 4.0909090909090915e-05,
      "loss": 2.6413,
      "step": 13
    },
    {
      "epoch": 0.10606060606060606,
      "grad_norm": 0.12913918495178223,
      "learning_rate": 4.015151515151515e-05,
      "loss": 2.7072,
      "step": 14
    },
    {
      "epoch": 0.11363636363636363,
      "grad_norm": 0.1154194250702858,
      "learning_rate": 3.939393939393939e-05,
      "loss": 2.7163,
      "step": 15
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 0.12849478423595428,
      "learning_rate": 3.8636363636363636e-05,
      "loss": 2.7094,
      "step": 16
    },
    {
      "epoch": 0.12878787878787878,
      "grad_norm": 0.13727141916751862,
      "learning_rate": 3.787878787878788e-05,
      "loss": 2.6256,
      "step": 17
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 0.1416122019290924,
      "learning_rate": 3.712121212121212e-05,
      "loss": 2.5419,
      "step": 18
    },
    {
      "epoch": 0.14393939393939395,
      "grad_norm": 0.12187984585762024,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 2.4036,
      "step": 19
    },
    {
      "epoch": 0.15151515151515152,
      "grad_norm": 0.15144124627113342,
      "learning_rate": 3.560606060606061e-05,
      "loss": 2.7657,
      "step": 20
    },
    {
      "epoch": 0.1590909090909091,
      "grad_norm": 0.16624559462070465,
      "learning_rate": 3.484848484848485e-05,
      "loss": 2.7616,
      "step": 21
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.13778212666511536,
      "learning_rate": 3.409090909090909e-05,
      "loss": 2.6763,
      "step": 22
    },
    {
      "epoch": 0.17424242424242425,
      "grad_norm": 0.13686427474021912,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.5902,
      "step": 23
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 0.15202738344669342,
      "learning_rate": 3.257575757575758e-05,
      "loss": 2.414,
      "step": 24
    },
    {
      "epoch": 0.1893939393939394,
      "grad_norm": 0.1230149045586586,
      "learning_rate": 3.181818181818182e-05,
      "loss": 2.3264,
      "step": 25
    },
    {
      "epoch": 0.19696969696969696,
      "grad_norm": 0.15694740414619446,
      "learning_rate": 3.106060606060606e-05,
      "loss": 2.673,
      "step": 26
    },
    {
      "epoch": 0.20454545454545456,
      "grad_norm": 0.14588165283203125,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 2.5204,
      "step": 27
    },
    {
      "epoch": 0.21212121212121213,
      "grad_norm": 0.11643742769956589,
      "learning_rate": 2.954545454545455e-05,
      "loss": 2.4545,
      "step": 28
    },
    {
      "epoch": 0.2196969696969697,
      "grad_norm": 0.13210788369178772,
      "learning_rate": 2.878787878787879e-05,
      "loss": 2.3838,
      "step": 29
    },
    {
      "epoch": 0.22727272727272727,
      "grad_norm": 0.14757853746414185,
      "learning_rate": 2.803030303030303e-05,
      "loss": 2.5976,
      "step": 30
    },
    {
      "epoch": 0.23484848484848486,
      "grad_norm": 0.15585793554782867,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 2.6754,
      "step": 31
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 0.1773393601179123,
      "learning_rate": 2.6515151515151516e-05,
      "loss": 2.875,
      "step": 32
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.16676630079746246,
      "learning_rate": 2.575757575757576e-05,
      "loss": 2.6134,
      "step": 33
    },
    {
      "epoch": 0.25757575757575757,
      "grad_norm": 0.18137340247631073,
      "learning_rate": 2.5e-05,
      "loss": 2.597,
      "step": 34
    },
    {
      "epoch": 0.26515151515151514,
      "grad_norm": 0.1503434181213379,
      "learning_rate": 2.4242424242424244e-05,
      "loss": 2.4953,
      "step": 35
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 0.18666601181030273,
      "learning_rate": 2.3484848484848487e-05,
      "loss": 2.6068,
      "step": 36
    },
    {
      "epoch": 0.2803030303030303,
      "grad_norm": 0.1572176069021225,
      "learning_rate": 2.272727272727273e-05,
      "loss": 2.6932,
      "step": 37
    },
    {
      "epoch": 0.2878787878787879,
      "grad_norm": 0.17906226217746735,
      "learning_rate": 2.1969696969696972e-05,
      "loss": 2.6904,
      "step": 38
    },
    {
      "epoch": 0.29545454545454547,
      "grad_norm": 0.14453791081905365,
      "learning_rate": 2.1212121212121215e-05,
      "loss": 2.6939,
      "step": 39
    },
    {
      "epoch": 0.30303030303030304,
      "grad_norm": 0.17917212843894958,
      "learning_rate": 2.0454545454545457e-05,
      "loss": 2.6053,
      "step": 40
    },
    {
      "epoch": 0.3106060606060606,
      "grad_norm": 0.13931894302368164,
      "learning_rate": 1.9696969696969697e-05,
      "loss": 2.4931,
      "step": 41
    },
    {
      "epoch": 0.3181818181818182,
      "grad_norm": 0.1697995960712433,
      "learning_rate": 1.893939393939394e-05,
      "loss": 2.7841,
      "step": 42
    },
    {
      "epoch": 0.32575757575757575,
      "grad_norm": 0.18437106907367706,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 2.8778,
      "step": 43
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.17622561752796173,
      "learning_rate": 1.7424242424242425e-05,
      "loss": 2.4296,
      "step": 44
    },
    {
      "epoch": 0.3409090909090909,
      "grad_norm": 0.1804683357477188,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.6399,
      "step": 45
    },
    {
      "epoch": 0.3484848484848485,
      "grad_norm": 0.18295425176620483,
      "learning_rate": 1.590909090909091e-05,
      "loss": 2.8005,
      "step": 46
    },
    {
      "epoch": 0.3560606060606061,
      "grad_norm": 0.15364795923233032,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 2.5525,
      "step": 47
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.16968029737472534,
      "learning_rate": 1.4393939393939396e-05,
      "loss": 2.6071,
      "step": 48
    },
    {
      "epoch": 0.3712121212121212,
      "grad_norm": 0.18445603549480438,
      "learning_rate": 1.3636363636363637e-05,
      "loss": 2.6512,
      "step": 49
    },
    {
      "epoch": 0.3787878787878788,
      "grad_norm": 0.16352227330207825,
      "learning_rate": 1.287878787878788e-05,
      "loss": 2.5452,
      "step": 50
    },
    {
      "epoch": 0.38636363636363635,
      "grad_norm": 0.15646548569202423,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 2.4993,
      "step": 51
    },
    {
      "epoch": 0.3939393939393939,
      "grad_norm": 0.1422729194164276,
      "learning_rate": 1.1363636363636365e-05,
      "loss": 2.4923,
      "step": 52
    },
    {
      "epoch": 0.4015151515151515,
      "grad_norm": 0.1983378529548645,
      "learning_rate": 1.0606060606060607e-05,
      "loss": 2.5515,
      "step": 53
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 0.1466148942708969,
      "learning_rate": 9.848484848484848e-06,
      "loss": 2.5185,
      "step": 54
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 0.1356826275587082,
      "learning_rate": 9.090909090909091e-06,
      "loss": 2.4658,
      "step": 55
    },
    {
      "epoch": 0.42424242424242425,
      "grad_norm": 0.15932199358940125,
      "learning_rate": 8.333333333333334e-06,
      "loss": 2.4564,
      "step": 56
    },
    {
      "epoch": 0.4318181818181818,
      "grad_norm": 0.1498263031244278,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 2.4288,
      "step": 57
    },
    {
      "epoch": 0.4393939393939394,
      "grad_norm": 0.16778239607810974,
      "learning_rate": 6.818181818181818e-06,
      "loss": 2.378,
      "step": 58
    },
    {
      "epoch": 0.44696969696969696,
      "grad_norm": 0.1550946831703186,
      "learning_rate": 6.060606060606061e-06,
      "loss": 2.5211,
      "step": 59
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 0.15183551609516144,
      "learning_rate": 5.303030303030304e-06,
      "loss": 2.5247,
      "step": 60
    },
    {
      "epoch": 0.4621212121212121,
      "grad_norm": 0.17925047874450684,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 2.5269,
      "step": 61
    },
    {
      "epoch": 0.4696969696969697,
      "grad_norm": 0.16853377223014832,
      "learning_rate": 3.7878787878787882e-06,
      "loss": 2.3889,
      "step": 62
    },
    {
      "epoch": 0.4772727272727273,
      "grad_norm": 0.19110067188739777,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 2.5901,
      "step": 63
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 0.16294632852077484,
      "learning_rate": 2.2727272727272728e-06,
      "loss": 2.6124,
      "step": 64
    },
    {
      "epoch": 0.49242424242424243,
      "grad_norm": 0.1811217963695526,
      "learning_rate": 1.5151515151515152e-06,
      "loss": 2.5185,
      "step": 65
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.20019829273223877,
      "learning_rate": 7.575757575757576e-07,
      "loss": 2.7543,
      "step": 66
    }
  ],
  "logging_steps": 1,
  "max_steps": 66,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 30,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 884521398435840.0,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}