{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10080645161290322,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010080645161290322,
      "grad_norm": 1.5859375,
      "learning_rate": 0.00019800000000000002,
      "loss": 1.1003,
      "step": 50
    },
    {
      "epoch": 0.0020161290322580645,
      "grad_norm": 0.0,
      "learning_rate": 0.000196,
      "loss": 1.2089,
      "step": 100
    },
    {
      "epoch": 0.0030241935483870967,
      "grad_norm": 0.00189208984375,
      "learning_rate": 0.000194,
      "loss": 1.3291,
      "step": 150
    },
    {
      "epoch": 0.004032258064516129,
      "grad_norm": 0.028564453125,
      "learning_rate": 0.000192,
      "loss": 0.87,
      "step": 200
    },
    {
      "epoch": 0.005040322580645161,
      "grad_norm": 1.6484375,
      "learning_rate": 0.00019,
      "loss": 1.0644,
      "step": 250
    },
    {
      "epoch": 0.006048387096774193,
      "grad_norm": 0.00119781494140625,
      "learning_rate": 0.000188,
      "loss": 0.9578,
      "step": 300
    },
    {
      "epoch": 0.007056451612903226,
      "grad_norm": 0.8359375,
      "learning_rate": 0.00018600000000000002,
      "loss": 0.7294,
      "step": 350
    },
    {
      "epoch": 0.008064516129032258,
      "grad_norm": 1.78125,
      "learning_rate": 0.00018400000000000003,
      "loss": 1.126,
      "step": 400
    },
    {
      "epoch": 0.009072580645161291,
      "grad_norm": 0.0113525390625,
      "learning_rate": 0.000182,
      "loss": 1.659,
      "step": 450
    },
    {
      "epoch": 0.010080645161290322,
      "grad_norm": 0.00714111328125,
      "learning_rate": 0.00018,
      "loss": 1.2084,
      "step": 500
    },
    {
      "epoch": 0.011088709677419355,
      "grad_norm": 0.055908203125,
      "learning_rate": 0.00017800000000000002,
      "loss": 0.9386,
      "step": 550
    },
    {
      "epoch": 0.012096774193548387,
      "grad_norm": 0.828125,
      "learning_rate": 0.00017600000000000002,
      "loss": 1.4331,
      "step": 600
    },
    {
      "epoch": 0.01310483870967742,
      "grad_norm": 15.4375,
      "learning_rate": 0.000174,
      "loss": 1.1943,
      "step": 650
    },
    {
      "epoch": 0.014112903225806451,
      "grad_norm": 0.0,
      "learning_rate": 0.000172,
      "loss": 0.7673,
      "step": 700
    },
    {
      "epoch": 0.015120967741935484,
      "grad_norm": 0.0096435546875,
      "learning_rate": 0.00017,
      "loss": 0.9278,
      "step": 750
    },
    {
      "epoch": 0.016129032258064516,
      "grad_norm": 0.00262451171875,
      "learning_rate": 0.000168,
      "loss": 0.8818,
      "step": 800
    },
    {
      "epoch": 0.017137096774193547,
      "grad_norm": 0.0098876953125,
      "learning_rate": 0.000166,
      "loss": 1.0701,
      "step": 850
    },
    {
      "epoch": 0.018145161290322582,
      "grad_norm": 0.0025177001953125,
      "learning_rate": 0.000164,
      "loss": 1.0132,
      "step": 900
    },
    {
      "epoch": 0.019153225806451613,
      "grad_norm": 0.0162353515625,
      "learning_rate": 0.000162,
      "loss": 0.909,
      "step": 950
    },
    {
      "epoch": 0.020161290322580645,
      "grad_norm": 0.0,
      "learning_rate": 0.00016,
      "loss": 1.1242,
      "step": 1000
    },
    {
      "epoch": 0.021169354838709676,
      "grad_norm": 0.0,
      "learning_rate": 0.00015800000000000002,
      "loss": 1.4603,
      "step": 1050
    },
    {
      "epoch": 0.02217741935483871,
      "grad_norm": 0.66796875,
      "learning_rate": 0.00015600000000000002,
      "loss": 0.7965,
      "step": 1100
    },
    {
      "epoch": 0.023185483870967742,
      "grad_norm": 0.00054168701171875,
      "learning_rate": 0.000154,
      "loss": 0.7828,
      "step": 1150
    },
    {
      "epoch": 0.024193548387096774,
      "grad_norm": 0.9921875,
      "learning_rate": 0.000152,
      "loss": 0.9853,
      "step": 1200
    },
    {
      "epoch": 0.025201612903225805,
      "grad_norm": 0.00689697265625,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.375,
      "step": 1250
    },
    {
      "epoch": 0.02620967741935484,
      "grad_norm": 0.0,
      "learning_rate": 0.000148,
      "loss": 0.9719,
      "step": 1300
    },
    {
      "epoch": 0.02721774193548387,
      "grad_norm": 0.0,
      "learning_rate": 0.000146,
      "loss": 1.2151,
      "step": 1350
    },
    {
      "epoch": 0.028225806451612902,
      "grad_norm": 0.00982666015625,
      "learning_rate": 0.000144,
      "loss": 0.7872,
      "step": 1400
    },
    {
      "epoch": 0.029233870967741934,
      "grad_norm": 0.00653076171875,
      "learning_rate": 0.000142,
      "loss": 1.0166,
      "step": 1450
    },
    {
      "epoch": 0.03024193548387097,
      "grad_norm": 0.00665283203125,
      "learning_rate": 0.00014,
      "loss": 1.0774,
      "step": 1500
    },
    {
      "epoch": 0.03125,
      "grad_norm": 0.00933837890625,
      "learning_rate": 0.000138,
      "loss": 1.0596,
      "step": 1550
    },
    {
      "epoch": 0.03225806451612903,
      "grad_norm": 0.85546875,
      "learning_rate": 0.00013600000000000003,
      "loss": 1.048,
      "step": 1600
    },
    {
      "epoch": 0.03326612903225806,
      "grad_norm": 0.0,
      "learning_rate": 0.000134,
      "loss": 0.8372,
      "step": 1650
    },
    {
      "epoch": 0.034274193548387094,
      "grad_norm": 0.81640625,
      "learning_rate": 0.000132,
      "loss": 1.4669,
      "step": 1700
    },
    {
      "epoch": 0.03528225806451613,
      "grad_norm": 0.0,
      "learning_rate": 0.00013000000000000002,
      "loss": 0.7405,
      "step": 1750
    },
    {
      "epoch": 0.036290322580645164,
      "grad_norm": 0.0,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.988,
      "step": 1800
    },
    {
      "epoch": 0.037298387096774195,
      "grad_norm": 0.0096435546875,
      "learning_rate": 0.000126,
      "loss": 1.1337,
      "step": 1850
    },
    {
      "epoch": 0.038306451612903226,
      "grad_norm": 0.006011962890625,
      "learning_rate": 0.000124,
      "loss": 0.8881,
      "step": 1900
    },
    {
      "epoch": 0.03931451612903226,
      "grad_norm": 0.02197265625,
      "learning_rate": 0.000122,
      "loss": 0.9004,
      "step": 1950
    },
    {
      "epoch": 0.04032258064516129,
      "grad_norm": 0.002838134765625,
      "learning_rate": 0.00012,
      "loss": 0.9567,
      "step": 2000
    },
    {
      "epoch": 0.04133064516129032,
      "grad_norm": 0.0230712890625,
      "learning_rate": 0.000118,
      "loss": 0.9933,
      "step": 2050
    },
    {
      "epoch": 0.04233870967741935,
      "grad_norm": 0.002471923828125,
      "learning_rate": 0.000116,
      "loss": 1.0882,
      "step": 2100
    },
    {
      "epoch": 0.04334677419354839,
      "grad_norm": 0.01080322265625,
      "learning_rate": 0.00011399999999999999,
      "loss": 0.9519,
      "step": 2150
    },
    {
      "epoch": 0.04435483870967742,
      "grad_norm": 0.032958984375,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.7841,
      "step": 2200
    },
    {
      "epoch": 0.04536290322580645,
      "grad_norm": 0.00653076171875,
      "learning_rate": 0.00011000000000000002,
      "loss": 0.7603,
      "step": 2250
    },
    {
      "epoch": 0.046370967741935484,
      "grad_norm": 0.012939453125,
      "learning_rate": 0.00010800000000000001,
      "loss": 1.2195,
      "step": 2300
    },
    {
      "epoch": 0.047379032258064516,
      "grad_norm": 0.00433349609375,
      "learning_rate": 0.00010600000000000002,
      "loss": 1.2472,
      "step": 2350
    },
    {
      "epoch": 0.04838709677419355,
      "grad_norm": 0.04052734375,
      "learning_rate": 0.00010400000000000001,
      "loss": 0.9298,
      "step": 2400
    },
    {
      "epoch": 0.04939516129032258,
      "grad_norm": 0.0,
      "learning_rate": 0.00010200000000000001,
      "loss": 1.3759,
      "step": 2450
    },
    {
      "epoch": 0.05040322580645161,
      "grad_norm": 0.00701904296875,
      "learning_rate": 0.0001,
      "loss": 0.7772,
      "step": 2500
    },
    {
      "epoch": 0.05141129032258065,
      "grad_norm": 0.0120849609375,
      "learning_rate": 9.8e-05,
      "loss": 1.1628,
      "step": 2550
    },
    {
      "epoch": 0.05241935483870968,
      "grad_norm": 0.041748046875,
      "learning_rate": 9.6e-05,
      "loss": 1.064,
      "step": 2600
    },
    {
      "epoch": 0.05342741935483871,
      "grad_norm": 0.0,
      "learning_rate": 9.4e-05,
      "loss": 0.9408,
      "step": 2650
    },
    {
      "epoch": 0.05443548387096774,
      "grad_norm": 1.0078125,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.933,
      "step": 2700
    },
    {
      "epoch": 0.055443548387096774,
      "grad_norm": 0.03466796875,
      "learning_rate": 9e-05,
      "loss": 0.918,
      "step": 2750
    },
    {
      "epoch": 0.056451612903225805,
      "grad_norm": 0.0322265625,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.8227,
      "step": 2800
    },
    {
      "epoch": 0.057459677419354836,
      "grad_norm": 0.84765625,
      "learning_rate": 8.6e-05,
      "loss": 0.974,
      "step": 2850
    },
    {
      "epoch": 0.05846774193548387,
      "grad_norm": 0.0015106201171875,
      "learning_rate": 8.4e-05,
      "loss": 0.9285,
      "step": 2900
    },
    {
      "epoch": 0.059475806451612906,
      "grad_norm": 0.0059814453125,
      "learning_rate": 8.2e-05,
      "loss": 0.6635,
      "step": 2950
    },
    {
      "epoch": 0.06048387096774194,
      "grad_norm": 0.002471923828125,
      "learning_rate": 8e-05,
      "loss": 0.929,
      "step": 3000
    },
    {
      "epoch": 0.06149193548387097,
      "grad_norm": 0.0,
      "learning_rate": 7.800000000000001e-05,
      "loss": 1.1495,
      "step": 3050
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.00122833251953125,
      "learning_rate": 7.6e-05,
      "loss": 0.9024,
      "step": 3100
    },
    {
      "epoch": 0.06350806451612903,
      "grad_norm": 0.00201416015625,
      "learning_rate": 7.4e-05,
      "loss": 1.183,
      "step": 3150
    },
    {
      "epoch": 0.06451612903225806,
      "grad_norm": 0.0050048828125,
      "learning_rate": 7.2e-05,
      "loss": 0.8827,
      "step": 3200
    },
    {
      "epoch": 0.0655241935483871,
      "grad_norm": 0.6875,
      "learning_rate": 7e-05,
      "loss": 1.0943,
      "step": 3250
    },
    {
      "epoch": 0.06653225806451613,
      "grad_norm": 0.007080078125,
      "learning_rate": 6.800000000000001e-05,
      "loss": 1.0726,
      "step": 3300
    },
    {
      "epoch": 0.06754032258064516,
      "grad_norm": 0.0081787109375,
      "learning_rate": 6.6e-05,
      "loss": 1.2269,
      "step": 3350
    },
    {
      "epoch": 0.06854838709677419,
      "grad_norm": 0.828125,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.9572,
      "step": 3400
    },
    {
      "epoch": 0.06955645161290322,
      "grad_norm": 0.84375,
      "learning_rate": 6.2e-05,
      "loss": 1.0595,
      "step": 3450
    },
    {
      "epoch": 0.07056451612903226,
      "grad_norm": 0.73828125,
      "learning_rate": 6e-05,
      "loss": 1.0699,
      "step": 3500
    },
    {
      "epoch": 0.0715725806451613,
      "grad_norm": 0.0,
      "learning_rate": 5.8e-05,
      "loss": 0.8608,
      "step": 3550
    },
    {
      "epoch": 0.07258064516129033,
      "grad_norm": 0.005645751953125,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.9564,
      "step": 3600
    },
    {
      "epoch": 0.07358870967741936,
      "grad_norm": 0.0125732421875,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 1.003,
      "step": 3650
    },
    {
      "epoch": 0.07459677419354839,
      "grad_norm": 0.0014190673828125,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.7659,
      "step": 3700
    },
    {
      "epoch": 0.07560483870967742,
      "grad_norm": 0.7421875,
      "learning_rate": 5e-05,
      "loss": 1.2968,
      "step": 3750
    },
    {
      "epoch": 0.07661290322580645,
      "grad_norm": 1.09375,
      "learning_rate": 4.8e-05,
      "loss": 0.9843,
      "step": 3800
    },
    {
      "epoch": 0.07762096774193548,
      "grad_norm": 0.2421875,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.1342,
      "step": 3850
    },
    {
      "epoch": 0.07862903225806452,
      "grad_norm": 0.01904296875,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.9891,
      "step": 3900
    },
    {
      "epoch": 0.07963709677419355,
      "grad_norm": 0.91796875,
      "learning_rate": 4.2e-05,
      "loss": 1.0487,
      "step": 3950
    },
    {
      "epoch": 0.08064516129032258,
      "grad_norm": 1.0078125,
      "learning_rate": 4e-05,
      "loss": 0.7522,
      "step": 4000
    },
    {
      "epoch": 0.08165322580645161,
      "grad_norm": 1.046875,
      "learning_rate": 3.8e-05,
      "loss": 1.0201,
      "step": 4050
    },
    {
      "epoch": 0.08266129032258064,
      "grad_norm": 0.0023193359375,
      "learning_rate": 3.6e-05,
      "loss": 1.0339,
      "step": 4100
    },
    {
      "epoch": 0.08366935483870967,
      "grad_norm": 0.001983642578125,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 1.0322,
      "step": 4150
    },
    {
      "epoch": 0.0846774193548387,
      "grad_norm": 0.0,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 1.1924,
      "step": 4200
    },
    {
      "epoch": 0.08568548387096774,
      "grad_norm": 0.62890625,
      "learning_rate": 3e-05,
      "loss": 0.932,
      "step": 4250
    },
    {
      "epoch": 0.08669354838709678,
      "grad_norm": 0.0,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.8545,
      "step": 4300
    },
    {
      "epoch": 0.08770161290322581,
      "grad_norm": 1.1875,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 1.2498,
      "step": 4350
    },
    {
      "epoch": 0.08870967741935484,
      "grad_norm": 0.859375,
      "learning_rate": 2.4e-05,
      "loss": 0.9398,
      "step": 4400
    },
    {
      "epoch": 0.08971774193548387,
      "grad_norm": 0.0250244140625,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.7945,
      "step": 4450
    },
    {
      "epoch": 0.0907258064516129,
      "grad_norm": 0.01611328125,
      "learning_rate": 2e-05,
      "loss": 1.1317,
      "step": 4500
    },
    {
      "epoch": 0.09173387096774194,
      "grad_norm": 1.03125,
      "learning_rate": 1.8e-05,
      "loss": 1.0792,
      "step": 4550
    },
    {
      "epoch": 0.09274193548387097,
      "grad_norm": 0.8671875,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.7469,
      "step": 4600
    },
    {
      "epoch": 0.09375,
      "grad_norm": 1.0078125,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 1.0558,
      "step": 4650
    },
    {
      "epoch": 0.09475806451612903,
      "grad_norm": 0.0198974609375,
      "learning_rate": 1.2e-05,
      "loss": 0.8726,
      "step": 4700
    },
    {
      "epoch": 0.09576612903225806,
      "grad_norm": 1.0234375,
      "learning_rate": 1e-05,
      "loss": 0.767,
      "step": 4750
    },
    {
      "epoch": 0.0967741935483871,
      "grad_norm": 0.00946044921875,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.8851,
      "step": 4800
    },
    {
      "epoch": 0.09778225806451613,
      "grad_norm": 0.01446533203125,
      "learning_rate": 6e-06,
      "loss": 0.7508,
      "step": 4850
    },
    {
      "epoch": 0.09879032258064516,
      "grad_norm": 0.0169677734375,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.9915,
      "step": 4900
    },
    {
      "epoch": 0.09979838709677419,
      "grad_norm": 0.0,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.8425,
      "step": 4950
    },
    {
      "epoch": 0.10080645161290322,
      "grad_norm": 1.0390625,
      "learning_rate": 0.0,
      "loss": 1.2579,
      "step": 5000
    }
  ],
  "logging_steps": 50,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.021332872122368e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}