{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1561524047470331,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001561524047470331,
      "grad_norm": 0.0020751953125,
      "learning_rate": 0.00019800000000000002,
      "loss": 1.1655,
      "step": 50
    },
    {
      "epoch": 0.003123048094940662,
      "grad_norm": 0.001617431640625,
      "learning_rate": 0.000196,
      "loss": 0.55,
      "step": 100
    },
    {
      "epoch": 0.004684572142410993,
      "grad_norm": 23.75,
      "learning_rate": 0.000194,
      "loss": 0.6685,
      "step": 150
    },
    {
      "epoch": 0.006246096189881324,
      "grad_norm": 5.53125,
      "learning_rate": 0.000192,
      "loss": 0.7517,
      "step": 200
    },
    {
      "epoch": 0.007807620237351655,
      "grad_norm": 7.34375,
      "learning_rate": 0.00019,
      "loss": 1.0534,
      "step": 250
    },
    {
      "epoch": 0.009369144284821987,
      "grad_norm": 0.06201171875,
      "learning_rate": 0.000188,
      "loss": 0.7181,
      "step": 300
    },
    {
      "epoch": 0.010930668332292318,
      "grad_norm": 6.9375,
      "learning_rate": 0.00018600000000000002,
      "loss": 0.8464,
      "step": 350
    },
    {
      "epoch": 0.012492192379762648,
      "grad_norm": 0.462890625,
      "learning_rate": 0.00018400000000000003,
      "loss": 0.6547,
      "step": 400
    },
    {
      "epoch": 0.014053716427232979,
      "grad_norm": 0.00799560546875,
      "learning_rate": 0.000182,
      "loss": 0.4934,
      "step": 450
    },
    {
      "epoch": 0.01561524047470331,
      "grad_norm": 0.11279296875,
      "learning_rate": 0.00018,
      "loss": 0.9624,
      "step": 500
    },
    {
      "epoch": 0.017176764522173642,
      "grad_norm": 6.4375,
      "learning_rate": 0.00017800000000000002,
      "loss": 0.7832,
      "step": 550
    },
    {
      "epoch": 0.018738288569643973,
      "grad_norm": 6.09375,
      "learning_rate": 0.00017600000000000002,
      "loss": 0.5558,
      "step": 600
    },
    {
      "epoch": 0.020299812617114305,
      "grad_norm": 0.0206298828125,
      "learning_rate": 0.000174,
      "loss": 0.6964,
      "step": 650
    },
    {
      "epoch": 0.021861336664584636,
      "grad_norm": 5.375,
      "learning_rate": 0.000172,
      "loss": 0.9689,
      "step": 700
    },
    {
      "epoch": 0.023422860712054967,
      "grad_norm": 0.0128173828125,
      "learning_rate": 0.00017,
      "loss": 0.5311,
      "step": 750
    },
    {
      "epoch": 0.024984384759525295,
      "grad_norm": 5.90625,
      "learning_rate": 0.000168,
      "loss": 0.7353,
      "step": 800
    },
    {
      "epoch": 0.026545908806995627,
      "grad_norm": 1.546875,
      "learning_rate": 0.000166,
      "loss": 0.8207,
      "step": 850
    },
    {
      "epoch": 0.028107432854465958,
      "grad_norm": 0.099609375,
      "learning_rate": 0.000164,
      "loss": 0.5583,
      "step": 900
    },
    {
      "epoch": 0.02966895690193629,
      "grad_norm": 0.125,
      "learning_rate": 0.000162,
      "loss": 0.4985,
      "step": 950
    },
    {
      "epoch": 0.03123048094940662,
      "grad_norm": 0.0004367828369140625,
      "learning_rate": 0.00016,
      "loss": 0.8198,
      "step": 1000
    },
    {
      "epoch": 0.03279200499687695,
      "grad_norm": 0.000621795654296875,
      "learning_rate": 0.00015800000000000002,
      "loss": 0.7978,
      "step": 1050
    },
    {
      "epoch": 0.034353529044347283,
      "grad_norm": 8.3125,
      "learning_rate": 0.00015600000000000002,
      "loss": 0.6805,
      "step": 1100
    },
    {
      "epoch": 0.03591505309181761,
      "grad_norm": 0.0291748046875,
      "learning_rate": 0.000154,
      "loss": 0.4919,
      "step": 1150
    },
    {
      "epoch": 0.037476577139287946,
      "grad_norm": 0.0013427734375,
      "learning_rate": 0.000152,
      "loss": 0.5838,
      "step": 1200
    },
    {
      "epoch": 0.039038101186758274,
      "grad_norm": 0.000522613525390625,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.8223,
      "step": 1250
    },
    {
      "epoch": 0.04059962523422861,
      "grad_norm": 6.78125,
      "learning_rate": 0.000148,
      "loss": 0.3529,
      "step": 1300
    },
    {
      "epoch": 0.04216114928169894,
      "grad_norm": 0.0035552978515625,
      "learning_rate": 0.000146,
      "loss": 0.359,
      "step": 1350
    },
    {
      "epoch": 0.04372267332916927,
      "grad_norm": 6.15625,
      "learning_rate": 0.000144,
      "loss": 0.9659,
      "step": 1400
    },
    {
      "epoch": 0.0452841973766396,
      "grad_norm": 2.15625,
      "learning_rate": 0.000142,
      "loss": 0.5283,
      "step": 1450
    },
    {
      "epoch": 0.046845721424109935,
      "grad_norm": 0.08740234375,
      "learning_rate": 0.00014,
      "loss": 0.7926,
      "step": 1500
    },
    {
      "epoch": 0.04840724547158026,
      "grad_norm": 4.9375,
      "learning_rate": 0.000138,
      "loss": 0.7535,
      "step": 1550
    },
    {
      "epoch": 0.04996876951905059,
      "grad_norm": 0.050537109375,
      "learning_rate": 0.00013600000000000003,
      "loss": 0.6911,
      "step": 1600
    },
    {
      "epoch": 0.051530293566520925,
      "grad_norm": 0.130859375,
      "learning_rate": 0.000134,
      "loss": 0.5835,
      "step": 1650
    },
    {
      "epoch": 0.05309181761399125,
      "grad_norm": 0.119140625,
      "learning_rate": 0.000132,
      "loss": 0.7359,
      "step": 1700
    },
    {
      "epoch": 0.05465334166146159,
      "grad_norm": 5.4375,
      "learning_rate": 0.00013000000000000002,
      "loss": 0.4318,
      "step": 1750
    },
    {
      "epoch": 0.056214865708931916,
      "grad_norm": 6.09375,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.4924,
      "step": 1800
    },
    {
      "epoch": 0.05777638975640225,
      "grad_norm": 0.0186767578125,
      "learning_rate": 0.000126,
      "loss": 0.8286,
      "step": 1850
    },
    {
      "epoch": 0.05933791380387258,
      "grad_norm": 0.75390625,
      "learning_rate": 0.000124,
      "loss": 0.6285,
      "step": 1900
    },
    {
      "epoch": 0.060899437851342914,
      "grad_norm": 0.0005950927734375,
      "learning_rate": 0.000122,
      "loss": 0.6394,
      "step": 1950
    },
    {
      "epoch": 0.06246096189881324,
      "grad_norm": 3.390625,
      "learning_rate": 0.00012,
      "loss": 0.7705,
      "step": 2000
    },
    {
      "epoch": 0.06402248594628357,
      "grad_norm": 20.25,
      "learning_rate": 0.000118,
      "loss": 0.7837,
      "step": 2050
    },
    {
      "epoch": 0.0655840099937539,
      "grad_norm": 0.0009918212890625,
      "learning_rate": 0.000116,
      "loss": 0.4914,
      "step": 2100
    },
    {
      "epoch": 0.06714553404122424,
      "grad_norm": 0.00162506103515625,
      "learning_rate": 0.00011399999999999999,
      "loss": 0.6094,
      "step": 2150
    },
    {
      "epoch": 0.06870705808869457,
      "grad_norm": 5.65625,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.6238,
      "step": 2200
    },
    {
      "epoch": 0.0702685821361649,
      "grad_norm": 0.11181640625,
      "learning_rate": 0.00011000000000000002,
      "loss": 0.5555,
      "step": 2250
    },
    {
      "epoch": 0.07183010618363522,
      "grad_norm": 0.28515625,
      "learning_rate": 0.00010800000000000001,
      "loss": 0.3776,
      "step": 2300
    },
    {
      "epoch": 0.07339163023110556,
      "grad_norm": 0.00787353515625,
      "learning_rate": 0.00010600000000000002,
      "loss": 0.7962,
      "step": 2350
    },
    {
      "epoch": 0.07495315427857589,
      "grad_norm": 0.07080078125,
      "learning_rate": 0.00010400000000000001,
      "loss": 0.8551,
      "step": 2400
    },
    {
      "epoch": 0.07651467832604622,
      "grad_norm": 0.058837890625,
      "learning_rate": 0.00010200000000000001,
      "loss": 0.5114,
      "step": 2450
    },
    {
      "epoch": 0.07807620237351655,
      "grad_norm": 4.1875,
      "learning_rate": 0.0001,
      "loss": 0.5047,
      "step": 2500
    },
    {
      "epoch": 0.07963772642098688,
      "grad_norm": 0.0181884765625,
      "learning_rate": 9.8e-05,
      "loss": 0.561,
      "step": 2550
    },
    {
      "epoch": 0.08119925046845722,
      "grad_norm": 6.96875,
      "learning_rate": 9.6e-05,
      "loss": 0.6499,
      "step": 2600
    },
    {
      "epoch": 0.08276077451592755,
      "grad_norm": 5.5625,
      "learning_rate": 9.4e-05,
      "loss": 0.5555,
      "step": 2650
    },
    {
      "epoch": 0.08432229856339787,
      "grad_norm": 0.1015625,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.86,
      "step": 2700
    },
    {
      "epoch": 0.0858838226108682,
      "grad_norm": 0.021240234375,
      "learning_rate": 9e-05,
      "loss": 0.7037,
      "step": 2750
    },
    {
      "epoch": 0.08744534665833854,
      "grad_norm": 6.1875,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.6467,
      "step": 2800
    },
    {
      "epoch": 0.08900687070580887,
      "grad_norm": 0.004608154296875,
      "learning_rate": 8.6e-05,
      "loss": 0.4786,
      "step": 2850
    },
    {
      "epoch": 0.0905683947532792,
      "grad_norm": 0.396484375,
      "learning_rate": 8.4e-05,
      "loss": 0.9433,
      "step": 2900
    },
    {
      "epoch": 0.09212991880074953,
      "grad_norm": 6.3125,
      "learning_rate": 8.2e-05,
      "loss": 0.7547,
      "step": 2950
    },
    {
      "epoch": 0.09369144284821987,
      "grad_norm": 0.000637054443359375,
      "learning_rate": 8e-05,
      "loss": 0.4408,
      "step": 3000
    },
    {
      "epoch": 0.0952529668956902,
      "grad_norm": 0.0029449462890625,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.6513,
      "step": 3050
    },
    {
      "epoch": 0.09681449094316052,
      "grad_norm": 3.546875,
      "learning_rate": 7.6e-05,
      "loss": 0.8695,
      "step": 3100
    },
    {
      "epoch": 0.09837601499063085,
      "grad_norm": 9.875,
      "learning_rate": 7.4e-05,
      "loss": 0.3289,
      "step": 3150
    },
    {
      "epoch": 0.09993753903810118,
      "grad_norm": 0.0277099609375,
      "learning_rate": 7.2e-05,
      "loss": 1.0484,
      "step": 3200
    },
    {
      "epoch": 0.10149906308557152,
      "grad_norm": 0.0023956298828125,
      "learning_rate": 7e-05,
      "loss": 0.4233,
      "step": 3250
    },
    {
      "epoch": 0.10306058713304185,
      "grad_norm": 4.625,
      "learning_rate": 6.800000000000001e-05,
      "loss": 1.1324,
      "step": 3300
    },
    {
      "epoch": 0.10462211118051218,
      "grad_norm": 0.016357421875,
      "learning_rate": 6.6e-05,
      "loss": 0.7022,
      "step": 3350
    },
    {
      "epoch": 0.1061836352279825,
      "grad_norm": 0.0027618408203125,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.7385,
      "step": 3400
    },
    {
      "epoch": 0.10774515927545285,
      "grad_norm": 0.024658203125,
      "learning_rate": 6.2e-05,
      "loss": 0.8815,
      "step": 3450
    },
    {
      "epoch": 0.10930668332292318,
      "grad_norm": 3.953125,
      "learning_rate": 6e-05,
      "loss": 0.7281,
      "step": 3500
    },
    {
      "epoch": 0.1108682073703935,
      "grad_norm": 6.0,
      "learning_rate": 5.8e-05,
      "loss": 0.666,
      "step": 3550
    },
    {
      "epoch": 0.11242973141786383,
      "grad_norm": 0.0004177093505859375,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.6714,
      "step": 3600
    },
    {
      "epoch": 0.11399125546533416,
      "grad_norm": 5.5625,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 0.7718,
      "step": 3650
    },
    {
      "epoch": 0.1155527795128045,
      "grad_norm": 5.65625,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.6455,
      "step": 3700
    },
    {
      "epoch": 0.11711430356027483,
      "grad_norm": 0.0012969970703125,
      "learning_rate": 5e-05,
      "loss": 0.8328,
      "step": 3750
    },
    {
      "epoch": 0.11867582760774516,
      "grad_norm": 0.0003147125244140625,
      "learning_rate": 4.8e-05,
      "loss": 0.6205,
      "step": 3800
    },
    {
      "epoch": 0.12023735165521549,
      "grad_norm": 0.00146484375,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.6064,
      "step": 3850
    },
    {
      "epoch": 0.12179887570268583,
      "grad_norm": 0.11474609375,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.6402,
      "step": 3900
    },
    {
      "epoch": 0.12336039975015615,
      "grad_norm": 0.0016021728515625,
      "learning_rate": 4.2e-05,
      "loss": 0.2071,
      "step": 3950
    },
    {
      "epoch": 0.12492192379762648,
      "grad_norm": 0.000591278076171875,
      "learning_rate": 4e-05,
      "loss": 0.4755,
      "step": 4000
    },
    {
      "epoch": 0.12648344784509682,
      "grad_norm": 0.0157470703125,
      "learning_rate": 3.8e-05,
      "loss": 0.4945,
      "step": 4050
    },
    {
      "epoch": 0.12804497189256714,
      "grad_norm": 8.9375,
      "learning_rate": 3.6e-05,
      "loss": 0.5449,
      "step": 4100
    },
    {
      "epoch": 0.12960649594003748,
      "grad_norm": 0.00732421875,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.5369,
      "step": 4150
    },
    {
      "epoch": 0.1311680199875078,
      "grad_norm": 8.625,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.8617,
      "step": 4200
    },
    {
      "epoch": 0.13272954403497814,
      "grad_norm": 5.5625,
      "learning_rate": 3e-05,
      "loss": 0.3852,
      "step": 4250
    },
    {
      "epoch": 0.13429106808244848,
      "grad_norm": 7.6875,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.7667,
      "step": 4300
    },
    {
      "epoch": 0.1358525921299188,
      "grad_norm": 0.000286102294921875,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.8205,
      "step": 4350
    },
    {
      "epoch": 0.13741411617738913,
      "grad_norm": 5.96875,
      "learning_rate": 2.4e-05,
      "loss": 0.8517,
      "step": 4400
    },
    {
      "epoch": 0.13897564022485948,
      "grad_norm": 4.9375,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.9827,
      "step": 4450
    },
    {
      "epoch": 0.1405371642723298,
      "grad_norm": 5.46875,
      "learning_rate": 2e-05,
      "loss": 0.499,
      "step": 4500
    },
    {
      "epoch": 0.14209868831980013,
      "grad_norm": 0.00156402587890625,
      "learning_rate": 1.8e-05,
      "loss": 0.6781,
      "step": 4550
    },
    {
      "epoch": 0.14366021236727045,
      "grad_norm": 0.000255584716796875,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.5317,
      "step": 4600
    },
    {
      "epoch": 0.1452217364147408,
      "grad_norm": 7.59375,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.7491,
      "step": 4650
    },
    {
      "epoch": 0.14678326046221113,
      "grad_norm": 8.25,
      "learning_rate": 1.2e-05,
      "loss": 0.7373,
      "step": 4700
    },
    {
      "epoch": 0.14834478450968144,
      "grad_norm": 0.00445556640625,
      "learning_rate": 1e-05,
      "loss": 0.4924,
      "step": 4750
    },
    {
      "epoch": 0.14990630855715179,
      "grad_norm": 7.25,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.7882,
      "step": 4800
    },
    {
      "epoch": 0.1514678326046221,
      "grad_norm": 2.375,
      "learning_rate": 6e-06,
      "loss": 0.5924,
      "step": 4850
    },
    {
      "epoch": 0.15302935665209244,
      "grad_norm": 0.09814453125,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.3448,
      "step": 4900
    },
    {
      "epoch": 0.15459088069956278,
      "grad_norm": 0.0004730224609375,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.8267,
      "step": 4950
    },
    {
      "epoch": 0.1561524047470331,
      "grad_norm": 0.034423828125,
      "learning_rate": 0.0,
      "loss": 0.5791,
      "step": 5000
    }
  ],
  "logging_steps": 50,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.784334008901632e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}