{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.31357792411414237,
  "eval_steps": 1000,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002239842315101017,
      "grad_norm": 71.10852813720703,
      "learning_rate": 0.0001,
      "loss": 3.5057,
      "step": 100
    },
    {
      "epoch": 0.004479684630202034,
      "grad_norm": 57.756534576416016,
      "learning_rate": 9.9998756572327e-05,
      "loss": 0.8254,
      "step": 200
    },
    {
      "epoch": 0.006719526945303051,
      "grad_norm": 44.61080551147461,
      "learning_rate": 9.999502635115246e-05,
      "loss": 0.6935,
      "step": 300
    },
    {
      "epoch": 0.008959369260404068,
      "grad_norm": 50.82619094848633,
      "learning_rate": 9.998880952200681e-05,
      "loss": 0.5972,
      "step": 400
    },
    {
      "epoch": 0.011199211575505085,
      "grad_norm": 44.26677703857422,
      "learning_rate": 9.998010639409713e-05,
      "loss": 0.5408,
      "step": 500
    },
    {
      "epoch": 0.013439053890606102,
      "grad_norm": 42.8045654296875,
      "learning_rate": 9.996891740029186e-05,
      "loss": 0.5509,
      "step": 600
    },
    {
      "epoch": 0.01567889620570712,
      "grad_norm": 30.989139556884766,
      "learning_rate": 9.995524309709913e-05,
      "loss": 0.4823,
      "step": 700
    },
    {
      "epoch": 0.017918738520808136,
      "grad_norm": 34.01952362060547,
      "learning_rate": 9.993908416463927e-05,
      "loss": 0.5111,
      "step": 800
    },
    {
      "epoch": 0.020158580835909153,
      "grad_norm": 34.079307556152344,
      "learning_rate": 9.992044140661079e-05,
      "loss": 0.4635,
      "step": 900
    },
    {
      "epoch": 0.02239842315101017,
      "grad_norm": 26.16071128845215,
      "learning_rate": 9.989931575025056e-05,
      "loss": 0.4883,
      "step": 1000
    },
    {
      "epoch": 0.02239842315101017,
      "eval_avg_non_pair_similarity": 0.0020343252948339737,
      "eval_avg_pair_similarity": 0.008852629057131708,
      "eval_loss": 0.5431402921676636,
      "eval_runtime": 19.6178,
      "eval_samples_per_second": 25.487,
      "eval_similarity_ratio": 4.35162905343228,
      "eval_steps_per_second": 0.816,
      "step": 1000
    },
    {
      "epoch": 0.024638265466111187,
      "grad_norm": 35.38695526123047,
      "learning_rate": 9.987570824628759e-05,
      "loss": 0.4655,
      "step": 1100
    },
    {
      "epoch": 0.026878107781212204,
      "grad_norm": 32.071346282958984,
      "learning_rate": 9.984962006889084e-05,
      "loss": 0.4342,
      "step": 1200
    },
    {
      "epoch": 0.029117950096313218,
      "grad_norm": 39.6610221862793,
      "learning_rate": 9.982105251561082e-05,
      "loss": 0.458,
      "step": 1300
    },
    {
      "epoch": 0.03135779241141424,
      "grad_norm": 31.493322372436523,
      "learning_rate": 9.979000700731491e-05,
      "loss": 0.4525,
      "step": 1400
    },
    {
      "epoch": 0.03359763472651525,
      "grad_norm": 34.453399658203125,
      "learning_rate": 9.975648508811693e-05,
      "loss": 0.41,
      "step": 1500
    },
    {
      "epoch": 0.03583747704161627,
      "grad_norm": 33.990074157714844,
      "learning_rate": 9.972048842530012e-05,
      "loss": 0.4097,
      "step": 1600
    },
    {
      "epoch": 0.038077319356717286,
      "grad_norm": 30.44228172302246,
      "learning_rate": 9.968201880923439e-05,
      "loss": 0.4257,
      "step": 1700
    },
    {
      "epoch": 0.040317161671818307,
      "grad_norm": 31.427162170410156,
      "learning_rate": 9.964107815328711e-05,
      "loss": 0.3821,
      "step": 1800
    },
    {
      "epoch": 0.04255700398691932,
      "grad_norm": 25.877887725830078,
      "learning_rate": 9.959766849372808e-05,
      "loss": 0.3788,
      "step": 1900
    },
    {
      "epoch": 0.04479684630202034,
      "grad_norm": 25.36798095703125,
      "learning_rate": 9.955179198962817e-05,
      "loss": 0.3854,
      "step": 2000
    },
    {
      "epoch": 0.04479684630202034,
      "eval_avg_non_pair_similarity": 0.0016289287904792565,
      "eval_avg_pair_similarity": 0.0032495629731565715,
      "eval_loss": 0.4394480586051941,
      "eval_runtime": 19.5064,
      "eval_samples_per_second": 25.633,
      "eval_similarity_ratio": 1.9949079371360972,
      "eval_steps_per_second": 0.82,
      "step": 2000
    },
    {
      "epoch": 0.047036688617121354,
      "grad_norm": 24.537384033203125,
      "learning_rate": 9.950345092275198e-05,
      "loss": 0.3886,
      "step": 2100
    },
    {
      "epoch": 0.049276530932222375,
      "grad_norm": 26.4981632232666,
      "learning_rate": 9.945264769744431e-05,
      "loss": 0.3926,
      "step": 2200
    },
    {
      "epoch": 0.05151637324732339,
      "grad_norm": 40.585941314697266,
      "learning_rate": 9.939938484051063e-05,
      "loss": 0.3796,
      "step": 2300
    },
    {
      "epoch": 0.05375621556242441,
      "grad_norm": 26.374149322509766,
      "learning_rate": 9.934366500109132e-05,
      "loss": 0.3723,
      "step": 2400
    },
    {
      "epoch": 0.05599605787752542,
      "grad_norm": 31.54728126525879,
      "learning_rate": 9.928549095053001e-05,
      "loss": 0.3639,
      "step": 2500
    },
    {
      "epoch": 0.058235900192626436,
      "grad_norm": 26.00455093383789,
      "learning_rate": 9.922486558223567e-05,
      "loss": 0.3387,
      "step": 2600
    },
    {
      "epoch": 0.060475742507727456,
      "grad_norm": 23.684057235717773,
      "learning_rate": 9.916179191153873e-05,
      "loss": 0.3494,
      "step": 2700
    },
    {
      "epoch": 0.06271558482282848,
      "grad_norm": 24.034656524658203,
      "learning_rate": 9.909627307554108e-05,
      "loss": 0.362,
      "step": 2800
    },
    {
      "epoch": 0.06495542713792948,
      "grad_norm": 31.07891082763672,
      "learning_rate": 9.902831233296009e-05,
      "loss": 0.3481,
      "step": 2900
    },
    {
      "epoch": 0.0671952694530305,
      "grad_norm": 24.183626174926758,
      "learning_rate": 9.895791306396644e-05,
      "loss": 0.3535,
      "step": 3000
    },
    {
      "epoch": 0.0671952694530305,
      "eval_avg_non_pair_similarity": -0.0007878901720614375,
      "eval_avg_pair_similarity": -0.008789425778668375,
      "eval_loss": 0.4187028110027313,
      "eval_runtime": 19.4839,
      "eval_samples_per_second": 25.662,
      "eval_similarity_ratio": 11.155648452463497,
      "eval_steps_per_second": 0.821,
      "step": 3000
    },
    {
      "epoch": 0.06943511176813152,
      "grad_norm": 20.178213119506836,
      "learning_rate": 9.888507877001616e-05,
      "loss": 0.3375,
      "step": 3100
    },
    {
      "epoch": 0.07167495408323254,
      "grad_norm": 31.757244110107422,
      "learning_rate": 9.880981307367627e-05,
      "loss": 0.3489,
      "step": 3200
    },
    {
      "epoch": 0.07391479639833355,
      "grad_norm": 27.889982223510742,
      "learning_rate": 9.873211971844477e-05,
      "loss": 0.3463,
      "step": 3300
    },
    {
      "epoch": 0.07615463871343457,
      "grad_norm": 23.282093048095703,
      "learning_rate": 9.865200256856437e-05,
      "loss": 0.3235,
      "step": 3400
    },
    {
      "epoch": 0.07839448102853559,
      "grad_norm": 24.573863983154297,
      "learning_rate": 9.856946560883034e-05,
      "loss": 0.3474,
      "step": 3500
    },
    {
      "epoch": 0.08063432334363661,
      "grad_norm": 20.7421932220459,
      "learning_rate": 9.848451294439224e-05,
      "loss": 0.3533,
      "step": 3600
    },
    {
      "epoch": 0.08287416565873762,
      "grad_norm": 22.051513671875,
      "learning_rate": 9.839714880054987e-05,
      "loss": 0.3277,
      "step": 3700
    },
    {
      "epoch": 0.08511400797383864,
      "grad_norm": 22.470027923583984,
      "learning_rate": 9.830737752254294e-05,
      "loss": 0.3261,
      "step": 3800
    },
    {
      "epoch": 0.08735385028893966,
      "grad_norm": 20.80890464782715,
      "learning_rate": 9.821520357533513e-05,
      "loss": 0.3474,
      "step": 3900
    },
    {
      "epoch": 0.08959369260404068,
      "grad_norm": 20.187280654907227,
      "learning_rate": 9.812063154339191e-05,
      "loss": 0.2984,
      "step": 4000
    },
    {
      "epoch": 0.08959369260404068,
      "eval_avg_non_pair_similarity": -0.0008396646829899625,
      "eval_avg_pair_similarity": -0.011315496074035763,
      "eval_loss": 0.32538020610809326,
      "eval_runtime": 19.4916,
      "eval_samples_per_second": 25.652,
      "eval_similarity_ratio": 13.476208185560939,
      "eval_steps_per_second": 0.821,
      "step": 4000
    },
    {
      "epoch": 0.09183353491914169,
      "grad_norm": 26.496036529541016,
      "learning_rate": 9.802366613045254e-05,
      "loss": 0.3326,
      "step": 4100
    },
    {
      "epoch": 0.09407337723424271,
      "grad_norm": 23.102359771728516,
      "learning_rate": 9.792431215929613e-05,
      "loss": 0.3341,
      "step": 4200
    },
    {
      "epoch": 0.09631321954934373,
      "grad_norm": 21.27369499206543,
      "learning_rate": 9.782257457150177e-05,
      "loss": 0.3254,
      "step": 4300
    },
    {
      "epoch": 0.09855306186444475,
      "grad_norm": 19.25406265258789,
      "learning_rate": 9.771845842720274e-05,
      "loss": 0.3334,
      "step": 4400
    },
    {
      "epoch": 0.10079290417954576,
      "grad_norm": 15.666335105895996,
      "learning_rate": 9.761196890483482e-05,
      "loss": 0.3064,
      "step": 4500
    },
    {
      "epoch": 0.10303274649464678,
      "grad_norm": 24.592592239379883,
      "learning_rate": 9.75031113008788e-05,
      "loss": 0.2902,
      "step": 4600
    },
    {
      "epoch": 0.1052725888097478,
      "grad_norm": 20.0572566986084,
      "learning_rate": 9.739189102959696e-05,
      "loss": 0.3121,
      "step": 4700
    },
    {
      "epoch": 0.10751243112484882,
      "grad_norm": 19.887725830078125,
      "learning_rate": 9.727831362276381e-05,
      "loss": 0.3014,
      "step": 4800
    },
    {
      "epoch": 0.10975227343994982,
      "grad_norm": 15.561097145080566,
      "learning_rate": 9.716238472939101e-05,
      "loss": 0.316,
      "step": 4900
    },
    {
      "epoch": 0.11199211575505084,
      "grad_norm": 17.96786880493164,
      "learning_rate": 9.704411011544629e-05,
      "loss": 0.3215,
      "step": 5000
    },
    {
      "epoch": 0.11199211575505084,
      "eval_avg_non_pair_similarity": 0.00024146916974524413,
      "eval_avg_pair_similarity": -0.009633154251612723,
      "eval_loss": 0.2951599061489105,
      "eval_runtime": 19.5025,
      "eval_samples_per_second": 25.638,
      "eval_similarity_ratio": -39.893930400207765,
      "eval_steps_per_second": 0.82,
      "step": 5000
    },
    {
      "epoch": 0.11423195807015186,
      "grad_norm": 14.672723770141602,
      "learning_rate": 9.692349566356677e-05,
      "loss": 0.2954,
      "step": 5100
    },
    {
      "epoch": 0.11647180038525287,
      "grad_norm": 17.881025314331055,
      "learning_rate": 9.680054737276638e-05,
      "loss": 0.2968,
      "step": 5200
    },
    {
      "epoch": 0.11871164270035389,
      "grad_norm": 16.55130958557129,
      "learning_rate": 9.667527135813737e-05,
      "loss": 0.2707,
      "step": 5300
    },
    {
      "epoch": 0.12095148501545491,
      "grad_norm": 21.650575637817383,
      "learning_rate": 9.654767385054627e-05,
      "loss": 0.3068,
      "step": 5400
    },
    {
      "epoch": 0.12319132733055593,
      "grad_norm": 20.990921020507812,
      "learning_rate": 9.641776119632397e-05,
      "loss": 0.3037,
      "step": 5500
    },
    {
      "epoch": 0.12543116964565695,
      "grad_norm": 21.97833824157715,
      "learning_rate": 9.628553985695005e-05,
      "loss": 0.3307,
      "step": 5600
    },
    {
      "epoch": 0.12767101196075797,
      "grad_norm": 20.353246688842773,
      "learning_rate": 9.61510164087314e-05,
      "loss": 0.2907,
      "step": 5700
    },
    {
      "epoch": 0.12991085427585897,
      "grad_norm": 16.254976272583008,
      "learning_rate": 9.601419754247514e-05,
      "loss": 0.3025,
      "step": 5800
    },
    {
      "epoch": 0.13215069659096,
      "grad_norm": 24.14662742614746,
      "learning_rate": 9.587509006315585e-05,
      "loss": 0.2676,
      "step": 5900
    },
    {
      "epoch": 0.134390538906061,
      "grad_norm": 15.861074447631836,
      "learning_rate": 9.573370088957712e-05,
      "loss": 0.2953,
      "step": 6000
    },
    {
      "epoch": 0.134390538906061,
      "eval_avg_non_pair_similarity": -0.00021786548842126337,
      "eval_avg_pair_similarity": -0.001965825233142823,
      "eval_loss": 0.31484127044677734,
      "eval_runtime": 19.5134,
      "eval_samples_per_second": 25.623,
      "eval_similarity_ratio": 9.023114433533939,
      "eval_steps_per_second": 0.82,
      "step": 6000
    },
    {
      "epoch": 0.13663038122116203,
      "grad_norm": 14.910375595092773,
      "learning_rate": 9.559003705402737e-05,
      "loss": 0.2846,
      "step": 6100
    },
    {
      "epoch": 0.13887022353626305,
      "grad_norm": 17.768138885498047,
      "learning_rate": 9.544410570193014e-05,
      "loss": 0.3031,
      "step": 6200
    },
    {
      "epoch": 0.14111006585136407,
      "grad_norm": 13.620220184326172,
      "learning_rate": 9.529591409148874e-05,
      "loss": 0.2716,
      "step": 6300
    },
    {
      "epoch": 0.1433499081664651,
      "grad_norm": 14.697577476501465,
      "learning_rate": 9.514546959332509e-05,
      "loss": 0.2759,
      "step": 6400
    },
    {
      "epoch": 0.1455897504815661,
      "grad_norm": 21.234310150146484,
      "learning_rate": 9.499277969011334e-05,
      "loss": 0.2845,
      "step": 6500
    },
    {
      "epoch": 0.1478295927966671,
      "grad_norm": 16.50038719177246,
      "learning_rate": 9.483785197620747e-05,
      "loss": 0.2471,
      "step": 6600
    },
    {
      "epoch": 0.15006943511176812,
      "grad_norm": 16.261537551879883,
      "learning_rate": 9.468069415726377e-05,
      "loss": 0.2978,
      "step": 6700
    },
    {
      "epoch": 0.15230927742686914,
      "grad_norm": 16.553571701049805,
      "learning_rate": 9.452131404985752e-05,
      "loss": 0.311,
      "step": 6800
    },
    {
      "epoch": 0.15454911974197016,
      "grad_norm": 18.445993423461914,
      "learning_rate": 9.43597195810941e-05,
      "loss": 0.2777,
      "step": 6900
    },
    {
      "epoch": 0.15678896205707119,
      "grad_norm": 13.675585746765137,
      "learning_rate": 9.419591878821496e-05,
      "loss": 0.2417,
      "step": 7000
    },
    {
      "epoch": 0.15678896205707119,
      "eval_avg_non_pair_similarity": 0.0003613333898595999,
      "eval_avg_pair_similarity": 0.005676110625499859,
      "eval_loss": 0.3019685447216034,
      "eval_runtime": 19.5219,
      "eval_samples_per_second": 25.612,
      "eval_similarity_ratio": 15.708790786551377,
      "eval_steps_per_second": 0.82,
      "step": 7000
    },
    {
      "epoch": 0.1590288043721722,
      "grad_norm": 15.780075073242188,
      "learning_rate": 9.402991981819758e-05,
      "loss": 0.2866,
      "step": 7100
    },
    {
      "epoch": 0.16126864668727323,
      "grad_norm": 17.34490966796875,
      "learning_rate": 9.386173092735051e-05,
      "loss": 0.2526,
      "step": 7200
    },
    {
      "epoch": 0.16350848900237425,
      "grad_norm": 14.431266784667969,
      "learning_rate": 9.36913604809026e-05,
      "loss": 0.2646,
      "step": 7300
    },
    {
      "epoch": 0.16574833131747524,
      "grad_norm": 11.20712661743164,
      "learning_rate": 9.351881695258693e-05,
      "loss": 0.2279,
      "step": 7400
    },
    {
      "epoch": 0.16798817363257626,
      "grad_norm": 15.549891471862793,
      "learning_rate": 9.334410892421945e-05,
      "loss": 0.2884,
      "step": 7500
    },
    {
      "epoch": 0.17022801594767728,
      "grad_norm": 14.635927200317383,
      "learning_rate": 9.316724508527205e-05,
      "loss": 0.2729,
      "step": 7600
    },
    {
      "epoch": 0.1724678582627783,
      "grad_norm": 12.841429710388184,
      "learning_rate": 9.298823423244038e-05,
      "loss": 0.271,
      "step": 7700
    },
    {
      "epoch": 0.17470770057787932,
      "grad_norm": 13.053755760192871,
      "learning_rate": 9.280708526920636e-05,
      "loss": 0.3094,
      "step": 7800
    },
    {
      "epoch": 0.17694754289298034,
      "grad_norm": 14.694463729858398,
      "learning_rate": 9.262380720539536e-05,
      "loss": 0.2687,
      "step": 7900
    },
    {
      "epoch": 0.17918738520808136,
      "grad_norm": 14.9842529296875,
      "learning_rate": 9.243840915672804e-05,
      "loss": 0.2539,
      "step": 8000
    },
    {
      "epoch": 0.17918738520808136,
      "eval_avg_non_pair_similarity": -0.0009227607365296818,
      "eval_avg_pair_similarity": -0.005276160409208387,
      "eval_loss": 0.22878731787204742,
      "eval_runtime": 19.5192,
      "eval_samples_per_second": 25.616,
      "eval_similarity_ratio": 5.717798992023619,
      "eval_steps_per_second": 0.82,
      "step": 8000
    },
    {
      "epoch": 0.18142722752318236,
      "grad_norm": 11.896175384521484,
      "learning_rate": 9.225090034436697e-05,
      "loss": 0.2739,
      "step": 8100
    },
    {
      "epoch": 0.18366706983828338,
      "grad_norm": 12.229938507080078,
      "learning_rate": 9.206129009445796e-05,
      "loss": 0.2467,
      "step": 8200
    },
    {
      "epoch": 0.1859069121533844,
      "grad_norm": 15.017464637756348,
      "learning_rate": 9.186958783766633e-05,
      "loss": 0.2668,
      "step": 8300
    },
    {
      "epoch": 0.18814675446848542,
      "grad_norm": 15.509882926940918,
      "learning_rate": 9.167580310870769e-05,
      "loss": 0.2614,
      "step": 8400
    },
    {
      "epoch": 0.19038659678358644,
      "grad_norm": 14.945472717285156,
      "learning_rate": 9.147994554587385e-05,
      "loss": 0.2266,
      "step": 8500
    },
    {
      "epoch": 0.19262643909868746,
      "grad_norm": 16.0211238861084,
      "learning_rate": 9.128202489055335e-05,
      "loss": 0.2428,
      "step": 8600
    },
    {
      "epoch": 0.19486628141378848,
      "grad_norm": 16.380727767944336,
      "learning_rate": 9.108205098674698e-05,
      "loss": 0.2578,
      "step": 8700
    },
    {
      "epoch": 0.1971061237288895,
      "grad_norm": 12.938665390014648,
      "learning_rate": 9.088003378057816e-05,
      "loss": 0.2445,
      "step": 8800
    },
    {
      "epoch": 0.1993459660439905,
      "grad_norm": 7.832601070404053,
      "learning_rate": 9.067598331979829e-05,
      "loss": 0.2284,
      "step": 8900
    },
    {
      "epoch": 0.2015858083590915,
      "grad_norm": 12.243515014648438,
      "learning_rate": 9.046990975328694e-05,
      "loss": 0.271,
      "step": 9000
    },
    {
      "epoch": 0.2015858083590915,
      "eval_avg_non_pair_similarity": -0.0005964781935742025,
      "eval_avg_pair_similarity": -0.0012636589542962611,
      "eval_loss": 0.24378302693367004,
      "eval_runtime": 19.5294,
      "eval_samples_per_second": 25.602,
      "eval_similarity_ratio": 2.1185333645211637,
      "eval_steps_per_second": 0.819,
      "step": 9000
    },
    {
      "epoch": 0.20382565067419253,
      "grad_norm": 14.057201385498047,
      "learning_rate": 9.026182333054714e-05,
      "loss": 0.2216,
      "step": 9100
    },
    {
      "epoch": 0.20606549298929355,
      "grad_norm": 9.724617958068848,
      "learning_rate": 9.005173440119555e-05,
      "loss": 0.2625,
      "step": 9200
    },
    {
      "epoch": 0.20830533530439457,
      "grad_norm": 13.209918975830078,
      "learning_rate": 8.983965341444769e-05,
      "loss": 0.2508,
      "step": 9300
    },
    {
      "epoch": 0.2105451776194956,
      "grad_norm": 12.0099458694458,
      "learning_rate": 8.962559091859828e-05,
      "loss": 0.2263,
      "step": 9400
    },
    {
      "epoch": 0.21278501993459661,
      "grad_norm": 11.473740577697754,
      "learning_rate": 8.940955756049658e-05,
      "loss": 0.256,
      "step": 9500
    },
    {
      "epoch": 0.21502486224969763,
      "grad_norm": 13.63650131225586,
      "learning_rate": 8.919156408501678e-05,
      "loss": 0.2594,
      "step": 9600
    },
    {
      "epoch": 0.21726470456479863,
      "grad_norm": 11.578638076782227,
      "learning_rate": 8.897162133452375e-05,
      "loss": 0.225,
      "step": 9700
    },
    {
      "epoch": 0.21950454687989965,
      "grad_norm": 8.985605239868164,
      "learning_rate": 8.874974024833351e-05,
      "loss": 0.2525,
      "step": 9800
    },
    {
      "epoch": 0.22174438919500067,
      "grad_norm": 15.842543601989746,
      "learning_rate": 8.852593186216942e-05,
      "loss": 0.239,
      "step": 9900
    },
    {
      "epoch": 0.2239842315101017,
      "grad_norm": 13.797926902770996,
      "learning_rate": 8.830020730761308e-05,
      "loss": 0.2133,
      "step": 10000
    },
    {
      "epoch": 0.2239842315101017,
      "eval_avg_non_pair_similarity": 3.824399728542623e-05,
      "eval_avg_pair_similarity": -0.010851175129879266,
      "eval_loss": 0.2623503506183624,
      "eval_runtime": 19.4973,
      "eval_samples_per_second": 25.645,
      "eval_similarity_ratio": -283.7353807159264,
      "eval_steps_per_second": 0.821,
      "step": 10000
    },
    {
      "epoch": 0.2262240738252027,
      "grad_norm": 13.315906524658203,
      "learning_rate": 8.807257781155081e-05,
      "loss": 0.2192,
      "step": 10100
    },
    {
      "epoch": 0.22846391614030373,
      "grad_norm": 11.403688430786133,
      "learning_rate": 8.784305469561519e-05,
      "loss": 0.2359,
      "step": 10200
    },
    {
      "epoch": 0.23070375845540475,
      "grad_norm": 11.676390647888184,
      "learning_rate": 8.761164937562199e-05,
      "loss": 0.224,
      "step": 10300
    },
    {
      "epoch": 0.23294360077050574,
      "grad_norm": 11.526522636413574,
      "learning_rate": 8.737837336100233e-05,
      "loss": 0.2288,
      "step": 10400
    },
    {
      "epoch": 0.23518344308560676,
      "grad_norm": 15.950361251831055,
      "learning_rate": 8.714323825423031e-05,
      "loss": 0.2388,
      "step": 10500
    },
    {
      "epoch": 0.23742328540070778,
      "grad_norm": 11.149569511413574,
      "learning_rate": 8.690625575024586e-05,
      "loss": 0.2067,
      "step": 10600
    },
    {
      "epoch": 0.2396631277158088,
      "grad_norm": 10.904093742370605,
      "learning_rate": 8.666743763587315e-05,
      "loss": 0.2354,
      "step": 10700
    },
    {
      "epoch": 0.24190297003090983,
      "grad_norm": 10.213909149169922,
      "learning_rate": 8.642679578923422e-05,
      "loss": 0.2236,
      "step": 10800
    },
    {
      "epoch": 0.24414281234601085,
      "grad_norm": 10.054357528686523,
      "learning_rate": 8.618434217915838e-05,
      "loss": 0.1987,
      "step": 10900
    },
    {
      "epoch": 0.24638265466111187,
      "grad_norm": 11.040483474731445,
      "learning_rate": 8.59400888645867e-05,
      "loss": 0.2471,
      "step": 11000
    },
    {
      "epoch": 0.24638265466111187,
      "eval_avg_non_pair_similarity": 0.00023486310084357872,
      "eval_avg_pair_similarity": 0.00031724373530596497,
      "eval_loss": 0.17524191737174988,
      "eval_runtime": 19.4456,
      "eval_samples_per_second": 25.713,
      "eval_similarity_ratio": 1.3507602265596101,
      "eval_steps_per_second": 0.823,
      "step": 11000
    },
    {
      "epoch": 0.2486224969762129,
      "grad_norm": 12.810904502868652,
      "learning_rate": 8.569404799397247e-05,
      "loss": 0.1974,
      "step": 11100
    },
    {
      "epoch": 0.2508623392913139,
      "grad_norm": 15.360380172729492,
      "learning_rate": 8.544623180467673e-05,
      "loss": 0.2204,
      "step": 11200
    },
    {
      "epoch": 0.2531021816064149,
      "grad_norm": 13.429972648620605,
      "learning_rate": 8.519665262235979e-05,
      "loss": 0.2479,
      "step": 11300
    },
    {
      "epoch": 0.25534202392151595,
      "grad_norm": 10.252517700195312,
      "learning_rate": 8.494532286036813e-05,
      "loss": 0.1886,
      "step": 11400
    },
    {
      "epoch": 0.25758186623661694,
      "grad_norm": 11.51858901977539,
      "learning_rate": 8.4692255019117e-05,
      "loss": 0.2191,
      "step": 11500
    },
    {
      "epoch": 0.25982170855171793,
      "grad_norm": 8.529491424560547,
      "learning_rate": 8.443746168546867e-05,
      "loss": 0.1856,
      "step": 11600
    },
    {
      "epoch": 0.262061550866819,
      "grad_norm": 7.846275806427002,
      "learning_rate": 8.418095553210641e-05,
      "loss": 0.1879,
      "step": 11700
    },
    {
      "epoch": 0.26430139318192,
      "grad_norm": 10.658947944641113,
      "learning_rate": 8.392274931690422e-05,
      "loss": 0.1998,
      "step": 11800
    },
    {
      "epoch": 0.266541235497021,
      "grad_norm": 8.623246192932129,
      "learning_rate": 8.366285588229223e-05,
      "loss": 0.2073,
      "step": 11900
    },
    {
      "epoch": 0.268781077812122,
      "grad_norm": 8.197981834411621,
      "learning_rate": 8.340128815461797e-05,
      "loss": 0.179,
      "step": 12000
    },
    {
      "epoch": 0.268781077812122,
      "eval_avg_non_pair_similarity": 5.694422117604282e-05,
      "eval_avg_pair_similarity": -0.00526782719604671,
      "eval_loss": 0.19077710807323456,
      "eval_runtime": 19.459,
      "eval_samples_per_second": 25.695,
      "eval_similarity_ratio": -92.50854761471308,
      "eval_steps_per_second": 0.822,
      "step": 12000
    },
    {
      "epoch": 0.27102092012722306,
      "grad_norm": 9.695572853088379,
      "learning_rate": 8.313805914350349e-05,
      "loss": 0.2022,
      "step": 12100
    },
    {
      "epoch": 0.27326076244232406,
      "grad_norm": 8.658490180969238,
      "learning_rate": 8.287318194119825e-05,
      "loss": 0.2011,
      "step": 12200
    },
    {
      "epoch": 0.2755006047574251,
      "grad_norm": 9.974045753479004,
      "learning_rate": 8.260666972192798e-05,
      "loss": 0.2163,
      "step": 12300
    },
    {
      "epoch": 0.2777404470725261,
      "grad_norm": 8.445626258850098,
      "learning_rate": 8.233853574123944e-05,
      "loss": 0.1947,
      "step": 12400
    },
    {
      "epoch": 0.2799802893876271,
      "grad_norm": 7.46968412399292,
      "learning_rate": 8.206879333534109e-05,
      "loss": 0.1709,
      "step": 12500
    },
    {
      "epoch": 0.28222013170272814,
      "grad_norm": 12.895581245422363,
      "learning_rate": 8.179745592043982e-05,
      "loss": 0.2281,
      "step": 12600
    },
    {
      "epoch": 0.28445997401782913,
      "grad_norm": 10.54282283782959,
      "learning_rate": 8.152453699207362e-05,
      "loss": 0.184,
      "step": 12700
    },
    {
      "epoch": 0.2866998163329302,
      "grad_norm": 7.9730634689331055,
      "learning_rate": 8.125005012444044e-05,
      "loss": 0.2525,
      "step": 12800
    },
    {
      "epoch": 0.2889396586480312,
      "grad_norm": 9.222777366638184,
      "learning_rate": 8.097400896972296e-05,
      "loss": 0.1912,
      "step": 12900
    },
    {
      "epoch": 0.2911795009631322,
      "grad_norm": 10.500025749206543,
      "learning_rate": 8.069642725740957e-05,
      "loss": 0.1948,
      "step": 13000
    },
    {
      "epoch": 0.2911795009631322,
      "eval_avg_non_pair_similarity": 0.0007876502425145761,
      "eval_avg_pair_similarity": -0.007292054696008563,
      "eval_loss": 0.18571369349956512,
      "eval_runtime": 19.4919,
      "eval_samples_per_second": 25.652,
      "eval_similarity_ratio": -9.257985718037302,
      "eval_steps_per_second": 0.821,
      "step": 13000
    },
    {
      "epoch": 0.2934193432782332,
      "grad_norm": 7.784255504608154,
      "learning_rate": 8.041731879361164e-05,
      "loss": 0.214,
      "step": 13100
    },
    {
      "epoch": 0.2956591855933342,
      "grad_norm": 9.356244087219238,
      "learning_rate": 8.013669746037662e-05,
      "loss": 0.168,
      "step": 13200
    },
    {
      "epoch": 0.29789902790843525,
      "grad_norm": 8.42744255065918,
      "learning_rate": 7.985457721499779e-05,
      "loss": 0.1759,
      "step": 13300
    },
    {
      "epoch": 0.30013887022353625,
      "grad_norm": 8.323335647583008,
      "learning_rate": 7.957097208931997e-05,
      "loss": 0.193,
      "step": 13400
    },
    {
      "epoch": 0.3023787125386373,
      "grad_norm": 10.35467529296875,
      "learning_rate": 7.928589618904158e-05,
      "loss": 0.244,
      "step": 13500
    },
    {
      "epoch": 0.3046185548537383,
      "grad_norm": 9.027731895446777,
      "learning_rate": 7.899936369301319e-05,
      "loss": 0.1878,
      "step": 13600
    },
    {
      "epoch": 0.30685839716883934,
      "grad_norm": 11.577035903930664,
      "learning_rate": 7.871138885253216e-05,
      "loss": 0.2033,
      "step": 13700
    },
    {
      "epoch": 0.30909823948394033,
      "grad_norm": 6.524970054626465,
      "learning_rate": 7.842198599063395e-05,
      "loss": 0.1792,
      "step": 13800
    },
    {
      "epoch": 0.3113380817990413,
      "grad_norm": 8.987520217895508,
      "learning_rate": 7.813116950137964e-05,
      "loss": 0.1936,
      "step": 13900
    },
    {
      "epoch": 0.31357792411414237,
      "grad_norm": 8.995404243469238,
      "learning_rate": 7.783895384914004e-05,
      "loss": 0.1831,
      "step": 14000
    },
    {
      "epoch": 0.31357792411414237,
      "eval_avg_non_pair_similarity": 0.000529102375272793,
      "eval_avg_pair_similarity": -0.001620051197707653,
      "eval_loss": 0.18957822024822235,
      "eval_runtime": 19.4615,
      "eval_samples_per_second": 25.692,
      "eval_similarity_ratio": -3.061886079933759,
      "eval_steps_per_second": 0.822,
      "step": 14000
    }
  ],
  "logging_steps": 100,
  "max_steps": 44646,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}