{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 2000,
  "global_step": 76965,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 6.496459429610862e-05,
      "grad_norm": 8.543986320495605,
      "learning_rate": 0.0,
      "loss": 2.8055,
      "step": 1
    },
    {
      "epoch": 0.12992918859221725,
      "grad_norm": 4.228509902954102,
      "learning_rate": 9.740449584199584e-07,
      "loss": 2.5053,
      "step": 2000
    },
    {
      "epoch": 0.12992918859221725,
      "eval_cosine_accuracy": 0.9490216970443726,
      "eval_loss": 0.9158492088317871,
      "eval_runtime": 24.8196,
      "eval_samples_per_second": 380.948,
      "eval_steps_per_second": 1.491,
      "step": 2000
    },
    {
      "epoch": 0.2598583771844345,
      "grad_norm": 4.29846715927124,
      "learning_rate": 1.948577182952183e-06,
      "loss": 2.0544,
      "step": 4000
    },
    {
      "epoch": 0.2598583771844345,
      "eval_cosine_accuracy": 0.9499735832214355,
      "eval_loss": 0.9200904369354248,
      "eval_runtime": 24.5453,
      "eval_samples_per_second": 385.206,
      "eval_steps_per_second": 1.507,
      "step": 4000
    },
    {
      "epoch": 0.38978756577665175,
      "grad_norm": 4.714716911315918,
      "learning_rate": 2.9226221413721413e-06,
      "loss": 1.7583,
      "step": 6000
    },
    {
      "epoch": 0.38978756577665175,
      "eval_cosine_accuracy": 0.9456372261047363,
      "eval_loss": 0.8998962640762329,
      "eval_runtime": 24.5547,
      "eval_samples_per_second": 385.059,
      "eval_steps_per_second": 1.507,
      "step": 6000
    },
    {
      "epoch": 0.519716754368869,
      "grad_norm": 4.514920234680176,
      "learning_rate": 3.8966670997921e-06,
      "loss": 1.5402,
      "step": 8000
    },
    {
      "epoch": 0.519716754368869,
      "eval_cosine_accuracy": 0.9466948509216309,
      "eval_loss": 0.8402906060218811,
      "eval_runtime": 24.5672,
      "eval_samples_per_second": 384.862,
      "eval_steps_per_second": 1.506,
      "step": 8000
    },
    {
      "epoch": 0.6496459429610862,
      "grad_norm": 4.10648250579834,
      "learning_rate": 4.871199324324324e-06,
      "loss": 1.3738,
      "step": 10000
    },
    {
      "epoch": 0.6496459429610862,
      "eval_cosine_accuracy": 0.947012186050415,
      "eval_loss": 0.8332843780517578,
      "eval_runtime": 24.6002,
      "eval_samples_per_second": 384.346,
      "eval_steps_per_second": 1.504,
      "step": 10000
    },
    {
      "epoch": 0.7795751315533035,
      "grad_norm": 4.327205657958984,
      "learning_rate": 5.844757016632017e-06,
      "loss": 1.241,
      "step": 12000
    },
    {
      "epoch": 0.7795751315533035,
      "eval_cosine_accuracy": 0.9478582739830017,
      "eval_loss": 0.8381510972976685,
      "eval_runtime": 24.3575,
      "eval_samples_per_second": 388.176,
      "eval_steps_per_second": 1.519,
      "step": 12000
    },
    {
      "epoch": 0.9095043201455207,
      "grad_norm": 4.739709854125977,
      "learning_rate": 6.818801975051975e-06,
      "loss": 1.1337,
      "step": 14000
    },
    {
      "epoch": 0.9095043201455207,
      "eval_cosine_accuracy": 0.9480698108673096,
      "eval_loss": 0.8515253663063049,
      "eval_runtime": 24.5042,
      "eval_samples_per_second": 385.853,
      "eval_steps_per_second": 1.51,
      "step": 14000
    },
    {
      "epoch": 1.039433508737738,
      "grad_norm": 3.9595558643341064,
      "learning_rate": 7.7933341995842e-06,
      "loss": 1.3481,
      "step": 16000
    },
    {
      "epoch": 1.039433508737738,
      "eval_cosine_accuracy": 0.9481755495071411,
      "eval_loss": 0.861483097076416,
      "eval_runtime": 24.9212,
      "eval_samples_per_second": 379.395,
      "eval_steps_per_second": 1.485,
      "step": 16000
    },
    {
      "epoch": 1.1693626973299551,
      "grad_norm": 3.8362483978271484,
      "learning_rate": 8.767379158004158e-06,
      "loss": 0.9775,
      "step": 18000
    },
    {
      "epoch": 1.1693626973299551,
      "eval_cosine_accuracy": 0.9490216970443726,
      "eval_loss": 0.866976261138916,
      "eval_runtime": 24.7567,
      "eval_samples_per_second": 381.917,
      "eval_steps_per_second": 1.495,
      "step": 18000
    },
    {
      "epoch": 1.2992918859221725,
      "grad_norm": 3.660410165786743,
      "learning_rate": 9.740936850311851e-06,
      "loss": 0.9106,
      "step": 20000
    },
    {
      "epoch": 1.2992918859221725,
      "eval_cosine_accuracy": 0.9511369466781616,
      "eval_loss": 0.8142719268798828,
      "eval_runtime": 24.6537,
      "eval_samples_per_second": 383.513,
      "eval_steps_per_second": 1.501,
      "step": 20000
    },
    {
      "epoch": 1.4292210745143896,
      "grad_norm": 3.5640735626220703,
      "learning_rate": 1.0715469074844075e-05,
      "loss": 0.8581,
      "step": 22000
    },
    {
      "epoch": 1.4292210745143896,
      "eval_cosine_accuracy": 0.9521946310997009,
      "eval_loss": 0.7982929944992065,
      "eval_runtime": 24.9083,
      "eval_samples_per_second": 379.592,
      "eval_steps_per_second": 1.485,
      "step": 22000
    },
    {
      "epoch": 1.559150263106607,
      "grad_norm": 3.663590908050537,
      "learning_rate": 1.1689514033264035e-05,
      "loss": 0.8119,
      "step": 24000
    },
    {
      "epoch": 1.559150263106607,
      "eval_cosine_accuracy": 0.9538868069648743,
      "eval_loss": 0.7904353141784668,
      "eval_runtime": 24.419,
      "eval_samples_per_second": 387.198,
      "eval_steps_per_second": 1.515,
      "step": 24000
    },
    {
      "epoch": 1.6890794516988241,
      "grad_norm": 3.101531505584717,
      "learning_rate": 1.2664046257796258e-05,
      "loss": 0.775,
      "step": 26000
    },
    {
      "epoch": 1.6890794516988241,
      "eval_cosine_accuracy": 0.9561078548431396,
      "eval_loss": 0.7433677315711975,
      "eval_runtime": 24.4716,
      "eval_samples_per_second": 386.366,
      "eval_steps_per_second": 1.512,
      "step": 26000
    },
    {
      "epoch": 1.8190086402910413,
      "grad_norm": 3.2498459815979004,
      "learning_rate": 1.3638091216216216e-05,
      "loss": 0.7376,
      "step": 28000
    },
    {
      "epoch": 1.8190086402910413,
      "eval_cosine_accuracy": 0.9565309286117554,
      "eval_loss": 0.779238760471344,
      "eval_runtime": 24.4481,
      "eval_samples_per_second": 386.737,
      "eval_steps_per_second": 1.513,
      "step": 28000
    },
    {
      "epoch": 1.9489378288832586,
      "grad_norm": 2.890700101852417,
      "learning_rate": 1.4612136174636175e-05,
      "loss": 0.7072,
      "step": 30000
    },
    {
      "epoch": 1.9489378288832586,
      "eval_cosine_accuracy": 0.9578000903129578,
      "eval_loss": 0.7882058024406433,
      "eval_runtime": 24.4341,
      "eval_samples_per_second": 386.959,
      "eval_steps_per_second": 1.514,
      "step": 30000
    },
    {
      "epoch": 2.078867017475476,
      "grad_norm": 3.015334367752075,
      "learning_rate": 1.5586181133056135e-05,
      "loss": 0.9877,
      "step": 32000
    },
    {
      "epoch": 2.078867017475476,
      "eval_cosine_accuracy": 0.9568482041358948,
      "eval_loss": 0.8047419786453247,
      "eval_runtime": 25.0473,
      "eval_samples_per_second": 377.486,
      "eval_steps_per_second": 1.477,
      "step": 32000
    },
    {
      "epoch": 2.208796206067693,
      "grad_norm": 2.955296039581299,
      "learning_rate": 1.6559738825363826e-05,
      "loss": 0.6526,
      "step": 34000
    },
    {
      "epoch": 2.208796206067693,
      "eval_cosine_accuracy": 0.959280788898468,
      "eval_loss": 0.7193956971168518,
      "eval_runtime": 24.5845,
      "eval_samples_per_second": 384.592,
      "eval_steps_per_second": 1.505,
      "step": 34000
    },
    {
      "epoch": 2.3387253946599103,
      "grad_norm": 2.620438575744629,
      "learning_rate": 1.7534271049896048e-05,
      "loss": 0.6275,
      "step": 36000
    },
    {
      "epoch": 2.3387253946599103,
      "eval_cosine_accuracy": 0.9604442119598389,
      "eval_loss": 0.725292444229126,
      "eval_runtime": 24.6763,
      "eval_samples_per_second": 383.162,
      "eval_steps_per_second": 1.499,
      "step": 36000
    },
    {
      "epoch": 2.4686545832521274,
      "grad_norm": 3.143752098083496,
      "learning_rate": 1.8508803274428273e-05,
      "loss": 0.6109,
      "step": 38000
    },
    {
      "epoch": 2.4686545832521274,
      "eval_cosine_accuracy": 0.9617133736610413,
      "eval_loss": 0.7545588612556458,
      "eval_runtime": 27.6088,
      "eval_samples_per_second": 342.463,
      "eval_steps_per_second": 1.34,
      "step": 38000
    },
    {
      "epoch": 2.598583771844345,
      "grad_norm": 2.62251353263855,
      "learning_rate": 1.9482848232848233e-05,
      "loss": 0.5918,
      "step": 40000
    },
    {
      "epoch": 2.598583771844345,
      "eval_cosine_accuracy": 0.9620306491851807,
      "eval_loss": 0.7361720204353333,
      "eval_runtime": 24.6649,
      "eval_samples_per_second": 383.339,
      "eval_steps_per_second": 1.5,
      "step": 40000
    },
    {
      "epoch": 2.728512960436562,
      "grad_norm": 2.776906967163086,
      "learning_rate": 2.0456405925155924e-05,
      "loss": 0.5768,
      "step": 42000
    },
    {
      "epoch": 2.728512960436562,
      "eval_cosine_accuracy": 0.9628767967224121,
      "eval_loss": 0.7261704206466675,
      "eval_runtime": 24.5203,
      "eval_samples_per_second": 385.598,
      "eval_steps_per_second": 1.509,
      "step": 42000
    },
    {
      "epoch": 2.8584421490287792,
      "grad_norm": 3.0380618572235107,
      "learning_rate": 2.143093814968815e-05,
      "loss": 0.5618,
      "step": 44000
    },
    {
      "epoch": 2.8584421490287792,
      "eval_cosine_accuracy": 0.96308833360672,
      "eval_loss": 0.7182765603065491,
      "eval_runtime": 24.7751,
      "eval_samples_per_second": 381.633,
      "eval_steps_per_second": 1.493,
      "step": 44000
    },
    {
      "epoch": 2.988371337620997,
      "grad_norm": 6.228867053985596,
      "learning_rate": 2.2404495841995844e-05,
      "loss": 0.7458,
      "step": 46000
    },
    {
      "epoch": 2.988371337620997,
      "eval_cosine_accuracy": 0.9553675055503845,
      "eval_loss": 0.819146454334259,
      "eval_runtime": 24.7404,
      "eval_samples_per_second": 382.168,
      "eval_steps_per_second": 1.496,
      "step": 46000
    },
    {
      "epoch": 3.118300526213214,
      "grad_norm": 2.492626428604126,
      "learning_rate": 2.337902806652807e-05,
      "loss": 0.643,
      "step": 48000
    },
    {
      "epoch": 3.118300526213214,
      "eval_cosine_accuracy": 0.9622421860694885,
      "eval_loss": 0.7653650641441345,
      "eval_runtime": 25.1236,
      "eval_samples_per_second": 376.339,
      "eval_steps_per_second": 1.473,
      "step": 48000
    },
    {
      "epoch": 3.248229714805431,
      "grad_norm": 2.750302314758301,
      "learning_rate": 2.4353073024948026e-05,
      "loss": 0.5174,
      "step": 50000
    },
    {
      "epoch": 3.248229714805431,
      "eval_cosine_accuracy": 0.9646747708320618,
      "eval_loss": 0.7210116386413574,
      "eval_runtime": 24.8407,
      "eval_samples_per_second": 380.626,
      "eval_steps_per_second": 1.489,
      "step": 50000
    },
    {
      "epoch": 3.3781589033976482,
      "grad_norm": 2.3254339694976807,
      "learning_rate": 2.5327117983367986e-05,
      "loss": 0.5049,
      "step": 52000
    },
    {
      "epoch": 3.3781589033976482,
      "eval_cosine_accuracy": 0.9627709984779358,
      "eval_loss": 0.7164391875267029,
      "eval_runtime": 24.8177,
      "eval_samples_per_second": 380.979,
      "eval_steps_per_second": 1.491,
      "step": 52000
    },
    {
      "epoch": 3.5080880919898654,
      "grad_norm": 2.2204036712646484,
      "learning_rate": 2.630165020790021e-05,
      "loss": 0.5003,
      "step": 54000
    },
    {
      "epoch": 3.5080880919898654,
      "eval_cosine_accuracy": 0.9658381938934326,
      "eval_loss": 0.6905584931373596,
      "eval_runtime": 24.8122,
      "eval_samples_per_second": 381.063,
      "eval_steps_per_second": 1.491,
      "step": 54000
    },
    {
      "epoch": 3.6380172805820825,
      "grad_norm": 2.4252846240997314,
      "learning_rate": 2.7275695166320167e-05,
      "loss": 0.4873,
      "step": 56000
    },
    {
      "epoch": 3.6380172805820825,
      "eval_cosine_accuracy": 0.9643574953079224,
      "eval_loss": 0.6991865634918213,
      "eval_runtime": 24.8084,
      "eval_samples_per_second": 381.121,
      "eval_steps_per_second": 1.491,
      "step": 56000
    },
    {
      "epoch": 3.7679464691743,
      "grad_norm": 2.161461591720581,
      "learning_rate": 2.824925285862786e-05,
      "loss": 0.4786,
      "step": 58000
    },
    {
      "epoch": 3.7679464691743,
      "eval_cosine_accuracy": 0.9658381938934326,
      "eval_loss": 0.6876057386398315,
      "eval_runtime": 25.0108,
      "eval_samples_per_second": 378.037,
      "eval_steps_per_second": 1.479,
      "step": 58000
    },
    {
      "epoch": 3.8978756577665172,
      "grad_norm": 2.3848555088043213,
      "learning_rate": 2.9223785083160084e-05,
      "loss": 0.4738,
      "step": 60000
    },
    {
      "epoch": 3.8978756577665172,
      "eval_cosine_accuracy": 0.9663670063018799,
      "eval_loss": 0.6974127888679504,
      "eval_runtime": 24.9874,
      "eval_samples_per_second": 378.39,
      "eval_steps_per_second": 1.481,
      "step": 60000
    },
    {
      "epoch": 4.027804846358735,
      "grad_norm": 2.194507360458374,
      "learning_rate": 2.9998576762001792e-05,
      "loss": 0.7724,
      "step": 62000
    },
    {
      "epoch": 4.027804846358735,
      "eval_cosine_accuracy": 0.9637228846549988,
      "eval_loss": 0.7903416752815247,
      "eval_runtime": 25.2398,
      "eval_samples_per_second": 374.607,
      "eval_steps_per_second": 1.466,
      "step": 62000
    },
    {
      "epoch": 4.157734034950952,
      "grad_norm": 2.304309844970703,
      "learning_rate": 2.994983944707142e-05,
      "loss": 0.446,
      "step": 64000
    },
    {
      "epoch": 4.157734034950952,
      "eval_cosine_accuracy": 0.9673188924789429,
      "eval_loss": 0.702480673789978,
      "eval_runtime": 24.8537,
      "eval_samples_per_second": 380.427,
      "eval_steps_per_second": 1.489,
      "step": 64000
    },
    {
      "epoch": 4.287663223543169,
      "grad_norm": 2.074822187423706,
      "learning_rate": 2.9832020332202645e-05,
      "loss": 0.4415,
      "step": 66000
    },
    {
      "epoch": 4.287663223543169,
      "eval_cosine_accuracy": 0.966155469417572,
      "eval_loss": 0.6973665952682495,
      "eval_runtime": 25.0867,
      "eval_samples_per_second": 376.893,
      "eval_steps_per_second": 1.475,
      "step": 66000
    },
    {
      "epoch": 4.417592412135386,
      "grad_norm": 2.2352564334869385,
      "learning_rate": 2.96455466160021e-05,
      "loss": 0.4351,
      "step": 68000
    },
    {
      "epoch": 4.417592412135386,
      "eval_cosine_accuracy": 0.9681649804115295,
      "eval_loss": 0.7310513854026794,
      "eval_runtime": 25.0704,
      "eval_samples_per_second": 377.138,
      "eval_steps_per_second": 1.476,
      "step": 68000
    },
    {
      "epoch": 4.547521600727603,
      "grad_norm": 2.129157304763794,
      "learning_rate": 2.9391610307156934e-05,
      "loss": 0.4316,
      "step": 70000
    },
    {
      "epoch": 4.547521600727603,
      "eval_cosine_accuracy": 0.9675304293632507,
      "eval_loss": 0.7204111218452454,
      "eval_runtime": 24.9634,
      "eval_samples_per_second": 378.755,
      "eval_steps_per_second": 1.482,
      "step": 70000
    },
    {
      "epoch": 4.6774507893198205,
      "grad_norm": 2.012279510498047,
      "learning_rate": 2.907087793019168e-05,
      "loss": 0.4271,
      "step": 72000
    },
    {
      "epoch": 4.6774507893198205,
      "eval_cosine_accuracy": 0.967107355594635,
      "eval_loss": 0.7272541522979736,
      "eval_runtime": 25.2676,
      "eval_samples_per_second": 374.194,
      "eval_steps_per_second": 1.464,
      "step": 72000
    },
    {
      "epoch": 4.807379977912038,
      "grad_norm": 1.9547345638275146,
      "learning_rate": 2.8685680306643546e-05,
      "loss": 0.4198,
      "step": 74000
    },
    {
      "epoch": 4.807379977912038,
      "eval_cosine_accuracy": 0.969011127948761,
      "eval_loss": 0.6957398056983948,
      "eval_runtime": 25.4095,
      "eval_samples_per_second": 372.105,
      "eval_steps_per_second": 1.456,
      "step": 74000
    },
    {
      "epoch": 4.937309166504255,
      "grad_norm": 2.0091848373413086,
      "learning_rate": 2.823664247871571e-05,
      "loss": 0.4167,
      "step": 76000
    },
    {
      "epoch": 4.937309166504255,
      "eval_cosine_accuracy": 0.9691168665885925,
      "eval_loss": 0.6990842819213867,
      "eval_runtime": 25.0447,
      "eval_samples_per_second": 377.524,
      "eval_steps_per_second": 1.477,
      "step": 76000
    }
  ],
  "logging_steps": 2000,
  "max_steps": 153930,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}