{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 5958,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016784155756965426,
      "grad_norm": 83914.40625,
      "learning_rate": 4.916079221215173e-05,
      "loss": 0.9807,
      "step": 100
    },
    {
      "epoch": 0.03356831151393085,
      "grad_norm": 97513.53125,
      "learning_rate": 4.832158442430346e-05,
      "loss": 0.9534,
      "step": 200
    },
    {
      "epoch": 0.050352467270896276,
      "grad_norm": 80779.1171875,
      "learning_rate": 4.748237663645519e-05,
      "loss": 0.9194,
      "step": 300
    },
    {
      "epoch": 0.0671366230278617,
      "grad_norm": 71209.2421875,
      "learning_rate": 4.664316884860692e-05,
      "loss": 0.9223,
      "step": 400
    },
    {
      "epoch": 0.08392077878482712,
      "grad_norm": 75543.140625,
      "learning_rate": 4.5803961060758646e-05,
      "loss": 0.9167,
      "step": 500
    },
    {
      "epoch": 0.10070493454179255,
      "grad_norm": 81713.671875,
      "learning_rate": 4.4964753272910375e-05,
      "loss": 0.8878,
      "step": 600
    },
    {
      "epoch": 0.11748909029875797,
      "grad_norm": 79699.15625,
      "learning_rate": 4.4125545485062104e-05,
      "loss": 0.8801,
      "step": 700
    },
    {
      "epoch": 0.1342732460557234,
      "grad_norm": 57691.8359375,
      "learning_rate": 4.328633769721383e-05,
      "loss": 0.9157,
      "step": 800
    },
    {
      "epoch": 0.1510574018126888,
      "grad_norm": 78234.8359375,
      "learning_rate": 4.244712990936556e-05,
      "loss": 0.8952,
      "step": 900
    },
    {
      "epoch": 0.16784155756965424,
      "grad_norm": 64203.4140625,
      "learning_rate": 4.160792212151729e-05,
      "loss": 0.9049,
      "step": 1000
    },
    {
      "epoch": 0.18462571332661967,
      "grad_norm": 63400.09375,
      "learning_rate": 4.076871433366902e-05,
      "loss": 0.8827,
      "step": 1100
    },
    {
      "epoch": 0.2014098690835851,
      "grad_norm": 71029.0078125,
      "learning_rate": 3.992950654582075e-05,
      "loss": 0.9008,
      "step": 1200
    },
    {
      "epoch": 0.21819402484055053,
      "grad_norm": 67397.421875,
      "learning_rate": 3.9090298757972476e-05,
      "loss": 0.889,
      "step": 1300
    },
    {
      "epoch": 0.23497818059751593,
      "grad_norm": 63474.31640625,
      "learning_rate": 3.8251090970124205e-05,
      "loss": 0.8945,
      "step": 1400
    },
    {
      "epoch": 0.25176233635448136,
      "grad_norm": 61201.125,
      "learning_rate": 3.7411883182275934e-05,
      "loss": 0.8427,
      "step": 1500
    },
    {
      "epoch": 0.2685464921114468,
      "grad_norm": 77147.5390625,
      "learning_rate": 3.657267539442766e-05,
      "loss": 0.8659,
      "step": 1600
    },
    {
      "epoch": 0.2853306478684122,
      "grad_norm": 135552.828125,
      "learning_rate": 3.573346760657939e-05,
      "loss": 0.8756,
      "step": 1700
    },
    {
      "epoch": 0.3021148036253776,
      "grad_norm": 62637.78515625,
      "learning_rate": 3.489425981873112e-05,
      "loss": 0.8552,
      "step": 1800
    },
    {
      "epoch": 0.3188989593823431,
      "grad_norm": 86379.7578125,
      "learning_rate": 3.405505203088285e-05,
      "loss": 0.8667,
      "step": 1900
    },
    {
      "epoch": 0.3356831151393085,
      "grad_norm": 70539.9921875,
      "learning_rate": 3.321584424303458e-05,
      "loss": 0.8466,
      "step": 2000
    },
    {
      "epoch": 0.35246727089627394,
      "grad_norm": 59345.5234375,
      "learning_rate": 3.2376636455186307e-05,
      "loss": 0.8694,
      "step": 2100
    },
    {
      "epoch": 0.36925142665323935,
      "grad_norm": 72302.8828125,
      "learning_rate": 3.1537428667338035e-05,
      "loss": 0.8417,
      "step": 2200
    },
    {
      "epoch": 0.38603558241020475,
      "grad_norm": 65996.3828125,
      "learning_rate": 3.0698220879489764e-05,
      "loss": 0.8473,
      "step": 2300
    },
    {
      "epoch": 0.4028197381671702,
      "grad_norm": 84450.78125,
      "learning_rate": 2.9859013091641493e-05,
      "loss": 0.8247,
      "step": 2400
    },
    {
      "epoch": 0.4196038939241356,
      "grad_norm": 69873.5390625,
      "learning_rate": 2.9019805303793218e-05,
      "loss": 0.8501,
      "step": 2500
    },
    {
      "epoch": 0.43638804968110106,
      "grad_norm": 69021.078125,
      "learning_rate": 2.818059751594495e-05,
      "loss": 0.8284,
      "step": 2600
    },
    {
      "epoch": 0.45317220543806647,
      "grad_norm": 75120.1171875,
      "learning_rate": 2.734138972809668e-05,
      "loss": 0.847,
      "step": 2700
    },
    {
      "epoch": 0.46995636119503187,
      "grad_norm": 63536.984375,
      "learning_rate": 2.6502181940248405e-05,
      "loss": 0.8222,
      "step": 2800
    },
    {
      "epoch": 0.4867405169519973,
      "grad_norm": 66156.6796875,
      "learning_rate": 2.5662974152400137e-05,
      "loss": 0.8227,
      "step": 2900
    },
    {
      "epoch": 0.5035246727089627,
      "grad_norm": 69988.140625,
      "learning_rate": 2.4823766364551865e-05,
      "loss": 0.8048,
      "step": 3000
    },
    {
      "epoch": 0.5203088284659282,
      "grad_norm": 71663.9921875,
      "learning_rate": 2.398455857670359e-05,
      "loss": 0.8351,
      "step": 3100
    },
    {
      "epoch": 0.5370929842228936,
      "grad_norm": 64774.16796875,
      "learning_rate": 2.3145350788855323e-05,
      "loss": 0.8121,
      "step": 3200
    },
    {
      "epoch": 0.553877139979859,
      "grad_norm": 63534.6015625,
      "learning_rate": 2.2306143001007052e-05,
      "loss": 0.824,
      "step": 3300
    },
    {
      "epoch": 0.5706612957368244,
      "grad_norm": 73855.3984375,
      "learning_rate": 2.1466935213158777e-05,
      "loss": 0.817,
      "step": 3400
    },
    {
      "epoch": 0.5874454514937899,
      "grad_norm": 68886.3984375,
      "learning_rate": 2.062772742531051e-05,
      "loss": 0.8078,
      "step": 3500
    },
    {
      "epoch": 0.6042296072507553,
      "grad_norm": 68186.78125,
      "learning_rate": 1.9788519637462235e-05,
      "loss": 0.8175,
      "step": 3600
    },
    {
      "epoch": 0.6210137630077207,
      "grad_norm": 74676.046875,
      "learning_rate": 1.8949311849613967e-05,
      "loss": 0.7929,
      "step": 3700
    },
    {
      "epoch": 0.6377979187646862,
      "grad_norm": 68078.203125,
      "learning_rate": 1.8110104061765696e-05,
      "loss": 0.7941,
      "step": 3800
    },
    {
      "epoch": 0.6545820745216515,
      "grad_norm": 66256.53125,
      "learning_rate": 1.727089627391742e-05,
      "loss": 0.8264,
      "step": 3900
    },
    {
      "epoch": 0.671366230278617,
      "grad_norm": 71649.8125,
      "learning_rate": 1.6431688486069153e-05,
      "loss": 0.7731,
      "step": 4000
    },
    {
      "epoch": 0.6881503860355824,
      "grad_norm": 70382.28125,
      "learning_rate": 1.559248069822088e-05,
      "loss": 0.8076,
      "step": 4100
    },
    {
      "epoch": 0.7049345417925479,
      "grad_norm": 70303.125,
      "learning_rate": 1.4753272910372609e-05,
      "loss": 0.7841,
      "step": 4200
    },
    {
      "epoch": 0.7217186975495132,
      "grad_norm": 62633.87109375,
      "learning_rate": 1.391406512252434e-05,
      "loss": 0.7781,
      "step": 4300
    },
    {
      "epoch": 0.7385028533064787,
      "grad_norm": 69537.5625,
      "learning_rate": 1.3074857334676067e-05,
      "loss": 0.7846,
      "step": 4400
    },
    {
      "epoch": 0.7552870090634441,
      "grad_norm": 65905.9765625,
      "learning_rate": 1.2235649546827795e-05,
      "loss": 0.7818,
      "step": 4500
    },
    {
      "epoch": 0.7720711648204095,
      "grad_norm": 66610.0703125,
      "learning_rate": 1.1396441758979524e-05,
      "loss": 0.774,
      "step": 4600
    },
    {
      "epoch": 0.788855320577375,
      "grad_norm": 73451.1953125,
      "learning_rate": 1.0557233971131253e-05,
      "loss": 0.7768,
      "step": 4700
    },
    {
      "epoch": 0.8056394763343404,
      "grad_norm": 85429.0390625,
      "learning_rate": 9.718026183282982e-06,
      "loss": 0.7805,
      "step": 4800
    },
    {
      "epoch": 0.8224236320913058,
      "grad_norm": 63732.640625,
      "learning_rate": 8.87881839543471e-06,
      "loss": 0.7685,
      "step": 4900
    },
    {
      "epoch": 0.8392077878482712,
      "grad_norm": 65406.63671875,
      "learning_rate": 8.039610607586439e-06,
      "loss": 0.7504,
      "step": 5000
    },
    {
      "epoch": 0.8559919436052367,
      "grad_norm": 74580.1484375,
      "learning_rate": 7.200402819738168e-06,
      "loss": 0.7814,
      "step": 5100
    },
    {
      "epoch": 0.8727760993622021,
      "grad_norm": 73900.3671875,
      "learning_rate": 6.361195031889897e-06,
      "loss": 0.7749,
      "step": 5200
    },
    {
      "epoch": 0.8895602551191675,
      "grad_norm": 77174.4375,
      "learning_rate": 5.5219872440416254e-06,
      "loss": 0.7862,
      "step": 5300
    },
    {
      "epoch": 0.9063444108761329,
      "grad_norm": 61536.1875,
      "learning_rate": 4.682779456193353e-06,
      "loss": 0.7669,
      "step": 5400
    },
    {
      "epoch": 0.9231285666330984,
      "grad_norm": 60535.73828125,
      "learning_rate": 3.843571668345083e-06,
      "loss": 0.7767,
      "step": 5500
    },
    {
      "epoch": 0.9399127223900637,
      "grad_norm": 57083.25,
      "learning_rate": 3.0043638804968113e-06,
      "loss": 0.7772,
      "step": 5600
    },
    {
      "epoch": 0.9566968781470292,
      "grad_norm": 65630.703125,
      "learning_rate": 2.16515609264854e-06,
      "loss": 0.7749,
      "step": 5700
    },
    {
      "epoch": 0.9734810339039947,
      "grad_norm": 66215.3359375,
      "learning_rate": 1.3259483048002687e-06,
      "loss": 0.782,
      "step": 5800
    },
    {
      "epoch": 0.99026518966096,
      "grad_norm": 72915.515625,
      "learning_rate": 4.867405169519974e-07,
      "loss": 0.7722,
      "step": 5900
    },
    {
      "epoch": 1.0,
      "step": 5958,
      "total_flos": 9.08850744042455e+17,
      "train_loss": 0.8321872255313152,
      "train_runtime": 82908.8976,
      "train_samples_per_second": 0.862,
      "train_steps_per_second": 0.072
    }
  ],
  "logging_steps": 100,
  "max_steps": 5958,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5958,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.08850744042455e+17,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}