{
  "best_global_step": 800,
  "best_metric": 0.5327131748199463,
  "best_model_checkpoint": "models/generation/query/checkpoint-800",
  "epoch": 17.5,
  "eval_steps": 400,
  "global_step": 3500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 4.102665424346924,
      "learning_rate": 8.166666666666667e-05,
      "loss": 9.566,
      "step": 50
    },
    {
      "epoch": 0.5,
      "grad_norm": 3.9690704345703125,
      "learning_rate": 9.996828927498017e-05,
      "loss": 2.8146,
      "step": 100
    },
    {
      "epoch": 0.75,
      "grad_norm": 3.130918502807617,
      "learning_rate": 9.983493166277486e-05,
      "loss": 2.4341,
      "step": 150
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.8213088512420654,
      "learning_rate": 9.959768225002347e-05,
      "loss": 2.3782,
      "step": 200
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.7424190044403076,
      "learning_rate": 9.925703563494947e-05,
      "loss": 2.2545,
      "step": 250
    },
    {
      "epoch": 1.5,
      "grad_norm": 2.545445442199707,
      "learning_rate": 9.881370196982982e-05,
      "loss": 2.2209,
      "step": 300
    },
    {
      "epoch": 1.75,
      "grad_norm": 2.717111587524414,
      "learning_rate": 9.826860548052725e-05,
      "loss": 2.2043,
      "step": 350
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.9653303623199463,
      "learning_rate": 9.76228825397397e-05,
      "loss": 2.1926,
      "step": 400
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5487204194068909,
      "eval_runtime": 18.2132,
      "eval_samples_per_second": 21.962,
      "eval_steps_per_second": 5.491,
      "step": 400
    },
    {
      "epoch": 2.25,
      "grad_norm": 2.5305309295654297,
      "learning_rate": 9.687787929798317e-05,
      "loss": 2.0739,
      "step": 450
    },
    {
      "epoch": 2.5,
      "grad_norm": 2.5425493717193604,
      "learning_rate": 9.603514887724691e-05,
      "loss": 2.0889,
      "step": 500
    },
    {
      "epoch": 2.75,
      "grad_norm": 2.6191768646240234,
      "learning_rate": 9.509644813317144e-05,
      "loss": 2.0782,
      "step": 550
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.3347673416137695,
      "learning_rate": 9.406373399249911e-05,
      "loss": 2.0794,
      "step": 600
    },
    {
      "epoch": 3.25,
      "grad_norm": 2.630434274673462,
      "learning_rate": 9.293915937343299e-05,
      "loss": 1.971,
      "step": 650
    },
    {
      "epoch": 3.5,
      "grad_norm": 2.6370699405670166,
      "learning_rate": 9.172506869740849e-05,
      "loss": 1.964,
      "step": 700
    },
    {
      "epoch": 3.75,
      "grad_norm": 2.5607361793518066,
      "learning_rate": 9.042399300163484e-05,
      "loss": 1.9781,
      "step": 750
    },
    {
      "epoch": 4.0,
      "grad_norm": 2.5376696586608887,
      "learning_rate": 8.90386446625952e-05,
      "loss": 1.9771,
      "step": 800
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.5327131748199463,
      "eval_runtime": 18.1107,
      "eval_samples_per_second": 22.086,
      "eval_steps_per_second": 5.522,
      "step": 800
    },
    {
      "epoch": 4.25,
      "grad_norm": 2.8353099822998047,
      "learning_rate": 8.757191174150532e-05,
      "loss": 1.8681,
      "step": 850
    },
    {
      "epoch": 4.5,
      "grad_norm": 2.8943753242492676,
      "learning_rate": 8.60268519635192e-05,
      "loss": 1.8682,
      "step": 900
    },
    {
      "epoch": 4.75,
      "grad_norm": 2.9507288932800293,
      "learning_rate": 8.440668634323305e-05,
      "loss": 1.8892,
      "step": 950
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.7886505126953125,
      "learning_rate": 8.271479246977678e-05,
      "loss": 1.8694,
      "step": 1000
    },
    {
      "epoch": 5.25,
      "grad_norm": 3.3199687004089355,
      "learning_rate": 8.095469746549172e-05,
      "loss": 1.7663,
      "step": 1050
    },
    {
      "epoch": 5.5,
      "grad_norm": 3.080099105834961,
      "learning_rate": 7.913007063287361e-05,
      "loss": 1.7902,
      "step": 1100
    },
    {
      "epoch": 5.75,
      "grad_norm": 2.8006722927093506,
      "learning_rate": 7.724471580511021e-05,
      "loss": 1.7655,
      "step": 1150
    },
    {
      "epoch": 6.0,
      "grad_norm": 3.0085508823394775,
      "learning_rate": 7.530256341615994e-05,
      "loss": 1.7722,
      "step": 1200
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.5345460176467896,
      "eval_runtime": 18.3119,
      "eval_samples_per_second": 21.844,
      "eval_steps_per_second": 5.461,
      "step": 1200
    },
    {
      "epoch": 6.25,
      "grad_norm": 3.3692119121551514,
      "learning_rate": 7.33076623069039e-05,
      "loss": 1.6605,
      "step": 1250
    },
    {
      "epoch": 6.5,
      "grad_norm": 3.402806282043457,
      "learning_rate": 7.126417128445263e-05,
      "loss": 1.662,
      "step": 1300
    },
    {
      "epoch": 6.75,
      "grad_norm": 3.5842807292938232,
      "learning_rate": 6.917635045220425e-05,
      "loss": 1.6779,
      "step": 1350
    },
    {
      "epoch": 7.0,
      "grad_norm": 3.314953565597534,
      "learning_rate": 6.704855232872843e-05,
      "loss": 1.6842,
      "step": 1400
    },
    {
      "epoch": 7.25,
      "grad_norm": 4.195573329925537,
      "learning_rate": 6.488521277399067e-05,
      "loss": 1.5449,
      "step": 1450
    },
    {
      "epoch": 7.5,
      "grad_norm": 3.56549334526062,
      "learning_rate": 6.26908417418333e-05,
      "loss": 1.564,
      "step": 1500
    },
    {
      "epoch": 7.75,
      "grad_norm": 3.6120212078094482,
      "learning_rate": 6.0470013877991525e-05,
      "loss": 1.5727,
      "step": 1550
    },
    {
      "epoch": 8.0,
      "grad_norm": 3.4173295497894287,
      "learning_rate": 5.8227358983245274e-05,
      "loss": 1.5821,
      "step": 1600
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.5576403737068176,
      "eval_runtime": 18.1898,
      "eval_samples_per_second": 21.99,
      "eval_steps_per_second": 5.498,
      "step": 1600
    },
    {
      "epoch": 8.25,
      "grad_norm": 4.163740158081055,
      "learning_rate": 5.5967552361588e-05,
      "loss": 1.4497,
      "step": 1650
    },
    {
      "epoch": 8.5,
      "grad_norm": 4.105224132537842,
      "learning_rate": 5.3695305073534455e-05,
      "loss": 1.4493,
      "step": 1700
    },
    {
      "epoch": 8.75,
      "grad_norm": 4.076621055603027,
      "learning_rate": 5.141535411488584e-05,
      "loss": 1.4613,
      "step": 1750
    },
    {
      "epoch": 9.0,
      "grad_norm": 4.06991720199585,
      "learning_rate": 4.913245254142751e-05,
      "loss": 1.475,
      "step": 1800
    },
    {
      "epoch": 9.25,
      "grad_norm": 4.908267021179199,
      "learning_rate": 4.685135956014587e-05,
      "loss": 1.3275,
      "step": 1850
    },
    {
      "epoch": 9.5,
      "grad_norm": 4.249743938446045,
      "learning_rate": 4.4576830607621834e-05,
      "loss": 1.3592,
      "step": 1900
    },
    {
      "epoch": 9.75,
      "grad_norm": 4.3078932762146,
      "learning_rate": 4.231360743628464e-05,
      "loss": 1.3674,
      "step": 1950
    },
    {
      "epoch": 10.0,
      "grad_norm": 4.63353157043457,
      "learning_rate": 4.00664082291931e-05,
      "loss": 1.3652,
      "step": 2000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.6032015681266785,
      "eval_runtime": 18.102,
      "eval_samples_per_second": 22.097,
      "eval_steps_per_second": 5.524,
      "step": 2000
    },
    {
      "epoch": 10.25,
      "grad_norm": 4.48530912399292,
      "learning_rate": 3.78399177639524e-05,
      "loss": 1.247,
      "step": 2050
    },
    {
      "epoch": 10.5,
      "grad_norm": 4.907229423522949,
      "learning_rate": 3.563877764627195e-05,
      "loss": 1.2537,
      "step": 2100
    },
    {
      "epoch": 10.75,
      "grad_norm": 5.136667728424072,
      "learning_rate": 3.34675766335243e-05,
      "loss": 1.2556,
      "step": 2150
    },
    {
      "epoch": 11.0,
      "grad_norm": 4.633574962615967,
      "learning_rate": 3.13308410684782e-05,
      "loss": 1.2624,
      "step": 2200
    },
    {
      "epoch": 11.25,
      "grad_norm": 4.862861156463623,
      "learning_rate": 2.9233025443148317e-05,
      "loss": 1.1407,
      "step": 2250
    },
    {
      "epoch": 11.5,
      "grad_norm": 5.060881614685059,
      "learning_rate": 2.7178503112433672e-05,
      "loss": 1.1681,
      "step": 2300
    },
    {
      "epoch": 11.75,
      "grad_norm": 5.76928186416626,
      "learning_rate": 2.517155717690404e-05,
      "loss": 1.1701,
      "step": 2350
    },
    {
      "epoch": 12.0,
      "grad_norm": 5.6520094871521,
      "learning_rate": 2.3216371553741295e-05,
      "loss": 1.1729,
      "step": 2400
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.6796687245368958,
      "eval_runtime": 18.1622,
      "eval_samples_per_second": 22.024,
      "eval_steps_per_second": 5.506,
      "step": 2400
    },
    {
      "epoch": 12.25,
      "grad_norm": 5.579859256744385,
      "learning_rate": 2.131702225445008e-05,
      "loss": 1.0688,
      "step": 2450
    },
    {
      "epoch": 12.5,
      "grad_norm": 5.693879127502441,
      "learning_rate": 1.9477468887521627e-05,
      "loss": 1.0746,
      "step": 2500
    },
    {
      "epoch": 12.75,
      "grad_norm": 5.642828941345215,
      "learning_rate": 1.770154640376479e-05,
      "loss": 1.0922,
      "step": 2550
    },
    {
      "epoch": 13.0,
      "grad_norm": 5.785156726837158,
      "learning_rate": 1.5992957101513524e-05,
      "loss": 1.0954,
      "step": 2600
    },
    {
      "epoch": 13.25,
      "grad_norm": 5.720382213592529,
      "learning_rate": 1.4355262908377271e-05,
      "loss": 1.0076,
      "step": 2650
    },
    {
      "epoch": 13.5,
      "grad_norm": 5.837440490722656,
      "learning_rate": 1.2791877955624859e-05,
      "loss": 1.0198,
      "step": 2700
    },
    {
      "epoch": 13.75,
      "grad_norm": 5.974554061889648,
      "learning_rate": 1.1306061460682072e-05,
      "loss": 1.0281,
      "step": 2750
    },
    {
      "epoch": 14.0,
      "grad_norm": 6.06301736831665,
      "learning_rate": 9.90091093258102e-06,
      "loss": 1.0109,
      "step": 2800
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.7651693820953369,
      "eval_runtime": 18.0723,
      "eval_samples_per_second": 22.133,
      "eval_steps_per_second": 5.533,
      "step": 2800
    },
    {
      "epoch": 14.25,
      "grad_norm": 6.1065473556518555,
      "learning_rate": 8.579355714525994e-06,
      "loss": 0.959,
      "step": 2850
    },
    {
      "epoch": 14.5,
      "grad_norm": 5.750560283660889,
      "learning_rate": 7.3441508770376975e-06,
      "loss": 0.9667,
      "step": 2900
    },
    {
      "epoch": 14.75,
      "grad_norm": 6.242477893829346,
      "learning_rate": 6.197871474406936e-06,
      "loss": 0.9692,
      "step": 2950
    },
    {
      "epoch": 15.0,
      "grad_norm": 6.384776592254639,
      "learning_rate": 5.142907176431455e-06,
      "loss": 0.9755,
      "step": 3000
    },
    {
      "epoch": 15.25,
      "grad_norm": 5.764701843261719,
      "learning_rate": 4.181457286627316e-06,
      "loss": 0.9159,
      "step": 3050
    },
    {
      "epoch": 15.5,
      "grad_norm": 5.83530855178833,
      "learning_rate": 3.3155261573003195e-06,
      "loss": 0.9422,
      "step": 3100
    },
    {
      "epoch": 15.75,
      "grad_norm": 6.114677429199219,
      "learning_rate": 2.5469190110357475e-06,
      "loss": 0.939,
      "step": 3150
    },
    {
      "epoch": 16.0,
      "grad_norm": 6.1888628005981445,
      "learning_rate": 1.8772381773176417e-06,
      "loss": 0.9391,
      "step": 3200
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.8231673240661621,
      "eval_runtime": 18.0722,
      "eval_samples_per_second": 22.133,
      "eval_steps_per_second": 5.533,
      "step": 3200
    },
    {
      "epoch": 16.25,
      "grad_norm": 6.108009338378906,
      "learning_rate": 1.307879752122948e-06,
      "loss": 0.907,
      "step": 3250
    },
    {
      "epoch": 16.5,
      "grad_norm": 6.366396903991699,
      "learning_rate": 8.40030687454535e-07,
      "loss": 0.9297,
      "step": 3300
    },
    {
      "epoch": 16.75,
      "grad_norm": 6.0306901931762695,
      "learning_rate": 4.746663168804566e-07,
      "loss": 0.9071,
      "step": 3350
    },
    {
      "epoch": 17.0,
      "grad_norm": 6.2290215492248535,
      "learning_rate": 2.1254832223808196e-07,
      "loss": 0.9209,
      "step": 3400
    },
    {
      "epoch": 17.25,
      "grad_norm": 6.210936069488525,
      "learning_rate": 5.4223145741943983e-08,
      "loss": 0.913,
      "step": 3450
    },
    {
      "epoch": 17.5,
      "grad_norm": 6.788633823394775,
      "learning_rate": 2.08508055765666e-11,
      "loss": 0.9034,
      "step": 3500
    }
  ],
  "logging_steps": 50,
  "max_steps": 3500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 18,
  "save_steps": 400,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.323066342987053e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}