{
  "best_metric": 0.034771159291267395,
  "best_model_checkpoint": "/gs/fs/tgh-25IAK/ue02946/PLANT/Season_based_split_performance/final_full_250304_ver2_12__s20000_lr1.00E-04_wd0.01_rwd0.005_csew0_csea0_sw0.2_csewv0_swv0.2_cw0.05_rid256_dp0.05_eid64_edp0.1_lgw0.01_bs16_facebookesm2_t33_650M_UR50D/trained_until_full/results/checkpoint-16000",
  "epoch": 8.0,
  "eval_steps": 1000,
  "global_step": 16000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 1.430809736251831,
      "learning_rate": 1.2475e-05,
      "loss": 0.1385,
      "step": 500
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.6614887118339539,
      "learning_rate": 2.4975e-05,
      "loss": 0.0693,
      "step": 1000
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.05656217411160469,
      "eval_runtime": 175.6555,
      "eval_samples_per_second": 40.585,
      "eval_steps_per_second": 1.27,
      "step": 1000
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.6037147641181946,
      "learning_rate": 3.7475e-05,
      "loss": 0.0612,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.4972088038921356,
      "learning_rate": 4.9975e-05,
      "loss": 0.0577,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.06209351494908333,
      "eval_runtime": 175.8577,
      "eval_samples_per_second": 40.538,
      "eval_steps_per_second": 1.268,
      "step": 2000
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.0353070497512817,
      "learning_rate": 4.861388888888889e-05,
      "loss": 0.0511,
      "step": 2500
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.8515058755874634,
      "learning_rate": 4.7225e-05,
      "loss": 0.0447,
      "step": 3000
    },
    {
      "epoch": 1.5,
      "eval_loss": 0.050771959125995636,
      "eval_runtime": 175.8405,
      "eval_samples_per_second": 40.542,
      "eval_steps_per_second": 1.268,
      "step": 3000
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.5539710521697998,
      "learning_rate": 4.5836111111111115e-05,
      "loss": 0.0441,
      "step": 3500
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7269946336746216,
      "learning_rate": 4.444722222222222e-05,
      "loss": 0.0409,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.0459725446999073,
      "eval_runtime": 175.8069,
      "eval_samples_per_second": 40.55,
      "eval_steps_per_second": 1.268,
      "step": 4000
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.25292640924453735,
      "learning_rate": 4.3058333333333334e-05,
      "loss": 0.0385,
      "step": 4500
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.2268402874469757,
      "learning_rate": 4.166944444444445e-05,
      "loss": 0.04,
      "step": 5000
    },
    {
      "epoch": 2.5,
      "eval_loss": 0.04381516948342323,
      "eval_runtime": 175.8092,
      "eval_samples_per_second": 40.55,
      "eval_steps_per_second": 1.268,
      "step": 5000
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.2693457305431366,
      "learning_rate": 4.0283333333333334e-05,
      "loss": 0.0372,
      "step": 5500
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.383963406085968,
      "learning_rate": 3.889444444444445e-05,
      "loss": 0.0359,
      "step": 6000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.04170890897512436,
      "eval_runtime": 175.8179,
      "eval_samples_per_second": 40.548,
      "eval_steps_per_second": 1.268,
      "step": 6000
    },
    {
      "epoch": 3.25,
      "grad_norm": 0.341470867395401,
      "learning_rate": 3.750555555555555e-05,
      "loss": 0.0366,
      "step": 6500
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.3173449635505676,
      "learning_rate": 3.611666666666667e-05,
      "loss": 0.0342,
      "step": 7000
    },
    {
      "epoch": 3.5,
      "eval_loss": 0.04057968407869339,
      "eval_runtime": 175.921,
      "eval_samples_per_second": 40.524,
      "eval_steps_per_second": 1.268,
      "step": 7000
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.17745588719844818,
      "learning_rate": 3.472777777777778e-05,
      "loss": 0.0334,
      "step": 7500
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.737916886806488,
      "learning_rate": 3.333888888888889e-05,
      "loss": 0.0338,
      "step": 8000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.03727322071790695,
      "eval_runtime": 175.854,
      "eval_samples_per_second": 40.539,
      "eval_steps_per_second": 1.268,
      "step": 8000
    },
    {
      "epoch": 4.25,
      "grad_norm": 0.4044702351093292,
      "learning_rate": 3.1950000000000004e-05,
      "loss": 0.0315,
      "step": 8500
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.2551765441894531,
      "learning_rate": 3.056388888888889e-05,
      "loss": 0.0325,
      "step": 9000
    },
    {
      "epoch": 4.5,
      "eval_loss": 0.03956814482808113,
      "eval_runtime": 175.8578,
      "eval_samples_per_second": 40.538,
      "eval_steps_per_second": 1.268,
      "step": 9000
    },
    {
      "epoch": 4.75,
      "grad_norm": 0.4832671284675598,
      "learning_rate": 2.9175e-05,
      "loss": 0.0336,
      "step": 9500
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.219984769821167,
      "learning_rate": 2.7786111111111114e-05,
      "loss": 0.0311,
      "step": 10000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.037063486874103546,
      "eval_runtime": 175.9259,
      "eval_samples_per_second": 40.523,
      "eval_steps_per_second": 1.268,
      "step": 10000
    },
    {
      "epoch": 5.25,
      "grad_norm": 0.1559722125530243,
      "learning_rate": 2.6397222222222223e-05,
      "loss": 0.0303,
      "step": 10500
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.1364031732082367,
      "learning_rate": 2.5008333333333332e-05,
      "loss": 0.0299,
      "step": 11000
    },
    {
      "epoch": 5.5,
      "eval_loss": 0.037668175995349884,
      "eval_runtime": 176.0991,
      "eval_samples_per_second": 40.483,
      "eval_steps_per_second": 1.266,
      "step": 11000
    },
    {
      "epoch": 5.75,
      "grad_norm": 0.20991720259189606,
      "learning_rate": 2.3619444444444445e-05,
      "loss": 0.0308,
      "step": 11500
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.29334667325019836,
      "learning_rate": 2.2230555555555558e-05,
      "loss": 0.0297,
      "step": 12000
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.03977835178375244,
      "eval_runtime": 175.799,
      "eval_samples_per_second": 40.552,
      "eval_steps_per_second": 1.268,
      "step": 12000
    },
    {
      "epoch": 6.25,
      "grad_norm": 0.2716386616230011,
      "learning_rate": 2.0841666666666667e-05,
      "loss": 0.0297,
      "step": 12500
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.30008479952812195,
      "learning_rate": 1.9455555555555558e-05,
      "loss": 0.0284,
      "step": 13000
    },
    {
      "epoch": 6.5,
      "eval_loss": 0.036556895822286606,
      "eval_runtime": 175.7757,
      "eval_samples_per_second": 40.557,
      "eval_steps_per_second": 1.269,
      "step": 13000
    },
    {
      "epoch": 6.75,
      "grad_norm": 0.30487436056137085,
      "learning_rate": 1.8066666666666668e-05,
      "loss": 0.0279,
      "step": 13500
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.2959464490413666,
      "learning_rate": 1.6677777777777777e-05,
      "loss": 0.0288,
      "step": 14000
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.03538262099027634,
      "eval_runtime": 175.7677,
      "eval_samples_per_second": 40.559,
      "eval_steps_per_second": 1.269,
      "step": 14000
    },
    {
      "epoch": 7.25,
      "grad_norm": 0.16555708646774292,
      "learning_rate": 1.528888888888889e-05,
      "loss": 0.0275,
      "step": 14500
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.1510380208492279,
      "learning_rate": 1.3900000000000002e-05,
      "loss": 0.0276,
      "step": 15000
    },
    {
      "epoch": 7.5,
      "eval_loss": 0.03512444719672203,
      "eval_runtime": 175.7946,
      "eval_samples_per_second": 40.553,
      "eval_steps_per_second": 1.269,
      "step": 15000
    },
    {
      "epoch": 7.75,
      "grad_norm": 0.7063078880310059,
      "learning_rate": 1.2513888888888888e-05,
      "loss": 0.0281,
      "step": 15500
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.14875133335590363,
      "learning_rate": 1.1125000000000001e-05,
      "loss": 0.0269,
      "step": 16000
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.034771159291267395,
      "eval_runtime": 175.7842,
      "eval_samples_per_second": 40.555,
      "eval_steps_per_second": 1.269,
      "step": 16000
    }
  ],
  "logging_steps": 500,
  "max_steps": 20000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}