{
  "best_global_step": 14356,
  "best_metric": 0.8855625217694183,
  "best_model_checkpoint": "./results/checkpoint-14356",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 14356,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06965728615213151,
      "grad_norm": 13.080974578857422,
      "learning_rate": 4.633240482822655e-06,
      "loss": 6.0849443359375,
      "step": 500
    },
    {
      "epoch": 0.13931457230426303,
      "grad_norm": 13.586039543151855,
      "learning_rate": 9.275766016713092e-06,
      "loss": 2.256979736328125,
      "step": 1000
    },
    {
      "epoch": 0.20897185845639454,
      "grad_norm": 9.891500473022461,
      "learning_rate": 1.3918291550603529e-05,
      "loss": 0.8005918579101563,
      "step": 1500
    },
    {
      "epoch": 0.27862914460852606,
      "grad_norm": 11.163674354553223,
      "learning_rate": 1.8560817084493966e-05,
      "loss": 0.5500891723632813,
      "step": 2000
    },
    {
      "epoch": 0.3482864307606576,
      "grad_norm": 9.168044090270996,
      "learning_rate": 1.9643962848297215e-05,
      "loss": 0.4499309387207031,
      "step": 2500
    },
    {
      "epoch": 0.4179437169127891,
      "grad_norm": 6.460792064666748,
      "learning_rate": 1.9127966976264192e-05,
      "loss": 0.39355935668945313,
      "step": 3000
    },
    {
      "epoch": 0.4876010030649206,
      "grad_norm": 6.611826419830322,
      "learning_rate": 1.8611971104231166e-05,
      "loss": 0.3529027404785156,
      "step": 3500
    },
    {
      "epoch": 0.5572582892170521,
      "grad_norm": 4.68829870223999,
      "learning_rate": 1.8095975232198144e-05,
      "loss": 0.31053646850585936,
      "step": 4000
    },
    {
      "epoch": 0.6269155753691836,
      "grad_norm": 6.321001052856445,
      "learning_rate": 1.757997936016512e-05,
      "loss": 0.2609405517578125,
      "step": 4500
    },
    {
      "epoch": 0.6965728615213151,
      "grad_norm": 4.9213080406188965,
      "learning_rate": 1.7063983488132095e-05,
      "loss": 0.24563502502441406,
      "step": 5000
    },
    {
      "epoch": 0.7662301476734467,
      "grad_norm": 4.064526557922363,
      "learning_rate": 1.6547987616099073e-05,
      "loss": 0.24585693359375,
      "step": 5500
    },
    {
      "epoch": 0.8358874338255782,
      "grad_norm": 3.594541549682617,
      "learning_rate": 1.603199174406605e-05,
      "loss": 0.22096328735351561,
      "step": 6000
    },
    {
      "epoch": 0.9055447199777097,
      "grad_norm": 3.2703857421875,
      "learning_rate": 1.5515995872033025e-05,
      "loss": 0.21973855590820313,
      "step": 6500
    },
    {
      "epoch": 0.9752020061298412,
      "grad_norm": 5.636610984802246,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.20794932556152343,
      "step": 7000
    },
    {
      "epoch": 1.0,
      "eval_exact_match_accuracy": 0.864158829676071,
      "eval_loss": 0.1834629476070404,
      "eval_runtime": 144.9347,
      "eval_samples_per_second": 396.179,
      "eval_steps_per_second": 12.385,
      "step": 7178
    },
    {
      "epoch": 1.0448592922819726,
      "grad_norm": 2.9119057655334473,
      "learning_rate": 1.4484004127966978e-05,
      "loss": 0.1765029296875,
      "step": 7500
    },
    {
      "epoch": 1.1145165784341042,
      "grad_norm": 1.8465490341186523,
      "learning_rate": 1.3968008255933954e-05,
      "loss": 0.1671279602050781,
      "step": 8000
    },
    {
      "epoch": 1.1841738645862356,
      "grad_norm": 2.925508975982666,
      "learning_rate": 1.345201238390093e-05,
      "loss": 0.16174620056152345,
      "step": 8500
    },
    {
      "epoch": 1.2538311507383673,
      "grad_norm": 4.677269458770752,
      "learning_rate": 1.2936016511867905e-05,
      "loss": 0.15153741455078126,
      "step": 9000
    },
    {
      "epoch": 1.3234884368904987,
      "grad_norm": 3.001206398010254,
      "learning_rate": 1.2420020639834883e-05,
      "loss": 0.15099703979492188,
      "step": 9500
    },
    {
      "epoch": 1.3931457230426303,
      "grad_norm": 2.6815640926361084,
      "learning_rate": 1.1904024767801859e-05,
      "loss": 0.15732374572753907,
      "step": 10000
    },
    {
      "epoch": 1.4628030091947617,
      "grad_norm": 6.781091690063477,
      "learning_rate": 1.1388028895768834e-05,
      "loss": 0.15630357360839844,
      "step": 10500
    },
    {
      "epoch": 1.5324602953468933,
      "grad_norm": 1.550376296043396,
      "learning_rate": 1.087203302373581e-05,
      "loss": 0.14910353088378905,
      "step": 11000
    },
    {
      "epoch": 1.6021175814990247,
      "grad_norm": 2.9026455879211426,
      "learning_rate": 1.0356037151702788e-05,
      "loss": 0.1490457763671875,
      "step": 11500
    },
    {
      "epoch": 1.6717748676511563,
      "grad_norm": 2.2712409496307373,
      "learning_rate": 9.840041279669764e-06,
      "loss": 0.13655984497070311,
      "step": 12000
    },
    {
      "epoch": 1.741432153803288,
      "grad_norm": 2.2995333671569824,
      "learning_rate": 9.32404540763674e-06,
      "loss": 0.14603111267089844,
      "step": 12500
    },
    {
      "epoch": 1.8110894399554194,
      "grad_norm": 3.2355263233184814,
      "learning_rate": 8.808049535603715e-06,
      "loss": 0.14294992065429687,
      "step": 13000
    },
    {
      "epoch": 1.8807467261075508,
      "grad_norm": 3.406935214996338,
      "learning_rate": 8.292053663570691e-06,
      "loss": 0.13857838439941406,
      "step": 13500
    },
    {
      "epoch": 1.9504040122596824,
      "grad_norm": 5.436853408813477,
      "learning_rate": 7.776057791537668e-06,
      "loss": 0.1319345703125,
      "step": 14000
    },
    {
      "epoch": 2.0,
      "eval_exact_match_accuracy": 0.8855625217694183,
      "eval_loss": 0.14376914501190186,
      "eval_runtime": 142.2968,
      "eval_samples_per_second": 403.523,
      "eval_steps_per_second": 12.614,
      "step": 14356
    }
  ],
  "logging_steps": 500,
  "max_steps": 21534,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.956498782735565e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}