{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.987146529562982,
  "eval_steps": 500,
  "global_step": 24,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04113110539845758,
      "grad_norm": 11.870390470134398,
      "learning_rate": 0.0,
      "loss": 1.9565,
      "step": 1
    },
    {
      "epoch": 0.08226221079691516,
      "grad_norm": 12.009526817812961,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.9718,
      "step": 2
    },
    {
      "epoch": 0.12339331619537275,
      "grad_norm": 10.36223708009221,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.9117,
      "step": 3
    },
    {
      "epoch": 0.16452442159383032,
      "grad_norm": 6.001358940559066,
      "learning_rate": 1e-05,
      "loss": 1.6466,
      "step": 4
    },
    {
      "epoch": 0.20565552699228792,
      "grad_norm": 5.288343588799703,
      "learning_rate": 9.944154131125643e-06,
      "loss": 1.592,
      "step": 5
    },
    {
      "epoch": 0.2467866323907455,
      "grad_norm": 4.932448230700685,
      "learning_rate": 9.777864028930705e-06,
      "loss": 1.4903,
      "step": 6
    },
    {
      "epoch": 0.2879177377892031,
      "grad_norm": 8.254123753090488,
      "learning_rate": 9.504844339512096e-06,
      "loss": 1.4336,
      "step": 7
    },
    {
      "epoch": 0.32904884318766064,
      "grad_norm": 5.5899004421905865,
      "learning_rate": 9.131193871579975e-06,
      "loss": 1.3946,
      "step": 8
    },
    {
      "epoch": 0.37017994858611825,
      "grad_norm": 4.385399717154605,
      "learning_rate": 8.665259359149132e-06,
      "loss": 1.3505,
      "step": 9
    },
    {
      "epoch": 0.41131105398457585,
      "grad_norm": 3.4785474789966218,
      "learning_rate": 8.117449009293668e-06,
      "loss": 1.2975,
      "step": 10
    },
    {
      "epoch": 0.4524421593830334,
      "grad_norm": 3.678459810428862,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.292,
      "step": 11
    },
    {
      "epoch": 0.493573264781491,
      "grad_norm": 3.2622487154200623,
      "learning_rate": 6.8267051218319766e-06,
      "loss": 1.2831,
      "step": 12
    },
    {
      "epoch": 0.5347043701799485,
      "grad_norm": 3.256780117492594,
      "learning_rate": 6.112604669781572e-06,
      "loss": 1.2655,
      "step": 13
    },
    {
      "epoch": 0.5758354755784062,
      "grad_norm": 3.0556247184293808,
      "learning_rate": 5.373650467932122e-06,
      "loss": 1.2249,
      "step": 14
    },
    {
      "epoch": 0.6169665809768637,
      "grad_norm": 2.565564759239293,
      "learning_rate": 4.626349532067879e-06,
      "loss": 1.216,
      "step": 15
    },
    {
      "epoch": 0.6580976863753213,
      "grad_norm": 2.551619545114463,
      "learning_rate": 3.887395330218429e-06,
      "loss": 1.2203,
      "step": 16
    },
    {
      "epoch": 0.699228791773779,
      "grad_norm": 2.1687201894188677,
      "learning_rate": 3.173294878168025e-06,
      "loss": 1.1852,
      "step": 17
    },
    {
      "epoch": 0.7403598971722365,
      "grad_norm": 2.362660106641768,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 1.2141,
      "step": 18
    },
    {
      "epoch": 0.781491002570694,
      "grad_norm": 2.4076108934592817,
      "learning_rate": 1.8825509907063328e-06,
      "loss": 1.1936,
      "step": 19
    },
    {
      "epoch": 0.8226221079691517,
      "grad_norm": 2.1775779534988646,
      "learning_rate": 1.3347406408508695e-06,
      "loss": 1.203,
      "step": 20
    },
    {
      "epoch": 0.8637532133676092,
      "grad_norm": 2.33920988457465,
      "learning_rate": 8.688061284200266e-07,
      "loss": 1.1874,
      "step": 21
    },
    {
      "epoch": 0.9048843187660668,
      "grad_norm": 2.276990062852866,
      "learning_rate": 4.951556604879049e-07,
      "loss": 1.179,
      "step": 22
    },
    {
      "epoch": 0.9460154241645244,
      "grad_norm": 2.143656166201497,
      "learning_rate": 2.2213597106929608e-07,
      "loss": 1.2445,
      "step": 23
    },
    {
      "epoch": 0.987146529562982,
      "grad_norm": 1.9960258314108954,
      "learning_rate": 5.584586887435739e-08,
      "loss": 1.1723,
      "step": 24
    },
    {
      "epoch": 0.987146529562982,
      "step": 24,
      "total_flos": 2429732487168.0,
      "train_loss": 1.380241756637891,
      "train_runtime": 362.534,
      "train_samples_per_second": 2.146,
      "train_steps_per_second": 0.066
    }
  ],
  "logging_steps": 1,
  "max_steps": 24,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2429732487168.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}