{
  "best_metric": 3.6935930252075195,
  "best_model_checkpoint": "models/GPT2_bigram_function_53/checkpoint-64200",
  "epoch": 10.0,
  "global_step": 64200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "learning_rate": 1e-05,
      "loss": 7.7918,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 2e-05,
      "loss": 6.0964,
      "step": 2000
    },
    {
      "epoch": 0.47,
      "learning_rate": 3e-05,
      "loss": 5.6317,
      "step": 3000
    },
    {
      "epoch": 0.62,
      "learning_rate": 4e-05,
      "loss": 5.3254,
      "step": 4000
    },
    {
      "epoch": 0.78,
      "learning_rate": 5e-05,
      "loss": 5.0882,
      "step": 5000
    },
    {
      "epoch": 0.93,
      "learning_rate": 6e-05,
      "loss": 4.8992,
      "step": 6000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.2563830284573207,
      "eval_loss": 4.684994697570801,
      "eval_runtime": 1.9822,
      "eval_samples_per_second": 594.779,
      "eval_steps_per_second": 5.045,
      "step": 6420
    },
    {
      "epoch": 1.09,
      "learning_rate": 7e-05,
      "loss": 4.7319,
      "step": 7000
    },
    {
      "epoch": 1.25,
      "learning_rate": 8e-05,
      "loss": 4.6025,
      "step": 8000
    },
    {
      "epoch": 1.4,
      "learning_rate": 9e-05,
      "loss": 4.5032,
      "step": 9000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0001,
      "loss": 4.4224,
      "step": 10000
    },
    {
      "epoch": 1.71,
      "learning_rate": 9.81568265682657e-05,
      "loss": 4.3523,
      "step": 11000
    },
    {
      "epoch": 1.87,
      "learning_rate": 9.631180811808118e-05,
      "loss": 4.291,
      "step": 12000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.29881188515557694,
      "eval_loss": 4.1665873527526855,
      "eval_runtime": 1.9737,
      "eval_samples_per_second": 597.341,
      "eval_steps_per_second": 5.067,
      "step": 12840
    },
    {
      "epoch": 2.02,
      "learning_rate": 9.446863468634687e-05,
      "loss": 4.2354,
      "step": 13000
    },
    {
      "epoch": 2.18,
      "learning_rate": 9.262361623616237e-05,
      "loss": 4.1626,
      "step": 14000
    },
    {
      "epoch": 2.34,
      "learning_rate": 9.078044280442805e-05,
      "loss": 4.133,
      "step": 15000
    },
    {
      "epoch": 2.49,
      "learning_rate": 8.893542435424355e-05,
      "loss": 4.1083,
      "step": 16000
    },
    {
      "epoch": 2.65,
      "learning_rate": 8.709225092250924e-05,
      "loss": 4.0822,
      "step": 17000
    },
    {
      "epoch": 2.8,
      "learning_rate": 8.524723247232472e-05,
      "loss": 4.059,
      "step": 18000
    },
    {
      "epoch": 2.96,
      "learning_rate": 8.340405904059041e-05,
      "loss": 4.0377,
      "step": 19000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.3204236874970781,
      "eval_loss": 3.9776782989501953,
      "eval_runtime": 1.993,
      "eval_samples_per_second": 591.557,
      "eval_steps_per_second": 5.017,
      "step": 19260
    },
    {
      "epoch": 3.12,
      "learning_rate": 8.155904059040591e-05,
      "loss": 3.9745,
      "step": 20000
    },
    {
      "epoch": 3.27,
      "learning_rate": 7.971586715867159e-05,
      "loss": 3.9534,
      "step": 21000
    },
    {
      "epoch": 3.43,
      "learning_rate": 7.787084870848709e-05,
      "loss": 3.9422,
      "step": 22000
    },
    {
      "epoch": 3.58,
      "learning_rate": 7.602767527675278e-05,
      "loss": 3.9321,
      "step": 23000
    },
    {
      "epoch": 3.74,
      "learning_rate": 7.418265682656826e-05,
      "loss": 3.9216,
      "step": 24000
    },
    {
      "epoch": 3.89,
      "learning_rate": 7.233948339483395e-05,
      "loss": 3.9117,
      "step": 25000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.3324851569126378,
      "eval_loss": 3.8764491081237793,
      "eval_runtime": 1.9527,
      "eval_samples_per_second": 603.791,
      "eval_steps_per_second": 5.121,
      "step": 25680
    },
    {
      "epoch": 4.05,
      "learning_rate": 7.049446494464945e-05,
      "loss": 3.8809,
      "step": 26000
    },
    {
      "epoch": 4.21,
      "learning_rate": 6.865129151291513e-05,
      "loss": 3.8325,
      "step": 27000
    },
    {
      "epoch": 4.36,
      "learning_rate": 6.680627306273063e-05,
      "loss": 3.8333,
      "step": 28000
    },
    {
      "epoch": 4.52,
      "learning_rate": 6.496309963099632e-05,
      "loss": 3.8293,
      "step": 29000
    },
    {
      "epoch": 4.67,
      "learning_rate": 6.311808118081182e-05,
      "loss": 3.8267,
      "step": 30000
    },
    {
      "epoch": 4.83,
      "learning_rate": 6.12749077490775e-05,
      "loss": 3.8214,
      "step": 31000
    },
    {
      "epoch": 4.98,
      "learning_rate": 5.942988929889299e-05,
      "loss": 3.813,
      "step": 32000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.34075320737579556,
      "eval_loss": 3.8145906925201416,
      "eval_runtime": 1.9668,
      "eval_samples_per_second": 599.436,
      "eval_steps_per_second": 5.084,
      "step": 32100
    },
    {
      "epoch": 5.14,
      "learning_rate": 5.758671586715867e-05,
      "loss": 3.7499,
      "step": 33000
    },
    {
      "epoch": 5.3,
      "learning_rate": 5.574169741697417e-05,
      "loss": 3.7499,
      "step": 34000
    },
    {
      "epoch": 5.45,
      "learning_rate": 5.389852398523986e-05,
      "loss": 3.7502,
      "step": 35000
    },
    {
      "epoch": 5.61,
      "learning_rate": 5.205350553505536e-05,
      "loss": 3.7521,
      "step": 36000
    },
    {
      "epoch": 5.76,
      "learning_rate": 5.0210332103321035e-05,
      "loss": 3.7475,
      "step": 37000
    },
    {
      "epoch": 5.92,
      "learning_rate": 4.836531365313653e-05,
      "loss": 3.7448,
      "step": 38000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.3462897290510442,
      "eval_loss": 3.770218849182129,
      "eval_runtime": 1.9633,
      "eval_samples_per_second": 600.533,
      "eval_steps_per_second": 5.094,
      "step": 38520
    },
    {
      "epoch": 6.07,
      "learning_rate": 4.652214022140222e-05,
      "loss": 3.709,
      "step": 39000
    },
    {
      "epoch": 6.23,
      "learning_rate": 4.4677121771217715e-05,
      "loss": 3.6814,
      "step": 40000
    },
    {
      "epoch": 6.39,
      "learning_rate": 4.283210332103321e-05,
      "loss": 3.6846,
      "step": 41000
    },
    {
      "epoch": 6.54,
      "learning_rate": 4.098892988929889e-05,
      "loss": 3.6875,
      "step": 42000
    },
    {
      "epoch": 6.7,
      "learning_rate": 3.9143911439114395e-05,
      "loss": 3.6871,
      "step": 43000
    },
    {
      "epoch": 6.85,
      "learning_rate": 3.7300738007380073e-05,
      "loss": 3.6872,
      "step": 44000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.3503569687376864,
      "eval_loss": 3.7413458824157715,
      "eval_runtime": 2.125,
      "eval_samples_per_second": 554.816,
      "eval_steps_per_second": 4.706,
      "step": 44940
    },
    {
      "epoch": 7.01,
      "learning_rate": 3.545571955719557e-05,
      "loss": 3.6805,
      "step": 45000
    },
    {
      "epoch": 7.17,
      "learning_rate": 3.3612546125461256e-05,
      "loss": 3.624,
      "step": 46000
    },
    {
      "epoch": 7.32,
      "learning_rate": 3.1767527675276754e-05,
      "loss": 3.6295,
      "step": 47000
    },
    {
      "epoch": 7.48,
      "learning_rate": 2.9924354243542435e-05,
      "loss": 3.6334,
      "step": 48000
    },
    {
      "epoch": 7.63,
      "learning_rate": 2.8079335793357936e-05,
      "loss": 3.634,
      "step": 49000
    },
    {
      "epoch": 7.79,
      "learning_rate": 2.6236162361623618e-05,
      "loss": 3.6342,
      "step": 50000
    },
    {
      "epoch": 7.94,
      "learning_rate": 2.4391143911439115e-05,
      "loss": 3.634,
      "step": 51000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.3536962459845191,
      "eval_loss": 3.716168165206909,
      "eval_runtime": 1.9595,
      "eval_samples_per_second": 601.696,
      "eval_steps_per_second": 5.103,
      "step": 51360
    },
    {
      "epoch": 8.1,
      "learning_rate": 2.2547970479704797e-05,
      "loss": 3.5964,
      "step": 52000
    },
    {
      "epoch": 8.26,
      "learning_rate": 2.0702952029520294e-05,
      "loss": 3.5828,
      "step": 53000
    },
    {
      "epoch": 8.41,
      "learning_rate": 1.885977859778598e-05,
      "loss": 3.5856,
      "step": 54000
    },
    {
      "epoch": 8.57,
      "learning_rate": 1.7014760147601477e-05,
      "loss": 3.5886,
      "step": 55000
    },
    {
      "epoch": 8.72,
      "learning_rate": 1.5171586715867158e-05,
      "loss": 3.5875,
      "step": 56000
    },
    {
      "epoch": 8.88,
      "learning_rate": 1.3326568265682656e-05,
      "loss": 3.5866,
      "step": 57000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.3563008822370486,
      "eval_loss": 3.7014899253845215,
      "eval_runtime": 1.9556,
      "eval_samples_per_second": 602.893,
      "eval_steps_per_second": 5.114,
      "step": 57780
    },
    {
      "epoch": 9.03,
      "learning_rate": 1.1481549815498155e-05,
      "loss": 3.5754,
      "step": 58000
    },
    {
      "epoch": 9.19,
      "learning_rate": 9.638376383763839e-06,
      "loss": 3.546,
      "step": 59000
    },
    {
      "epoch": 9.35,
      "learning_rate": 7.793357933579336e-06,
      "loss": 3.5483,
      "step": 60000
    },
    {
      "epoch": 9.5,
      "learning_rate": 5.9501845018450185e-06,
      "loss": 3.5489,
      "step": 61000
    },
    {
      "epoch": 9.66,
      "learning_rate": 4.105166051660517e-06,
      "loss": 3.5492,
      "step": 62000
    },
    {
      "epoch": 9.81,
      "learning_rate": 2.2619926199261997e-06,
      "loss": 3.5458,
      "step": 63000
    },
    {
      "epoch": 9.97,
      "learning_rate": 4.1697416974169745e-07,
      "loss": 3.5458,
      "step": 64000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.35793712808799666,
      "eval_loss": 3.6935930252075195,
      "eval_runtime": 1.9737,
      "eval_samples_per_second": 597.355,
      "eval_steps_per_second": 5.067,
      "step": 64200
    },
    {
      "epoch": 10.0,
      "step": 64200,
      "total_flos": 5.367983505408e+17,
      "train_loss": 4.02217033124787,
      "train_runtime": 28143.482,
      "train_samples_per_second": 291.989,
      "train_steps_per_second": 2.281
    }
  ],
  "max_steps": 64200,
  "num_train_epochs": 10,
  "total_flos": 5.367983505408e+17,
  "trial_name": null,
  "trial_params": null
}