{
  "best_metric": 0.0006066389032639563,
  "best_model_checkpoint": "./results/checkpoint-1500",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "grad_norm": 37104760.0,
      "learning_rate": 1.25e-05,
      "loss": 1229363.12,
      "step": 50
    },
    {
      "epoch": 0.4,
      "grad_norm": 41486.3671875,
      "learning_rate": 2.5e-05,
      "loss": 42265.33,
      "step": 100
    },
    {
      "epoch": 0.6,
      "grad_norm": 1.13579608296277e-06,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.8482,
      "step": 150
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.03436470404267311,
      "learning_rate": 5e-05,
      "loss": 1.9227,
      "step": 200
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.033596742898225784,
      "learning_rate": 4.891304347826087e-05,
      "loss": 0.0034,
      "step": 250
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.007859374396502972,
      "eval_runtime": 3.0831,
      "eval_samples_per_second": 324.344,
      "eval_steps_per_second": 20.434,
      "step": 250
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.00011137876572320238,
      "learning_rate": 4.782608695652174e-05,
      "loss": 0.0091,
      "step": 300
    },
    {
      "epoch": 1.4,
      "grad_norm": 6.665806722594425e-05,
      "learning_rate": 4.673913043478261e-05,
      "loss": 0.0072,
      "step": 350
    },
    {
      "epoch": 1.6,
      "grad_norm": 2.3853695893194526e-05,
      "learning_rate": 4.565217391304348e-05,
      "loss": 0.0296,
      "step": 400
    },
    {
      "epoch": 1.8,
      "grad_norm": 8.297997555928305e-05,
      "learning_rate": 4.456521739130435e-05,
      "loss": 0.0156,
      "step": 450
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.4365089049970265e-06,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.02,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.020259028300642967,
      "eval_runtime": 3.0938,
      "eval_samples_per_second": 323.23,
      "eval_steps_per_second": 20.363,
      "step": 500
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.08514408767223358,
      "learning_rate": 4.239130434782609e-05,
      "loss": 0.0085,
      "step": 550
    },
    {
      "epoch": 2.4,
      "grad_norm": 6.856403222954555e-10,
      "learning_rate": 4.130434782608696e-05,
      "loss": 0.0055,
      "step": 600
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.06340835243463516,
      "learning_rate": 4.021739130434783e-05,
      "loss": 0.0004,
      "step": 650
    },
    {
      "epoch": 2.8,
      "grad_norm": 1.4290629226851315e-12,
      "learning_rate": 3.91304347826087e-05,
      "loss": 0.0014,
      "step": 700
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.299859962033319e-14,
      "learning_rate": 3.804347826086957e-05,
      "loss": 0.0101,
      "step": 750
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.007910430431365967,
      "eval_runtime": 3.113,
      "eval_samples_per_second": 321.23,
      "eval_steps_per_second": 20.237,
      "step": 750
    },
    {
      "epoch": 3.2,
      "grad_norm": 3.298977389931679e-05,
      "learning_rate": 3.695652173913043e-05,
      "loss": 0.0,
      "step": 800
    },
    {
      "epoch": 3.4,
      "grad_norm": 5.847134164653142e-14,
      "learning_rate": 3.58695652173913e-05,
      "loss": 0.0,
      "step": 850
    },
    {
      "epoch": 3.6,
      "grad_norm": 4.566483909757002e-13,
      "learning_rate": 3.478260869565218e-05,
      "loss": 0.0033,
      "step": 900
    },
    {
      "epoch": 3.8,
      "grad_norm": 2.20342698753484e-07,
      "learning_rate": 3.369565217391305e-05,
      "loss": 0.0,
      "step": 950
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.727204145320684e-08,
      "learning_rate": 3.260869565217392e-05,
      "loss": 0.0056,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.9520909786224365,
      "eval_runtime": 3.0518,
      "eval_samples_per_second": 327.672,
      "eval_steps_per_second": 20.643,
      "step": 1000
    },
    {
      "epoch": 4.2,
      "grad_norm": 2.1178482578765312e-11,
      "learning_rate": 3.152173913043479e-05,
      "loss": 0.0,
      "step": 1050
    },
    {
      "epoch": 4.4,
      "grad_norm": 8.027289004530758e-05,
      "learning_rate": 3.0434782608695656e-05,
      "loss": 0.0163,
      "step": 1100
    },
    {
      "epoch": 4.6,
      "grad_norm": 0.0030259895138442516,
      "learning_rate": 2.9347826086956526e-05,
      "loss": 0.008,
      "step": 1150
    },
    {
      "epoch": 4.8,
      "grad_norm": 1.1572946106205156e-13,
      "learning_rate": 2.826086956521739e-05,
      "loss": 0.0,
      "step": 1200
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.2053489716751242e-12,
      "learning_rate": 2.7173913043478262e-05,
      "loss": 0.0015,
      "step": 1250
    },
    {
      "epoch": 5.0,
      "eval_loss": 3.6528851985931396,
      "eval_runtime": 3.0957,
      "eval_samples_per_second": 323.027,
      "eval_steps_per_second": 20.351,
      "step": 1250
    },
    {
      "epoch": 5.2,
      "grad_norm": 1.2785525947429238e-13,
      "learning_rate": 2.608695652173913e-05,
      "loss": 0.0022,
      "step": 1300
    },
    {
      "epoch": 5.4,
      "grad_norm": 3.6872038083370084e-13,
      "learning_rate": 2.5e-05,
      "loss": 0.0,
      "step": 1350
    },
    {
      "epoch": 5.6,
      "grad_norm": 6.94073501472138e-14,
      "learning_rate": 2.391304347826087e-05,
      "loss": 0.0,
      "step": 1400
    },
    {
      "epoch": 5.8,
      "grad_norm": 4.947420945144998e-12,
      "learning_rate": 2.282608695652174e-05,
      "loss": 0.0,
      "step": 1450
    },
    {
      "epoch": 6.0,
      "grad_norm": 9.953177876071556e-14,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.0006066389032639563,
      "eval_runtime": 3.111,
      "eval_samples_per_second": 321.436,
      "eval_steps_per_second": 20.25,
      "step": 1500
    }
  ],
  "logging_steps": 50,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 376251125760000.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}