{
  "best_metric": 0.41339462995529175,
  "best_model_checkpoint": "./results/checkpoint-254",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 254,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "grad_norm": 3.2418267726898193,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.6987,
      "step": 10
    },
    {
      "epoch": 0.16,
      "grad_norm": 3.6446964740753174,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.7005,
      "step": 20
    },
    {
      "epoch": 0.24,
      "grad_norm": 2.4721481800079346,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.6916,
      "step": 30
    },
    {
      "epoch": 0.31,
      "grad_norm": 1.413644790649414,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.6915,
      "step": 40
    },
    {
      "epoch": 0.39,
      "grad_norm": 3.0073206424713135,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.7069,
      "step": 50
    },
    {
      "epoch": 0.47,
      "grad_norm": 1.9428147077560425,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.6953,
      "step": 60
    },
    {
      "epoch": 0.55,
      "grad_norm": 9.055305480957031,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.7061,
      "step": 70
    },
    {
      "epoch": 0.63,
      "grad_norm": 1.2461938858032227,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.7006,
      "step": 80
    },
    {
      "epoch": 0.71,
      "grad_norm": 6.694257736206055,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.6883,
      "step": 90
    },
    {
      "epoch": 0.79,
      "grad_norm": 2.026824474334717,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.6936,
      "step": 100
    },
    {
      "epoch": 0.87,
      "grad_norm": 6.7909746170043945,
      "learning_rate": 4.4e-06,
      "loss": 0.6735,
      "step": 110
    },
    {
      "epoch": 0.94,
      "grad_norm": 4.953695774078369,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.6808,
      "step": 120
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6461184620857239,
      "eval_runtime": 4.1065,
      "eval_samples_per_second": 54.791,
      "eval_steps_per_second": 3.653,
      "step": 127
    },
    {
      "epoch": 1.02,
      "grad_norm": 7.156314849853516,
      "learning_rate": 5.2e-06,
      "loss": 0.669,
      "step": 130
    },
    {
      "epoch": 1.1,
      "grad_norm": 11.781147003173828,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.6497,
      "step": 140
    },
    {
      "epoch": 1.18,
      "grad_norm": 3.607640266418457,
      "learning_rate": 6e-06,
      "loss": 0.6412,
      "step": 150
    },
    {
      "epoch": 1.26,
      "grad_norm": 11.088181495666504,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.6112,
      "step": 160
    },
    {
      "epoch": 1.34,
      "grad_norm": 3.7018752098083496,
      "learning_rate": 6.800000000000001e-06,
      "loss": 0.5593,
      "step": 170
    },
    {
      "epoch": 1.42,
      "grad_norm": 6.569700717926025,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 0.4839,
      "step": 180
    },
    {
      "epoch": 1.5,
      "grad_norm": 11.416254997253418,
      "learning_rate": 7.600000000000001e-06,
      "loss": 0.5134,
      "step": 190
    },
    {
      "epoch": 1.57,
      "grad_norm": 11.640515327453613,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.5538,
      "step": 200
    },
    {
      "epoch": 1.65,
      "grad_norm": 3.7958412170410156,
      "learning_rate": 8.400000000000001e-06,
      "loss": 0.356,
      "step": 210
    },
    {
      "epoch": 1.73,
      "grad_norm": 9.580900192260742,
      "learning_rate": 8.8e-06,
      "loss": 0.413,
      "step": 220
    },
    {
      "epoch": 1.81,
      "grad_norm": 4.285351276397705,
      "learning_rate": 9.200000000000002e-06,
      "loss": 0.3792,
      "step": 230
    },
    {
      "epoch": 1.89,
      "grad_norm": 16.688804626464844,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.368,
      "step": 240
    },
    {
      "epoch": 1.97,
      "grad_norm": 28.58218765258789,
      "learning_rate": 1e-05,
      "loss": 0.1859,
      "step": 250
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.41339462995529175,
      "eval_runtime": 4.1035,
      "eval_samples_per_second": 54.832,
      "eval_steps_per_second": 3.655,
      "step": 254
    }
  ],
  "logging_steps": 10,
  "max_steps": 762,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 1064021107875840.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}