{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.986013986013986,
  "eval_steps": 36,
  "global_step": 142,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013986013986013986,
      "eval_loss": 0.8843945860862732,
      "eval_runtime": 7.527,
      "eval_samples_per_second": 3.986,
      "eval_steps_per_second": 1.063,
      "step": 1
    },
    {
      "epoch": 0.13986013986013987,
      "grad_norm": 0.30088093876838684,
      "learning_rate": 0.0002,
      "loss": 0.78,
      "step": 10
    },
    {
      "epoch": 0.27972027972027974,
      "grad_norm": 0.16369399428367615,
      "learning_rate": 0.00019718115683235417,
      "loss": 0.1395,
      "step": 20
    },
    {
      "epoch": 0.4195804195804196,
      "grad_norm": 0.08310074359178543,
      "learning_rate": 0.00018888354486549237,
      "loss": 0.0605,
      "step": 30
    },
    {
      "epoch": 0.5034965034965035,
      "eval_loss": 0.02842050977051258,
      "eval_runtime": 7.5377,
      "eval_samples_per_second": 3.98,
      "eval_steps_per_second": 1.061,
      "step": 36
    },
    {
      "epoch": 0.5594405594405595,
      "grad_norm": 0.12133278697729111,
      "learning_rate": 0.00017557495743542585,
      "loss": 0.0527,
      "step": 40
    },
    {
      "epoch": 0.6993006993006993,
      "grad_norm": 0.11081533133983612,
      "learning_rate": 0.00015800569095711982,
      "loss": 0.0447,
      "step": 50
    },
    {
      "epoch": 0.8391608391608392,
      "grad_norm": 0.07465693354606628,
      "learning_rate": 0.00013716624556603274,
      "loss": 0.0408,
      "step": 60
    },
    {
      "epoch": 0.9790209790209791,
      "grad_norm": 0.15174183249473572,
      "learning_rate": 0.00011423148382732853,
      "loss": 0.0526,
      "step": 70
    },
    {
      "epoch": 1.006993006993007,
      "eval_loss": 0.024295032024383545,
      "eval_runtime": 7.5406,
      "eval_samples_per_second": 3.978,
      "eval_steps_per_second": 1.061,
      "step": 72
    },
    {
      "epoch": 1.118881118881119,
      "grad_norm": 0.060762133449316025,
      "learning_rate": 9.049439566958175e-05,
      "loss": 0.047,
      "step": 80
    },
    {
      "epoch": 1.2587412587412588,
      "grad_norm": 0.07754148542881012,
      "learning_rate": 6.729320366825784e-05,
      "loss": 0.0378,
      "step": 90
    },
    {
      "epoch": 1.3986013986013985,
      "grad_norm": 0.11032623797655106,
      "learning_rate": 4.593591825444028e-05,
      "loss": 0.0301,
      "step": 100
    },
    {
      "epoch": 1.5104895104895104,
      "eval_loss": 0.016936320811510086,
      "eval_runtime": 7.5862,
      "eval_samples_per_second": 3.955,
      "eval_steps_per_second": 1.055,
      "step": 108
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.07133723795413971,
      "learning_rate": 2.7626596189492983e-05,
      "loss": 0.0311,
      "step": 110
    },
    {
      "epoch": 1.6783216783216783,
      "grad_norm": 0.1356527954339981,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.037,
      "step": 120
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.04838186502456665,
      "learning_rate": 4.050702638550275e-06,
      "loss": 0.0253,
      "step": 130
    },
    {
      "epoch": 1.958041958041958,
      "grad_norm": 0.03206559643149376,
      "learning_rate": 1.1326608169920372e-07,
      "loss": 0.0299,
      "step": 140
    }
  ],
  "logging_steps": 10,
  "max_steps": 142,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 71,
  "total_flos": 9.310350164675789e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}