{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9607843137254903,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 1.2929,
      "step": 10
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.8823529411764707e-05,
      "loss": 1.3741,
      "step": 20
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.823529411764706e-05,
      "loss": 0.9937,
      "step": 30
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.764705882352941e-05,
      "loss": 0.7573,
      "step": 40
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.7058823529411766e-05,
      "loss": 1.4795,
      "step": 50
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.647058823529412e-05,
      "loss": 1.2939,
      "step": 60
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.5882352941176472e-05,
      "loss": 0.7816,
      "step": 70
    },
    {
      "epoch": 0.31,
      "learning_rate": 2.5294117647058825e-05,
      "loss": 3.2073,
      "step": 80
    },
    {
      "epoch": 0.35,
      "learning_rate": 2.4705882352941174e-05,
      "loss": 1.9018,
      "step": 90
    },
    {
      "epoch": 0.39,
      "learning_rate": 2.411764705882353e-05,
      "loss": 1.7244,
      "step": 100
    },
    {
      "epoch": 0.43,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 0.3282,
      "step": 110
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.2941176470588233e-05,
      "loss": 0.7277,
      "step": 120
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.235294117647059e-05,
      "loss": 0.9595,
      "step": 130
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.1764705882352943e-05,
      "loss": 1.5087,
      "step": 140
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1176470588235296e-05,
      "loss": 1.1413,
      "step": 150
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.058823529411765e-05,
      "loss": 1.7349,
      "step": 160
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 1.2754,
      "step": 170
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.9411764705882355e-05,
      "loss": 0.4958,
      "step": 180
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.8823529411764708e-05,
      "loss": 1.2852,
      "step": 190
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.8235294117647057e-05,
      "loss": 2.063,
      "step": 200
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.8745,
      "step": 210
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.7058823529411763e-05,
      "loss": 1.6477,
      "step": 220
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.647058823529412e-05,
      "loss": 1.4984,
      "step": 230
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.5882352941176473e-05,
      "loss": 2.1564,
      "step": 240
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.5294117647058822e-05,
      "loss": 1.5632,
      "step": 250
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.4705882352941177e-05,
      "loss": 1.0398,
      "step": 260
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.411764705882353e-05,
      "loss": 2.0072,
      "step": 270
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.3529411764705883e-05,
      "loss": 0.4885,
      "step": 280
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.2941176470588236e-05,
      "loss": 1.3477,
      "step": 290
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.2352941176470587e-05,
      "loss": 2.0612,
      "step": 300
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.1766,
      "step": 310
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.1176470588235295e-05,
      "loss": 1.5938,
      "step": 320
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.0588235294117648e-05,
      "loss": 1.7661,
      "step": 330
    },
    {
      "epoch": 1.33,
      "learning_rate": 9.999999999999999e-06,
      "loss": 2.1266,
      "step": 340
    },
    {
      "epoch": 1.37,
      "learning_rate": 9.411764705882354e-06,
      "loss": 1.0121,
      "step": 350
    },
    {
      "epoch": 1.41,
      "learning_rate": 8.823529411764707e-06,
      "loss": 2.1273,
      "step": 360
    },
    {
      "epoch": 1.45,
      "learning_rate": 8.23529411764706e-06,
      "loss": 0.6538,
      "step": 370
    },
    {
      "epoch": 1.49,
      "learning_rate": 7.647058823529411e-06,
      "loss": 3.7878,
      "step": 380
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.058823529411765e-06,
      "loss": 2.1304,
      "step": 390
    },
    {
      "epoch": 1.57,
      "learning_rate": 6.470588235294118e-06,
      "loss": 0.6084,
      "step": 400
    },
    {
      "epoch": 1.61,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.7258,
      "step": 410
    },
    {
      "epoch": 1.65,
      "learning_rate": 5.294117647058824e-06,
      "loss": 1.0022,
      "step": 420
    },
    {
      "epoch": 1.69,
      "learning_rate": 4.705882352941177e-06,
      "loss": 1.7453,
      "step": 430
    },
    {
      "epoch": 1.73,
      "learning_rate": 4.11764705882353e-06,
      "loss": 2.1497,
      "step": 440
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.5294117647058825e-06,
      "loss": 1.6716,
      "step": 450
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.6858,
      "step": 460
    },
    {
      "epoch": 1.84,
      "learning_rate": 2.3529411764705885e-06,
      "loss": 1.6145,
      "step": 470
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.7647058823529412e-06,
      "loss": 2.4272,
      "step": 480
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.1764705882352942e-06,
      "loss": 1.1166,
      "step": 490
    },
    {
      "epoch": 1.96,
      "learning_rate": 5.882352941176471e-07,
      "loss": 1.4319,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 510,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}