{
  "best_global_step": 2000,
  "best_metric": 0.3400862027176887,
  "best_model_checkpoint": "models/test-finetuned-sw-en/checkpoint-2000",
  "epoch": 0.350385423966363,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008759635599159075,
      "grad_norm": 100.36164855957031,
      "learning_rate": 2.5744308231173378e-06,
      "loss": 14.1521,
      "step": 50
    },
    {
      "epoch": 0.01751927119831815,
      "grad_norm": 11.487061500549316,
      "learning_rate": 5.201401050788091e-06,
      "loss": 5.8065,
      "step": 100
    },
    {
      "epoch": 0.026278906797477224,
      "grad_norm": 10.166085243225098,
      "learning_rate": 7.828371278458844e-06,
      "loss": 4.411,
      "step": 150
    },
    {
      "epoch": 0.0350385423966363,
      "grad_norm": 9.804460525512695,
      "learning_rate": 1.0455341506129598e-05,
      "loss": 4.1303,
      "step": 200
    },
    {
      "epoch": 0.04379817799579538,
      "grad_norm": 8.944438934326172,
      "learning_rate": 1.308231173380035e-05,
      "loss": 4.0012,
      "step": 250
    },
    {
      "epoch": 0.05255781359495445,
      "grad_norm": 9.135123252868652,
      "learning_rate": 1.5709281961471103e-05,
      "loss": 3.8622,
      "step": 300
    },
    {
      "epoch": 0.06131744919411353,
      "grad_norm": 8.474159240722656,
      "learning_rate": 1.8336252189141855e-05,
      "loss": 3.749,
      "step": 350
    },
    {
      "epoch": 0.0700770847932726,
      "grad_norm": 8.502720832824707,
      "learning_rate": 2.096322241681261e-05,
      "loss": 3.6698,
      "step": 400
    },
    {
      "epoch": 0.07883672039243167,
      "grad_norm": 8.237638473510742,
      "learning_rate": 2.3590192644483363e-05,
      "loss": 3.5605,
      "step": 450
    },
    {
      "epoch": 0.08759635599159075,
      "grad_norm": 8.184097290039062,
      "learning_rate": 2.6217162872154118e-05,
      "loss": 3.4528,
      "step": 500
    },
    {
      "epoch": 0.08759635599159075,
      "eval_bleu": 0.18452195716495454,
      "eval_chrf": 37.579073815894574,
      "eval_loss": 3.3051514625549316,
      "eval_model_preparation_time": 0.0024,
      "eval_runtime": 312.3723,
      "eval_samples_per_second": 68.79,
      "eval_steps_per_second": 2.151,
      "step": 500
    },
    {
      "epoch": 0.09635599159074983,
      "grad_norm": 8.083911895751953,
      "learning_rate": 2.884413309982487e-05,
      "loss": 3.4183,
      "step": 550
    },
    {
      "epoch": 0.1051156271899089,
      "grad_norm": 7.500244617462158,
      "learning_rate": 2.9997800888103602e-05,
      "loss": 3.3458,
      "step": 600
    },
    {
      "epoch": 0.11387526278906797,
      "grad_norm": 8.195356369018555,
      "learning_rate": 2.9982937261717248e-05,
      "loss": 3.3022,
      "step": 650
    },
    {
      "epoch": 0.12263489838822705,
      "grad_norm": 7.267704010009766,
      "learning_rate": 2.9954065389845778e-05,
      "loss": 3.2882,
      "step": 700
    },
    {
      "epoch": 0.13139453398738613,
      "grad_norm": 8.064687728881836,
      "learning_rate": 2.9911212266146163e-05,
      "loss": 3.1957,
      "step": 750
    },
    {
      "epoch": 0.1401541695865452,
      "grad_norm": 7.6160688400268555,
      "learning_rate": 2.985441795599852e-05,
      "loss": 3.2081,
      "step": 800
    },
    {
      "epoch": 0.14891380518570427,
      "grad_norm": 8.32299518585205,
      "learning_rate": 2.978373555904712e-05,
      "loss": 3.117,
      "step": 850
    },
    {
      "epoch": 0.15767344078486334,
      "grad_norm": 7.199745178222656,
      "learning_rate": 2.9699231159555054e-05,
      "loss": 3.1089,
      "step": 900
    },
    {
      "epoch": 0.16643307638402244,
      "grad_norm": 7.270870685577393,
      "learning_rate": 2.9600983764618996e-05,
      "loss": 3.0805,
      "step": 950
    },
    {
      "epoch": 0.1751927119831815,
      "grad_norm": 7.554959297180176,
      "learning_rate": 2.9489085230301778e-05,
      "loss": 3.0778,
      "step": 1000
    },
    {
      "epoch": 0.1751927119831815,
      "eval_bleu": 0.2956498402636781,
      "eval_chrf": 48.85756661559643,
      "eval_loss": 2.9274492263793945,
      "eval_model_preparation_time": 0.0024,
      "eval_runtime": 292.5539,
      "eval_samples_per_second": 73.45,
      "eval_steps_per_second": 2.297,
      "step": 1000
    },
    {
      "epoch": 0.18395234758234058,
      "grad_norm": 8.069275856018066,
      "learning_rate": 2.9363640175751887e-05,
      "loss": 3.0401,
      "step": 1050
    },
    {
      "epoch": 0.19271198318149965,
      "grad_norm": 7.83376932144165,
      "learning_rate": 2.922476588539015e-05,
      "loss": 3.0312,
      "step": 1100
    },
    {
      "epoch": 0.20147161878065872,
      "grad_norm": 7.48977518081665,
      "learning_rate": 2.9072592199255066e-05,
      "loss": 2.993,
      "step": 1150
    },
    {
      "epoch": 0.2102312543798178,
      "grad_norm": 7.3794779777526855,
      "learning_rate": 2.8907261391609325e-05,
      "loss": 3.0023,
      "step": 1200
    },
    {
      "epoch": 0.21899088997897687,
      "grad_norm": 7.881261825561523,
      "learning_rate": 2.8728928037920966e-05,
      "loss": 2.9784,
      "step": 1250
    },
    {
      "epoch": 0.22775052557813594,
      "grad_norm": 6.710996150970459,
      "learning_rate": 2.853775887034356e-05,
      "loss": 2.9662,
      "step": 1300
    },
    {
      "epoch": 0.23651016117729504,
      "grad_norm": 7.729732990264893,
      "learning_rate": 2.8333932621830594e-05,
      "loss": 2.972,
      "step": 1350
    },
    {
      "epoch": 0.2452697967764541,
      "grad_norm": 6.742512226104736,
      "learning_rate": 2.8117639859029685e-05,
      "loss": 2.9373,
      "step": 1400
    },
    {
      "epoch": 0.25402943237561315,
      "grad_norm": 6.744399547576904,
      "learning_rate": 2.7889082804112972e-05,
      "loss": 2.9438,
      "step": 1450
    },
    {
      "epoch": 0.26278906797477225,
      "grad_norm": 6.606257438659668,
      "learning_rate": 2.764847514571017e-05,
      "loss": 2.9562,
      "step": 1500
    },
    {
      "epoch": 0.26278906797477225,
      "eval_bleu": 0.32395940047060856,
      "eval_chrf": 51.97366756294741,
      "eval_loss": 2.8271327018737793,
      "eval_model_preparation_time": 0.0024,
      "eval_runtime": 299.5193,
      "eval_samples_per_second": 71.742,
      "eval_steps_per_second": 2.244,
      "step": 1500
    },
    {
      "epoch": 0.27154870357393135,
      "grad_norm": 7.149145603179932,
      "learning_rate": 2.7396041839121136e-05,
      "loss": 2.9199,
      "step": 1550
    },
    {
      "epoch": 0.2803083391730904,
      "grad_norm": 7.462961673736572,
      "learning_rate": 2.7132018895994697e-05,
      "loss": 2.9294,
      "step": 1600
    },
    {
      "epoch": 0.2890679747722495,
      "grad_norm": 6.644341468811035,
      "learning_rate": 2.685665316367035e-05,
      "loss": 2.9246,
      "step": 1650
    },
    {
      "epoch": 0.29782761037140854,
      "grad_norm": 6.9484429359436035,
      "learning_rate": 2.6570202094389226e-05,
      "loss": 2.9154,
      "step": 1700
    },
    {
      "epoch": 0.30658724597056763,
      "grad_norm": 6.633726119995117,
      "learning_rate": 2.6272933504589965e-05,
      "loss": 2.8688,
      "step": 1750
    },
    {
      "epoch": 0.3153468815697267,
      "grad_norm": 7.676279544830322,
      "learning_rate": 2.5965125324514702e-05,
      "loss": 2.9137,
      "step": 1800
    },
    {
      "epoch": 0.3241065171688858,
      "grad_norm": 6.953280448913574,
      "learning_rate": 2.564706533835911e-05,
      "loss": 2.8974,
      "step": 1850
    },
    {
      "epoch": 0.3328661527680449,
      "grad_norm": 7.126858711242676,
      "learning_rate": 2.5319050915209592e-05,
      "loss": 2.9095,
      "step": 1900
    },
    {
      "epoch": 0.3416257883672039,
      "grad_norm": 6.376485824584961,
      "learning_rate": 2.498138873101906e-05,
      "loss": 2.8332,
      "step": 1950
    },
    {
      "epoch": 0.350385423966363,
      "grad_norm": 7.114526748657227,
      "learning_rate": 2.4634394481881312e-05,
      "loss": 2.8781,
      "step": 2000
    },
    {
      "epoch": 0.350385423966363,
      "eval_bleu": 0.3400862027176887,
      "eval_chrf": 53.63135686349859,
      "eval_loss": 2.7728657722473145,
      "eval_model_preparation_time": 0.0024,
      "eval_runtime": 459.505,
      "eval_samples_per_second": 46.763,
      "eval_steps_per_second": 1.462,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 5708,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.001
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1521367092559872.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}