{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.978723404255319,
  "eval_steps": 500,
  "global_step": 186,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05319148936170213,
      "grad_norm": 2.484375,
      "learning_rate": 7.72058090429322e-06,
      "loss": 1.0185,
      "step": 5
    },
    {
      "epoch": 0.10638297872340426,
      "grad_norm": 1.921875,
      "learning_rate": 1.7371307034659745e-05,
      "loss": 0.9887,
      "step": 10
    },
    {
      "epoch": 0.1595744680851064,
      "grad_norm": 1.609375,
      "learning_rate": 2.702203316502627e-05,
      "loss": 0.9219,
      "step": 15
    },
    {
      "epoch": 0.2127659574468085,
      "grad_norm": 2.0,
      "learning_rate": 3.667275929539279e-05,
      "loss": 0.9248,
      "step": 20
    },
    {
      "epoch": 0.26595744680851063,
      "grad_norm": 1.71875,
      "learning_rate": 4.632348542575932e-05,
      "loss": 0.8551,
      "step": 25
    },
    {
      "epoch": 0.3191489361702128,
      "grad_norm": 1.8125,
      "learning_rate": 5.597421155612585e-05,
      "loss": 0.8737,
      "step": 30
    },
    {
      "epoch": 0.3723404255319149,
      "grad_norm": 1.859375,
      "learning_rate": 6.562493768649237e-05,
      "loss": 0.834,
      "step": 35
    },
    {
      "epoch": 0.425531914893617,
      "grad_norm": 1.6953125,
      "learning_rate": 6.746968406392054e-05,
      "loss": 0.8323,
      "step": 40
    },
    {
      "epoch": 0.4787234042553192,
      "grad_norm": 1.484375,
      "learning_rate": 6.712373778909143e-05,
      "loss": 0.8278,
      "step": 45
    },
    {
      "epoch": 0.5319148936170213,
      "grad_norm": 1.4921875,
      "learning_rate": 6.651554705487648e-05,
      "loss": 0.7397,
      "step": 50
    },
    {
      "epoch": 0.5851063829787234,
      "grad_norm": 1.375,
      "learning_rate": 6.565151680308889e-05,
      "loss": 0.8036,
      "step": 55
    },
    {
      "epoch": 0.6382978723404256,
      "grad_norm": 1.515625,
      "learning_rate": 6.454074625733765e-05,
      "loss": 0.7971,
      "step": 60
    },
    {
      "epoch": 0.6914893617021277,
      "grad_norm": 1.390625,
      "learning_rate": 6.319493309782911e-05,
      "loss": 0.7346,
      "step": 65
    },
    {
      "epoch": 0.7446808510638298,
      "grad_norm": 1.3828125,
      "learning_rate": 6.162825027145833e-05,
      "loss": 0.7127,
      "step": 70
    },
    {
      "epoch": 0.7978723404255319,
      "grad_norm": 1.3359375,
      "learning_rate": 5.985719673451993e-05,
      "loss": 0.6861,
      "step": 75
    },
    {
      "epoch": 0.851063829787234,
      "grad_norm": 1.2265625,
      "learning_rate": 5.790042369988838e-05,
      "loss": 0.6625,
      "step": 80
    },
    {
      "epoch": 0.9042553191489362,
      "grad_norm": 1.2265625,
      "learning_rate": 5.577853821848267e-05,
      "loss": 0.6033,
      "step": 85
    },
    {
      "epoch": 0.9574468085106383,
      "grad_norm": 1.5234375,
      "learning_rate": 5.351388616352658e-05,
      "loss": 0.5906,
      "step": 90
    },
    {
      "epoch": 0.9893617021276596,
      "eval_loss": 0.6196732521057129,
      "eval_runtime": 18.6318,
      "eval_samples_per_second": 15.672,
      "eval_steps_per_second": 7.836,
      "step": 93
    },
    {
      "epoch": 1.0106382978723405,
      "grad_norm": 1.078125,
      "learning_rate": 5.1130316903027684e-05,
      "loss": 0.5445,
      "step": 95
    },
    {
      "epoch": 1.0638297872340425,
      "grad_norm": 1.328125,
      "learning_rate": 4.865293213874228e-05,
      "loss": 0.3503,
      "step": 100
    },
    {
      "epoch": 1.1170212765957448,
      "grad_norm": 0.98046875,
      "learning_rate": 4.610782155663802e-05,
      "loss": 0.3896,
      "step": 105
    },
    {
      "epoch": 1.1702127659574468,
      "grad_norm": 1.1796875,
      "learning_rate": 4.352178807275659e-05,
      "loss": 0.3792,
      "step": 110
    },
    {
      "epoch": 1.2234042553191489,
      "grad_norm": 1.03125,
      "learning_rate": 4.09220655679505e-05,
      "loss": 0.3483,
      "step": 115
    },
    {
      "epoch": 1.2765957446808511,
      "grad_norm": 1.1875,
      "learning_rate": 3.833603208406907e-05,
      "loss": 0.3657,
      "step": 120
    },
    {
      "epoch": 1.3297872340425532,
      "grad_norm": 1.21875,
      "learning_rate": 3.579092150196482e-05,
      "loss": 0.3025,
      "step": 125
    },
    {
      "epoch": 1.3829787234042552,
      "grad_norm": 0.96484375,
      "learning_rate": 3.3313536737679405e-05,
      "loss": 0.3495,
      "step": 130
    },
    {
      "epoch": 1.4361702127659575,
      "grad_norm": 1.0703125,
      "learning_rate": 3.0929967477180524e-05,
      "loss": 0.288,
      "step": 135
    },
    {
      "epoch": 1.4893617021276595,
      "grad_norm": 0.93359375,
      "learning_rate": 2.866531542222443e-05,
      "loss": 0.3082,
      "step": 140
    },
    {
      "epoch": 1.5425531914893615,
      "grad_norm": 1.03125,
      "learning_rate": 2.6543429940818712e-05,
      "loss": 0.3197,
      "step": 145
    },
    {
      "epoch": 1.5957446808510638,
      "grad_norm": 1.03125,
      "learning_rate": 2.4586656906187163e-05,
      "loss": 0.3072,
      "step": 150
    },
    {
      "epoch": 1.648936170212766,
      "grad_norm": 1.0625,
      "learning_rate": 2.2815603369248764e-05,
      "loss": 0.3164,
      "step": 155
    },
    {
      "epoch": 1.702127659574468,
      "grad_norm": 1.2578125,
      "learning_rate": 2.1248920542877975e-05,
      "loss": 0.2973,
      "step": 160
    },
    {
      "epoch": 1.7553191489361701,
      "grad_norm": 0.87109375,
      "learning_rate": 1.990310738336945e-05,
      "loss": 0.2977,
      "step": 165
    },
    {
      "epoch": 1.8085106382978724,
      "grad_norm": 0.8984375,
      "learning_rate": 1.8792336837618205e-05,
      "loss": 0.2914,
      "step": 170
    },
    {
      "epoch": 1.8617021276595744,
      "grad_norm": 1.015625,
      "learning_rate": 1.7928306585830618e-05,
      "loss": 0.3164,
      "step": 175
    },
    {
      "epoch": 1.9148936170212765,
      "grad_norm": 1.046875,
      "learning_rate": 1.732011585161567e-05,
      "loss": 0.2888,
      "step": 180
    },
    {
      "epoch": 1.9680851063829787,
      "grad_norm": 0.87890625,
      "learning_rate": 1.6974169576786553e-05,
      "loss": 0.3215,
      "step": 185
    },
    {
      "epoch": 1.978723404255319,
      "eval_loss": 0.44498831033706665,
      "eval_runtime": 18.5042,
      "eval_samples_per_second": 15.78,
      "eval_steps_per_second": 7.89,
      "step": 186
    }
  ],
  "logging_steps": 5,
  "max_steps": 188,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.049813249163264e+17,
  "train_batch_size": 14,
  "trial_name": null,
  "trial_params": null
}