{
  "best_metric": 28.5073,
  "best_model_checkpoint": "/content/drive/MyDrive/checkpoint-26201",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 26201,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1335826876836762,
      "grad_norm": 5.345764636993408,
      "learning_rate": 3.7102997922232118e-06,
      "loss": 3.9988,
      "step": 500
    },
    {
      "epoch": 0.2671653753673524,
      "grad_norm": 5.3753743171691895,
      "learning_rate": 7.4205995844464235e-06,
      "loss": 3.2671,
      "step": 1000
    },
    {
      "epoch": 0.4007480630510286,
      "grad_norm": 3.7295212745666504,
      "learning_rate": 1.1130899376669635e-05,
      "loss": 3.1006,
      "step": 1500
    },
    {
      "epoch": 0.5343307507347048,
      "grad_norm": 4.027897834777832,
      "learning_rate": 1.4841199168892847e-05,
      "loss": 2.9767,
      "step": 2000
    },
    {
      "epoch": 0.667913438418381,
      "grad_norm": 3.9697580337524414,
      "learning_rate": 1.855149896111606e-05,
      "loss": 2.8914,
      "step": 2500
    },
    {
      "epoch": 0.8014961261020572,
      "grad_norm": 3.5195724964141846,
      "learning_rate": 2.226179875333927e-05,
      "loss": 2.8273,
      "step": 3000
    },
    {
      "epoch": 0.9350788137857333,
      "grad_norm": 3.447735071182251,
      "learning_rate": 2.5972098545562486e-05,
      "loss": 2.7964,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.2515146732330322,
      "eval_rouge1": 25.9968,
      "eval_rouge2": 10.7383,
      "eval_rougeL": 22.8305,
      "eval_runtime": 801.3334,
      "eval_samples_per_second": 5.851,
      "eval_steps_per_second": 0.585,
      "step": 3743
    },
    {
      "epoch": 1.0686615014694096,
      "grad_norm": 3.5466370582580566,
      "learning_rate": 2.9682398337785694e-05,
      "loss": 2.7258,
      "step": 4000
    },
    {
      "epoch": 1.2022441891530857,
      "grad_norm": 3.380523681640625,
      "learning_rate": 3.33926981300089e-05,
      "loss": 2.6799,
      "step": 4500
    },
    {
      "epoch": 1.3358268768367618,
      "grad_norm": 3.3388278484344482,
      "learning_rate": 3.710299792223212e-05,
      "loss": 2.6635,
      "step": 5000
    },
    {
      "epoch": 1.4694095645204381,
      "grad_norm": 4.365349292755127,
      "learning_rate": 4.0813297714455326e-05,
      "loss": 2.653,
      "step": 5500
    },
    {
      "epoch": 1.6029922522041145,
      "grad_norm": 2.9895472526550293,
      "learning_rate": 4.452359750667854e-05,
      "loss": 2.6307,
      "step": 6000
    },
    {
      "epoch": 1.7365749398877905,
      "grad_norm": 3.2920846939086914,
      "learning_rate": 4.8233897298901756e-05,
      "loss": 2.6279,
      "step": 6500
    },
    {
      "epoch": 1.8701576275714666,
      "grad_norm": 2.7936699390411377,
      "learning_rate": 4.708305499888667e-05,
      "loss": 2.6157,
      "step": 7000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.2093820571899414,
      "eval_rouge1": 27.1603,
      "eval_rouge2": 11.6742,
      "eval_rougeL": 23.9152,
      "eval_runtime": 800.4224,
      "eval_samples_per_second": 5.858,
      "eval_steps_per_second": 0.586,
      "step": 7486
    },
    {
      "epoch": 2.003740315255143,
      "grad_norm": 2.9695687294006348,
      "learning_rate": 4.1516366065464264e-05,
      "loss": 2.5275,
      "step": 7500
    },
    {
      "epoch": 2.1373230029388193,
      "grad_norm": 2.902341842651367,
      "learning_rate": 3.594967713204186e-05,
      "loss": 2.434,
      "step": 8000
    },
    {
      "epoch": 2.270905690622495,
      "grad_norm": 2.9437291622161865,
      "learning_rate": 3.0382988198619462e-05,
      "loss": 2.4224,
      "step": 8500
    },
    {
      "epoch": 2.4044883783061715,
      "grad_norm": 3.196054458618164,
      "learning_rate": 2.4816299265197063e-05,
      "loss": 2.4284,
      "step": 9000
    },
    {
      "epoch": 2.5380710659898478,
      "grad_norm": 2.9762144088745117,
      "learning_rate": 1.924961033177466e-05,
      "loss": 2.4025,
      "step": 9500
    },
    {
      "epoch": 2.6716537536735236,
      "grad_norm": 2.9706368446350098,
      "learning_rate": 1.3682921398352261e-05,
      "loss": 2.4134,
      "step": 10000
    },
    {
      "epoch": 2.8052364413572,
      "grad_norm": 3.00219464302063,
      "learning_rate": 8.11623246492986e-06,
      "loss": 2.393,
      "step": 10500
    },
    {
      "epoch": 2.9388191290408763,
      "grad_norm": 3.0556745529174805,
      "learning_rate": 2.5495435315074595e-06,
      "loss": 2.4,
      "step": 11000
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.151719093322754,
      "eval_rouge1": 28.0829,
      "eval_rouge2": 12.389,
      "eval_rougeL": 24.8018,
      "eval_runtime": 828.6501,
      "eval_samples_per_second": 5.659,
      "eval_steps_per_second": 0.566,
      "step": 11229
    },
    {
      "epoch": 3.0724018167245526,
      "grad_norm": 2.9094104766845703,
      "learning_rate": 2.4396651527295397e-05,
      "loss": 2.3046,
      "step": 11500
    },
    {
      "epoch": 3.205984504408229,
      "grad_norm": 3.2464346885681152,
      "learning_rate": 2.3283462463264763e-05,
      "loss": 2.3092,
      "step": 12000
    },
    {
      "epoch": 3.339567192091905,
      "grad_norm": 2.8669161796569824,
      "learning_rate": 2.2170273399234126e-05,
      "loss": 2.311,
      "step": 12500
    },
    {
      "epoch": 3.473149879775581,
      "grad_norm": 2.9395506381988525,
      "learning_rate": 2.1057084335203492e-05,
      "loss": 2.3464,
      "step": 13000
    },
    {
      "epoch": 3.6067325674592574,
      "grad_norm": 2.589228630065918,
      "learning_rate": 1.994389527117286e-05,
      "loss": 2.3138,
      "step": 13500
    },
    {
      "epoch": 3.7403152551429333,
      "grad_norm": 3.119093418121338,
      "learning_rate": 1.883070620714222e-05,
      "loss": 2.3379,
      "step": 14000
    },
    {
      "epoch": 3.8738979428266096,
      "grad_norm": 3.049482822418213,
      "learning_rate": 1.7717517143111588e-05,
      "loss": 2.3203,
      "step": 14500
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.1520700454711914,
      "eval_rouge1": 27.9025,
      "eval_rouge2": 12.1745,
      "eval_rougeL": 24.6421,
      "eval_runtime": 806.5195,
      "eval_samples_per_second": 5.814,
      "eval_steps_per_second": 0.582,
      "step": 14972
    },
    {
      "epoch": 4.007480630510286,
      "grad_norm": 2.8268468379974365,
      "learning_rate": 1.6604328079080954e-05,
      "loss": 2.3081,
      "step": 15000
    },
    {
      "epoch": 4.141063318193962,
      "grad_norm": 3.0538229942321777,
      "learning_rate": 1.5491139015050317e-05,
      "loss": 2.21,
      "step": 15500
    },
    {
      "epoch": 4.2746460058776385,
      "grad_norm": 2.8657615184783936,
      "learning_rate": 1.4377949951019681e-05,
      "loss": 2.2212,
      "step": 16000
    },
    {
      "epoch": 4.408228693561314,
      "grad_norm": 3.4228334426879883,
      "learning_rate": 1.3264760886989048e-05,
      "loss": 2.2211,
      "step": 16500
    },
    {
      "epoch": 4.54181138124499,
      "grad_norm": 2.928118944168091,
      "learning_rate": 1.2151571822958412e-05,
      "loss": 2.2231,
      "step": 17000
    },
    {
      "epoch": 4.675394068928667,
      "grad_norm": 3.1149392127990723,
      "learning_rate": 1.1038382758927777e-05,
      "loss": 2.2522,
      "step": 17500
    },
    {
      "epoch": 4.808976756612343,
      "grad_norm": 3.263375759124756,
      "learning_rate": 9.925193694897141e-06,
      "loss": 2.2262,
      "step": 18000
    },
    {
      "epoch": 4.94255944429602,
      "grad_norm": 2.9795596599578857,
      "learning_rate": 8.812004630866506e-06,
      "loss": 2.2432,
      "step": 18500
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.1519482135772705,
      "eval_rouge1": 28.2114,
      "eval_rouge2": 12.4638,
      "eval_rougeL": 24.9195,
      "eval_runtime": 801.5692,
      "eval_samples_per_second": 5.85,
      "eval_steps_per_second": 0.585,
      "step": 18715
    },
    {
      "epoch": 5.0761421319796955,
      "grad_norm": 3.917699098587036,
      "learning_rate": 2.4619289340101523e-05,
      "loss": 2.1744,
      "step": 19000
    },
    {
      "epoch": 5.209724819663371,
      "grad_norm": 3.097011089324951,
      "learning_rate": 2.3951375901683144e-05,
      "loss": 2.187,
      "step": 19500
    },
    {
      "epoch": 5.343307507347048,
      "grad_norm": 3.4347126483917236,
      "learning_rate": 2.3283462463264763e-05,
      "loss": 2.2042,
      "step": 20000
    },
    {
      "epoch": 5.476890195030724,
      "grad_norm": 3.750795364379883,
      "learning_rate": 2.261554902484638e-05,
      "loss": 2.2261,
      "step": 20500
    },
    {
      "epoch": 5.6104728827144,
      "grad_norm": 3.1423909664154053,
      "learning_rate": 2.1947635586428e-05,
      "loss": 2.1995,
      "step": 21000
    },
    {
      "epoch": 5.744055570398077,
      "grad_norm": 3.2796082496643066,
      "learning_rate": 2.1279722148009618e-05,
      "loss": 2.2165,
      "step": 21500
    },
    {
      "epoch": 5.8776382580817526,
      "grad_norm": 3.300898313522339,
      "learning_rate": 2.0611808709591237e-05,
      "loss": 2.2252,
      "step": 22000
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.169365644454956,
      "eval_rouge1": 28.1516,
      "eval_rouge2": 12.3777,
      "eval_rougeL": 24.7436,
      "eval_runtime": 795.9208,
      "eval_samples_per_second": 5.891,
      "eval_steps_per_second": 0.589,
      "step": 22458
    },
    {
      "epoch": 6.011220945765428,
      "grad_norm": 3.3653814792633057,
      "learning_rate": 1.994389527117286e-05,
      "loss": 2.044,
      "step": 22500
    },
    {
      "epoch": 6.144803633449105,
      "grad_norm": 3.162733554840088,
      "learning_rate": 1.9275981832754473e-05,
      "loss": 2.1435,
      "step": 23000
    },
    {
      "epoch": 6.278386321132781,
      "grad_norm": 2.9544992446899414,
      "learning_rate": 1.8608068394336095e-05,
      "loss": 2.1289,
      "step": 23500
    },
    {
      "epoch": 6.411969008816458,
      "grad_norm": 3.7476911544799805,
      "learning_rate": 1.7940154955917714e-05,
      "loss": 2.1454,
      "step": 24000
    },
    {
      "epoch": 6.545551696500134,
      "grad_norm": 3.191617727279663,
      "learning_rate": 1.7272241517499332e-05,
      "loss": 2.1435,
      "step": 24500
    },
    {
      "epoch": 6.67913438418381,
      "grad_norm": 3.208509922027588,
      "learning_rate": 1.6604328079080954e-05,
      "loss": 2.1357,
      "step": 25000
    },
    {
      "epoch": 6.812717071867486,
      "grad_norm": 3.2755982875823975,
      "learning_rate": 1.593641464066257e-05,
      "loss": 2.1371,
      "step": 25500
    },
    {
      "epoch": 6.946299759551162,
      "grad_norm": 3.220367431640625,
      "learning_rate": 1.526850120224419e-05,
      "loss": 2.1313,
      "step": 26000
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.18220853805542,
      "eval_rouge1": 28.5073,
      "eval_rouge2": 12.6157,
      "eval_rougeL": 25.1244,
      "eval_runtime": 795.4817,
      "eval_samples_per_second": 5.895,
      "eval_steps_per_second": 0.59,
      "step": 26201
    }
  ],
  "logging_steps": 500,
  "max_steps": 37430,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5381250056456192e+17,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}