Upload DistilGPT-2 MAXIMUM - Best possible training (LoRA r=32, 15 epochs, 500 murlis)
Commit ea1d0fd (verified)

{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 14.0,
  "eval_steps": 500,
  "global_step": 308,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 0.8585774898529053,
      "learning_rate": 2.25e-06,
      "loss": 4.6803,
      "step": 10
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 0.6986469626426697,
      "learning_rate": 4.75e-06,
      "loss": 4.6484,
      "step": 20
    },
    {
      "epoch": 1.372093023255814,
      "grad_norm": 0.9079183340072632,
      "learning_rate": 7.25e-06,
      "loss": 4.692,
      "step": 30
    },
    {
      "epoch": 1.8372093023255816,
      "grad_norm": 0.9651162624359131,
      "learning_rate": 9.750000000000002e-06,
      "loss": 4.5727,
      "step": 40
    },
    {
      "epoch": 2.2790697674418605,
      "grad_norm": 1.0011574029922485,
      "learning_rate": 1.225e-05,
      "loss": 4.5349,
      "step": 50
    },
    {
      "epoch": 2.744186046511628,
      "grad_norm": 0.8613724708557129,
      "learning_rate": 1.475e-05,
      "loss": 4.4834,
      "step": 60
    },
    {
      "epoch": 3.186046511627907,
      "grad_norm": 0.7298063039779663,
      "learning_rate": 1.725e-05,
      "loss": 4.3632,
      "step": 70
    },
    {
      "epoch": 3.6511627906976747,
      "grad_norm": 0.9772608876228333,
      "learning_rate": 1.9750000000000002e-05,
      "loss": 4.2663,
      "step": 80
    },
    {
      "epoch": 4.093023255813954,
      "grad_norm": 0.9942960143089294,
      "learning_rate": 2.2250000000000002e-05,
      "loss": 4.0867,
      "step": 90
    },
    {
      "epoch": 4.558139534883721,
      "grad_norm": 1.3665835857391357,
      "learning_rate": 2.4750000000000002e-05,
      "loss": 3.8939,
      "step": 100
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.0870517492294312,
      "learning_rate": 2.725e-05,
      "loss": 3.6812,
      "step": 110
    },
    {
      "epoch": 5.465116279069767,
      "grad_norm": 1.2463775873184204,
      "learning_rate": 2.975e-05,
      "loss": 3.4444,
      "step": 120
    },
    {
      "epoch": 5.930232558139535,
      "grad_norm": 1.569264531135559,
      "learning_rate": 3.2250000000000005e-05,
      "loss": 3.1378,
      "step": 130
    },
    {
      "epoch": 6.372093023255814,
      "grad_norm": 1.720213532447815,
      "learning_rate": 3.475e-05,
      "loss": 2.925,
      "step": 140
    },
    {
      "epoch": 6.837209302325581,
      "grad_norm": 1.5594395399093628,
      "learning_rate": 3.7250000000000004e-05,
      "loss": 2.5943,
      "step": 150
    },
    {
      "epoch": 7.27906976744186,
      "grad_norm": 1.1726150512695312,
      "learning_rate": 3.9750000000000004e-05,
      "loss": 2.4226,
      "step": 160
    },
    {
      "epoch": 7.7441860465116275,
      "grad_norm": 1.3921430110931396,
      "learning_rate": 4.2250000000000004e-05,
      "loss": 2.1763,
      "step": 170
    },
    {
      "epoch": 8.186046511627907,
      "grad_norm": 1.3491045236587524,
      "learning_rate": 4.4750000000000004e-05,
      "loss": 2.1677,
      "step": 180
    },
    {
      "epoch": 8.651162790697674,
      "grad_norm": 0.9162717461585999,
      "learning_rate": 4.7249999999999997e-05,
      "loss": 1.9753,
      "step": 190
    },
    {
      "epoch": 9.093023255813954,
      "grad_norm": 1.0425995588302612,
      "learning_rate": 4.975e-05,
      "loss": 2.1817,
      "step": 200
    },
    {
      "epoch": 9.55813953488372,
      "grad_norm": 0.9056143164634705,
      "learning_rate": 4.9411026970731805e-05,
      "loss": 1.9631,
      "step": 210
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.2772588729858398,
      "learning_rate": 4.7410673432392596e-05,
      "loss": 1.8078,
      "step": 220
    },
    {
      "epoch": 10.465116279069768,
      "grad_norm": 1.1431177854537964,
      "learning_rate": 4.410789301364621e-05,
      "loss": 1.9238,
      "step": 230
    },
    {
      "epoch": 10.930232558139535,
      "grad_norm": 0.9406613707542419,
      "learning_rate": 3.9694631307311836e-05,
      "loss": 1.7594,
      "step": 240
    },
    {
      "epoch": 11.372093023255815,
      "grad_norm": 0.8469231724739075,
      "learning_rate": 3.442737104220801e-05,
      "loss": 1.9515,
      "step": 250
    },
    {
      "epoch": 11.837209302325581,
      "grad_norm": 1.383780598640442,
      "learning_rate": 2.8612226239230532e-05,
      "loss": 1.6711,
      "step": 260
    },
    {
      "epoch": 12.279069767441861,
      "grad_norm": 0.8472384214401245,
      "learning_rate": 2.2587151977137122e-05,
      "loss": 1.7611,
      "step": 270
    },
    {
      "epoch": 12.744186046511627,
      "grad_norm": 0.8573427796363831,
      "learning_rate": 1.6702303671786797e-05,
      "loss": 1.734,
      "step": 280
    },
    {
      "epoch": 13.186046511627907,
      "grad_norm": 0.9242410659790039,
      "learning_rate": 1.1299687316133256e-05,
      "loss": 1.7234,
      "step": 290
    },
    {
      "epoch": 13.651162790697674,
      "grad_norm": 0.9648193717002869,
      "learning_rate": 6.69328333505567e-06,
      "loss": 1.7062,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 664107774640128.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
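For reference, this is the trainer_state.json that transformers.Trainer writes into each checkpoint directory. This copy was saved at the end of epoch 14 (global_step 308 of max_steps 330, i.e. 22 optimizer steps per epoch over 15 epochs), so it is one epoch short of the final state; the alignment with an epoch boundary suggests an epoch-based save strategy, since the recorded save_steps of 500 would never fire in a 330-step run. No evaluation was logged (eval_steps of 500 likewise exceeds the total step count), which is why best_metric and best_model_checkpoint are null. The logged learning rates ramp linearly to roughly 5e-5 by step 200 and then decay, matching a cosine schedule with a 5e-5 peak and 200 warmup steps over 330 total steps. Below is a minimal sketch of a LoRA fine-tuning setup consistent with these values; the schedule parameters are inferred from the log, and the dataset handling, lora_alpha, gradient accumulation, and target_modules are assumptions for illustration, not taken from this file.

# Sketch only: a LoRA fine-tune consistent with the state file above. Values
# marked "from file" come from this trainer_state.json or the commit title;
# "inferred" values come from the logged LR curve and epoch fractions;
# "assumption" values are illustrative placeholders.
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 tokenizers ship without a pad token

model = get_peft_model(
    AutoModelForCausalLM.from_pretrained("distilgpt2"),
    LoraConfig(
        r=32,                       # from file (commit title)
        lora_alpha=64,              # assumption: common 2*r convention
        target_modules=["c_attn"],  # assumption: GPT-2's fused QKV projection
        task_type="CAUSAL_LM",
    ),
)

# Assumption: the 500 murlis live in a local JSONL file with a "text" field.
dataset = load_dataset("json", data_files="murlis.jsonl", split="train")
dataset = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=512),
    batched=True,
    remove_columns=dataset.column_names,
)

args = TrainingArguments(
    output_dir="distilgpt2-murli-lora",
    num_train_epochs=15,            # from file
    per_device_train_batch_size=2,  # from file ("train_batch_size": 2)
    gradient_accumulation_steps=2,  # assumption consistent with the fractional
                                    # epoch values (~2 batches per optimizer step)
    learning_rate=5e-5,             # inferred peak of the logged LR curve
    warmup_steps=200,               # inferred: linear ramp ends near step 200
    lr_scheduler_type="cosine",     # inferred: post-warmup decay fits a cosine
    logging_steps=10,               # from file
    save_steps=500,                 # from file
)

Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
).train()  # writes trainer_state.json into each checkpoint-* directory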