{
  "best_metric": 0.42746666666666666,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0872/checkpoints/checkpoint-2997",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 2997,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 19.892248153686523,
      "learning_rate": 0.00048519088401615274,
      "loss": 3.3906,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.19813333333333333,
      "eval_loss": 3.0404000282287598,
      "eval_runtime": 14.53,
      "eval_samples_per_second": 258.087,
      "eval_steps_per_second": 4.061,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 17.932193756103516,
      "learning_rate": 0.0004420155125428249,
      "loss": 2.8852,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.24826666666666666,
      "eval_loss": 2.8103723526000977,
      "eval_runtime": 12.5871,
      "eval_samples_per_second": 297.924,
      "eval_steps_per_second": 4.687,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 17.085769653320312,
      "learning_rate": 0.00037568023640968044,
      "loss": 2.7019,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.27973333333333333,
      "eval_loss": 2.646592617034912,
      "eval_runtime": 11.4333,
      "eval_samples_per_second": 327.988,
      "eval_steps_per_second": 5.16,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 11.418907165527344,
      "learning_rate": 0.00029418606892278544,
      "loss": 2.5244,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.31306666666666666,
      "eval_loss": 2.518843173980713,
      "eval_runtime": 11.4389,
      "eval_samples_per_second": 327.828,
      "eval_steps_per_second": 5.158,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 15.877795219421387,
      "learning_rate": 0.0002073624094068776,
      "loss": 2.3608,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.3576,
      "eval_loss": 2.349031448364258,
      "eval_runtime": 11.0657,
      "eval_samples_per_second": 338.885,
      "eval_steps_per_second": 5.332,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 10.192533493041992,
      "learning_rate": 0.00012568147258031897,
      "loss": 2.1789,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.40346666666666664,
      "eval_loss": 2.2206473350524902,
      "eval_runtime": 10.7598,
      "eval_samples_per_second": 348.518,
      "eval_steps_per_second": 5.483,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 22.338058471679688,
      "learning_rate": 5.899518490663266e-05,
      "loss": 2.0011,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.41013333333333335,
      "eval_loss": 2.191847801208496,
      "eval_runtime": 10.9215,
      "eval_samples_per_second": 343.36,
      "eval_steps_per_second": 5.402,
      "step": 2331
    },
    {
      "epoch": 8.0,
      "grad_norm": 24.35569190979004,
      "learning_rate": 1.534689686405272e-05,
      "loss": 1.8534,
      "step": 2664
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.4256,
      "eval_loss": 2.1321804523468018,
      "eval_runtime": 11.2827,
      "eval_samples_per_second": 332.368,
      "eval_steps_per_second": 5.229,
      "step": 2664
    },
    {
      "epoch": 9.0,
      "grad_norm": 2.4406826496124268,
      "learning_rate": 1.2361706385188854e-09,
      "loss": 1.7442,
      "step": 2997
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.42746666666666666,
      "eval_loss": 2.145380735397339,
      "eval_runtime": 10.6162,
      "eval_samples_per_second": 353.235,
      "eval_steps_per_second": 5.558,
      "step": 2997
    }
  ],
  "logging_steps": 500,
  "max_steps": 2997,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.482671885603328e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}