{
  "best_metric": 0.9424,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0187/checkpoints/checkpoint-2331",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2331,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 2.908839464187622,
      "learning_rate": 2.886495269365963e-05,
      "loss": 0.7643,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8866666666666667,
      "eval_loss": 0.3777833878993988,
      "eval_runtime": 6.8506,
      "eval_samples_per_second": 547.397,
      "eval_steps_per_second": 8.612,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.489156484603882,
      "learning_rate": 2.5619102456015873e-05,
      "loss": 0.1746,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9018666666666667,
      "eval_loss": 0.3219033181667328,
      "eval_runtime": 7.2088,
      "eval_samples_per_second": 520.196,
      "eval_steps_per_second": 8.184,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.5666627883911133,
      "learning_rate": 2.0756590131847176e-05,
      "loss": 0.0783,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9154666666666667,
      "eval_loss": 0.30058178305625916,
      "eval_runtime": 7.4461,
      "eval_samples_per_second": 503.618,
      "eval_steps_per_second": 7.924,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.10608753561973572,
      "learning_rate": 1.5017689143724237e-05,
      "loss": 0.0331,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9232,
      "eval_loss": 0.3046155273914337,
      "eval_runtime": 7.3143,
      "eval_samples_per_second": 512.695,
      "eval_steps_per_second": 8.066,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.3222718238830566,
      "learning_rate": 9.276095143821772e-06,
      "loss": 0.013,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9346666666666666,
      "eval_loss": 0.2656921148300171,
      "eval_runtime": 7.7004,
      "eval_samples_per_second": 486.985,
      "eval_steps_per_second": 7.662,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.0015815470833331347,
      "learning_rate": 4.4059137709457125e-06,
      "loss": 0.0037,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9373333333333334,
      "eval_loss": 0.2541326880455017,
      "eval_runtime": 7.3734,
      "eval_samples_per_second": 508.587,
      "eval_steps_per_second": 8.002,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.018114319071173668,
      "learning_rate": 1.1485859908123557e-06,
      "loss": 0.0014,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9424,
      "eval_loss": 0.2530508041381836,
      "eval_runtime": 7.9492,
      "eval_samples_per_second": 471.744,
      "eval_steps_per_second": 7.422,
      "step": 2331
    }
  ],
  "logging_steps": 500,
  "max_steps": 2664,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.153189244358144e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}