{
  "best_metric": 0.932,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0037/checkpoints/checkpoint-2331",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2331,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.098376750946045,
      "learning_rate": 8.659485808097889e-05,
      "loss": 0.8039,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8533333333333334,
      "eval_loss": 0.5031341314315796,
      "eval_runtime": 11.2743,
      "eval_samples_per_second": 332.615,
      "eval_steps_per_second": 5.233,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 5.069782257080078,
      "learning_rate": 7.685730736804761e-05,
      "loss": 0.2672,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8688,
      "eval_loss": 0.450407475233078,
      "eval_runtime": 11.4076,
      "eval_samples_per_second": 328.73,
      "eval_steps_per_second": 5.172,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.562358856201172,
      "learning_rate": 6.226977039554153e-05,
      "loss": 0.1403,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8581333333333333,
      "eval_loss": 0.5272783637046814,
      "eval_runtime": 11.3315,
      "eval_samples_per_second": 330.934,
      "eval_steps_per_second": 5.207,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.04506128281354904,
      "learning_rate": 4.505306743117272e-05,
      "loss": 0.0721,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8978666666666667,
      "eval_loss": 0.4255366027355194,
      "eval_runtime": 11.2896,
      "eval_samples_per_second": 332.163,
      "eval_steps_per_second": 5.226,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.5337011814117432,
      "learning_rate": 2.7828285431465317e-05,
      "loss": 0.0273,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9128,
      "eval_loss": 0.36624398827552795,
      "eval_runtime": 11.2042,
      "eval_samples_per_second": 334.696,
      "eval_steps_per_second": 5.266,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.0006438386044465005,
      "learning_rate": 1.3217741312837138e-05,
      "loss": 0.0051,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9301333333333334,
      "eval_loss": 0.3094325065612793,
      "eval_runtime": 11.3491,
      "eval_samples_per_second": 330.422,
      "eval_steps_per_second": 5.199,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.0005581324803642929,
      "learning_rate": 3.4457579724370674e-06,
      "loss": 0.0014,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.932,
      "eval_loss": 0.3038519620895386,
      "eval_runtime": 12.2371,
      "eval_samples_per_second": 306.445,
      "eval_steps_per_second": 4.821,
      "step": 2331
    }
  ],
  "logging_steps": 500,
  "max_steps": 2664,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.153189244358144e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}