{
  "best_metric": 0.9208,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0221/checkpoints/checkpoint-2331",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2331,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 48.45054626464844,
      "learning_rate": 8.661507935327383e-05,
      "loss": 0.8032,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8592,
      "eval_loss": 0.48078104853630066,
      "eval_runtime": 6.2209,
      "eval_samples_per_second": 602.808,
      "eval_steps_per_second": 9.484,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.3851782083511353,
      "learning_rate": 7.689476527896773e-05,
      "loss": 0.2648,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8637333333333334,
      "eval_loss": 0.4972417950630188,
      "eval_runtime": 6.255,
      "eval_samples_per_second": 599.518,
      "eval_steps_per_second": 9.432,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 43.53612518310547,
      "learning_rate": 6.231876231770601e-05,
      "loss": 0.1405,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8661333333333333,
      "eval_loss": 0.5204830169677734,
      "eval_runtime": 6.2547,
      "eval_samples_per_second": 599.551,
      "eval_steps_per_second": 9.433,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.011021789163351059,
      "learning_rate": 4.510613478854492e-05,
      "loss": 0.0701,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8901333333333333,
      "eval_loss": 0.4364476501941681,
      "eval_runtime": 6.7889,
      "eval_samples_per_second": 552.37,
      "eval_steps_per_second": 8.691,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.000544319802429527,
      "learning_rate": 2.7877349199942123e-05,
      "loss": 0.0277,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9069333333333334,
      "eval_loss": 0.3994794189929962,
      "eval_runtime": 6.2995,
      "eval_samples_per_second": 595.288,
      "eval_steps_per_second": 9.366,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.00033232022542506456,
      "learning_rate": 1.3255331978432106e-05,
      "loss": 0.0045,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9146666666666666,
      "eval_loss": 0.3691798746585846,
      "eval_runtime": 7.4099,
      "eval_samples_per_second": 506.081,
      "eval_steps_per_second": 7.962,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.0005573926027864218,
      "learning_rate": 3.4661526970736e-06,
      "loss": 0.0013,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9208,
      "eval_loss": 0.3570408523082733,
      "eval_runtime": 6.0328,
      "eval_samples_per_second": 621.602,
      "eval_steps_per_second": 9.78,
      "step": 2331
    }
  ],
  "logging_steps": 500,
  "max_steps": 2664,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.153189244358144e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}