{
  "best_metric": 0.4144,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0922/checkpoints/checkpoint-1998",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 1998,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 30.680429458618164,
      "learning_rate": 0.00028025614696414387,
      "loss": 3.3478,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.22533333333333333,
      "eval_loss": 2.932523488998413,
      "eval_runtime": 8.5418,
      "eval_samples_per_second": 439.019,
      "eval_steps_per_second": 6.907,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 34.889991760253906,
      "learning_rate": 0.00022561193336869865,
      "loss": 2.7707,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.2616,
      "eval_loss": 2.688565731048584,
      "eval_runtime": 6.0142,
      "eval_samples_per_second": 623.527,
      "eval_steps_per_second": 9.81,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 23.5567626953125,
      "learning_rate": 0.00015070756328895485,
      "loss": 2.4837,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.31893333333333335,
      "eval_loss": 2.49717378616333,
      "eval_runtime": 12.2488,
      "eval_samples_per_second": 306.152,
      "eval_steps_per_second": 4.817,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 41.21067810058594,
      "learning_rate": 7.561360219734159e-05,
      "loss": 2.1756,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.3736,
      "eval_loss": 2.2569801807403564,
      "eval_runtime": 6.0123,
      "eval_samples_per_second": 623.719,
      "eval_steps_per_second": 9.813,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 37.52667999267578,
      "learning_rate": 2.0451416324810927e-05,
      "loss": 1.838,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.4056,
      "eval_loss": 2.170523166656494,
      "eval_runtime": 5.7265,
      "eval_samples_per_second": 654.845,
      "eval_steps_per_second": 10.303,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 41.730804443359375,
      "learning_rate": 1.668828642892306e-09,
      "loss": 1.5679,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.4144,
      "eval_loss": 2.204758882522583,
      "eval_runtime": 6.2167,
      "eval_samples_per_second": 603.214,
      "eval_steps_per_second": 9.491,
      "step": 1998
    }
  ],
  "logging_steps": 500,
  "max_steps": 1998,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.88447923735552e+18,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}