{
  "best_metric": 0.3888,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0507/checkpoints/checkpoint-2331",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2331,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 18.971210479736328,
      "learning_rate": 0.0004294294294294294,
      "loss": 3.5505,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.1648,
      "eval_loss": 3.253709316253662,
      "eval_runtime": 7.6689,
      "eval_samples_per_second": 488.99,
      "eval_steps_per_second": 7.693,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 12.817221641540527,
      "learning_rate": 0.000358000858000858,
      "loss": 3.0443,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.2048,
      "eval_loss": 3.0173637866973877,
      "eval_runtime": 7.7784,
      "eval_samples_per_second": 482.107,
      "eval_steps_per_second": 7.585,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 18.8770809173584,
      "learning_rate": 0.0002865722865722866,
      "loss": 2.8489,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.2544,
      "eval_loss": 2.820695400238037,
      "eval_runtime": 9.8611,
      "eval_samples_per_second": 380.281,
      "eval_steps_per_second": 5.983,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 18.830350875854492,
      "learning_rate": 0.00021514371514371513,
      "loss": 2.6668,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.2848,
      "eval_loss": 2.6199207305908203,
      "eval_runtime": 7.6895,
      "eval_samples_per_second": 487.679,
      "eval_steps_per_second": 7.673,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 22.255603790283203,
      "learning_rate": 0.00014371514371514372,
      "loss": 2.4907,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.3336,
      "eval_loss": 2.456575632095337,
      "eval_runtime": 7.8039,
      "eval_samples_per_second": 480.53,
      "eval_steps_per_second": 7.56,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 30.41533851623535,
      "learning_rate": 7.228657228657229e-05,
      "loss": 2.2925,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.37546666666666667,
      "eval_loss": 2.3144500255584717,
      "eval_runtime": 7.7146,
      "eval_samples_per_second": 486.094,
      "eval_steps_per_second": 7.648,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 15.388129234313965,
      "learning_rate": 8.580008580008581e-07,
      "loss": 2.0903,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.3888,
      "eval_loss": 2.2629659175872803,
      "eval_runtime": 8.747,
      "eval_samples_per_second": 428.721,
      "eval_steps_per_second": 6.745,
      "step": 2331
    }
  ],
  "logging_steps": 500,
  "max_steps": 2331,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.153189244358144e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}