{
  "best_metric": 0.38853333333333334,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0674/checkpoints/checkpoint-2331",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2331,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 13.38124942779541,
      "learning_rate": 0.0004294294294294294,
      "loss": 3.6173,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.1648,
      "eval_loss": 3.2028000354766846,
      "eval_runtime": 11.931,
      "eval_samples_per_second": 314.307,
      "eval_steps_per_second": 4.945,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 16.876428604125977,
      "learning_rate": 0.000358000858000858,
      "loss": 3.0389,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.2344,
      "eval_loss": 2.9296176433563232,
      "eval_runtime": 12.0127,
      "eval_samples_per_second": 312.169,
      "eval_steps_per_second": 4.911,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 12.877269744873047,
      "learning_rate": 0.0002865722865722866,
      "loss": 2.7901,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.2786666666666667,
      "eval_loss": 2.7114906311035156,
      "eval_runtime": 11.8645,
      "eval_samples_per_second": 316.069,
      "eval_steps_per_second": 4.973,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 18.108457565307617,
      "learning_rate": 0.00021514371514371513,
      "loss": 2.581,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.31066666666666665,
      "eval_loss": 2.560798168182373,
      "eval_runtime": 12.0875,
      "eval_samples_per_second": 310.237,
      "eval_steps_per_second": 4.881,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 16.452150344848633,
      "learning_rate": 0.00014371514371514372,
      "loss": 2.3639,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.3488,
      "eval_loss": 2.42657732963562,
      "eval_runtime": 13.6381,
      "eval_samples_per_second": 274.964,
      "eval_steps_per_second": 4.326,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 21.635099411010742,
      "learning_rate": 7.228657228657229e-05,
      "loss": 2.1167,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.3757333333333333,
      "eval_loss": 2.315603733062744,
      "eval_runtime": 12.0885,
      "eval_samples_per_second": 310.213,
      "eval_steps_per_second": 4.881,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 26.68172264099121,
      "learning_rate": 8.580008580008581e-07,
      "loss": 1.8185,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.38853333333333334,
      "eval_loss": 2.333320379257202,
      "eval_runtime": 12.2341,
      "eval_samples_per_second": 306.521,
      "eval_steps_per_second": 4.823,
      "step": 2331
    }
  ],
  "logging_steps": 500,
  "max_steps": 2331,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.153189244358144e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}