{
  "best_global_step": 152,
  "best_metric": 0.5104668140411377,
  "best_model_checkpoint": "./solacies/checkpoint-152",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 152,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 2.7918763160705566,
      "learning_rate": 1.9407894736842107e-05,
      "loss": 1.3351,
      "step": 10
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 2.623225212097168,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 1.1694,
      "step": 20
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 2.48968505859375,
      "learning_rate": 1.8092105263157896e-05,
      "loss": 1.0772,
      "step": 30
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 2.2622175216674805,
      "learning_rate": 1.743421052631579e-05,
      "loss": 1.0927,
      "step": 40
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 2.3349521160125732,
      "learning_rate": 1.6776315789473686e-05,
      "loss": 0.9766,
      "step": 50
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 2.7016446590423584,
      "learning_rate": 1.611842105263158e-05,
      "loss": 0.8842,
      "step": 60
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 1.8381617069244385,
      "learning_rate": 1.5460526315789475e-05,
      "loss": 0.7284,
      "step": 70
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 2.1242270469665527,
      "learning_rate": 1.4802631578947371e-05,
      "loss": 0.6287,
      "step": 80
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 1.3842352628707886,
      "learning_rate": 1.4144736842105264e-05,
      "loss": 0.6329,
      "step": 90
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 2.2132720947265625,
      "learning_rate": 1.3486842105263159e-05,
      "loss": 0.607,
      "step": 100
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 2.2834842205047607,
      "learning_rate": 1.2828947368421055e-05,
      "loss": 0.5891,
      "step": 110
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 2.5198376178741455,
      "learning_rate": 1.2171052631578948e-05,
      "loss": 0.5515,
      "step": 120
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 1.5494874715805054,
      "learning_rate": 1.1513157894736844e-05,
      "loss": 0.4724,
      "step": 130
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 2.719534158706665,
      "learning_rate": 1.0855263157894737e-05,
      "loss": 0.4908,
      "step": 140
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 1.478468418121338,
      "learning_rate": 1.0197368421052632e-05,
      "loss": 0.4536,
      "step": 150
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9088235294117647,
      "eval_loss": 0.5104668140411377,
      "eval_runtime": 5.2696,
      "eval_samples_per_second": 64.521,
      "eval_steps_per_second": 8.16,
      "step": 152
    }
  ],
  "logging_steps": 10,
  "max_steps": 304,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.415445510467584e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}