{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9523809523809526,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 6.621179518408136,
      "learning_rate": 6.25e-06,
      "loss": 0.6665,
      "step": 10
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 6.179865861627354,
      "learning_rate": 9.979871469976197e-06,
      "loss": 0.1383,
      "step": 20
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 5.4359133803868325,
      "learning_rate": 9.755282581475769e-06,
      "loss": 0.1057,
      "step": 30
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 2.3208835438566076,
      "learning_rate": 9.292243968009332e-06,
      "loss": 0.1083,
      "step": 40
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 4.4064446194848985,
      "learning_rate": 8.613974319136959e-06,
      "loss": 0.1204,
      "step": 50
    },
    {
      "epoch": 1.1333333333333333,
      "grad_norm": 3.2405795218219304,
      "learning_rate": 7.754484907260513e-06,
      "loss": 0.0931,
      "step": 60
    },
    {
      "epoch": 1.3238095238095238,
      "grad_norm": 3.9398815649702885,
      "learning_rate": 6.7568741204067145e-06,
      "loss": 0.0756,
      "step": 70
    },
    {
      "epoch": 1.5142857142857142,
      "grad_norm": 2.837924928784106,
      "learning_rate": 5.671166329088278e-06,
      "loss": 0.0648,
      "step": 80
    },
    {
      "epoch": 1.704761904761905,
      "grad_norm": 4.066695779873309,
      "learning_rate": 4.551803455482833e-06,
      "loss": 0.0543,
      "step": 90
    },
    {
      "epoch": 1.8952380952380952,
      "grad_norm": 4.666783552853214,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 0.0705,
      "step": 100
    },
    {
      "epoch": 2.0761904761904764,
      "grad_norm": 0.9199969622070459,
      "learning_rate": 2.4355036129704696e-06,
      "loss": 0.0391,
      "step": 110
    },
    {
      "epoch": 2.2666666666666666,
      "grad_norm": 4.780494703920028,
      "learning_rate": 1.544686755065677e-06,
      "loss": 0.023,
      "step": 120
    },
    {
      "epoch": 2.4571428571428573,
      "grad_norm": 3.292544780435591,
      "learning_rate": 8.271337313934869e-07,
      "loss": 0.0195,
      "step": 130
    },
    {
      "epoch": 2.6476190476190475,
      "grad_norm": 2.3045506632786243,
      "learning_rate": 3.18825646801314e-07,
      "loss": 0.016,
      "step": 140
    },
    {
      "epoch": 2.8380952380952382,
      "grad_norm": 0.975227741285332,
      "learning_rate": 4.52511911603265e-08,
      "loss": 0.0226,
      "step": 150
    },
    {
      "epoch": 2.9523809523809526,
      "step": 156,
      "total_flos": 1103265988608.0,
      "train_loss": 0.10387866152450442,
      "train_runtime": 341.5126,
      "train_samples_per_second": 7.379,
      "train_steps_per_second": 0.457
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1103265988608.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}