{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.954954954954955,
  "eval_steps": 500,
  "global_step": 123,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24024024024024024,
      "grad_norm": 1.9880704580261388,
      "learning_rate": 5e-06,
      "loss": 0.9821,
      "step": 10
    },
    {
      "epoch": 0.4804804804804805,
      "grad_norm": 6.128096347773532,
      "learning_rate": 5e-06,
      "loss": 0.8967,
      "step": 20
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 1.0677960378432623,
      "learning_rate": 5e-06,
      "loss": 0.8571,
      "step": 30
    },
    {
      "epoch": 0.960960960960961,
      "grad_norm": 0.7422640470327214,
      "learning_rate": 5e-06,
      "loss": 0.8271,
      "step": 40
    },
    {
      "epoch": 0.984984984984985,
      "eval_loss": 0.8107500672340393,
      "eval_runtime": 29.4261,
      "eval_samples_per_second": 38.129,
      "eval_steps_per_second": 0.612,
      "step": 41
    },
    {
      "epoch": 1.2012012012012012,
      "grad_norm": 0.8941117019169897,
      "learning_rate": 5e-06,
      "loss": 0.817,
      "step": 50
    },
    {
      "epoch": 1.4414414414414414,
      "grad_norm": 0.716156934667577,
      "learning_rate": 5e-06,
      "loss": 0.7763,
      "step": 60
    },
    {
      "epoch": 1.6816816816816815,
      "grad_norm": 0.8254504253978844,
      "learning_rate": 5e-06,
      "loss": 0.7662,
      "step": 70
    },
    {
      "epoch": 1.921921921921922,
      "grad_norm": 0.8536740589946262,
      "learning_rate": 5e-06,
      "loss": 0.7594,
      "step": 80
    },
    {
      "epoch": 1.993993993993994,
      "eval_loss": 0.777886688709259,
      "eval_runtime": 28.692,
      "eval_samples_per_second": 39.105,
      "eval_steps_per_second": 0.627,
      "step": 83
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 1.0362955122236899,
      "learning_rate": 5e-06,
      "loss": 0.755,
      "step": 90
    },
    {
      "epoch": 2.4024024024024024,
      "grad_norm": 1.0627788137007934,
      "learning_rate": 5e-06,
      "loss": 0.7095,
      "step": 100
    },
    {
      "epoch": 2.6426426426426426,
      "grad_norm": 0.7503435618913146,
      "learning_rate": 5e-06,
      "loss": 0.7074,
      "step": 110
    },
    {
      "epoch": 2.8828828828828827,
      "grad_norm": 0.9599252203158758,
      "learning_rate": 5e-06,
      "loss": 0.7082,
      "step": 120
    },
    {
      "epoch": 2.954954954954955,
      "eval_loss": 0.7722160816192627,
      "eval_runtime": 27.4507,
      "eval_samples_per_second": 40.873,
      "eval_steps_per_second": 0.656,
      "step": 123
    },
    {
      "epoch": 2.954954954954955,
      "step": 123,
      "total_flos": 205820201533440.0,
      "train_loss": 0.7943652141384963,
      "train_runtime": 4287.8732,
      "train_samples_per_second": 14.905,
      "train_steps_per_second": 0.029
    }
  ],
  "logging_steps": 10,
  "max_steps": 123,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 205820201533440.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}