{
  "best_metric": 0.6984841227531433,
  "best_model_checkpoint": "YELP_full/checkpoint-3386",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 3386,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "grad_norm": 8.398588180541992,
      "learning_rate": 5.8227997637330186e-05,
      "loss": 0.9245,
      "step": 500
    },
    {
      "epoch": 0.3,
      "grad_norm": 3.5803122520446777,
      "learning_rate": 5.645599527466036e-05,
      "loss": 0.7913,
      "step": 1000
    },
    {
      "epoch": 0.44,
      "grad_norm": 3.3800671100616455,
      "learning_rate": 5.468399291199055e-05,
      "loss": 0.7613,
      "step": 1500
    },
    {
      "epoch": 0.59,
      "grad_norm": 1.9862334728240967,
      "learning_rate": 5.291199054932074e-05,
      "loss": 0.7362,
      "step": 2000
    },
    {
      "epoch": 0.74,
      "grad_norm": 2.043750524520874,
      "learning_rate": 5.113998818665092e-05,
      "loss": 0.7262,
      "step": 2500
    },
    {
      "epoch": 0.89,
      "grad_norm": 2.8348655700683594,
      "learning_rate": 4.93679858239811e-05,
      "loss": 0.7165,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.69042,
      "eval_loss": 0.6984841227531433,
      "eval_runtime": 141.1206,
      "eval_samples_per_second": 354.307,
      "eval_steps_per_second": 1.849,
      "step": 3386
    }
  ],
  "logging_steps": 500,
  "max_steps": 16930,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 3.124146123583488e+17,
  "train_batch_size": 192,
  "trial_name": null,
  "trial_params": null
}