{
  "best_global_step": 600,
  "best_metric": 3.6174755096435547,
  "best_model_checkpoint": "codellama_utests_embedded_v3/checkpoint-600",
  "epoch": 4.0,
  "eval_steps": 200,
  "global_step": 652,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3067484662576687,
      "grad_norm": 16.58524513244629,
      "learning_rate": 9.800000000000001e-06,
      "loss": 14.9826,
      "step": 50
    },
    {
      "epoch": 0.6134969325153374,
      "grad_norm": 1.136972188949585,
      "learning_rate": 1.9600000000000002e-05,
      "loss": 7.3524,
      "step": 100
    },
    {
      "epoch": 0.9202453987730062,
      "grad_norm": 1.8778836727142334,
      "learning_rate": 1.8260869565217393e-05,
      "loss": 6.3668,
      "step": 150
    },
    {
      "epoch": 1.2269938650306749,
      "grad_norm": 1.6192196607589722,
      "learning_rate": 1.644927536231884e-05,
      "loss": 6.1982,
      "step": 200
    },
    {
      "epoch": 1.2269938650306749,
      "eval_loss": 6.118179798126221,
      "eval_runtime": 212.8589,
      "eval_samples_per_second": 1.536,
      "eval_steps_per_second": 0.193,
      "step": 200
    },
    {
      "epoch": 1.5337423312883436,
      "grad_norm": 5.733585357666016,
      "learning_rate": 1.4637681159420291e-05,
      "loss": 6.0188,
      "step": 250
    },
    {
      "epoch": 1.8404907975460123,
      "grad_norm": 2.485412120819092,
      "learning_rate": 1.2862318840579711e-05,
      "loss": 5.7176,
      "step": 300
    },
    {
      "epoch": 2.147239263803681,
      "grad_norm": 5.66612434387207,
      "learning_rate": 1.1050724637681161e-05,
      "loss": 5.4888,
      "step": 350
    },
    {
      "epoch": 2.4539877300613497,
      "grad_norm": 4.912620544433594,
      "learning_rate": 9.23913043478261e-06,
      "loss": 5.1486,
      "step": 400
    },
    {
      "epoch": 2.4539877300613497,
      "eval_loss": 5.174108982086182,
      "eval_runtime": 213.7618,
      "eval_samples_per_second": 1.53,
      "eval_steps_per_second": 0.192,
      "step": 400
    },
    {
      "epoch": 2.7607361963190185,
      "grad_norm": 9.008650779724121,
      "learning_rate": 7.4637681159420295e-06,
      "loss": 4.9863,
      "step": 450
    },
    {
      "epoch": 3.067484662576687,
      "grad_norm": 9.85195255279541,
      "learning_rate": 5.652173913043479e-06,
      "loss": 4.7134,
      "step": 500
    },
    {
      "epoch": 3.374233128834356,
      "grad_norm": 5.555596828460693,
      "learning_rate": 3.840579710144928e-06,
      "loss": 4.2576,
      "step": 550
    },
    {
      "epoch": 3.6809815950920246,
      "grad_norm": 5.973374843597412,
      "learning_rate": 2.065217391304348e-06,
      "loss": 3.6974,
      "step": 600
    },
    {
      "epoch": 3.6809815950920246,
      "eval_loss": 3.6174755096435547,
      "eval_runtime": 215.2474,
      "eval_samples_per_second": 1.519,
      "eval_steps_per_second": 0.19,
      "step": 600
    },
    {
      "epoch": 3.9877300613496933,
      "grad_norm": 5.285229206085205,
      "learning_rate": 2.536231884057971e-07,
      "loss": 3.5779,
      "step": 650
    }
  ],
  "logging_steps": 50,
  "max_steps": 652,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.555979922735104e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}