{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4756242568370987,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03170828378913991,
      "grad_norm": 11.620649337768555,
      "learning_rate": 0.00011399999999999999,
      "loss": 7.7223,
      "step": 20
    },
    {
      "epoch": 0.06341656757827982,
      "grad_norm": 1.8820377588272095,
      "learning_rate": 0.000234,
      "loss": 1.8545,
      "step": 40
    },
    {
      "epoch": 0.09512485136741974,
      "grad_norm": 1.3315837383270264,
      "learning_rate": 0.00029868292682926826,
      "loss": 1.2718,
      "step": 60
    },
    {
      "epoch": 0.12683313515655964,
      "grad_norm": 1.0603790283203125,
      "learning_rate": 0.00029575609756097557,
      "loss": 1.1449,
      "step": 80
    },
    {
      "epoch": 0.15854141894569956,
      "grad_norm": 1.1477017402648926,
      "learning_rate": 0.00029282926829268287,
      "loss": 1.1018,
      "step": 100
    },
    {
      "epoch": 0.1902497027348395,
      "grad_norm": 1.0604023933410645,
      "learning_rate": 0.0002899024390243902,
      "loss": 1.0883,
      "step": 120
    },
    {
      "epoch": 0.22195798652397938,
      "grad_norm": 0.8798519372940063,
      "learning_rate": 0.0002869756097560975,
      "loss": 1.0645,
      "step": 140
    },
    {
      "epoch": 0.2536662703131193,
      "grad_norm": 1.0906599760055542,
      "learning_rate": 0.0002840487804878048,
      "loss": 1.0418,
      "step": 160
    },
    {
      "epoch": 0.2853745541022592,
      "grad_norm": 0.9430219531059265,
      "learning_rate": 0.0002811219512195122,
      "loss": 1.0316,
      "step": 180
    },
    {
      "epoch": 0.3170828378913991,
      "grad_norm": 1.0706809759140015,
      "learning_rate": 0.0002781951219512195,
      "loss": 1.0286,
      "step": 200
    },
    {
      "epoch": 0.34879112168053905,
      "grad_norm": 0.8156995177268982,
      "learning_rate": 0.00027526829268292684,
      "loss": 1.0293,
      "step": 220
    },
    {
      "epoch": 0.380499405469679,
      "grad_norm": 0.8572260737419128,
      "learning_rate": 0.00027234146341463414,
      "loss": 1.0051,
      "step": 240
    },
    {
      "epoch": 0.41220768925881884,
      "grad_norm": 0.9287059307098389,
      "learning_rate": 0.00026941463414634144,
      "loss": 1.0152,
      "step": 260
    },
    {
      "epoch": 0.44391597304795877,
      "grad_norm": 0.8125821352005005,
      "learning_rate": 0.00026648780487804874,
      "loss": 1.0098,
      "step": 280
    },
    {
      "epoch": 0.4756242568370987,
      "grad_norm": 0.8053847551345825,
      "learning_rate": 0.0002635609756097561,
      "loss": 1.0029,
      "step": 300
    }
  ],
  "logging_steps": 20,
  "max_steps": 2100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.570903380983808e+17,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}