{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 11793,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1271940981938438,
      "grad_norm": 20.986921310424805,
      "learning_rate": 2.873060290002544e-05,
      "loss": 1.7205758056640625,
      "step": 500
    },
    {
      "epoch": 0.2543881963876876,
      "grad_norm": 12.114559173583984,
      "learning_rate": 2.7458661918087002e-05,
      "loss": 1.1848350830078125,
      "step": 1000
    },
    {
      "epoch": 0.3815822945815314,
      "grad_norm": 14.860601425170898,
      "learning_rate": 2.6186720936148565e-05,
      "loss": 1.0746712646484375,
      "step": 1500
    },
    {
      "epoch": 0.5087763927753752,
      "grad_norm": 11.910303115844727,
      "learning_rate": 2.4914779954210127e-05,
      "loss": 1.0038060302734375,
      "step": 2000
    },
    {
      "epoch": 0.635970490969219,
      "grad_norm": 8.553890228271484,
      "learning_rate": 2.3642838972271685e-05,
      "loss": 0.9559959106445313,
      "step": 2500
    },
    {
      "epoch": 0.7631645891630628,
      "grad_norm": 9.562394142150879,
      "learning_rate": 2.2370897990333247e-05,
      "loss": 0.9281796875,
      "step": 3000
    },
    {
      "epoch": 0.8903586873569066,
      "grad_norm": 8.361549377441406,
      "learning_rate": 2.1098957008394813e-05,
      "loss": 0.9157453002929687,
      "step": 3500
    },
    {
      "epoch": 1.0175527855507505,
      "grad_norm": 7.545825004577637,
      "learning_rate": 1.9827016026456375e-05,
      "loss": 0.8658336181640625,
      "step": 4000
    },
    {
      "epoch": 1.1447468837445942,
      "grad_norm": 13.105203628540039,
      "learning_rate": 1.8555075044517934e-05,
      "loss": 0.6820219116210937,
      "step": 4500
    },
    {
      "epoch": 1.271940981938438,
      "grad_norm": 9.773395538330078,
      "learning_rate": 1.7283134062579496e-05,
      "loss": 0.6653152465820312,
      "step": 5000
    },
    {
      "epoch": 1.399135080132282,
      "grad_norm": 9.450806617736816,
      "learning_rate": 1.6011193080641058e-05,
      "loss": 0.6671427612304688,
      "step": 5500
    },
    {
      "epoch": 1.5263291783261257,
      "grad_norm": 10.44336223602295,
      "learning_rate": 1.4739252098702622e-05,
      "loss": 0.6522406616210937,
      "step": 6000
    },
    {
      "epoch": 1.6535232765199694,
      "grad_norm": 10.461090087890625,
      "learning_rate": 1.3467311116764182e-05,
      "loss": 0.6547742309570312,
      "step": 6500
    },
    {
      "epoch": 1.7807173747138134,
      "grad_norm": 6.990764141082764,
      "learning_rate": 1.2195370134825746e-05,
      "loss": 0.6391011352539062,
      "step": 7000
    },
    {
      "epoch": 1.907911472907657,
      "grad_norm": 11.571297645568848,
      "learning_rate": 1.0923429152887306e-05,
      "loss": 0.6334611206054688,
      "step": 7500
    },
    {
      "epoch": 2.035105571101501,
      "grad_norm": 7.267425060272217,
      "learning_rate": 9.651488170948868e-06,
      "loss": 0.5740706176757813,
      "step": 8000
    },
    {
      "epoch": 2.162299669295345,
      "grad_norm": 20.655519485473633,
      "learning_rate": 8.37954718901043e-06,
      "loss": 0.4668221435546875,
      "step": 8500
    },
    {
      "epoch": 2.2894937674891884,
      "grad_norm": 8.184877395629883,
      "learning_rate": 7.107606207071992e-06,
      "loss": 0.44616903686523435,
      "step": 9000
    },
    {
      "epoch": 2.4166878656830324,
      "grad_norm": 10.177411079406738,
      "learning_rate": 5.8356652251335545e-06,
      "loss": 0.4526641235351562,
      "step": 9500
    },
    {
      "epoch": 2.543881963876876,
      "grad_norm": 13.613941192626953,
      "learning_rate": 4.563724243195116e-06,
      "loss": 0.43699600219726564,
      "step": 10000
    },
    {
      "epoch": 2.67107606207072,
      "grad_norm": 14.783905982971191,
      "learning_rate": 3.291783261256678e-06,
      "loss": 0.4329667663574219,
      "step": 10500
    },
    {
      "epoch": 2.798270160264564,
      "grad_norm": 12.480794906616211,
      "learning_rate": 2.0198422793182395e-06,
      "loss": 0.4396368408203125,
      "step": 11000
    },
    {
      "epoch": 2.9254642584584074,
      "grad_norm": 14.206427574157715,
      "learning_rate": 7.479012973798015e-07,
      "loss": 0.4308229064941406,
      "step": 11500
    },
    {
      "epoch": 3.0,
      "step": 11793,
      "total_flos": 1.2323538937939968e+17,
      "train_loss": 0.7285093832974275,
      "train_runtime": 30100.617,
      "train_samples_per_second": 15.668,
      "train_steps_per_second": 0.392
    }
  ],
  "logging_steps": 500,
  "max_steps": 11793,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2323538937939968e+17,
  "train_batch_size": 40,
  "trial_name": null,
  "trial_params": null
}