{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 3762,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02658160552897395,
      "grad_norm": 0.11167939752340317,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.4588,
      "step": 100
    },
    {
      "epoch": 0.0531632110579479,
      "grad_norm": 0.188255175948143,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 2.4716,
      "step": 200
    },
    {
      "epoch": 0.07974481658692185,
      "grad_norm": 0.3003849685192108,
      "learning_rate": 2e-05,
      "loss": 2.3953,
      "step": 300
    },
    {
      "epoch": 0.1063264221158958,
      "grad_norm": 0.3995692729949951,
      "learning_rate": 1.9958854951371364e-05,
      "loss": 2.3458,
      "step": 400
    },
    {
      "epoch": 0.13290802764486975,
      "grad_norm": 0.46281716227531433,
      "learning_rate": 1.9835758388490783e-05,
      "loss": 2.2854,
      "step": 500
    },
    {
      "epoch": 0.1594896331738437,
      "grad_norm": 0.49080541729927063,
      "learning_rate": 1.9631723274171412e-05,
      "loss": 2.2997,
      "step": 600
    },
    {
      "epoch": 0.18607123870281764,
      "grad_norm": 0.49114933609962463,
      "learning_rate": 1.934842861535337e-05,
      "loss": 2.2731,
      "step": 700
    },
    {
      "epoch": 0.2126528442317916,
      "grad_norm": 0.6612605452537537,
      "learning_rate": 1.898820564653931e-05,
      "loss": 2.2256,
      "step": 800
    },
    {
      "epoch": 0.23923444976076555,
      "grad_norm": 0.5731486678123474,
      "learning_rate": 1.8554018646043045e-05,
      "loss": 2.2504,
      "step": 900
    },
    {
      "epoch": 0.2658160552897395,
      "grad_norm": 0.5822581648826599,
      "learning_rate": 1.8049440542914438e-05,
      "loss": 2.2489,
      "step": 1000
    },
    {
      "epoch": 0.29239766081871343,
      "grad_norm": 0.6100282073020935,
      "learning_rate": 1.7478623515271526e-05,
      "loss": 2.2226,
      "step": 1100
    },
    {
      "epoch": 0.3189792663476874,
      "grad_norm": 0.6328096985816956,
      "learning_rate": 1.684626482198639e-05,
      "loss": 2.209,
      "step": 1200
    },
    {
      "epoch": 0.34556087187666135,
      "grad_norm": 0.6116699576377869,
      "learning_rate": 1.6157568148896227e-05,
      "loss": 2.171,
      "step": 1300
    },
    {
      "epoch": 0.3721424774056353,
      "grad_norm": 0.6875652074813843,
      "learning_rate": 1.5418200787621966e-05,
      "loss": 2.1755,
      "step": 1400
    },
    {
      "epoch": 0.39872408293460926,
      "grad_norm": 0.7142069339752197,
      "learning_rate": 1.4634246999370415e-05,
      "loss": 2.1795,
      "step": 1500
    },
    {
      "epoch": 0.4253056884635832,
      "grad_norm": 0.7618559002876282,
      "learning_rate": 1.3812157947489629e-05,
      "loss": 2.1622,
      "step": 1600
    },
    {
      "epoch": 0.45188729399255717,
      "grad_norm": 0.7928171753883362,
      "learning_rate": 1.2958698610782939e-05,
      "loss": 2.1494,
      "step": 1700
    },
    {
      "epoch": 0.4784688995215311,
      "grad_norm": 0.6995240449905396,
      "learning_rate": 1.208089211443262e-05,
      "loss": 2.1713,
      "step": 1800
    },
    {
      "epoch": 0.5050505050505051,
      "grad_norm": 0.947179913520813,
      "learning_rate": 1.1185961936634446e-05,
      "loss": 2.1418,
      "step": 1900
    },
    {
      "epoch": 0.531632110579479,
      "grad_norm": 0.7807749509811401,
      "learning_rate": 1.0281272466525365e-05,
      "loss": 2.1395,
      "step": 2000
    },
    {
      "epoch": 0.5582137161084529,
      "grad_norm": 0.8438702821731567,
      "learning_rate": 9.374268402553665e-06,
      "loss": 2.1325,
      "step": 2100
    },
    {
      "epoch": 0.5847953216374269,
      "grad_norm": 0.8339056968688965,
      "learning_rate": 8.472413489983052e-06,
      "loss": 2.161,
      "step": 2200
    },
    {
      "epoch": 0.6113769271664008,
      "grad_norm": 1.044533610343933,
      "learning_rate": 7.5831291016602544e-06,
      "loss": 2.1111,
      "step": 2300
    },
    {
      "epoch": 0.6379585326953748,
      "grad_norm": 0.884345293045044,
      "learning_rate": 6.713733167465723e-06,
      "loss": 2.1211,
      "step": 2400
    },
    {
      "epoch": 0.6645401382243488,
      "grad_norm": 0.8833799958229065,
      "learning_rate": 5.87137995499745e-06,
      "loss": 2.1421,
      "step": 2500
    },
    {
      "epoch": 0.6911217437533227,
      "grad_norm": 1.018437385559082,
      "learning_rate": 5.063001197033337e-06,
      "loss": 2.1473,
      "step": 2600
    },
    {
      "epoch": 0.7177033492822966,
      "grad_norm": 0.7684484720230103,
      "learning_rate": 4.295249050234738e-06,
      "loss": 2.1583,
      "step": 2700
    },
    {
      "epoch": 0.7442849548112705,
      "grad_norm": 0.9406973719596863,
      "learning_rate": 3.5744413544846103e-06,
      "loss": 2.1077,
      "step": 2800
    },
    {
      "epoch": 0.7708665603402446,
      "grad_norm": 0.7980538606643677,
      "learning_rate": 2.9065096433216667e-06,
      "loss": 2.1044,
      "step": 2900
    },
    {
      "epoch": 0.7974481658692185,
      "grad_norm": 0.808063805103302,
      "learning_rate": 2.2969503332931754e-06,
      "loss": 2.14,
      "step": 3000
    },
    {
      "epoch": 0.8240297713981924,
      "grad_norm": 1.1028823852539062,
      "learning_rate": 1.750779493889776e-06,
      "loss": 2.1408,
      "step": 3100
    },
    {
      "epoch": 0.8506113769271664,
      "grad_norm": 0.7982715964317322,
      "learning_rate": 1.2724915702608288e-06,
      "loss": 2.1195,
      "step": 3200
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 0.9090045690536499,
      "learning_rate": 8.660223983815708e-07,
      "loss": 2.1148,
      "step": 3300
    },
    {
      "epoch": 0.9037745879851143,
      "grad_norm": 0.8760251402854919,
      "learning_rate": 5.347168170206063e-07,
      "loss": 2.1416,
      "step": 3400
    },
    {
      "epoch": 0.9303561935140883,
      "grad_norm": 0.9763931632041931,
      "learning_rate": 2.813011430291446e-07,
      "loss": 2.128,
      "step": 3500
    },
    {
      "epoch": 0.9569377990430622,
      "grad_norm": 0.7760120034217834,
      "learning_rate": 1.0786073645311035e-07,
      "loss": 2.1564,
      "step": 3600
    },
    {
      "epoch": 0.9835194045720361,
      "grad_norm": 0.8685386180877686,
      "learning_rate": 1.5822840085052415e-08,
      "loss": 2.1469,
      "step": 3700
    },
    {
      "epoch": 1.0,
      "step": 3762,
      "total_flos": 6.83676707586048e+16,
      "train_loss": 2.194951180636027,
      "train_runtime": 1168.3863,
      "train_samples_per_second": 6.44,
      "train_steps_per_second": 3.22
    }
  ],
  "logging_steps": 100,
  "max_steps": 3762,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.83676707586048e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}