{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001588751638400127,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.984126984126984e-06,
      "loss": 10.5689,
      "theoretical_loss": 17.59466794495971,
      "tokens_seen": 131072
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.968253968253968e-06,
      "loss": 10.5816,
      "theoretical_loss": 14.920783596619636,
      "tokens_seen": 262144
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 10.4187,
      "theoretical_loss": 13.581028313181289,
      "tokens_seen": 393216
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.936507936507936e-06,
      "loss": 10.1647,
      "theoretical_loss": 12.71859646611439,
      "tokens_seen": 524288
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.92063492063492e-06,
      "loss": 9.8414,
      "theoretical_loss": 12.095879447666144,
      "tokens_seen": 655360
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 9.5154,
      "theoretical_loss": 11.615186049337796,
      "tokens_seen": 786432
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.3888888888888888e-05,
      "loss": 9.4061,
      "theoretical_loss": 11.227478542742938,
      "tokens_seen": 917504
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 9.3243,
      "theoretical_loss": 10.904894927088016,
      "tokens_seen": 1048576
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.7857142857142855e-05,
      "loss": 9.0303,
      "theoretical_loss": 10.630196716861345,
      "tokens_seen": 1179648
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.984126984126984e-05,
      "loss": 8.9144,
      "theoretical_loss": 10.392030784394397,
      "tokens_seen": 1310720
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.1825396825396824e-05,
      "loss": 8.7927,
      "theoretical_loss": 10.182553393901085,
      "tokens_seen": 1441792
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.380952380952381e-05,
      "loss": 8.7473,
      "theoretical_loss": 9.996136019471344,
      "tokens_seen": 1572864
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 831,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 8.822301864624023,
      "objective/train/theoretical_loss": 9.910229967024176,
      "objective/train/tokens_used": -18841600,
      "theoretical_loss": 9.910229967024176,
      "tokens_seen": 1638400
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.5793650793650793e-05,
      "loss": 8.8575,
      "theoretical_loss": 9.828613432171625,
      "tokens_seen": 1703936
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.7777777777777776e-05,
      "loss": 8.6865,
      "theoretical_loss": 9.676823599712613,
      "tokens_seen": 1835008
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.9761904761904762e-05,
      "loss": 8.6022,
      "theoretical_loss": 9.538313887395919,
      "tokens_seen": 1966080
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 8.4823,
      "theoretical_loss": 9.411146631541524,
      "tokens_seen": 2097152
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.373015873015873e-05,
      "loss": 8.4221,
      "theoretical_loss": 9.293766507291341,
      "tokens_seen": 2228224
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.571428571428571e-05,
      "loss": 8.3834,
      "theoretical_loss": 9.184907653139359,
      "tokens_seen": 2359296
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.76984126984127e-05,
      "loss": 8.2756,
      "theoretical_loss": 9.0835271371648,
      "tokens_seen": 2490368
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.968253968253968e-05,
      "loss": 8.3847,
      "theoretical_loss": 8.988756330540422,
      "tokens_seen": 2621440
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.1666666666666665e-05,
      "loss": 8.251,
      "theoretical_loss": 8.89986473310929,
      "tokens_seen": 2752512
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.365079365079365e-05,
      "loss": 8.1076,
      "theoretical_loss": 8.816232633409479,
      "tokens_seen": 2883584
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.563492063492063e-05,
      "loss": 8.1488,
      "theoretical_loss": 8.737330150151898,
      "tokens_seen": 3014656
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.761904761904762e-05,
      "loss": 8.0482,
      "theoretical_loss": 8.662700958366539,
      "tokens_seen": 3145728
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 1233,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 8.187471389770508,
      "objective/train/theoretical_loss": 8.591949505242134,
      "objective/train/tokens_used": -17203200,
      "theoretical_loss": 8.591949505242134,
      "tokens_seen": 3276800
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.96031746031746e-05,
      "loss": 7.9834,
      "theoretical_loss": 8.591949505242134,
      "tokens_seen": 3276800
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.1587301587301586e-05,
      "loss": 8.1198,
      "theoretical_loss": 8.524730860277067,
      "tokens_seen": 3407872
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.357142857142857e-05,
      "loss": 7.8858,
      "theoretical_loss": 8.460742578303845,
      "tokens_seen": 3538944
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.555555555555555e-05,
      "loss": 8.0049,
      "theoretical_loss": 8.399718117751275,
      "tokens_seen": 3670016
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.753968253968254e-05,
      "loss": 7.81,
      "theoretical_loss": 8.341421472916394,
      "tokens_seen": 3801088
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.9523809523809524e-05,
      "loss": 7.8978,
      "theoretical_loss": 8.28564276288293,
      "tokens_seen": 3932160
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.15079365079365e-05,
      "loss": 7.5006,
      "theoretical_loss": 8.232194580909036,
      "tokens_seen": 4063232
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.349206349206349e-05,
      "loss": 7.6305,
      "theoretical_loss": 8.180908953270682,
      "tokens_seen": 4194304
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.547619047619048e-05,
      "loss": 7.662,
      "theoretical_loss": 8.131634790246775,
      "tokens_seen": 4325376
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.746031746031745e-05,
      "loss": 7.7852,
      "theoretical_loss": 8.084235737332481,
      "tokens_seen": 4456448
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.944444444444444e-05,
      "loss": 7.5064,
      "theoretical_loss": 8.038588354092902,
      "tokens_seen": 4587520
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.142857142857142e-05,
      "loss": 7.5163,
      "theoretical_loss": 7.994580562902867,
      "tokens_seen": 4718592
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.341269841269842e-05,
      "loss": 7.4521,
      "theoretical_loss": 7.952110321298584,
      "tokens_seen": 4849664
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 2007,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 7.20822286605835,
      "objective/train/theoretical_loss": 7.931422353115133,
      "objective/train/tokens_used": -15564800,
      "theoretical_loss": 7.931422353115133,
      "tokens_seen": 4915200
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.53968253968254e-05,
      "loss": 7.3365,
      "theoretical_loss": 7.911084480620269,
      "tokens_seen": 4980736
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.738095238095239e-05,
      "loss": 7.2213,
      "theoretical_loss": 7.871417800659003,
      "tokens_seen": 5111808
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.936507936507937e-05,
      "loss": 7.2532,
      "theoretical_loss": 7.833032095585231,
      "tokens_seen": 5242880
    }
  ],
  "max_steps": 25177,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 2675630407680000.0,
  "trial_name": null,
  "trial_params": null
}