{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.044464206313917294,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1e-05,
      "loss": 2.3656,
      "step": 2
    },
    {
      "epoch": 0.0,
      "learning_rate": 2e-05,
      "loss": 2.2802,
      "step": 4
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.4974874371859298e-05,
      "loss": 2.2725,
      "step": 6
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.492462311557789e-05,
      "loss": 2.0394,
      "step": 8
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.4874371859296484e-05,
      "loss": 2.1627,
      "step": 10
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.4824120603015078e-05,
      "loss": 2.093,
      "step": 12
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.477386934673367e-05,
      "loss": 1.9071,
      "step": 14
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.4723618090452264e-05,
      "loss": 1.854,
      "step": 16
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4673366834170854e-05,
      "loss": 1.9392,
      "step": 18
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.462311557788945e-05,
      "loss": 1.9703,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.457286432160804e-05,
      "loss": 1.8765,
      "step": 22
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4522613065326637e-05,
      "loss": 1.8566,
      "step": 24
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4472361809045227e-05,
      "loss": 1.716,
      "step": 26
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.442211055276382e-05,
      "loss": 1.7856,
      "step": 28
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4371859296482413e-05,
      "loss": 1.7823,
      "step": 30
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4321608040201007e-05,
      "loss": 1.7174,
      "step": 32
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.42713567839196e-05,
      "loss": 1.6417,
      "step": 34
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.422110552763819e-05,
      "loss": 1.6285,
      "step": 36
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4170854271356786e-05,
      "loss": 1.8137,
      "step": 38
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4120603015075376e-05,
      "loss": 1.7487,
      "step": 40
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4070351758793973e-05,
      "loss": 1.7974,
      "step": 42
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.4020100502512563e-05,
      "loss": 1.7068,
      "step": 44
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.3969849246231156e-05,
      "loss": 1.7251,
      "step": 46
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.391959798994975e-05,
      "loss": 1.7919,
      "step": 48
    },
    {
      "epoch": 0.01,
      "learning_rate": 2.3869346733668342e-05,
      "loss": 1.7122,
      "step": 50
    },
    {
      "epoch": 0.01,
      "eval_loss": 1.7093677520751953,
      "eval_runtime": 2707.7474,
      "eval_samples_per_second": 5.695,
      "eval_steps_per_second": 0.356,
      "step": 50
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3819095477386936e-05,
      "loss": 1.6579,
      "step": 52
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.376884422110553e-05,
      "loss": 1.6581,
      "step": 54
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3718592964824122e-05,
      "loss": 1.7084,
      "step": 56
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3668341708542715e-05,
      "loss": 1.8677,
      "step": 58
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.361809045226131e-05,
      "loss": 1.6996,
      "step": 60
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3567839195979902e-05,
      "loss": 1.6643,
      "step": 62
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.351758793969849e-05,
      "loss": 1.7271,
      "step": 64
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3467336683417088e-05,
      "loss": 1.722,
      "step": 66
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3417085427135678e-05,
      "loss": 1.5764,
      "step": 68
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3366834170854275e-05,
      "loss": 1.5796,
      "step": 70
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3316582914572865e-05,
      "loss": 1.6595,
      "step": 72
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.326633165829146e-05,
      "loss": 1.7247,
      "step": 74
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.321608040201005e-05,
      "loss": 1.7143,
      "step": 76
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3165829145728644e-05,
      "loss": 1.7236,
      "step": 78
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3115577889447238e-05,
      "loss": 1.6257,
      "step": 80
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3065326633165827e-05,
      "loss": 1.722,
      "step": 82
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3015075376884424e-05,
      "loss": 1.7404,
      "step": 84
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2964824120603014e-05,
      "loss": 1.7458,
      "step": 86
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.291457286432161e-05,
      "loss": 1.5463,
      "step": 88
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.28643216080402e-05,
      "loss": 1.6731,
      "step": 90
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2814070351758797e-05,
      "loss": 1.5564,
      "step": 92
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2763819095477387e-05,
      "loss": 1.8338,
      "step": 94
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.271356783919598e-05,
      "loss": 1.6688,
      "step": 96
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2663316582914573e-05,
      "loss": 1.6844,
      "step": 98
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2613065326633167e-05,
      "loss": 1.6507,
      "step": 100
    },
    {
      "epoch": 0.03,
      "eval_loss": 1.6832690238952637,
      "eval_runtime": 2705.0606,
      "eval_samples_per_second": 5.701,
      "eval_steps_per_second": 0.356,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.256281407035176e-05,
      "loss": 1.6102,
      "step": 102
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2512562814070353e-05,
      "loss": 1.8097,
      "step": 104
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2462311557788946e-05,
      "loss": 1.6539,
      "step": 106
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.241206030150754e-05,
      "loss": 1.6334,
      "step": 108
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2361809045226133e-05,
      "loss": 1.704,
      "step": 110
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2311557788944726e-05,
      "loss": 1.6663,
      "step": 112
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2261306532663316e-05,
      "loss": 1.6988,
      "step": 114
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2211055276381913e-05,
      "loss": 1.5737,
      "step": 116
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.2160804020100502e-05,
      "loss": 1.6554,
      "step": 118
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.21105527638191e-05,
      "loss": 1.6942,
      "step": 120
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.206030150753769e-05,
      "loss": 1.5815,
      "step": 122
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.2010050251256282e-05,
      "loss": 1.577,
      "step": 124
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1959798994974875e-05,
      "loss": 1.7271,
      "step": 126
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.190954773869347e-05,
      "loss": 1.6922,
      "step": 128
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1859296482412062e-05,
      "loss": 1.5486,
      "step": 130
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1809045226130652e-05,
      "loss": 1.7836,
      "step": 132
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.175879396984925e-05,
      "loss": 1.5733,
      "step": 134
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1708542713567838e-05,
      "loss": 1.5774,
      "step": 136
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1658291457286435e-05,
      "loss": 1.6749,
      "step": 138
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1608040201005025e-05,
      "loss": 1.6127,
      "step": 140
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.155778894472362e-05,
      "loss": 1.7014,
      "step": 142
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.150753768844221e-05,
      "loss": 1.5019,
      "step": 144
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1457286432160804e-05,
      "loss": 1.6754,
      "step": 146
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1407035175879398e-05,
      "loss": 1.607,
      "step": 148
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.135678391959799e-05,
      "loss": 1.6015,
      "step": 150
    },
    {
      "epoch": 0.04,
      "eval_loss": 1.6669741868972778,
      "eval_runtime": 2712.7443,
      "eval_samples_per_second": 5.685,
      "eval_steps_per_second": 0.355,
      "step": 150
    }
  ],
  "logging_steps": 2,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "total_flos": 1.540494802944e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}