{
  "best_global_step": 3415,
  "best_metric": 0.6295754026354319,
  "best_model_checkpoint": "./saved_models/checkpoint-3415",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3415,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07320644216691069,
      "grad_norm": 1.5678623914718628,
      "learning_rate": 1.971303074670571e-05,
      "loss": 0.6936,
      "step": 50
    },
    {
      "epoch": 0.14641288433382138,
      "grad_norm": 1.88246488571167,
      "learning_rate": 1.942020497803807e-05,
      "loss": 0.6949,
      "step": 100
    },
    {
      "epoch": 0.21961932650073207,
      "grad_norm": 1.9367057085037231,
      "learning_rate": 1.9127379209370426e-05,
      "loss": 0.6881,
      "step": 150
    },
    {
      "epoch": 0.29282576866764276,
      "grad_norm": 1.304018497467041,
      "learning_rate": 1.8834553440702785e-05,
      "loss": 0.6898,
      "step": 200
    },
    {
      "epoch": 0.36603221083455345,
      "grad_norm": 3.572559356689453,
      "learning_rate": 1.854172767203514e-05,
      "loss": 0.6835,
      "step": 250
    },
    {
      "epoch": 0.43923865300146414,
      "grad_norm": 2.7547812461853027,
      "learning_rate": 1.8248901903367496e-05,
      "loss": 0.6758,
      "step": 300
    },
    {
      "epoch": 0.5124450951683748,
      "grad_norm": 5.36733341217041,
      "learning_rate": 1.7956076134699855e-05,
      "loss": 0.6763,
      "step": 350
    },
    {
      "epoch": 0.5856515373352855,
      "grad_norm": 2.518483877182007,
      "learning_rate": 1.766325036603221e-05,
      "loss": 0.6794,
      "step": 400
    },
    {
      "epoch": 0.6588579795021962,
      "grad_norm": 2.1962575912475586,
      "learning_rate": 1.737042459736457e-05,
      "loss": 0.6845,
      "step": 450
    },
    {
      "epoch": 0.7320644216691069,
      "grad_norm": 3.3848378658294678,
      "learning_rate": 1.7077598828696925e-05,
      "loss": 0.6792,
      "step": 500
    },
    {
      "epoch": 0.8052708638360175,
      "grad_norm": 0.9908949136734009,
      "learning_rate": 1.6784773060029284e-05,
      "loss": 0.6905,
      "step": 550
    },
    {
      "epoch": 0.8784773060029283,
      "grad_norm": 1.581642508506775,
      "learning_rate": 1.649194729136164e-05,
      "loss": 0.6801,
      "step": 600
    },
    {
      "epoch": 0.9516837481698389,
      "grad_norm": 3.641871213912964,
      "learning_rate": 1.6199121522694e-05,
      "loss": 0.6825,
      "step": 650
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6010248901903368,
      "eval_loss": 0.6722473502159119,
      "eval_runtime": 3.5824,
      "eval_samples_per_second": 762.619,
      "eval_steps_per_second": 12.003,
      "step": 683
    },
    {
      "epoch": 1.0248901903367496,
      "grad_norm": 1.8619815111160278,
      "learning_rate": 1.5906295754026355e-05,
      "loss": 0.6704,
      "step": 700
    },
    {
      "epoch": 1.0980966325036603,
      "grad_norm": 0.7537197470664978,
      "learning_rate": 1.5613469985358714e-05,
      "loss": 0.6758,
      "step": 750
    },
    {
      "epoch": 1.171303074670571,
      "grad_norm": 0.7405994534492493,
      "learning_rate": 1.532064421669107e-05,
      "loss": 0.6682,
      "step": 800
    },
    {
      "epoch": 1.2445095168374818,
      "grad_norm": 1.4860440492630005,
      "learning_rate": 1.5027818448023428e-05,
      "loss": 0.6664,
      "step": 850
    },
    {
      "epoch": 1.3177159590043923,
      "grad_norm": 1.5564947128295898,
      "learning_rate": 1.4734992679355784e-05,
      "loss": 0.6696,
      "step": 900
    },
    {
      "epoch": 1.390922401171303,
      "grad_norm": 2.0371503829956055,
      "learning_rate": 1.4442166910688143e-05,
      "loss": 0.6679,
      "step": 950
    },
    {
      "epoch": 1.4641288433382138,
      "grad_norm": 1.321709394454956,
      "learning_rate": 1.4149341142020499e-05,
      "loss": 0.6589,
      "step": 1000
    },
    {
      "epoch": 1.5373352855051245,
      "grad_norm": 2.6580564975738525,
      "learning_rate": 1.3856515373352856e-05,
      "loss": 0.6577,
      "step": 1050
    },
    {
      "epoch": 1.610541727672035,
      "grad_norm": 1.3632615804672241,
      "learning_rate": 1.3563689604685213e-05,
      "loss": 0.6773,
      "step": 1100
    },
    {
      "epoch": 1.6837481698389458,
      "grad_norm": 1.320080280303955,
      "learning_rate": 1.327086383601757e-05,
      "loss": 0.6573,
      "step": 1150
    },
    {
      "epoch": 1.7569546120058566,
      "grad_norm": 2.9068989753723145,
      "learning_rate": 1.2978038067349928e-05,
      "loss": 0.6548,
      "step": 1200
    },
    {
      "epoch": 1.830161054172767,
      "grad_norm": 0.9685165286064148,
      "learning_rate": 1.2685212298682286e-05,
      "loss": 0.6589,
      "step": 1250
    },
    {
      "epoch": 1.903367496339678,
      "grad_norm": 0.8692395687103271,
      "learning_rate": 1.2392386530014641e-05,
      "loss": 0.6604,
      "step": 1300
    },
    {
      "epoch": 1.9765739385065886,
      "grad_norm": 0.9666163325309753,
      "learning_rate": 1.2099560761347e-05,
      "loss": 0.6634,
      "step": 1350
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5647877013177159,
      "eval_loss": 0.6470807790756226,
      "eval_runtime": 3.57,
      "eval_samples_per_second": 765.268,
      "eval_steps_per_second": 12.045,
      "step": 1366
    },
    {
      "epoch": 2.049780380673499,
      "grad_norm": 6.759352207183838,
      "learning_rate": 1.1806734992679356e-05,
      "loss": 0.6592,
      "step": 1400
    },
    {
      "epoch": 2.12298682284041,
      "grad_norm": 2.8715744018554688,
      "learning_rate": 1.1513909224011715e-05,
      "loss": 0.6445,
      "step": 1450
    },
    {
      "epoch": 2.1961932650073206,
      "grad_norm": 4.17264461517334,
      "learning_rate": 1.122108345534407e-05,
      "loss": 0.6499,
      "step": 1500
    },
    {
      "epoch": 2.269399707174231,
      "grad_norm": 1.707520842552185,
      "learning_rate": 1.092825768667643e-05,
      "loss": 0.649,
      "step": 1550
    },
    {
      "epoch": 2.342606149341142,
      "grad_norm": 1.4020594358444214,
      "learning_rate": 1.0635431918008785e-05,
      "loss": 0.6468,
      "step": 1600
    },
    {
      "epoch": 2.4158125915080526,
      "grad_norm": 1.690818190574646,
      "learning_rate": 1.0342606149341143e-05,
      "loss": 0.6524,
      "step": 1650
    },
    {
      "epoch": 2.4890190336749636,
      "grad_norm": 2.177947521209717,
      "learning_rate": 1.00497803806735e-05,
      "loss": 0.6443,
      "step": 1700
    },
    {
      "epoch": 2.562225475841874,
      "grad_norm": 2.009894847869873,
      "learning_rate": 9.756954612005857e-06,
      "loss": 0.6491,
      "step": 1750
    },
    {
      "epoch": 2.6354319180087846,
      "grad_norm": 1.707280158996582,
      "learning_rate": 9.464128843338215e-06,
      "loss": 0.6364,
      "step": 1800
    },
    {
      "epoch": 2.7086383601756956,
      "grad_norm": 6.970835208892822,
      "learning_rate": 9.171303074670572e-06,
      "loss": 0.6442,
      "step": 1850
    },
    {
      "epoch": 2.781844802342606,
      "grad_norm": 1.682925820350647,
      "learning_rate": 8.87847730600293e-06,
      "loss": 0.6399,
      "step": 1900
    },
    {
      "epoch": 2.855051244509517,
      "grad_norm": 3.518610954284668,
      "learning_rate": 8.585651537335287e-06,
      "loss": 0.6423,
      "step": 1950
    },
    {
      "epoch": 2.9282576866764276,
      "grad_norm": 3.799734115600586,
      "learning_rate": 8.292825768667644e-06,
      "loss": 0.6385,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.623718887262079,
      "eval_loss": 0.6276178359985352,
      "eval_runtime": 3.5711,
      "eval_samples_per_second": 765.036,
      "eval_steps_per_second": 12.041,
      "step": 2049
    },
    {
      "epoch": 3.001464128843338,
      "grad_norm": 2.7766103744506836,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.651,
      "step": 2050
    },
    {
      "epoch": 3.074670571010249,
      "grad_norm": 4.394078731536865,
      "learning_rate": 7.707174231332359e-06,
      "loss": 0.6292,
      "step": 2100
    },
    {
      "epoch": 3.1478770131771596,
      "grad_norm": 1.7268424034118652,
      "learning_rate": 7.414348462664715e-06,
      "loss": 0.6345,
      "step": 2150
    },
    {
      "epoch": 3.22108345534407,
      "grad_norm": 1.5889354944229126,
      "learning_rate": 7.1215226939970725e-06,
      "loss": 0.6218,
      "step": 2200
    },
    {
      "epoch": 3.294289897510981,
      "grad_norm": 6.7957658767700195,
      "learning_rate": 6.82869692532943e-06,
      "loss": 0.6317,
      "step": 2250
    },
    {
      "epoch": 3.3674963396778916,
      "grad_norm": 2.915957450866699,
      "learning_rate": 6.535871156661787e-06,
      "loss": 0.6147,
      "step": 2300
    },
    {
      "epoch": 3.440702781844802,
      "grad_norm": 2.6085119247436523,
      "learning_rate": 6.2430453879941446e-06,
      "loss": 0.6226,
      "step": 2350
    },
    {
      "epoch": 3.513909224011713,
      "grad_norm": 2.2643730640411377,
      "learning_rate": 5.950219619326502e-06,
      "loss": 0.616,
      "step": 2400
    },
    {
      "epoch": 3.5871156661786237,
      "grad_norm": 2.4427967071533203,
      "learning_rate": 5.657393850658858e-06,
      "loss": 0.6235,
      "step": 2450
    },
    {
      "epoch": 3.660322108345534,
      "grad_norm": 3.067891836166382,
      "learning_rate": 5.364568081991216e-06,
      "loss": 0.6098,
      "step": 2500
    },
    {
      "epoch": 3.733528550512445,
      "grad_norm": 5.014751434326172,
      "learning_rate": 5.071742313323573e-06,
      "loss": 0.6162,
      "step": 2550
    },
    {
      "epoch": 3.8067349926793557,
      "grad_norm": 6.996451377868652,
      "learning_rate": 4.77891654465593e-06,
      "loss": 0.6057,
      "step": 2600
    },
    {
      "epoch": 3.8799414348462666,
      "grad_norm": 8.236844062805176,
      "learning_rate": 4.486090775988287e-06,
      "loss": 0.6065,
      "step": 2650
    },
    {
      "epoch": 3.953147877013177,
      "grad_norm": 2.6412973403930664,
      "learning_rate": 4.193265007320644e-06,
      "loss": 0.6071,
      "step": 2700
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5867496339677891,
      "eval_loss": 0.647341251373291,
      "eval_runtime": 3.5811,
      "eval_samples_per_second": 762.894,
      "eval_steps_per_second": 12.007,
      "step": 2732
    },
    {
      "epoch": 4.026354319180088,
      "grad_norm": 7.569336414337158,
      "learning_rate": 3.900439238653002e-06,
      "loss": 0.6059,
      "step": 2750
    },
    {
      "epoch": 4.099560761346998,
      "grad_norm": 4.824645519256592,
      "learning_rate": 3.607613469985359e-06,
      "loss": 0.6003,
      "step": 2800
    },
    {
      "epoch": 4.172767203513909,
      "grad_norm": 5.947188854217529,
      "learning_rate": 3.314787701317716e-06,
      "loss": 0.6082,
      "step": 2850
    },
    {
      "epoch": 4.24597364568082,
      "grad_norm": 3.865933895111084,
      "learning_rate": 3.0219619326500732e-06,
      "loss": 0.5993,
      "step": 2900
    },
    {
      "epoch": 4.31918008784773,
      "grad_norm": 9.159831047058105,
      "learning_rate": 2.7291361639824306e-06,
      "loss": 0.5923,
      "step": 2950
    },
    {
      "epoch": 4.392386530014641,
      "grad_norm": 6.4656782150268555,
      "learning_rate": 2.436310395314788e-06,
      "loss": 0.5715,
      "step": 3000
    },
    {
      "epoch": 4.465592972181552,
      "grad_norm": 3.221506118774414,
      "learning_rate": 2.1434846266471453e-06,
      "loss": 0.5921,
      "step": 3050
    },
    {
      "epoch": 4.538799414348462,
      "grad_norm": 3.973538398742676,
      "learning_rate": 1.8506588579795024e-06,
      "loss": 0.5866,
      "step": 3100
    },
    {
      "epoch": 4.612005856515373,
      "grad_norm": 3.1635329723358154,
      "learning_rate": 1.5578330893118595e-06,
      "loss": 0.5837,
      "step": 3150
    },
    {
      "epoch": 4.685212298682284,
      "grad_norm": 3.28462815284729,
      "learning_rate": 1.2650073206442169e-06,
      "loss": 0.5954,
      "step": 3200
    },
    {
      "epoch": 4.758418740849194,
      "grad_norm": 6.968575954437256,
      "learning_rate": 9.72181551976574e-07,
      "loss": 0.5951,
      "step": 3250
    },
    {
      "epoch": 4.831625183016105,
      "grad_norm": 3.1862399578094482,
      "learning_rate": 6.793557833089313e-07,
      "loss": 0.5928,
      "step": 3300
    },
    {
      "epoch": 4.904831625183016,
      "grad_norm": 9.21243953704834,
      "learning_rate": 3.865300146412885e-07,
      "loss": 0.5966,
      "step": 3350
    },
    {
      "epoch": 4.978038067349927,
      "grad_norm": 8.415995597839355,
      "learning_rate": 9.370424597364569e-08,
      "loss": 0.5912,
      "step": 3400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6295754026354319,
      "eval_loss": 0.6141241192817688,
      "eval_runtime": 3.5919,
      "eval_samples_per_second": 760.595,
      "eval_steps_per_second": 11.971,
      "step": 3415
    }
  ],
  "logging_steps": 50,
  "max_steps": 3415,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.43750725095936e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}