{
  "best_global_step": 2049,
  "best_metric": 0.6017569546120058,
  "best_model_checkpoint": "./saved_models/cotext_2cc/checkpoint-2049",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3415,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07320644216691069,
      "grad_norm": 2.1882169246673584,
      "learning_rate": 1.971303074670571e-05,
      "loss": 0.7735,
      "step": 50
    },
    {
      "epoch": 0.14641288433382138,
      "grad_norm": 1.3151637315750122,
      "learning_rate": 1.942020497803807e-05,
      "loss": 0.6965,
      "step": 100
    },
    {
      "epoch": 0.21961932650073207,
      "grad_norm": 1.8874374628067017,
      "learning_rate": 1.9127379209370426e-05,
      "loss": 0.6919,
      "step": 150
    },
    {
      "epoch": 0.29282576866764276,
      "grad_norm": 3.595515012741089,
      "learning_rate": 1.8834553440702785e-05,
      "loss": 0.6911,
      "step": 200
    },
    {
      "epoch": 0.36603221083455345,
      "grad_norm": 2.7481727600097656,
      "learning_rate": 1.854172767203514e-05,
      "loss": 0.6904,
      "step": 250
    },
    {
      "epoch": 0.43923865300146414,
      "grad_norm": 2.4479894638061523,
      "learning_rate": 1.8248901903367496e-05,
      "loss": 0.6773,
      "step": 300
    },
    {
      "epoch": 0.5124450951683748,
      "grad_norm": 2.0022459030151367,
      "learning_rate": 1.7956076134699855e-05,
      "loss": 0.6846,
      "step": 350
    },
    {
      "epoch": 0.5856515373352855,
      "grad_norm": 0.9217310547828674,
      "learning_rate": 1.766325036603221e-05,
      "loss": 0.6873,
      "step": 400
    },
    {
      "epoch": 0.6588579795021962,
      "grad_norm": 1.0517982244491577,
      "learning_rate": 1.737042459736457e-05,
      "loss": 0.6854,
      "step": 450
    },
    {
      "epoch": 0.7320644216691069,
      "grad_norm": 2.381688117980957,
      "learning_rate": 1.7077598828696925e-05,
      "loss": 0.6829,
      "step": 500
    },
    {
      "epoch": 0.8052708638360175,
      "grad_norm": 1.3180112838745117,
      "learning_rate": 1.6784773060029284e-05,
      "loss": 0.6902,
      "step": 550
    },
    {
      "epoch": 0.8784773060029283,
      "grad_norm": 1.1734063625335693,
      "learning_rate": 1.649194729136164e-05,
      "loss": 0.6849,
      "step": 600
    },
    {
      "epoch": 0.9516837481698389,
      "grad_norm": 1.2252230644226074,
      "learning_rate": 1.6199121522694e-05,
      "loss": 0.685,
      "step": 650
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5666178623718887,
      "eval_loss": 0.6766132116317749,
      "eval_runtime": 22.7682,
      "eval_samples_per_second": 119.992,
      "eval_steps_per_second": 1.889,
      "step": 683
    },
    {
      "epoch": 1.0248901903367496,
      "grad_norm": 1.903355360031128,
      "learning_rate": 1.5906295754026355e-05,
      "loss": 0.6792,
      "step": 700
    },
    {
      "epoch": 1.0980966325036603,
      "grad_norm": 1.0279943943023682,
      "learning_rate": 1.5613469985358714e-05,
      "loss": 0.6825,
      "step": 750
    },
    {
      "epoch": 1.171303074670571,
      "grad_norm": 4.962799072265625,
      "learning_rate": 1.532064421669107e-05,
      "loss": 0.6774,
      "step": 800
    },
    {
      "epoch": 1.2445095168374818,
      "grad_norm": 1.9989827871322632,
      "learning_rate": 1.5027818448023428e-05,
      "loss": 0.6819,
      "step": 850
    },
    {
      "epoch": 1.3177159590043923,
      "grad_norm": 1.2916970252990723,
      "learning_rate": 1.4734992679355784e-05,
      "loss": 0.6742,
      "step": 900
    },
    {
      "epoch": 1.390922401171303,
      "grad_norm": 2.063950777053833,
      "learning_rate": 1.4442166910688143e-05,
      "loss": 0.6742,
      "step": 950
    },
    {
      "epoch": 1.4641288433382138,
      "grad_norm": 1.099249005317688,
      "learning_rate": 1.4149341142020499e-05,
      "loss": 0.6783,
      "step": 1000
    },
    {
      "epoch": 1.5373352855051245,
      "grad_norm": 1.780521273612976,
      "learning_rate": 1.3856515373352856e-05,
      "loss": 0.6798,
      "step": 1050
    },
    {
      "epoch": 1.610541727672035,
      "grad_norm": 1.3341678380966187,
      "learning_rate": 1.3563689604685213e-05,
      "loss": 0.6685,
      "step": 1100
    },
    {
      "epoch": 1.6837481698389458,
      "grad_norm": 1.1097255945205688,
      "learning_rate": 1.327086383601757e-05,
      "loss": 0.6735,
      "step": 1150
    },
    {
      "epoch": 1.7569546120058566,
      "grad_norm": 1.7918726205825806,
      "learning_rate": 1.2978038067349928e-05,
      "loss": 0.6661,
      "step": 1200
    },
    {
      "epoch": 1.830161054172767,
      "grad_norm": 1.6219556331634521,
      "learning_rate": 1.2685212298682286e-05,
      "loss": 0.6677,
      "step": 1250
    },
    {
      "epoch": 1.903367496339678,
      "grad_norm": 2.006284236907959,
      "learning_rate": 1.2392386530014641e-05,
      "loss": 0.6689,
      "step": 1300
    },
    {
      "epoch": 1.9765739385065886,
      "grad_norm": 1.2488888502120972,
      "learning_rate": 1.2099560761347e-05,
      "loss": 0.6624,
      "step": 1350
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5647877013177159,
      "eval_loss": 0.6730920076370239,
      "eval_runtime": 22.7575,
      "eval_samples_per_second": 120.048,
      "eval_steps_per_second": 1.889,
      "step": 1366
    },
    {
      "epoch": 2.049780380673499,
      "grad_norm": 1.610883355140686,
      "learning_rate": 1.1806734992679356e-05,
      "loss": 0.6636,
      "step": 1400
    },
    {
      "epoch": 2.12298682284041,
      "grad_norm": 1.841633915901184,
      "learning_rate": 1.1513909224011715e-05,
      "loss": 0.6557,
      "step": 1450
    },
    {
      "epoch": 2.1961932650073206,
      "grad_norm": 2.1673364639282227,
      "learning_rate": 1.122108345534407e-05,
      "loss": 0.6569,
      "step": 1500
    },
    {
      "epoch": 2.269399707174231,
      "grad_norm": 1.2526633739471436,
      "learning_rate": 1.092825768667643e-05,
      "loss": 0.6608,
      "step": 1550
    },
    {
      "epoch": 2.342606149341142,
      "grad_norm": 1.2852518558502197,
      "learning_rate": 1.0635431918008785e-05,
      "loss": 0.6575,
      "step": 1600
    },
    {
      "epoch": 2.4158125915080526,
      "grad_norm": 2.687999725341797,
      "learning_rate": 1.0342606149341143e-05,
      "loss": 0.6595,
      "step": 1650
    },
    {
      "epoch": 2.4890190336749636,
      "grad_norm": 2.0115835666656494,
      "learning_rate": 1.00497803806735e-05,
      "loss": 0.6656,
      "step": 1700
    },
    {
      "epoch": 2.562225475841874,
      "grad_norm": 1.2562048435211182,
      "learning_rate": 9.756954612005857e-06,
      "loss": 0.6602,
      "step": 1750
    },
    {
      "epoch": 2.6354319180087846,
      "grad_norm": 3.354336738586426,
      "learning_rate": 9.464128843338215e-06,
      "loss": 0.6578,
      "step": 1800
    },
    {
      "epoch": 2.7086383601756956,
      "grad_norm": 5.723195552825928,
      "learning_rate": 9.171303074670572e-06,
      "loss": 0.6507,
      "step": 1850
    },
    {
      "epoch": 2.781844802342606,
      "grad_norm": 1.9592162370681763,
      "learning_rate": 8.87847730600293e-06,
      "loss": 0.6547,
      "step": 1900
    },
    {
      "epoch": 2.855051244509517,
      "grad_norm": 3.2084081172943115,
      "learning_rate": 8.585651537335287e-06,
      "loss": 0.6519,
      "step": 1950
    },
    {
      "epoch": 2.9282576866764276,
      "grad_norm": 2.990320920944214,
      "learning_rate": 8.292825768667644e-06,
      "loss": 0.651,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6017569546120058,
      "eval_loss": 0.6528917551040649,
      "eval_runtime": 22.7498,
      "eval_samples_per_second": 120.089,
      "eval_steps_per_second": 1.89,
      "step": 2049
    },
    {
      "epoch": 3.001464128843338,
      "grad_norm": 1.819231390953064,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.6551,
      "step": 2050
    },
    {
      "epoch": 3.074670571010249,
      "grad_norm": 1.5196412801742554,
      "learning_rate": 7.707174231332359e-06,
      "loss": 0.6417,
      "step": 2100
    },
    {
      "epoch": 3.1478770131771596,
      "grad_norm": 1.6357721090316772,
      "learning_rate": 7.414348462664715e-06,
      "loss": 0.6496,
      "step": 2150
    },
    {
      "epoch": 3.22108345534407,
      "grad_norm": 1.4427732229232788,
      "learning_rate": 7.1215226939970725e-06,
      "loss": 0.645,
      "step": 2200
    },
    {
      "epoch": 3.294289897510981,
      "grad_norm": 9.609319686889648,
      "learning_rate": 6.82869692532943e-06,
      "loss": 0.6469,
      "step": 2250
    },
    {
      "epoch": 3.3674963396778916,
      "grad_norm": 2.0619852542877197,
      "learning_rate": 6.535871156661787e-06,
      "loss": 0.6533,
      "step": 2300
    },
    {
      "epoch": 3.440702781844802,
      "grad_norm": 2.3825881481170654,
      "learning_rate": 6.2430453879941446e-06,
      "loss": 0.6411,
      "step": 2350
    },
    {
      "epoch": 3.513909224011713,
      "grad_norm": 2.9434738159179688,
      "learning_rate": 5.950219619326502e-06,
      "loss": 0.6449,
      "step": 2400
    },
    {
      "epoch": 3.5871156661786237,
      "grad_norm": 2.3528411388397217,
      "learning_rate": 5.657393850658858e-06,
      "loss": 0.6527,
      "step": 2450
    },
    {
      "epoch": 3.660322108345534,
      "grad_norm": 3.008011817932129,
      "learning_rate": 5.364568081991216e-06,
      "loss": 0.6566,
      "step": 2500
    },
    {
      "epoch": 3.733528550512445,
      "grad_norm": 2.1544880867004395,
      "learning_rate": 5.071742313323573e-06,
      "loss": 0.6464,
      "step": 2550
    },
    {
      "epoch": 3.8067349926793557,
      "grad_norm": 2.4027340412139893,
      "learning_rate": 4.77891654465593e-06,
      "loss": 0.6311,
      "step": 2600
    },
    {
      "epoch": 3.8799414348462666,
      "grad_norm": 3.0393288135528564,
      "learning_rate": 4.486090775988287e-06,
      "loss": 0.6388,
      "step": 2650
    },
    {
      "epoch": 3.953147877013177,
      "grad_norm": 2.6928722858428955,
      "learning_rate": 4.193265007320644e-06,
      "loss": 0.6414,
      "step": 2700
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5937042459736457,
      "eval_loss": 0.6503124237060547,
      "eval_runtime": 22.7539,
      "eval_samples_per_second": 120.067,
      "eval_steps_per_second": 1.89,
      "step": 2732
    },
    {
      "epoch": 4.026354319180088,
      "grad_norm": 3.9657881259918213,
      "learning_rate": 3.900439238653002e-06,
      "loss": 0.6399,
      "step": 2750
    },
    {
      "epoch": 4.099560761346998,
      "grad_norm": 1.924501895904541,
      "learning_rate": 3.607613469985359e-06,
      "loss": 0.6337,
      "step": 2800
    },
    {
      "epoch": 4.172767203513909,
      "grad_norm": 3.779348134994507,
      "learning_rate": 3.314787701317716e-06,
      "loss": 0.6471,
      "step": 2850
    },
    {
      "epoch": 4.24597364568082,
      "grad_norm": 3.295867443084717,
      "learning_rate": 3.0219619326500732e-06,
      "loss": 0.6527,
      "step": 2900
    },
    {
      "epoch": 4.31918008784773,
      "grad_norm": 2.370459794998169,
      "learning_rate": 2.7291361639824306e-06,
      "loss": 0.6427,
      "step": 2950
    },
    {
      "epoch": 4.392386530014641,
      "grad_norm": 2.1006791591644287,
      "learning_rate": 2.436310395314788e-06,
      "loss": 0.6207,
      "step": 3000
    },
    {
      "epoch": 4.465592972181552,
      "grad_norm": 17.736797332763672,
      "learning_rate": 2.1434846266471453e-06,
      "loss": 0.6413,
      "step": 3050
    },
    {
      "epoch": 4.538799414348462,
      "grad_norm": 2.149487018585205,
      "learning_rate": 1.8506588579795024e-06,
      "loss": 0.6354,
      "step": 3100
    },
    {
      "epoch": 4.612005856515373,
      "grad_norm": 2.3940281867980957,
      "learning_rate": 1.5578330893118595e-06,
      "loss": 0.6342,
      "step": 3150
    },
    {
      "epoch": 4.685212298682284,
      "grad_norm": 2.103952646255493,
      "learning_rate": 1.2650073206442169e-06,
      "loss": 0.6341,
      "step": 3200
    },
    {
      "epoch": 4.758418740849194,
      "grad_norm": 3.5245349407196045,
      "learning_rate": 9.72181551976574e-07,
      "loss": 0.6348,
      "step": 3250
    },
    {
      "epoch": 4.831625183016105,
      "grad_norm": 2.1685941219329834,
      "learning_rate": 6.793557833089313e-07,
      "loss": 0.6469,
      "step": 3300
    },
    {
      "epoch": 4.904831625183016,
      "grad_norm": 2.7275354862213135,
      "learning_rate": 3.865300146412885e-07,
      "loss": 0.6309,
      "step": 3350
    },
    {
      "epoch": 4.978038067349927,
      "grad_norm": 2.66701602935791,
      "learning_rate": 9.370424597364569e-08,
      "loss": 0.6405,
      "step": 3400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5995607613469985,
      "eval_loss": 0.6519700288772583,
      "eval_runtime": 22.7428,
      "eval_samples_per_second": 120.126,
      "eval_steps_per_second": 1.891,
      "step": 3415
    }
  ],
  "logging_steps": 50,
  "max_steps": 3415,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.33697966920192e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}