| { |
| "best_global_step": 13992, |
| "best_metric": 3.7217817306518555, |
| "best_model_checkpoint": "sindhibert_session2/checkpoint-13992", |
| "epoch": 2.748397868735728, |
| "eval_steps": 1272, |
| "global_step": 13992, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.019642988680727773, |
| "grad_norm": 16.122173309326172, |
| "learning_rate": 9.900000000000002e-06, |
| "loss": 36.5051904296875, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.039285977361455546, |
| "grad_norm": 18.11626434326172, |
| "learning_rate": 1.9900000000000003e-05, |
| "loss": 36.368505859375, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.05892896604218332, |
| "grad_norm": 18.927509307861328, |
| "learning_rate": 2.9900000000000002e-05, |
| "loss": 36.45832275390625, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.07857195472291109, |
| "grad_norm": 18.480167388916016, |
| "learning_rate": 3.99e-05, |
| "loss": 36.31460693359375, |
| "step": 400 |
| }, |
| { |
| "epoch": 0.09821494340363886, |
| "grad_norm": 19.010652542114258, |
| "learning_rate": 4.99e-05, |
| "loss": 36.24136962890625, |
| "step": 500 |
| }, |
| { |
| "epoch": 0.11785793208436664, |
| "grad_norm": 19.11362075805664, |
| "learning_rate": 4.966492926284438e-05, |
| "loss": 36.54402587890625, |
| "step": 600 |
| }, |
| { |
| "epoch": 0.1375009207650944, |
| "grad_norm": 19.41926383972168, |
| "learning_rate": 4.93264739727882e-05, |
| "loss": 36.28423095703125, |
| "step": 700 |
| }, |
| { |
| "epoch": 0.15714390944582218, |
| "grad_norm": 20.20358657836914, |
| "learning_rate": 4.898801868273201e-05, |
| "loss": 36.2180615234375, |
| "step": 800 |
| }, |
| { |
| "epoch": 0.17678689812654996, |
| "grad_norm": 18.598957061767578, |
| "learning_rate": 4.864956339267583e-05, |
| "loss": 36.1883984375, |
| "step": 900 |
| }, |
| { |
| "epoch": 0.19642988680727771, |
| "grad_norm": 18.320087432861328, |
| "learning_rate": 4.831110810261965e-05, |
| "loss": 36.0933837890625, |
| "step": 1000 |
| }, |
| { |
| "epoch": 0.2160728754880055, |
| "grad_norm": 21.353364944458008, |
| "learning_rate": 4.797265281256346e-05, |
| "loss": 35.95175048828125, |
| "step": 1100 |
| }, |
| { |
| "epoch": 0.23571586416873327, |
| "grad_norm": 19.709365844726562, |
| "learning_rate": 4.763419752250728e-05, |
| "loss": 35.99169921875, |
| "step": 1200 |
| }, |
| { |
| "epoch": 0.24985881601885726, |
| "eval_loss": 4.386976718902588, |
| "eval_runtime": 41.9212, |
| "eval_samples_per_second": 477.086, |
| "eval_steps_per_second": 14.909, |
| "step": 1272 |
| }, |
| { |
| "epoch": 0.255358852849461, |
| "grad_norm": 17.78667449951172, |
| "learning_rate": 4.729574223245109e-05, |
| "loss": 35.86061279296875, |
| "step": 1300 |
| }, |
| { |
| "epoch": 0.2750018415301888, |
| "grad_norm": 21.036474227905273, |
| "learning_rate": 4.695728694239491e-05, |
| "loss": 35.78255859375, |
| "step": 1400 |
| }, |
| { |
| "epoch": 0.2946448302109166, |
| "grad_norm": 19.002052307128906, |
| "learning_rate": 4.661883165233873e-05, |
| "loss": 35.5993408203125, |
| "step": 1500 |
| }, |
| { |
| "epoch": 0.31428781889164437, |
| "grad_norm": 19.828800201416016, |
| "learning_rate": 4.6280376362282543e-05, |
| "loss": 35.70918701171875, |
| "step": 1600 |
| }, |
| { |
| "epoch": 0.33393080757237215, |
| "grad_norm": 17.12042236328125, |
| "learning_rate": 4.594192107222636e-05, |
| "loss": 35.58583251953125, |
| "step": 1700 |
| }, |
| { |
| "epoch": 0.3535737962530999, |
| "grad_norm": 19.869712829589844, |
| "learning_rate": 4.560346578217018e-05, |
| "loss": 35.3600537109375, |
| "step": 1800 |
| }, |
| { |
| "epoch": 0.3732167849338277, |
| "grad_norm": 18.914470672607422, |
| "learning_rate": 4.5265010492113994e-05, |
| "loss": 35.34132080078125, |
| "step": 1900 |
| }, |
| { |
| "epoch": 0.39285977361455543, |
| "grad_norm": 18.888940811157227, |
| "learning_rate": 4.4926555202057814e-05, |
| "loss": 35.33590576171875, |
| "step": 2000 |
| }, |
| { |
| "epoch": 0.4125027622952832, |
| "grad_norm": 20.27227783203125, |
| "learning_rate": 4.4588099912001626e-05, |
| "loss": 35.22805908203125, |
| "step": 2100 |
| }, |
| { |
| "epoch": 0.432145750976011, |
| "grad_norm": 15.550501823425293, |
| "learning_rate": 4.424964462194544e-05, |
| "loss": 35.256953125, |
| "step": 2200 |
| }, |
| { |
| "epoch": 0.45178873965673877, |
| "grad_norm": 17.671451568603516, |
| "learning_rate": 4.391118933188926e-05, |
| "loss": 35.056826171875, |
| "step": 2300 |
| }, |
| { |
| "epoch": 0.47143172833746655, |
| "grad_norm": 18.74838638305664, |
| "learning_rate": 4.357273404183308e-05, |
| "loss": 34.9271826171875, |
| "step": 2400 |
| }, |
| { |
| "epoch": 0.49107471701819433, |
| "grad_norm": 20.912931442260742, |
| "learning_rate": 4.323427875177689e-05, |
| "loss": 34.793916015625, |
| "step": 2500 |
| }, |
| { |
| "epoch": 0.4997176320377145, |
| "eval_loss": 4.270185947418213, |
| "eval_runtime": 41.8695, |
| "eval_samples_per_second": 477.675, |
| "eval_steps_per_second": 14.927, |
| "step": 2544 |
| }, |
| { |
| "epoch": 0.510717705698922, |
| "grad_norm": 18.11321258544922, |
| "learning_rate": 4.289582346172071e-05, |
| "loss": 34.97314453125, |
| "step": 2600 |
| }, |
| { |
| "epoch": 0.5303606943796498, |
| "grad_norm": 19.38896942138672, |
| "learning_rate": 4.255736817166453e-05, |
| "loss": 34.873974609375, |
| "step": 2700 |
| }, |
| { |
| "epoch": 0.5500036830603776, |
| "grad_norm": 19.479278564453125, |
| "learning_rate": 4.221891288160834e-05, |
| "loss": 34.713251953125, |
| "step": 2800 |
| }, |
| { |
| "epoch": 0.5696466717411054, |
| "grad_norm": 19.849210739135742, |
| "learning_rate": 4.188045759155216e-05, |
| "loss": 34.66454833984375, |
| "step": 2900 |
| }, |
| { |
| "epoch": 0.5892896604218332, |
| "grad_norm": 19.59630012512207, |
| "learning_rate": 4.154200230149597e-05, |
| "loss": 34.546259765625, |
| "step": 3000 |
| }, |
| { |
| "epoch": 0.608932649102561, |
| "grad_norm": 18.32138442993164, |
| "learning_rate": 4.120354701143979e-05, |
| "loss": 34.460576171875, |
| "step": 3100 |
| }, |
| { |
| "epoch": 0.6285756377832887, |
| "grad_norm": 17.825464248657227, |
| "learning_rate": 4.086509172138361e-05, |
| "loss": 34.3186962890625, |
| "step": 3200 |
| }, |
| { |
| "epoch": 0.6482186264640165, |
| "grad_norm": 19.180105209350586, |
| "learning_rate": 4.052663643132742e-05, |
| "loss": 34.40132080078125, |
| "step": 3300 |
| }, |
| { |
| "epoch": 0.6678616151447443, |
| "grad_norm": 18.498641967773438, |
| "learning_rate": 4.018818114127124e-05, |
| "loss": 34.2248828125, |
| "step": 3400 |
| }, |
| { |
| "epoch": 0.6875046038254721, |
| "grad_norm": 19.08323097229004, |
| "learning_rate": 3.984972585121506e-05, |
| "loss": 33.97822265625, |
| "step": 3500 |
| }, |
| { |
| "epoch": 0.7071475925061999, |
| "grad_norm": 20.13410758972168, |
| "learning_rate": 3.9511270561158874e-05, |
| "loss": 34.1056494140625, |
| "step": 3600 |
| }, |
| { |
| "epoch": 0.7267905811869276, |
| "grad_norm": 18.82459259033203, |
| "learning_rate": 3.917281527110269e-05, |
| "loss": 34.2245947265625, |
| "step": 3700 |
| }, |
| { |
| "epoch": 0.7464335698676554, |
| "grad_norm": 17.352100372314453, |
| "learning_rate": 3.8834359981046505e-05, |
| "loss": 34.04242919921875, |
| "step": 3800 |
| }, |
| { |
| "epoch": 0.7495764480565718, |
| "eval_loss": 4.178581237792969, |
| "eval_runtime": 42.0642, |
| "eval_samples_per_second": 475.463, |
| "eval_steps_per_second": 14.858, |
| "step": 3816 |
| }, |
| { |
| "epoch": 0.7660765585483832, |
| "grad_norm": 19.103435516357422, |
| "learning_rate": 3.849590469099032e-05, |
| "loss": 34.01751708984375, |
| "step": 3900 |
| }, |
| { |
| "epoch": 0.7857195472291109, |
| "grad_norm": 18.045913696289062, |
| "learning_rate": 3.815744940093414e-05, |
| "loss": 33.90669921875, |
| "step": 4000 |
| }, |
| { |
| "epoch": 0.8053625359098386, |
| "grad_norm": 18.76168441772461, |
| "learning_rate": 3.7818994110877956e-05, |
| "loss": 33.872587890625, |
| "step": 4100 |
| }, |
| { |
| "epoch": 0.8250055245905664, |
| "grad_norm": 16.547574996948242, |
| "learning_rate": 3.748053882082177e-05, |
| "loss": 33.8643359375, |
| "step": 4200 |
| }, |
| { |
| "epoch": 0.8446485132712942, |
| "grad_norm": 18.636455535888672, |
| "learning_rate": 3.714208353076559e-05, |
| "loss": 33.7685595703125, |
| "step": 4300 |
| }, |
| { |
| "epoch": 0.864291501952022, |
| "grad_norm": 18.742900848388672, |
| "learning_rate": 3.68036282407094e-05, |
| "loss": 33.53510009765625, |
| "step": 4400 |
| }, |
| { |
| "epoch": 0.8839344906327498, |
| "grad_norm": 20.976703643798828, |
| "learning_rate": 3.646517295065322e-05, |
| "loss": 33.42730224609375, |
| "step": 4500 |
| }, |
| { |
| "epoch": 0.9035774793134775, |
| "grad_norm": 18.552316665649414, |
| "learning_rate": 3.612671766059704e-05, |
| "loss": 33.53127197265625, |
| "step": 4600 |
| }, |
| { |
| "epoch": 0.9232204679942053, |
| "grad_norm": 21.41478157043457, |
| "learning_rate": 3.578826237054085e-05, |
| "loss": 33.41383544921875, |
| "step": 4700 |
| }, |
| { |
| "epoch": 0.9428634566749331, |
| "grad_norm": 19.785966873168945, |
| "learning_rate": 3.544980708048467e-05, |
| "loss": 33.38103271484375, |
| "step": 4800 |
| }, |
| { |
| "epoch": 0.9625064453556609, |
| "grad_norm": 17.69455337524414, |
| "learning_rate": 3.511135179042849e-05, |
| "loss": 33.4733447265625, |
| "step": 4900 |
| }, |
| { |
| "epoch": 0.9821494340363887, |
| "grad_norm": 20.246673583984375, |
| "learning_rate": 3.47728965003723e-05, |
| "loss": 33.4089208984375, |
| "step": 5000 |
| }, |
| { |
| "epoch": 0.999435264075429, |
| "eval_loss": 4.0612263679504395, |
| "eval_runtime": 42.4302, |
| "eval_samples_per_second": 471.362, |
| "eval_steps_per_second": 14.73, |
| "step": 5088 |
| }, |
| { |
| "epoch": 1.0017678689812655, |
| "grad_norm": 19.691932678222656, |
| "learning_rate": 3.443444121031612e-05, |
| "loss": 33.1838037109375, |
| "step": 5100 |
| }, |
| { |
| "epoch": 1.0214108576619934, |
| "grad_norm": 18.13388442993164, |
| "learning_rate": 3.409598592025994e-05, |
| "loss": 33.21130859375, |
| "step": 5200 |
| }, |
| { |
| "epoch": 1.041053846342721, |
| "grad_norm": 18.41756248474121, |
| "learning_rate": 3.375753063020375e-05, |
| "loss": 33.07806396484375, |
| "step": 5300 |
| }, |
| { |
| "epoch": 1.060696835023449, |
| "grad_norm": 18.85491943359375, |
| "learning_rate": 3.3419075340147566e-05, |
| "loss": 32.95417724609375, |
| "step": 5400 |
| }, |
| { |
| "epoch": 1.0803398237041766, |
| "grad_norm": 20.03909683227539, |
| "learning_rate": 3.3080620050091385e-05, |
| "loss": 32.93142822265625, |
| "step": 5500 |
| }, |
| { |
| "epoch": 1.0999828123849045, |
| "grad_norm": 19.49604606628418, |
| "learning_rate": 3.27421647600352e-05, |
| "loss": 32.9296630859375, |
| "step": 5600 |
| }, |
| { |
| "epoch": 1.1196258010656321, |
| "grad_norm": 21.259004592895508, |
| "learning_rate": 3.2403709469979017e-05, |
| "loss": 32.85189453125, |
| "step": 5700 |
| }, |
| { |
| "epoch": 1.1392687897463598, |
| "grad_norm": 19.597267150878906, |
| "learning_rate": 3.206525417992283e-05, |
| "loss": 32.82923583984375, |
| "step": 5800 |
| }, |
| { |
| "epoch": 1.1589117784270877, |
| "grad_norm": 20.224699020385742, |
| "learning_rate": 3.172679888986665e-05, |
| "loss": 32.7767626953125, |
| "step": 5900 |
| }, |
| { |
| "epoch": 1.1785547671078154, |
| "grad_norm": 18.452495574951172, |
| "learning_rate": 3.138834359981047e-05, |
| "loss": 32.6592626953125, |
| "step": 6000 |
| }, |
| { |
| "epoch": 1.1981977557885433, |
| "grad_norm": 19.971717834472656, |
| "learning_rate": 3.104988830975428e-05, |
| "loss": 32.80323974609375, |
| "step": 6100 |
| }, |
| { |
| "epoch": 1.217840744469271, |
| "grad_norm": 17.584882736206055, |
| "learning_rate": 3.07114330196981e-05, |
| "loss": 32.6551416015625, |
| "step": 6200 |
| }, |
| { |
| "epoch": 1.2374837331499988, |
| "grad_norm": 19.49502944946289, |
| "learning_rate": 3.0372977729641915e-05, |
| "loss": 32.73489990234375, |
| "step": 6300 |
| }, |
| { |
| "epoch": 1.2492695263584355, |
| "eval_loss": 3.9626989364624023, |
| "eval_runtime": 41.8921, |
| "eval_samples_per_second": 477.418, |
| "eval_steps_per_second": 14.919, |
| "step": 6360 |
| }, |
| { |
| "epoch": 1.2571267218307265, |
| "grad_norm": 20.214130401611328, |
| "learning_rate": 3.0034522439585734e-05, |
| "loss": 32.55817138671875, |
| "step": 6400 |
| }, |
| { |
| "epoch": 1.2767697105114544, |
| "grad_norm": 20.668527603149414, |
| "learning_rate": 2.969606714952955e-05, |
| "loss": 32.53630615234375, |
| "step": 6500 |
| }, |
| { |
| "epoch": 1.296412699192182, |
| "grad_norm": 18.479408264160156, |
| "learning_rate": 2.9357611859473366e-05, |
| "loss": 32.464462890625, |
| "step": 6600 |
| }, |
| { |
| "epoch": 1.31605568787291, |
| "grad_norm": 19.027793884277344, |
| "learning_rate": 2.9019156569417182e-05, |
| "loss": 32.48625244140625, |
| "step": 6700 |
| }, |
| { |
| "epoch": 1.3356986765536376, |
| "grad_norm": 19.871105194091797, |
| "learning_rate": 2.8680701279361e-05, |
| "loss": 32.41595947265625, |
| "step": 6800 |
| }, |
| { |
| "epoch": 1.3553416652343655, |
| "grad_norm": 19.916994094848633, |
| "learning_rate": 2.8342245989304817e-05, |
| "loss": 32.11419921875, |
| "step": 6900 |
| }, |
| { |
| "epoch": 1.3749846539150932, |
| "grad_norm": 21.212909698486328, |
| "learning_rate": 2.8003790699248633e-05, |
| "loss": 32.30314453125, |
| "step": 7000 |
| }, |
| { |
| "epoch": 1.3946276425958208, |
| "grad_norm": 25.216768264770508, |
| "learning_rate": 2.7665335409192445e-05, |
| "loss": 32.19344482421875, |
| "step": 7100 |
| }, |
| { |
| "epoch": 1.4142706312765487, |
| "grad_norm": 19.619844436645508, |
| "learning_rate": 2.732688011913626e-05, |
| "loss": 32.30953125, |
| "step": 7200 |
| }, |
| { |
| "epoch": 1.4339136199572766, |
| "grad_norm": 21.061376571655273, |
| "learning_rate": 2.6988424829080077e-05, |
| "loss": 32.3416162109375, |
| "step": 7300 |
| }, |
| { |
| "epoch": 1.4535566086380043, |
| "grad_norm": 18.674562454223633, |
| "learning_rate": 2.6649969539023896e-05, |
| "loss": 32.2662744140625, |
| "step": 7400 |
| }, |
| { |
| "epoch": 1.473199597318732, |
| "grad_norm": 18.776655197143555, |
| "learning_rate": 2.6311514248967712e-05, |
| "loss": 32.07302978515625, |
| "step": 7500 |
| }, |
| { |
| "epoch": 1.4928425859994598, |
| "grad_norm": 19.0480899810791, |
| "learning_rate": 2.5973058958911528e-05, |
| "loss": 32.19434326171875, |
| "step": 7600 |
| }, |
| { |
| "epoch": 1.4991283423772928, |
| "eval_loss": 3.9200026988983154, |
| "eval_runtime": 42.1732, |
| "eval_samples_per_second": 474.235, |
| "eval_steps_per_second": 14.82, |
| "step": 7632 |
| }, |
| { |
| "epoch": 1.5124855746801877, |
| "grad_norm": 18.192241668701172, |
| "learning_rate": 2.5634603668855344e-05, |
| "loss": 32.0382470703125, |
| "step": 7700 |
| }, |
| { |
| "epoch": 1.5321285633609154, |
| "grad_norm": 21.64850425720215, |
| "learning_rate": 2.5296148378799163e-05, |
| "loss": 32.0071484375, |
| "step": 7800 |
| }, |
| { |
| "epoch": 1.551771552041643, |
| "grad_norm": 21.07256507873535, |
| "learning_rate": 2.495769308874298e-05, |
| "loss": 32.02736328125, |
| "step": 7900 |
| }, |
| { |
| "epoch": 1.5714145407223707, |
| "grad_norm": 18.811485290527344, |
| "learning_rate": 2.4619237798686794e-05, |
| "loss": 32.18232666015625, |
| "step": 8000 |
| }, |
| { |
| "epoch": 1.5910575294030986, |
| "grad_norm": 20.226411819458008, |
| "learning_rate": 2.428078250863061e-05, |
| "loss": 31.81751220703125, |
| "step": 8100 |
| }, |
| { |
| "epoch": 1.6107005180838265, |
| "grad_norm": 21.44918441772461, |
| "learning_rate": 2.394232721857443e-05, |
| "loss": 31.89043212890625, |
| "step": 8200 |
| }, |
| { |
| "epoch": 1.6303435067645542, |
| "grad_norm": 19.660367965698242, |
| "learning_rate": 2.3603871928518245e-05, |
| "loss": 31.95683349609375, |
| "step": 8300 |
| }, |
| { |
| "epoch": 1.6499864954452819, |
| "grad_norm": 19.144596099853516, |
| "learning_rate": 2.3265416638462058e-05, |
| "loss": 31.867197265625, |
| "step": 8400 |
| }, |
| { |
| "epoch": 1.6696294841260098, |
| "grad_norm": 18.604026794433594, |
| "learning_rate": 2.2926961348405877e-05, |
| "loss": 31.78265380859375, |
| "step": 8500 |
| }, |
| { |
| "epoch": 1.6892724728067376, |
| "grad_norm": 19.978652954101562, |
| "learning_rate": 2.2588506058349693e-05, |
| "loss": 31.79925048828125, |
| "step": 8600 |
| }, |
| { |
| "epoch": 1.7089154614874653, |
| "grad_norm": 18.18141746520996, |
| "learning_rate": 2.225005076829351e-05, |
| "loss": 31.853974609375, |
| "step": 8700 |
| }, |
| { |
| "epoch": 1.728558450168193, |
| "grad_norm": 17.99820899963379, |
| "learning_rate": 2.1911595478237325e-05, |
| "loss": 31.75265625, |
| "step": 8800 |
| }, |
| { |
| "epoch": 1.7482014388489209, |
| "grad_norm": 20.680606842041016, |
| "learning_rate": 2.1573140188181144e-05, |
| "loss": 31.6561328125, |
| "step": 8900 |
| }, |
| { |
| "epoch": 1.74898715839615, |
| "eval_loss": 3.863434076309204, |
| "eval_runtime": 42.0055, |
| "eval_samples_per_second": 476.128, |
| "eval_steps_per_second": 14.879, |
| "step": 8904 |
| }, |
| { |
| "epoch": 1.7678444275296488, |
| "grad_norm": 20.50802993774414, |
| "learning_rate": 2.123468489812496e-05, |
| "loss": 31.6072265625, |
| "step": 9000 |
| }, |
| { |
| "epoch": 1.7874874162103764, |
| "grad_norm": 21.482328414916992, |
| "learning_rate": 2.0896229608068775e-05, |
| "loss": 31.7250537109375, |
| "step": 9100 |
| }, |
| { |
| "epoch": 1.807130404891104, |
| "grad_norm": 19.20509910583496, |
| "learning_rate": 2.055777431801259e-05, |
| "loss": 31.58796875, |
| "step": 9200 |
| }, |
| { |
| "epoch": 1.826773393571832, |
| "grad_norm": 21.03694725036621, |
| "learning_rate": 2.0219319027956407e-05, |
| "loss": 31.68398193359375, |
| "step": 9300 |
| }, |
| { |
| "epoch": 1.8464163822525599, |
| "grad_norm": 18.272459030151367, |
| "learning_rate": 1.9880863737900223e-05, |
| "loss": 31.53086181640625, |
| "step": 9400 |
| }, |
| { |
| "epoch": 1.8660593709332876, |
| "grad_norm": 19.046916961669922, |
| "learning_rate": 1.9542408447844042e-05, |
| "loss": 31.525322265625, |
| "step": 9500 |
| }, |
| { |
| "epoch": 1.8857023596140152, |
| "grad_norm": 21.118305206298828, |
| "learning_rate": 1.9203953157787858e-05, |
| "loss": 31.52841552734375, |
| "step": 9600 |
| }, |
| { |
| "epoch": 1.905345348294743, |
| "grad_norm": 18.861080169677734, |
| "learning_rate": 1.8865497867731674e-05, |
| "loss": 31.4529345703125, |
| "step": 9700 |
| }, |
| { |
| "epoch": 1.9249883369754708, |
| "grad_norm": 20.4729061126709, |
| "learning_rate": 1.852704257767549e-05, |
| "loss": 31.35305419921875, |
| "step": 9800 |
| }, |
| { |
| "epoch": 1.9446313256561987, |
| "grad_norm": 17.702392578125, |
| "learning_rate": 1.818858728761931e-05, |
| "loss": 31.5552734375, |
| "step": 9900 |
| }, |
| { |
| "epoch": 1.9642743143369263, |
| "grad_norm": 21.927942276000977, |
| "learning_rate": 1.785013199756312e-05, |
| "loss": 31.42951416015625, |
| "step": 10000 |
| }, |
| { |
| "epoch": 1.983917303017654, |
| "grad_norm": 19.895252227783203, |
| "learning_rate": 1.7511676707506937e-05, |
| "loss": 31.416962890625, |
| "step": 10100 |
| }, |
| { |
| "epoch": 1.998845974415007, |
| "eval_loss": 3.819389820098877, |
| "eval_runtime": 41.9176, |
| "eval_samples_per_second": 477.126, |
| "eval_steps_per_second": 14.91, |
| "step": 10176 |
| }, |
| { |
| "epoch": 2.003535737962531, |
| "grad_norm": 20.209577560424805, |
| "learning_rate": 1.7173221417450756e-05, |
| "loss": 31.25757568359375, |
| "step": 10200 |
| }, |
| { |
| "epoch": 2.0231787266432586, |
| "grad_norm": 19.49869155883789, |
| "learning_rate": 1.6834766127394572e-05, |
| "loss": 31.14395263671875, |
| "step": 10300 |
| }, |
| { |
| "epoch": 2.0428217153239867, |
| "grad_norm": 20.60426139831543, |
| "learning_rate": 1.6496310837338388e-05, |
| "loss": 31.38017333984375, |
| "step": 10400 |
| }, |
| { |
| "epoch": 2.0624647040047144, |
| "grad_norm": 19.177818298339844, |
| "learning_rate": 1.6157855547282204e-05, |
| "loss": 31.20944091796875, |
| "step": 10500 |
| }, |
| { |
| "epoch": 2.082107692685442, |
| "grad_norm": 20.949337005615234, |
| "learning_rate": 1.5819400257226023e-05, |
| "loss": 31.2146240234375, |
| "step": 10600 |
| }, |
| { |
| "epoch": 2.1017506813661697, |
| "grad_norm": 19.25591468811035, |
| "learning_rate": 1.548094496716984e-05, |
| "loss": 31.202607421875, |
| "step": 10700 |
| }, |
| { |
| "epoch": 2.121393670046898, |
| "grad_norm": 18.960092544555664, |
| "learning_rate": 1.5142489677113653e-05, |
| "loss": 31.14611572265625, |
| "step": 10800 |
| }, |
| { |
| "epoch": 2.1410366587276255, |
| "grad_norm": 18.479068756103516, |
| "learning_rate": 1.4804034387057469e-05, |
| "loss": 31.26153564453125, |
| "step": 10900 |
| }, |
| { |
| "epoch": 2.160679647408353, |
| "grad_norm": 21.587387084960938, |
| "learning_rate": 1.4465579097001287e-05, |
| "loss": 31.1222998046875, |
| "step": 11000 |
| }, |
| { |
| "epoch": 2.180322636089081, |
| "grad_norm": 17.947052001953125, |
| "learning_rate": 1.4127123806945102e-05, |
| "loss": 31.08917236328125, |
| "step": 11100 |
| }, |
| { |
| "epoch": 2.199965624769809, |
| "grad_norm": 19.169307708740234, |
| "learning_rate": 1.378866851688892e-05, |
| "loss": 31.0661474609375, |
| "step": 11200 |
| }, |
| { |
| "epoch": 2.2196086134505366, |
| "grad_norm": 16.882522583007812, |
| "learning_rate": 1.3450213226832736e-05, |
| "loss": 30.9886328125, |
| "step": 11300 |
| }, |
| { |
| "epoch": 2.2392516021312643, |
| "grad_norm": 19.624177932739258, |
| "learning_rate": 1.3111757936776553e-05, |
| "loss": 30.93468505859375, |
| "step": 11400 |
| }, |
| { |
| "epoch": 2.2486802366980134, |
| "eval_loss": 3.786958694458008, |
| "eval_runtime": 41.8524, |
| "eval_samples_per_second": 477.869, |
| "eval_steps_per_second": 14.933, |
| "step": 11448 |
| }, |
| { |
| "epoch": 2.258894590811992, |
| "grad_norm": 20.477542877197266, |
| "learning_rate": 1.2773302646720369e-05, |
| "loss": 31.00721923828125, |
| "step": 11500 |
| }, |
| { |
| "epoch": 2.2785375794927196, |
| "grad_norm": 19.928098678588867, |
| "learning_rate": 1.2434847356664185e-05, |
| "loss": 30.95269775390625, |
| "step": 11600 |
| }, |
| { |
| "epoch": 2.2981805681734477, |
| "grad_norm": 19.002788543701172, |
| "learning_rate": 1.2096392066608003e-05, |
| "loss": 30.908701171875, |
| "step": 11700 |
| }, |
| { |
| "epoch": 2.3178235568541754, |
| "grad_norm": 20.50242805480957, |
| "learning_rate": 1.1757936776551818e-05, |
| "loss": 30.96546142578125, |
| "step": 11800 |
| }, |
| { |
| "epoch": 2.337466545534903, |
| "grad_norm": 20.48063850402832, |
| "learning_rate": 1.1419481486495634e-05, |
| "loss": 30.9563623046875, |
| "step": 11900 |
| }, |
| { |
| "epoch": 2.3571095342156307, |
| "grad_norm": 19.522266387939453, |
| "learning_rate": 1.108102619643945e-05, |
| "loss": 30.85960205078125, |
| "step": 12000 |
| }, |
| { |
| "epoch": 2.376752522896359, |
| "grad_norm": 21.33004379272461, |
| "learning_rate": 1.0742570906383268e-05, |
| "loss": 30.829111328125, |
| "step": 12100 |
| }, |
| { |
| "epoch": 2.3963955115770865, |
| "grad_norm": 20.311534881591797, |
| "learning_rate": 1.0404115616327083e-05, |
| "loss": 30.93859619140625, |
| "step": 12200 |
| }, |
| { |
| "epoch": 2.416038500257814, |
| "grad_norm": 20.128795623779297, |
| "learning_rate": 1.00656603262709e-05, |
| "loss": 30.750810546875, |
| "step": 12300 |
| }, |
| { |
| "epoch": 2.435681488938542, |
| "grad_norm": 22.28921890258789, |
| "learning_rate": 9.727205036214717e-06, |
| "loss": 30.714560546875, |
| "step": 12400 |
| }, |
| { |
| "epoch": 2.4553244776192695, |
| "grad_norm": 24.13454818725586, |
| "learning_rate": 9.388749746158533e-06, |
| "loss": 30.87623779296875, |
| "step": 12500 |
| }, |
| { |
| "epoch": 2.4749674662999976, |
| "grad_norm": 20.58381462097168, |
| "learning_rate": 9.05029445610235e-06, |
| "loss": 30.60492431640625, |
| "step": 12600 |
| }, |
| { |
| "epoch": 2.4946104549807253, |
| "grad_norm": 20.045475006103516, |
| "learning_rate": 8.711839166046164e-06, |
| "loss": 30.8008154296875, |
| "step": 12700 |
| }, |
| { |
| "epoch": 2.498539052716871, |
| "eval_loss": 3.7586019039154053, |
| "eval_runtime": 42.0034, |
| "eval_samples_per_second": 476.152, |
| "eval_steps_per_second": 14.88, |
| "step": 12720 |
| }, |
| { |
| "epoch": 2.514253443661453, |
| "grad_norm": 19.53034210205078, |
| "learning_rate": 8.373383875989982e-06, |
| "loss": 30.7534326171875, |
| "step": 12800 |
| }, |
| { |
| "epoch": 2.533896432342181, |
| "grad_norm": 20.510520935058594, |
| "learning_rate": 8.0349285859338e-06, |
| "loss": 30.7755859375, |
| "step": 12900 |
| }, |
| { |
| "epoch": 2.5535394210229088, |
| "grad_norm": 20.725147247314453, |
| "learning_rate": 7.696473295877615e-06, |
| "loss": 30.9005029296875, |
| "step": 13000 |
| }, |
| { |
| "epoch": 2.5731824097036364, |
| "grad_norm": 20.11240577697754, |
| "learning_rate": 7.358018005821431e-06, |
| "loss": 30.81609130859375, |
| "step": 13100 |
| }, |
| { |
| "epoch": 2.592825398384364, |
| "grad_norm": 19.01041603088379, |
| "learning_rate": 7.019562715765248e-06, |
| "loss": 30.70421142578125, |
| "step": 13200 |
| }, |
| { |
| "epoch": 2.6124683870650918, |
| "grad_norm": 20.232532501220703, |
| "learning_rate": 6.681107425709064e-06, |
| "loss": 30.7105322265625, |
| "step": 13300 |
| }, |
| { |
| "epoch": 2.63211137574582, |
| "grad_norm": 21.33913803100586, |
| "learning_rate": 6.342652135652881e-06, |
| "loss": 30.76764892578125, |
| "step": 13400 |
| }, |
| { |
| "epoch": 2.6517543644265475, |
| "grad_norm": 19.718833923339844, |
| "learning_rate": 6.004196845596697e-06, |
| "loss": 30.75705078125, |
| "step": 13500 |
| }, |
| { |
| "epoch": 2.671397353107275, |
| "grad_norm": 20.983705520629883, |
| "learning_rate": 5.665741555540514e-06, |
| "loss": 30.66492431640625, |
| "step": 13600 |
| }, |
| { |
| "epoch": 2.691040341788003, |
| "grad_norm": 18.726970672607422, |
| "learning_rate": 5.3272862654843295e-06, |
| "loss": 30.72262451171875, |
| "step": 13700 |
| }, |
| { |
| "epoch": 2.710683330468731, |
| "grad_norm": 21.197751998901367, |
| "learning_rate": 4.988830975428146e-06, |
| "loss": 30.6771728515625, |
| "step": 13800 |
| }, |
| { |
| "epoch": 2.7303263191494587, |
| "grad_norm": 21.318998336791992, |
| "learning_rate": 4.650375685371963e-06, |
| "loss": 30.52608642578125, |
| "step": 13900 |
| }, |
| { |
| "epoch": 2.748397868735728, |
| "eval_loss": 3.7217817306518555, |
| "eval_runtime": 41.8977, |
| "eval_samples_per_second": 477.353, |
| "eval_steps_per_second": 14.917, |
| "step": 13992 |
| } |
| ], |
| "logging_steps": 100, |
| "max_steps": 15273, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 3, |
| "save_steps": 1272, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": false |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 9.427785384950231e+17, |
| "train_batch_size": 32, |
| "trial_name": null, |
| "trial_params": null |
| } |