{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.875,
"eval_steps": 500,
"global_step": 875,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 4.4095964431762695,
"learning_rate": 4.9800000000000004e-05,
"loss": 5.4329,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 2.658348798751831,
"learning_rate": 4.9550000000000005e-05,
"loss": 4.6734,
"step": 10
},
{
"epoch": 0.015,
"grad_norm": 3.4242470264434814,
"learning_rate": 4.93e-05,
"loss": 4.8327,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 4.363213539123535,
"learning_rate": 4.905e-05,
"loss": 4.4238,
"step": 20
},
{
"epoch": 0.025,
"grad_norm": 2.531619071960449,
"learning_rate": 4.88e-05,
"loss": 3.9284,
"step": 25
},
{
"epoch": 0.03,
"grad_norm": 4.686695098876953,
"learning_rate": 4.855e-05,
"loss": 4.6794,
"step": 30
},
{
"epoch": 0.035,
"grad_norm": 4.860099792480469,
"learning_rate": 4.83e-05,
"loss": 5.0624,
"step": 35
},
{
"epoch": 0.04,
"grad_norm": 8.296855926513672,
"learning_rate": 4.805e-05,
"loss": 4.8011,
"step": 40
},
{
"epoch": 0.045,
"grad_norm": 4.80878210067749,
"learning_rate": 4.78e-05,
"loss": 5.495,
"step": 45
},
{
"epoch": 0.05,
"grad_norm": 4.766687393188477,
"learning_rate": 4.755e-05,
"loss": 4.6778,
"step": 50
},
{
"epoch": 0.055,
"grad_norm": 2.7137186527252197,
"learning_rate": 4.73e-05,
"loss": 4.5044,
"step": 55
},
{
"epoch": 0.06,
"grad_norm": 9.891243934631348,
"learning_rate": 4.705e-05,
"loss": 4.4744,
"step": 60
},
{
"epoch": 0.065,
"grad_norm": 3.6516237258911133,
"learning_rate": 4.6800000000000006e-05,
"loss": 4.6576,
"step": 65
},
{
"epoch": 0.07,
"grad_norm": 5.687813758850098,
"learning_rate": 4.655000000000001e-05,
"loss": 5.1204,
"step": 70
},
{
"epoch": 0.075,
"grad_norm": 4.273693561553955,
"learning_rate": 4.630000000000001e-05,
"loss": 5.361,
"step": 75
},
{
"epoch": 0.08,
"grad_norm": 6.802962779998779,
"learning_rate": 4.605e-05,
"loss": 5.5483,
"step": 80
},
{
"epoch": 0.085,
"grad_norm": 2.7016360759735107,
"learning_rate": 4.58e-05,
"loss": 4.5909,
"step": 85
},
{
"epoch": 0.09,
"grad_norm": 8.201666831970215,
"learning_rate": 4.555e-05,
"loss": 4.2807,
"step": 90
},
{
"epoch": 0.095,
"grad_norm": 3.8271970748901367,
"learning_rate": 4.53e-05,
"loss": 5.5645,
"step": 95
},
{
"epoch": 0.1,
"grad_norm": 3.17282772064209,
"learning_rate": 4.5050000000000004e-05,
"loss": 4.5845,
"step": 100
},
{
"epoch": 0.105,
"grad_norm": 2.9256932735443115,
"learning_rate": 4.4800000000000005e-05,
"loss": 4.6003,
"step": 105
},
{
"epoch": 0.11,
"grad_norm": 10.4873046875,
"learning_rate": 4.4550000000000005e-05,
"loss": 4.6453,
"step": 110
},
{
"epoch": 0.115,
"grad_norm": 2.9158883094787598,
"learning_rate": 4.43e-05,
"loss": 4.8935,
"step": 115
},
{
"epoch": 0.12,
"grad_norm": 3.5588793754577637,
"learning_rate": 4.405e-05,
"loss": 3.9582,
"step": 120
},
{
"epoch": 0.125,
"grad_norm": 2.7040491104125977,
"learning_rate": 4.38e-05,
"loss": 4.6688,
"step": 125
},
{
"epoch": 0.13,
"grad_norm": 3.1420748233795166,
"learning_rate": 4.355e-05,
"loss": 4.5591,
"step": 130
},
{
"epoch": 0.135,
"grad_norm": 2.023017168045044,
"learning_rate": 4.33e-05,
"loss": 4.5944,
"step": 135
},
{
"epoch": 0.14,
"grad_norm": 2.902434825897217,
"learning_rate": 4.305e-05,
"loss": 5.4079,
"step": 140
},
{
"epoch": 0.145,
"grad_norm": 2.4470090866088867,
"learning_rate": 4.2800000000000004e-05,
"loss": 4.9259,
"step": 145
},
{
"epoch": 0.15,
"grad_norm": 4.8970046043396,
"learning_rate": 4.2550000000000004e-05,
"loss": 4.6465,
"step": 150
},
{
"epoch": 0.155,
"grad_norm": 2.197767734527588,
"learning_rate": 4.23e-05,
"loss": 4.6841,
"step": 155
},
{
"epoch": 0.16,
"grad_norm": 4.577955722808838,
"learning_rate": 4.205e-05,
"loss": 4.1775,
"step": 160
},
{
"epoch": 0.165,
"grad_norm": 2.3383991718292236,
"learning_rate": 4.18e-05,
"loss": 4.0309,
"step": 165
},
{
"epoch": 0.17,
"grad_norm": 3.794187307357788,
"learning_rate": 4.155e-05,
"loss": 4.5336,
"step": 170
},
{
"epoch": 0.175,
"grad_norm": 4.427373886108398,
"learning_rate": 4.13e-05,
"loss": 4.241,
"step": 175
},
{
"epoch": 0.18,
"grad_norm": 2.8116564750671387,
"learning_rate": 4.105e-05,
"loss": 4.7562,
"step": 180
},
{
"epoch": 0.185,
"grad_norm": 5.841445446014404,
"learning_rate": 4.08e-05,
"loss": 3.7034,
"step": 185
},
{
"epoch": 0.19,
"grad_norm": 20.96999740600586,
"learning_rate": 4.055e-05,
"loss": 5.1727,
"step": 190
},
{
"epoch": 0.195,
"grad_norm": 11.743700981140137,
"learning_rate": 4.0300000000000004e-05,
"loss": 5.1553,
"step": 195
},
{
"epoch": 0.2,
"grad_norm": 10.230498313903809,
"learning_rate": 4.0050000000000004e-05,
"loss": 4.8657,
"step": 200
},
{
"epoch": 0.205,
"grad_norm": 5.706521987915039,
"learning_rate": 3.9800000000000005e-05,
"loss": 4.3638,
"step": 205
},
{
"epoch": 0.21,
"grad_norm": 5.781250476837158,
"learning_rate": 3.9550000000000006e-05,
"loss": 4.3452,
"step": 210
},
{
"epoch": 0.215,
"grad_norm": 2.371096611022949,
"learning_rate": 3.9300000000000007e-05,
"loss": 4.6681,
"step": 215
},
{
"epoch": 0.22,
"grad_norm": 4.352181911468506,
"learning_rate": 3.905e-05,
"loss": 4.2025,
"step": 220
},
{
"epoch": 0.225,
"grad_norm": 4.531359672546387,
"learning_rate": 3.88e-05,
"loss": 4.5391,
"step": 225
},
{
"epoch": 0.23,
"grad_norm": 6.948793411254883,
"learning_rate": 3.855e-05,
"loss": 4.0233,
"step": 230
},
{
"epoch": 0.235,
"grad_norm": 4.765657424926758,
"learning_rate": 3.83e-05,
"loss": 4.1225,
"step": 235
},
{
"epoch": 0.24,
"grad_norm": 3.838766574859619,
"learning_rate": 3.805e-05,
"loss": 4.9202,
"step": 240
},
{
"epoch": 0.245,
"grad_norm": 6.471155643463135,
"learning_rate": 3.7800000000000004e-05,
"loss": 4.7561,
"step": 245
},
{
"epoch": 0.25,
"grad_norm": 5.748368740081787,
"learning_rate": 3.7550000000000005e-05,
"loss": 3.7783,
"step": 250
},
{
"epoch": 0.255,
"grad_norm": 2.663558006286621,
"learning_rate": 3.73e-05,
"loss": 4.1553,
"step": 255
},
{
"epoch": 0.26,
"grad_norm": 3.4103472232818604,
"learning_rate": 3.705e-05,
"loss": 4.9314,
"step": 260
},
{
"epoch": 0.265,
"grad_norm": 6.4364824295043945,
"learning_rate": 3.68e-05,
"loss": 4.4708,
"step": 265
},
{
"epoch": 0.27,
"grad_norm": 4.145668983459473,
"learning_rate": 3.655e-05,
"loss": 4.5879,
"step": 270
},
{
"epoch": 0.275,
"grad_norm": 3.4401297569274902,
"learning_rate": 3.63e-05,
"loss": 4.7668,
"step": 275
},
{
"epoch": 0.28,
"grad_norm": 9.18470287322998,
"learning_rate": 3.605e-05,
"loss": 4.619,
"step": 280
},
{
"epoch": 0.285,
"grad_norm": 4.180783748626709,
"learning_rate": 3.58e-05,
"loss": 3.8878,
"step": 285
},
{
"epoch": 0.29,
"grad_norm": 7.336719036102295,
"learning_rate": 3.555e-05,
"loss": 4.5864,
"step": 290
},
{
"epoch": 0.295,
"grad_norm": 4.498530864715576,
"learning_rate": 3.53e-05,
"loss": 3.8987,
"step": 295
},
{
"epoch": 0.3,
"grad_norm": 3.3154962062835693,
"learning_rate": 3.505e-05,
"loss": 4.5049,
"step": 300
},
{
"epoch": 0.305,
"grad_norm": 5.832047462463379,
"learning_rate": 3.48e-05,
"loss": 5.0849,
"step": 305
},
{
"epoch": 0.31,
"grad_norm": 17.935897827148438,
"learning_rate": 3.455e-05,
"loss": 4.8395,
"step": 310
},
{
"epoch": 0.315,
"grad_norm": 8.821608543395996,
"learning_rate": 3.430000000000001e-05,
"loss": 4.9111,
"step": 315
},
{
"epoch": 0.32,
"grad_norm": 4.014020919799805,
"learning_rate": 3.405e-05,
"loss": 3.6753,
"step": 320
},
{
"epoch": 0.325,
"grad_norm": 7.319735050201416,
"learning_rate": 3.38e-05,
"loss": 4.6448,
"step": 325
},
{
"epoch": 0.33,
"grad_norm": 3.252227783203125,
"learning_rate": 3.355e-05,
"loss": 4.1821,
"step": 330
},
{
"epoch": 0.335,
"grad_norm": 5.4611616134643555,
"learning_rate": 3.33e-05,
"loss": 4.6576,
"step": 335
},
{
"epoch": 0.34,
"grad_norm": 5.558750152587891,
"learning_rate": 3.3050000000000004e-05,
"loss": 3.8608,
"step": 340
},
{
"epoch": 0.345,
"grad_norm": 7.037349700927734,
"learning_rate": 3.2800000000000004e-05,
"loss": 4.5832,
"step": 345
},
{
"epoch": 0.35,
"grad_norm": 8.361868858337402,
"learning_rate": 3.2550000000000005e-05,
"loss": 4.7027,
"step": 350
},
{
"epoch": 0.355,
"grad_norm": 3.756870746612549,
"learning_rate": 3.2300000000000006e-05,
"loss": 3.964,
"step": 355
},
{
"epoch": 0.36,
"grad_norm": 6.4203667640686035,
"learning_rate": 3.205e-05,
"loss": 4.6799,
"step": 360
},
{
"epoch": 0.365,
"grad_norm": 6.658571720123291,
"learning_rate": 3.18e-05,
"loss": 4.9203,
"step": 365
},
{
"epoch": 0.37,
"grad_norm": 3.5544397830963135,
"learning_rate": 3.155e-05,
"loss": 3.6623,
"step": 370
},
{
"epoch": 0.375,
"grad_norm": 6.467970848083496,
"learning_rate": 3.13e-05,
"loss": 4.5927,
"step": 375
},
{
"epoch": 0.38,
"grad_norm": 5.445802211761475,
"learning_rate": 3.105e-05,
"loss": 4.2229,
"step": 380
},
{
"epoch": 0.385,
"grad_norm": 4.658431529998779,
"learning_rate": 3.08e-05,
"loss": 3.9,
"step": 385
},
{
"epoch": 0.39,
"grad_norm": 1.9451472759246826,
"learning_rate": 3.0550000000000004e-05,
"loss": 4.5191,
"step": 390
},
{
"epoch": 0.395,
"grad_norm": 8.094263076782227,
"learning_rate": 3.03e-05,
"loss": 4.6497,
"step": 395
},
{
"epoch": 0.4,
"grad_norm": 4.177872657775879,
"learning_rate": 3.0050000000000002e-05,
"loss": 4.2222,
"step": 400
},
{
"epoch": 0.405,
"grad_norm": 5.344555377960205,
"learning_rate": 2.98e-05,
"loss": 4.0168,
"step": 405
},
{
"epoch": 0.41,
"grad_norm": 8.562535285949707,
"learning_rate": 2.955e-05,
"loss": 4.2704,
"step": 410
},
{
"epoch": 0.415,
"grad_norm": 4.839510917663574,
"learning_rate": 2.93e-05,
"loss": 3.6407,
"step": 415
},
{
"epoch": 0.42,
"grad_norm": 6.950375080108643,
"learning_rate": 2.9049999999999998e-05,
"loss": 4.0565,
"step": 420
},
{
"epoch": 0.425,
"grad_norm": 6.146759510040283,
"learning_rate": 2.88e-05,
"loss": 4.1459,
"step": 425
},
{
"epoch": 0.43,
"grad_norm": 4.605158805847168,
"learning_rate": 2.855e-05,
"loss": 4.5043,
"step": 430
},
{
"epoch": 0.435,
"grad_norm": 8.02145004272461,
"learning_rate": 2.83e-05,
"loss": 3.9532,
"step": 435
},
{
"epoch": 0.44,
"grad_norm": 15.021471977233887,
"learning_rate": 2.8050000000000004e-05,
"loss": 4.4141,
"step": 440
},
{
"epoch": 0.445,
"grad_norm": 8.20073127746582,
"learning_rate": 2.7800000000000005e-05,
"loss": 4.345,
"step": 445
},
{
"epoch": 0.45,
"grad_norm": 4.261655807495117,
"learning_rate": 2.7550000000000002e-05,
"loss": 4.2696,
"step": 450
},
{
"epoch": 0.455,
"grad_norm": 6.736018180847168,
"learning_rate": 2.7300000000000003e-05,
"loss": 4.2742,
"step": 455
},
{
"epoch": 0.46,
"grad_norm": 4.796390533447266,
"learning_rate": 2.7050000000000004e-05,
"loss": 4.1937,
"step": 460
},
{
"epoch": 0.465,
"grad_norm": 12.736322402954102,
"learning_rate": 2.6800000000000004e-05,
"loss": 4.4847,
"step": 465
},
{
"epoch": 0.47,
"grad_norm": 7.087122917175293,
"learning_rate": 2.655e-05,
"loss": 4.217,
"step": 470
},
{
"epoch": 0.475,
"grad_norm": 6.4038190841674805,
"learning_rate": 2.6300000000000002e-05,
"loss": 4.6776,
"step": 475
},
{
"epoch": 0.48,
"grad_norm": 5.816014289855957,
"learning_rate": 2.6050000000000003e-05,
"loss": 4.0214,
"step": 480
},
{
"epoch": 0.485,
"grad_norm": 12.878230094909668,
"learning_rate": 2.58e-05,
"loss": 4.407,
"step": 485
},
{
"epoch": 0.49,
"grad_norm": 8.707136154174805,
"learning_rate": 2.555e-05,
"loss": 4.3686,
"step": 490
},
{
"epoch": 0.495,
"grad_norm": 5.850894927978516,
"learning_rate": 2.5300000000000002e-05,
"loss": 4.034,
"step": 495
},
{
"epoch": 0.5,
"grad_norm": 4.997234344482422,
"learning_rate": 2.5050000000000002e-05,
"loss": 3.8851,
"step": 500
},
{
"epoch": 0.505,
"grad_norm": 8.565802574157715,
"learning_rate": 2.48e-05,
"loss": 4.6681,
"step": 505
},
{
"epoch": 0.51,
"grad_norm": 10.438283920288086,
"learning_rate": 2.455e-05,
"loss": 4.4391,
"step": 510
},
{
"epoch": 0.515,
"grad_norm": 12.387027740478516,
"learning_rate": 2.43e-05,
"loss": 4.6539,
"step": 515
},
{
"epoch": 0.52,
"grad_norm": 8.13821029663086,
"learning_rate": 2.4050000000000002e-05,
"loss": 5.1541,
"step": 520
},
{
"epoch": 0.525,
"grad_norm": 6.827164649963379,
"learning_rate": 2.38e-05,
"loss": 4.356,
"step": 525
},
{
"epoch": 0.53,
"grad_norm": 6.84028959274292,
"learning_rate": 2.355e-05,
"loss": 4.1476,
"step": 530
},
{
"epoch": 0.535,
"grad_norm": 10.003660202026367,
"learning_rate": 2.3300000000000004e-05,
"loss": 4.2211,
"step": 535
},
{
"epoch": 0.54,
"grad_norm": 8.519783973693848,
"learning_rate": 2.305e-05,
"loss": 3.9058,
"step": 540
},
{
"epoch": 0.545,
"grad_norm": 9.998250961303711,
"learning_rate": 2.2800000000000002e-05,
"loss": 3.9606,
"step": 545
},
{
"epoch": 0.55,
"grad_norm": 3.5864787101745605,
"learning_rate": 2.2550000000000003e-05,
"loss": 4.2792,
"step": 550
},
{
"epoch": 0.555,
"grad_norm": 2.15779709815979,
"learning_rate": 2.23e-05,
"loss": 4.2588,
"step": 555
},
{
"epoch": 0.56,
"grad_norm": 7.362779140472412,
"learning_rate": 2.205e-05,
"loss": 4.3834,
"step": 560
},
{
"epoch": 0.565,
"grad_norm": 3.7950263023376465,
"learning_rate": 2.18e-05,
"loss": 5.0136,
"step": 565
},
{
"epoch": 0.57,
"grad_norm": 6.706342697143555,
"learning_rate": 2.1550000000000002e-05,
"loss": 3.8674,
"step": 570
},
{
"epoch": 0.575,
"grad_norm": 4.262815475463867,
"learning_rate": 2.13e-05,
"loss": 3.2834,
"step": 575
},
{
"epoch": 0.58,
"grad_norm": 5.646935939788818,
"learning_rate": 2.105e-05,
"loss": 3.7601,
"step": 580
},
{
"epoch": 0.585,
"grad_norm": 4.463131427764893,
"learning_rate": 2.08e-05,
"loss": 3.9355,
"step": 585
},
{
"epoch": 0.59,
"grad_norm": 7.170533657073975,
"learning_rate": 2.055e-05,
"loss": 4.1956,
"step": 590
},
{
"epoch": 0.595,
"grad_norm": 4.189892768859863,
"learning_rate": 2.0300000000000002e-05,
"loss": 4.385,
"step": 595
},
{
"epoch": 0.6,
"grad_norm": 9.586647987365723,
"learning_rate": 2.0050000000000003e-05,
"loss": 4.3872,
"step": 600
},
{
"epoch": 0.605,
"grad_norm": 2.3141019344329834,
"learning_rate": 1.9800000000000004e-05,
"loss": 4.1622,
"step": 605
},
{
"epoch": 0.61,
"grad_norm": 11.550451278686523,
"learning_rate": 1.955e-05,
"loss": 4.8773,
"step": 610
},
{
"epoch": 0.615,
"grad_norm": 9.522382736206055,
"learning_rate": 1.93e-05,
"loss": 4.6614,
"step": 615
},
{
"epoch": 0.62,
"grad_norm": 10.683562278747559,
"learning_rate": 1.9050000000000002e-05,
"loss": 4.0905,
"step": 620
},
{
"epoch": 0.625,
"grad_norm": 2.9108145236968994,
"learning_rate": 1.88e-05,
"loss": 4.049,
"step": 625
},
{
"epoch": 0.63,
"grad_norm": 5.9698944091796875,
"learning_rate": 1.855e-05,
"loss": 3.7179,
"step": 630
},
{
"epoch": 0.635,
"grad_norm": 5.07297945022583,
"learning_rate": 1.83e-05,
"loss": 3.8728,
"step": 635
},
{
"epoch": 0.64,
"grad_norm": 7.098571300506592,
"learning_rate": 1.805e-05,
"loss": 3.7689,
"step": 640
},
{
"epoch": 0.645,
"grad_norm": 8.76673698425293,
"learning_rate": 1.78e-05,
"loss": 4.4143,
"step": 645
},
{
"epoch": 0.65,
"grad_norm": 2.236011266708374,
"learning_rate": 1.755e-05,
"loss": 3.4461,
"step": 650
},
{
"epoch": 0.655,
"grad_norm": 6.17922830581665,
"learning_rate": 1.73e-05,
"loss": 4.3143,
"step": 655
},
{
"epoch": 0.66,
"grad_norm": 5.667351722717285,
"learning_rate": 1.705e-05,
"loss": 4.19,
"step": 660
},
{
"epoch": 0.665,
"grad_norm": 13.386966705322266,
"learning_rate": 1.6800000000000002e-05,
"loss": 4.5092,
"step": 665
},
{
"epoch": 0.67,
"grad_norm": 7.265449523925781,
"learning_rate": 1.6550000000000002e-05,
"loss": 3.9085,
"step": 670
},
{
"epoch": 0.675,
"grad_norm": 9.232586860656738,
"learning_rate": 1.63e-05,
"loss": 5.0393,
"step": 675
},
{
"epoch": 0.68,
"grad_norm": 8.057585716247559,
"learning_rate": 1.605e-05,
"loss": 3.9848,
"step": 680
},
{
"epoch": 0.685,
"grad_norm": 2.304192543029785,
"learning_rate": 1.58e-05,
"loss": 4.3656,
"step": 685
},
{
"epoch": 0.69,
"grad_norm": 10.112837791442871,
"learning_rate": 1.5550000000000002e-05,
"loss": 4.4643,
"step": 690
},
{
"epoch": 0.695,
"grad_norm": 6.0983757972717285,
"learning_rate": 1.53e-05,
"loss": 4.1222,
"step": 695
},
{
"epoch": 0.7,
"grad_norm": 5.149627685546875,
"learning_rate": 1.505e-05,
"loss": 4.0705,
"step": 700
},
{
"epoch": 0.705,
"grad_norm": 8.752741813659668,
"learning_rate": 1.48e-05,
"loss": 4.4209,
"step": 705
},
{
"epoch": 0.71,
"grad_norm": 4.792961120605469,
"learning_rate": 1.455e-05,
"loss": 4.1384,
"step": 710
},
{
"epoch": 0.715,
"grad_norm": 7.746492385864258,
"learning_rate": 1.43e-05,
"loss": 4.1497,
"step": 715
},
{
"epoch": 0.72,
"grad_norm": 6.519612789154053,
"learning_rate": 1.4050000000000003e-05,
"loss": 4.3319,
"step": 720
},
{
"epoch": 0.725,
"grad_norm": 8.192032814025879,
"learning_rate": 1.3800000000000002e-05,
"loss": 3.6076,
"step": 725
},
{
"epoch": 0.73,
"grad_norm": 9.460343360900879,
"learning_rate": 1.3550000000000002e-05,
"loss": 3.983,
"step": 730
},
{
"epoch": 0.735,
"grad_norm": 4.7448225021362305,
"learning_rate": 1.3300000000000001e-05,
"loss": 4.4842,
"step": 735
},
{
"epoch": 0.74,
"grad_norm": 2.821718454360962,
"learning_rate": 1.305e-05,
"loss": 4.7517,
"step": 740
},
{
"epoch": 0.745,
"grad_norm": 7.751161575317383,
"learning_rate": 1.2800000000000001e-05,
"loss": 3.5541,
"step": 745
},
{
"epoch": 0.75,
"grad_norm": 7.190122127532959,
"learning_rate": 1.255e-05,
"loss": 4.5604,
"step": 750
},
{
"epoch": 0.755,
"grad_norm": 6.862963676452637,
"learning_rate": 1.23e-05,
"loss": 4.2742,
"step": 755
},
{
"epoch": 0.76,
"grad_norm": 7.691545486450195,
"learning_rate": 1.205e-05,
"loss": 3.5938,
"step": 760
},
{
"epoch": 0.765,
"grad_norm": 11.704629898071289,
"learning_rate": 1.18e-05,
"loss": 4.4012,
"step": 765
},
{
"epoch": 0.77,
"grad_norm": 9.413323402404785,
"learning_rate": 1.1550000000000001e-05,
"loss": 3.9794,
"step": 770
},
{
"epoch": 0.775,
"grad_norm": 22.67475128173828,
"learning_rate": 1.13e-05,
"loss": 4.078,
"step": 775
},
{
"epoch": 0.78,
"grad_norm": 2.4493584632873535,
"learning_rate": 1.1050000000000001e-05,
"loss": 3.9336,
"step": 780
},
{
"epoch": 0.785,
"grad_norm": 8.993667602539062,
"learning_rate": 1.08e-05,
"loss": 4.5752,
"step": 785
},
{
"epoch": 0.79,
"grad_norm": 4.641674995422363,
"learning_rate": 1.055e-05,
"loss": 4.6423,
"step": 790
},
{
"epoch": 0.795,
"grad_norm": 2.4793174266815186,
"learning_rate": 1.03e-05,
"loss": 4.2724,
"step": 795
},
{
"epoch": 0.8,
"grad_norm": 5.038002014160156,
"learning_rate": 1.005e-05,
"loss": 4.56,
"step": 800
},
{
"epoch": 0.805,
"grad_norm": 4.353021144866943,
"learning_rate": 9.800000000000001e-06,
"loss": 3.8578,
"step": 805
},
{
"epoch": 0.81,
"grad_norm": 8.213798522949219,
"learning_rate": 9.55e-06,
"loss": 4.1989,
"step": 810
},
{
"epoch": 0.815,
"grad_norm": 5.633493900299072,
"learning_rate": 9.3e-06,
"loss": 4.2873,
"step": 815
},
{
"epoch": 0.82,
"grad_norm": 7.785790920257568,
"learning_rate": 9.05e-06,
"loss": 4.2596,
"step": 820
},
{
"epoch": 0.825,
"grad_norm": 6.220232009887695,
"learning_rate": 8.8e-06,
"loss": 4.5386,
"step": 825
},
{
"epoch": 0.83,
"grad_norm": 4.381409168243408,
"learning_rate": 8.550000000000001e-06,
"loss": 4.0956,
"step": 830
},
{
"epoch": 0.835,
"grad_norm": 7.15064811706543,
"learning_rate": 8.3e-06,
"loss": 4.1738,
"step": 835
},
{
"epoch": 0.84,
"grad_norm": 10.692646980285645,
"learning_rate": 8.050000000000001e-06,
"loss": 4.319,
"step": 840
},
{
"epoch": 0.845,
"grad_norm": 6.923873424530029,
"learning_rate": 7.8e-06,
"loss": 4.1981,
"step": 845
},
{
"epoch": 0.85,
"grad_norm": 11.210564613342285,
"learning_rate": 7.55e-06,
"loss": 3.8061,
"step": 850
},
{
"epoch": 0.855,
"grad_norm": 4.295876502990723,
"learning_rate": 7.2999999999999996e-06,
"loss": 5.1272,
"step": 855
},
{
"epoch": 0.86,
"grad_norm": 5.475194454193115,
"learning_rate": 7.049999999999999e-06,
"loss": 4.3979,
"step": 860
},
{
"epoch": 0.865,
"grad_norm": 8.845963478088379,
"learning_rate": 6.800000000000001e-06,
"loss": 4.5577,
"step": 865
},
{
"epoch": 0.87,
"grad_norm": 4.435886859893799,
"learning_rate": 6.550000000000001e-06,
"loss": 4.2463,
"step": 870
},
{
"epoch": 0.875,
"grad_norm": 4.606530666351318,
"learning_rate": 6.300000000000001e-06,
"loss": 4.3915,
"step": 875
}
],
"logging_steps": 5,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 297535537152000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}