{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.325,
"eval_steps": 500,
"global_step": 325,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 4.4095964431762695,
"learning_rate": 4.9800000000000004e-05,
"loss": 5.4329,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 2.658348798751831,
"learning_rate": 4.9550000000000005e-05,
"loss": 4.6734,
"step": 10
},
{
"epoch": 0.015,
"grad_norm": 3.4242470264434814,
"learning_rate": 4.93e-05,
"loss": 4.8327,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 4.363213539123535,
"learning_rate": 4.905e-05,
"loss": 4.4238,
"step": 20
},
{
"epoch": 0.025,
"grad_norm": 2.531619071960449,
"learning_rate": 4.88e-05,
"loss": 3.9284,
"step": 25
},
{
"epoch": 0.03,
"grad_norm": 4.686695098876953,
"learning_rate": 4.855e-05,
"loss": 4.6794,
"step": 30
},
{
"epoch": 0.035,
"grad_norm": 4.860099792480469,
"learning_rate": 4.83e-05,
"loss": 5.0624,
"step": 35
},
{
"epoch": 0.04,
"grad_norm": 8.296855926513672,
"learning_rate": 4.805e-05,
"loss": 4.8011,
"step": 40
},
{
"epoch": 0.045,
"grad_norm": 4.80878210067749,
"learning_rate": 4.78e-05,
"loss": 5.495,
"step": 45
},
{
"epoch": 0.05,
"grad_norm": 4.766687393188477,
"learning_rate": 4.755e-05,
"loss": 4.6778,
"step": 50
},
{
"epoch": 0.055,
"grad_norm": 2.7137186527252197,
"learning_rate": 4.73e-05,
"loss": 4.5044,
"step": 55
},
{
"epoch": 0.06,
"grad_norm": 9.891243934631348,
"learning_rate": 4.705e-05,
"loss": 4.4744,
"step": 60
},
{
"epoch": 0.065,
"grad_norm": 3.6516237258911133,
"learning_rate": 4.6800000000000006e-05,
"loss": 4.6576,
"step": 65
},
{
"epoch": 0.07,
"grad_norm": 5.687813758850098,
"learning_rate": 4.655000000000001e-05,
"loss": 5.1204,
"step": 70
},
{
"epoch": 0.075,
"grad_norm": 4.273693561553955,
"learning_rate": 4.630000000000001e-05,
"loss": 5.361,
"step": 75
},
{
"epoch": 0.08,
"grad_norm": 6.802962779998779,
"learning_rate": 4.605e-05,
"loss": 5.5483,
"step": 80
},
{
"epoch": 0.085,
"grad_norm": 2.7016360759735107,
"learning_rate": 4.58e-05,
"loss": 4.5909,
"step": 85
},
{
"epoch": 0.09,
"grad_norm": 8.201666831970215,
"learning_rate": 4.555e-05,
"loss": 4.2807,
"step": 90
},
{
"epoch": 0.095,
"grad_norm": 3.8271970748901367,
"learning_rate": 4.53e-05,
"loss": 5.5645,
"step": 95
},
{
"epoch": 0.1,
"grad_norm": 3.17282772064209,
"learning_rate": 4.5050000000000004e-05,
"loss": 4.5845,
"step": 100
},
{
"epoch": 0.105,
"grad_norm": 2.9256932735443115,
"learning_rate": 4.4800000000000005e-05,
"loss": 4.6003,
"step": 105
},
{
"epoch": 0.11,
"grad_norm": 10.4873046875,
"learning_rate": 4.4550000000000005e-05,
"loss": 4.6453,
"step": 110
},
{
"epoch": 0.115,
"grad_norm": 2.9158883094787598,
"learning_rate": 4.43e-05,
"loss": 4.8935,
"step": 115
},
{
"epoch": 0.12,
"grad_norm": 3.5588793754577637,
"learning_rate": 4.405e-05,
"loss": 3.9582,
"step": 120
},
{
"epoch": 0.125,
"grad_norm": 2.7040491104125977,
"learning_rate": 4.38e-05,
"loss": 4.6688,
"step": 125
},
{
"epoch": 0.13,
"grad_norm": 3.1420748233795166,
"learning_rate": 4.355e-05,
"loss": 4.5591,
"step": 130
},
{
"epoch": 0.135,
"grad_norm": 2.023017168045044,
"learning_rate": 4.33e-05,
"loss": 4.5944,
"step": 135
},
{
"epoch": 0.14,
"grad_norm": 2.902434825897217,
"learning_rate": 4.305e-05,
"loss": 5.4079,
"step": 140
},
{
"epoch": 0.145,
"grad_norm": 2.4470090866088867,
"learning_rate": 4.2800000000000004e-05,
"loss": 4.9259,
"step": 145
},
{
"epoch": 0.15,
"grad_norm": 4.8970046043396,
"learning_rate": 4.2550000000000004e-05,
"loss": 4.6465,
"step": 150
},
{
"epoch": 0.155,
"grad_norm": 2.197767734527588,
"learning_rate": 4.23e-05,
"loss": 4.6841,
"step": 155
},
{
"epoch": 0.16,
"grad_norm": 4.577955722808838,
"learning_rate": 4.205e-05,
"loss": 4.1775,
"step": 160
},
{
"epoch": 0.165,
"grad_norm": 2.3383991718292236,
"learning_rate": 4.18e-05,
"loss": 4.0309,
"step": 165
},
{
"epoch": 0.17,
"grad_norm": 3.794187307357788,
"learning_rate": 4.155e-05,
"loss": 4.5336,
"step": 170
},
{
"epoch": 0.175,
"grad_norm": 4.427373886108398,
"learning_rate": 4.13e-05,
"loss": 4.241,
"step": 175
},
{
"epoch": 0.18,
"grad_norm": 2.8116564750671387,
"learning_rate": 4.105e-05,
"loss": 4.7562,
"step": 180
},
{
"epoch": 0.185,
"grad_norm": 5.841445446014404,
"learning_rate": 4.08e-05,
"loss": 3.7034,
"step": 185
},
{
"epoch": 0.19,
"grad_norm": 20.96999740600586,
"learning_rate": 4.055e-05,
"loss": 5.1727,
"step": 190
},
{
"epoch": 0.195,
"grad_norm": 11.743700981140137,
"learning_rate": 4.0300000000000004e-05,
"loss": 5.1553,
"step": 195
},
{
"epoch": 0.2,
"grad_norm": 10.230498313903809,
"learning_rate": 4.0050000000000004e-05,
"loss": 4.8657,
"step": 200
},
{
"epoch": 0.205,
"grad_norm": 5.706521987915039,
"learning_rate": 3.9800000000000005e-05,
"loss": 4.3638,
"step": 205
},
{
"epoch": 0.21,
"grad_norm": 5.781250476837158,
"learning_rate": 3.9550000000000006e-05,
"loss": 4.3452,
"step": 210
},
{
"epoch": 0.215,
"grad_norm": 2.371096611022949,
"learning_rate": 3.9300000000000007e-05,
"loss": 4.6681,
"step": 215
},
{
"epoch": 0.22,
"grad_norm": 4.352181911468506,
"learning_rate": 3.905e-05,
"loss": 4.2025,
"step": 220
},
{
"epoch": 0.225,
"grad_norm": 4.531359672546387,
"learning_rate": 3.88e-05,
"loss": 4.5391,
"step": 225
},
{
"epoch": 0.23,
"grad_norm": 6.948793411254883,
"learning_rate": 3.855e-05,
"loss": 4.0233,
"step": 230
},
{
"epoch": 0.235,
"grad_norm": 4.765657424926758,
"learning_rate": 3.83e-05,
"loss": 4.1225,
"step": 235
},
{
"epoch": 0.24,
"grad_norm": 3.838766574859619,
"learning_rate": 3.805e-05,
"loss": 4.9202,
"step": 240
},
{
"epoch": 0.245,
"grad_norm": 6.471155643463135,
"learning_rate": 3.7800000000000004e-05,
"loss": 4.7561,
"step": 245
},
{
"epoch": 0.25,
"grad_norm": 5.748368740081787,
"learning_rate": 3.7550000000000005e-05,
"loss": 3.7783,
"step": 250
},
{
"epoch": 0.255,
"grad_norm": 2.663558006286621,
"learning_rate": 3.73e-05,
"loss": 4.1553,
"step": 255
},
{
"epoch": 0.26,
"grad_norm": 3.4103472232818604,
"learning_rate": 3.705e-05,
"loss": 4.9314,
"step": 260
},
{
"epoch": 0.265,
"grad_norm": 6.4364824295043945,
"learning_rate": 3.68e-05,
"loss": 4.4708,
"step": 265
},
{
"epoch": 0.27,
"grad_norm": 4.145668983459473,
"learning_rate": 3.655e-05,
"loss": 4.5879,
"step": 270
},
{
"epoch": 0.275,
"grad_norm": 3.4401297569274902,
"learning_rate": 3.63e-05,
"loss": 4.7668,
"step": 275
},
{
"epoch": 0.28,
"grad_norm": 9.18470287322998,
"learning_rate": 3.605e-05,
"loss": 4.619,
"step": 280
},
{
"epoch": 0.285,
"grad_norm": 4.180783748626709,
"learning_rate": 3.58e-05,
"loss": 3.8878,
"step": 285
},
{
"epoch": 0.29,
"grad_norm": 7.336719036102295,
"learning_rate": 3.555e-05,
"loss": 4.5864,
"step": 290
},
{
"epoch": 0.295,
"grad_norm": 4.498530864715576,
"learning_rate": 3.53e-05,
"loss": 3.8987,
"step": 295
},
{
"epoch": 0.3,
"grad_norm": 3.3154962062835693,
"learning_rate": 3.505e-05,
"loss": 4.5049,
"step": 300
},
{
"epoch": 0.305,
"grad_norm": 5.832047462463379,
"learning_rate": 3.48e-05,
"loss": 5.0849,
"step": 305
},
{
"epoch": 0.31,
"grad_norm": 17.935897827148438,
"learning_rate": 3.455e-05,
"loss": 4.8395,
"step": 310
},
{
"epoch": 0.315,
"grad_norm": 8.821608543395996,
"learning_rate": 3.430000000000001e-05,
"loss": 4.9111,
"step": 315
},
{
"epoch": 0.32,
"grad_norm": 4.014020919799805,
"learning_rate": 3.405e-05,
"loss": 3.6753,
"step": 320
},
{
"epoch": 0.325,
"grad_norm": 7.319735050201416,
"learning_rate": 3.38e-05,
"loss": 4.6448,
"step": 325
}
],
"logging_steps": 5,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 110513199513600.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}