{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9991503823279524,
"eval_steps": 29,
"global_step": 147,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006796941376380629,
"grad_norm": 4.598693370819092,
"learning_rate": 1.111111111111111e-06,
"loss": 0.5628,
"step": 1
},
{
"epoch": 0.013593882752761258,
"grad_norm": 3.3473575115203857,
"learning_rate": 2.222222222222222e-06,
"loss": 0.5195,
"step": 2
},
{
"epoch": 0.020390824129141887,
"grad_norm": 4.778041362762451,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.5557,
"step": 3
},
{
"epoch": 0.027187765505522515,
"grad_norm": 4.810835838317871,
"learning_rate": 4.444444444444444e-06,
"loss": 0.5973,
"step": 4
},
{
"epoch": 0.033984706881903144,
"grad_norm": 4.811781406402588,
"learning_rate": 5.555555555555557e-06,
"loss": 0.5647,
"step": 5
},
{
"epoch": 0.04078164825828377,
"grad_norm": 2.4056437015533447,
"learning_rate": 6.666666666666667e-06,
"loss": 0.5343,
"step": 6
},
{
"epoch": 0.0475785896346644,
"grad_norm": 1.500022530555725,
"learning_rate": 7.77777777777778e-06,
"loss": 0.4847,
"step": 7
},
{
"epoch": 0.05437553101104503,
"grad_norm": 1.5126186609268188,
"learning_rate": 8.888888888888888e-06,
"loss": 0.5285,
"step": 8
},
{
"epoch": 0.06117247238742566,
"grad_norm": 1.3590396642684937,
"learning_rate": 1e-05,
"loss": 0.4215,
"step": 9
},
{
"epoch": 0.06796941376380629,
"grad_norm": 1.0946522951126099,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.4077,
"step": 10
},
{
"epoch": 0.07476635514018691,
"grad_norm": 1.035597801208496,
"learning_rate": 1.2222222222222224e-05,
"loss": 0.3961,
"step": 11
},
{
"epoch": 0.08156329651656755,
"grad_norm": 0.9033994078636169,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.3715,
"step": 12
},
{
"epoch": 0.08836023789294817,
"grad_norm": 1.1996865272521973,
"learning_rate": 1.4444444444444446e-05,
"loss": 0.3576,
"step": 13
},
{
"epoch": 0.0951571792693288,
"grad_norm": 0.9262693524360657,
"learning_rate": 1.555555555555556e-05,
"loss": 0.3483,
"step": 14
},
{
"epoch": 0.10195412064570943,
"grad_norm": 0.8593380451202393,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.3552,
"step": 15
},
{
"epoch": 0.10875106202209006,
"grad_norm": 0.9698188900947571,
"learning_rate": 1.7777777777777777e-05,
"loss": 0.329,
"step": 16
},
{
"epoch": 0.11554800339847068,
"grad_norm": 1.0091280937194824,
"learning_rate": 1.888888888888889e-05,
"loss": 0.3793,
"step": 17
},
{
"epoch": 0.12234494477485132,
"grad_norm": 0.8758748173713684,
"learning_rate": 2e-05,
"loss": 0.3134,
"step": 18
},
{
"epoch": 0.12914188615123195,
"grad_norm": 0.7287446856498718,
"learning_rate": 1.9999724204599748e-05,
"loss": 0.3261,
"step": 19
},
{
"epoch": 0.13593882752761258,
"grad_norm": 0.9394727349281311,
"learning_rate": 1.9998896833611603e-05,
"loss": 0.3553,
"step": 20
},
{
"epoch": 0.1427357689039932,
"grad_norm": 0.8428260684013367,
"learning_rate": 1.9997517932672592e-05,
"loss": 0.3411,
"step": 21
},
{
"epoch": 0.14953271028037382,
"grad_norm": 0.8483632802963257,
"learning_rate": 1.999558757784162e-05,
"loss": 0.351,
"step": 22
},
{
"epoch": 0.15632965165675447,
"grad_norm": 0.7444515228271484,
"learning_rate": 1.999310587559529e-05,
"loss": 0.3487,
"step": 23
},
{
"epoch": 0.1631265930331351,
"grad_norm": 0.8404412865638733,
"learning_rate": 1.999007296282201e-05,
"loss": 0.3439,
"step": 24
},
{
"epoch": 0.16992353440951571,
"grad_norm": 0.7445095181465149,
"learning_rate": 1.9986489006814454e-05,
"loss": 0.2972,
"step": 25
},
{
"epoch": 0.17672047578589634,
"grad_norm": 0.7440016269683838,
"learning_rate": 1.9982354205260347e-05,
"loss": 0.3391,
"step": 26
},
{
"epoch": 0.18351741716227699,
"grad_norm": 0.6863638162612915,
"learning_rate": 1.9977668786231536e-05,
"loss": 0.2993,
"step": 27
},
{
"epoch": 0.1903143585386576,
"grad_norm": 0.7364312410354614,
"learning_rate": 1.9972433008171417e-05,
"loss": 0.3169,
"step": 28
},
{
"epoch": 0.19711129991503823,
"grad_norm": 0.7718295454978943,
"learning_rate": 1.9966647159880703e-05,
"loss": 0.3291,
"step": 29
},
{
"epoch": 0.19711129991503823,
"eval_loss": 0.3171866536140442,
"eval_runtime": 25.1652,
"eval_samples_per_second": 19.869,
"eval_steps_per_second": 0.636,
"step": 29
},
{
"epoch": 0.20390824129141885,
"grad_norm": 0.7047903537750244,
"learning_rate": 1.9960311560501457e-05,
"loss": 0.3326,
"step": 30
},
{
"epoch": 0.2107051826677995,
"grad_norm": 0.7129018902778625,
"learning_rate": 1.9953426559499508e-05,
"loss": 0.3404,
"step": 31
},
{
"epoch": 0.21750212404418012,
"grad_norm": 0.740166187286377,
"learning_rate": 1.9945992536645188e-05,
"loss": 0.3283,
"step": 32
},
{
"epoch": 0.22429906542056074,
"grad_norm": 0.6873639225959778,
"learning_rate": 1.993800990199235e-05,
"loss": 0.3064,
"step": 33
},
{
"epoch": 0.23109600679694137,
"grad_norm": 0.7272984981536865,
"learning_rate": 1.9929479095855783e-05,
"loss": 0.3305,
"step": 34
},
{
"epoch": 0.23789294817332202,
"grad_norm": 0.7347909808158875,
"learning_rate": 1.99204005887869e-05,
"loss": 0.323,
"step": 35
},
{
"epoch": 0.24468988954970264,
"grad_norm": 0.7388918399810791,
"learning_rate": 1.9910774881547803e-05,
"loss": 0.3013,
"step": 36
},
{
"epoch": 0.25148683092608326,
"grad_norm": 0.702777087688446,
"learning_rate": 1.990060250508365e-05,
"loss": 0.3274,
"step": 37
},
{
"epoch": 0.2582837723024639,
"grad_norm": 0.7246643900871277,
"learning_rate": 1.9889884020493363e-05,
"loss": 0.3362,
"step": 38
},
{
"epoch": 0.2650807136788445,
"grad_norm": 0.6060255765914917,
"learning_rate": 1.9878620018998696e-05,
"loss": 0.287,
"step": 39
},
{
"epoch": 0.27187765505522515,
"grad_norm": 0.6417918801307678,
"learning_rate": 1.986681112191161e-05,
"loss": 0.2818,
"step": 40
},
{
"epoch": 0.2786745964316058,
"grad_norm": 0.7274541258811951,
"learning_rate": 1.98544579806e-05,
"loss": 0.3222,
"step": 41
},
{
"epoch": 0.2854715378079864,
"grad_norm": 0.7122014164924622,
"learning_rate": 1.984156127645178e-05,
"loss": 0.3392,
"step": 42
},
{
"epoch": 0.29226847918436705,
"grad_norm": 0.6548435688018799,
"learning_rate": 1.9828121720837288e-05,
"loss": 0.3056,
"step": 43
},
{
"epoch": 0.29906542056074764,
"grad_norm": 0.6896780133247375,
"learning_rate": 1.9814140055070044e-05,
"loss": 0.3107,
"step": 44
},
{
"epoch": 0.3058623619371283,
"grad_norm": 0.7305508255958557,
"learning_rate": 1.979961705036587e-05,
"loss": 0.3077,
"step": 45
},
{
"epoch": 0.31265930331350894,
"grad_norm": 0.6713555455207825,
"learning_rate": 1.9784553507800346e-05,
"loss": 0.2747,
"step": 46
},
{
"epoch": 0.31945624468988953,
"grad_norm": 0.7295777797698975,
"learning_rate": 1.9768950258264625e-05,
"loss": 0.3126,
"step": 47
},
{
"epoch": 0.3262531860662702,
"grad_norm": 0.6292139291763306,
"learning_rate": 1.975280816241959e-05,
"loss": 0.2966,
"step": 48
},
{
"epoch": 0.33305012744265083,
"grad_norm": 0.6628125309944153,
"learning_rate": 1.9736128110648407e-05,
"loss": 0.3266,
"step": 49
},
{
"epoch": 0.33984706881903143,
"grad_norm": 0.6593727469444275,
"learning_rate": 1.9718911023007382e-05,
"loss": 0.3055,
"step": 50
},
{
"epoch": 0.3466440101954121,
"grad_norm": 0.737305760383606,
"learning_rate": 1.970115784917523e-05,
"loss": 0.3038,
"step": 51
},
{
"epoch": 0.35344095157179267,
"grad_norm": 0.6167359948158264,
"learning_rate": 1.9682869568400683e-05,
"loss": 0.3112,
"step": 52
},
{
"epoch": 0.3602378929481733,
"grad_norm": 0.6573403477668762,
"learning_rate": 1.9664047189448496e-05,
"loss": 0.2971,
"step": 53
},
{
"epoch": 0.36703483432455397,
"grad_norm": 0.7991875410079956,
"learning_rate": 1.964469175054377e-05,
"loss": 0.3504,
"step": 54
},
{
"epoch": 0.37383177570093457,
"grad_norm": 0.6709067225456238,
"learning_rate": 1.9624804319314704e-05,
"loss": 0.3307,
"step": 55
},
{
"epoch": 0.3806287170773152,
"grad_norm": 0.5573310852050781,
"learning_rate": 1.9604385992733718e-05,
"loss": 0.2759,
"step": 56
},
{
"epoch": 0.3874256584536958,
"grad_norm": 0.6823217868804932,
"learning_rate": 1.9583437897056915e-05,
"loss": 0.2965,
"step": 57
},
{
"epoch": 0.39422259983007646,
"grad_norm": 0.6070125699043274,
"learning_rate": 1.9561961187761987e-05,
"loss": 0.2893,
"step": 58
},
{
"epoch": 0.39422259983007646,
"eval_loss": 0.29359060525894165,
"eval_runtime": 24.6748,
"eval_samples_per_second": 20.264,
"eval_steps_per_second": 0.648,
"step": 58
},
{
"epoch": 0.4010195412064571,
"grad_norm": 0.6562219858169556,
"learning_rate": 1.953995704948446e-05,
"loss": 0.2821,
"step": 59
},
{
"epoch": 0.4078164825828377,
"grad_norm": 0.6055108904838562,
"learning_rate": 1.9517426695952358e-05,
"loss": 0.297,
"step": 60
},
{
"epoch": 0.41461342395921835,
"grad_norm": 0.7652341723442078,
"learning_rate": 1.9494371369919253e-05,
"loss": 0.3092,
"step": 61
},
{
"epoch": 0.421410365335599,
"grad_norm": 0.612332284450531,
"learning_rate": 1.9470792343095718e-05,
"loss": 0.291,
"step": 62
},
{
"epoch": 0.4282073067119796,
"grad_norm": 0.5866986513137817,
"learning_rate": 1.944669091607919e-05,
"loss": 0.2586,
"step": 63
},
{
"epoch": 0.43500424808836025,
"grad_norm": 0.847610592842102,
"learning_rate": 1.9422068418282204e-05,
"loss": 0.3567,
"step": 64
},
{
"epoch": 0.44180118946474084,
"grad_norm": 0.6735373139381409,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.3018,
"step": 65
},
{
"epoch": 0.4485981308411215,
"grad_norm": 0.7430619597434998,
"learning_rate": 1.9371265671631038e-05,
"loss": 0.3245,
"step": 66
},
{
"epoch": 0.45539507221750214,
"grad_norm": 0.7295202016830444,
"learning_rate": 1.9345088225009626e-05,
"loss": 0.3464,
"step": 67
},
{
"epoch": 0.46219201359388273,
"grad_norm": 0.6956584453582764,
"learning_rate": 1.931839531191873e-05,
"loss": 0.3116,
"step": 68
},
{
"epoch": 0.4689889549702634,
"grad_norm": 0.7618936896324158,
"learning_rate": 1.9291188404714876e-05,
"loss": 0.2802,
"step": 69
},
{
"epoch": 0.47578589634664403,
"grad_norm": 0.6563156843185425,
"learning_rate": 1.926346900410604e-05,
"loss": 0.3162,
"step": 70
},
{
"epoch": 0.4825828377230246,
"grad_norm": 0.6584069728851318,
"learning_rate": 1.9235238639068855e-05,
"loss": 0.3147,
"step": 71
},
{
"epoch": 0.4893797790994053,
"grad_norm": 0.7286915183067322,
"learning_rate": 1.920649886676429e-05,
"loss": 0.3232,
"step": 72
},
{
"epoch": 0.49617672047578587,
"grad_norm": 0.6320315599441528,
"learning_rate": 1.9177251272451742e-05,
"loss": 0.2946,
"step": 73
},
{
"epoch": 0.5029736618521665,
"grad_norm": 0.7056065797805786,
"learning_rate": 1.914749746940161e-05,
"loss": 0.3148,
"step": 74
},
{
"epoch": 0.5097706032285472,
"grad_norm": 0.6424931287765503,
"learning_rate": 1.9117239098806296e-05,
"loss": 0.2757,
"step": 75
},
{
"epoch": 0.5165675446049278,
"grad_norm": 0.6416671872138977,
"learning_rate": 1.9086477829689688e-05,
"loss": 0.2962,
"step": 76
},
{
"epoch": 0.5233644859813084,
"grad_norm": 0.7531472444534302,
"learning_rate": 1.905521535881509e-05,
"loss": 0.3129,
"step": 77
},
{
"epoch": 0.530161427357689,
"grad_norm": 0.631662130355835,
"learning_rate": 1.902345341059164e-05,
"loss": 0.3076,
"step": 78
},
{
"epoch": 0.5369583687340697,
"grad_norm": 0.6587047576904297,
"learning_rate": 1.8991193736979176e-05,
"loss": 0.3031,
"step": 79
},
{
"epoch": 0.5437553101104503,
"grad_norm": 0.6439694762229919,
"learning_rate": 1.895843811739162e-05,
"loss": 0.2765,
"step": 80
},
{
"epoch": 0.550552251486831,
"grad_norm": 0.584459662437439,
"learning_rate": 1.8925188358598815e-05,
"loss": 0.2602,
"step": 81
},
{
"epoch": 0.5573491928632116,
"grad_norm": 0.6689626574516296,
"learning_rate": 1.8891446294626868e-05,
"loss": 0.2842,
"step": 82
},
{
"epoch": 0.5641461342395921,
"grad_norm": 0.664592981338501,
"learning_rate": 1.8857213786656986e-05,
"loss": 0.2827,
"step": 83
},
{
"epoch": 0.5709430756159728,
"grad_norm": 0.5949574708938599,
"learning_rate": 1.882249272292282e-05,
"loss": 0.2781,
"step": 84
},
{
"epoch": 0.5777400169923534,
"grad_norm": 0.6539214253425598,
"learning_rate": 1.87872850186063e-05,
"loss": 0.2772,
"step": 85
},
{
"epoch": 0.5845369583687341,
"grad_norm": 0.6778468489646912,
"learning_rate": 1.8751592615732007e-05,
"loss": 0.2938,
"step": 86
},
{
"epoch": 0.5913338997451147,
"grad_norm": 0.7007662057876587,
"learning_rate": 1.871541748306005e-05,
"loss": 0.2962,
"step": 87
},
{
"epoch": 0.5913338997451147,
"eval_loss": 0.288781076669693,
"eval_runtime": 24.6643,
"eval_samples_per_second": 20.272,
"eval_steps_per_second": 0.649,
"step": 87
},
{
"epoch": 0.5981308411214953,
"grad_norm": 0.6959559321403503,
"learning_rate": 1.867876161597747e-05,
"loss": 0.2944,
"step": 88
},
{
"epoch": 0.6049277824978759,
"grad_norm": 0.6510220766067505,
"learning_rate": 1.8641627036388168e-05,
"loss": 0.2974,
"step": 89
},
{
"epoch": 0.6117247238742566,
"grad_norm": 0.6992867588996887,
"learning_rate": 1.8604015792601395e-05,
"loss": 0.2915,
"step": 90
},
{
"epoch": 0.6185216652506372,
"grad_norm": 0.6997932195663452,
"learning_rate": 1.856592995921876e-05,
"loss": 0.2795,
"step": 91
},
{
"epoch": 0.6253186066270179,
"grad_norm": 0.747304379940033,
"learning_rate": 1.852737163701979e-05,
"loss": 0.2991,
"step": 92
},
{
"epoch": 0.6321155480033984,
"grad_norm": 0.6337850093841553,
"learning_rate": 1.8488342952846074e-05,
"loss": 0.2816,
"step": 93
},
{
"epoch": 0.6389124893797791,
"grad_norm": 0.6768142580986023,
"learning_rate": 1.844884605948392e-05,
"loss": 0.2882,
"step": 94
},
{
"epoch": 0.6457094307561597,
"grad_norm": 0.6271469593048096,
"learning_rate": 1.8408883135545634e-05,
"loss": 0.2767,
"step": 95
},
{
"epoch": 0.6525063721325404,
"grad_norm": 0.751917839050293,
"learning_rate": 1.8368456385349333e-05,
"loss": 0.3126,
"step": 96
},
{
"epoch": 0.659303313508921,
"grad_norm": 0.6843672394752502,
"learning_rate": 1.832756803879737e-05,
"loss": 0.3092,
"step": 97
},
{
"epoch": 0.6661002548853017,
"grad_norm": 0.6191291213035583,
"learning_rate": 1.8286220351253324e-05,
"loss": 0.27,
"step": 98
},
{
"epoch": 0.6728971962616822,
"grad_norm": 0.7665583491325378,
"learning_rate": 1.8244415603417603e-05,
"loss": 0.3247,
"step": 99
},
{
"epoch": 0.6796941376380629,
"grad_norm": 0.5895417332649231,
"learning_rate": 1.8202156101201646e-05,
"loss": 0.2967,
"step": 100
},
{
"epoch": 0.6864910790144435,
"grad_norm": 0.697557270526886,
"learning_rate": 1.8159444175600706e-05,
"loss": 0.3319,
"step": 101
},
{
"epoch": 0.6932880203908242,
"grad_norm": 0.6296725273132324,
"learning_rate": 1.8116282182565313e-05,
"loss": 0.3177,
"step": 102
},
{
"epoch": 0.7000849617672048,
"grad_norm": 0.5775023102760315,
"learning_rate": 1.8072672502871295e-05,
"loss": 0.2712,
"step": 103
},
{
"epoch": 0.7068819031435853,
"grad_norm": 0.5521108508110046,
"learning_rate": 1.802861754198847e-05,
"loss": 0.2724,
"step": 104
},
{
"epoch": 0.713678844519966,
"grad_norm": 0.7251706719398499,
"learning_rate": 1.7984119729947944e-05,
"loss": 0.3194,
"step": 105
},
{
"epoch": 0.7204757858963466,
"grad_norm": 0.6509170532226562,
"learning_rate": 1.79391815212081e-05,
"loss": 0.2859,
"step": 106
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.7021235227584839,
"learning_rate": 1.7893805394519187e-05,
"loss": 0.3059,
"step": 107
},
{
"epoch": 0.7340696686491079,
"grad_norm": 0.6381179690361023,
"learning_rate": 1.7847993852786612e-05,
"loss": 0.2556,
"step": 108
},
{
"epoch": 0.7408666100254885,
"grad_norm": 0.6494466662406921,
"learning_rate": 1.780174942293287e-05,
"loss": 0.2933,
"step": 109
},
{
"epoch": 0.7476635514018691,
"grad_norm": 0.7242010831832886,
"learning_rate": 1.7755074655758174e-05,
"loss": 0.3111,
"step": 110
},
{
"epoch": 0.7544604927782498,
"grad_norm": 0.6928083896636963,
"learning_rate": 1.7707972125799738e-05,
"loss": 0.2913,
"step": 111
},
{
"epoch": 0.7612574341546304,
"grad_norm": 0.6862010955810547,
"learning_rate": 1.766044443118978e-05,
"loss": 0.2961,
"step": 112
},
{
"epoch": 0.7680543755310111,
"grad_norm": 0.622751772403717,
"learning_rate": 1.761249419351222e-05,
"loss": 0.2581,
"step": 113
},
{
"epoch": 0.7748513169073916,
"grad_norm": 0.6430375576019287,
"learning_rate": 1.7564124057658057e-05,
"loss": 0.2817,
"step": 114
},
{
"epoch": 0.7816482582837723,
"grad_norm": 0.5642279386520386,
"learning_rate": 1.7515336691679478e-05,
"loss": 0.2759,
"step": 115
},
{
"epoch": 0.7884451996601529,
"grad_norm": 0.5759322047233582,
"learning_rate": 1.746613478664271e-05,
"loss": 0.2708,
"step": 116
},
{
"epoch": 0.7884451996601529,
"eval_loss": 0.28095486760139465,
"eval_runtime": 24.6484,
"eval_samples_per_second": 20.285,
"eval_steps_per_second": 0.649,
"step": 116
},
{
"epoch": 0.7952421410365336,
"grad_norm": 0.6173487305641174,
"learning_rate": 1.7416521056479577e-05,
"loss": 0.3005,
"step": 117
},
{
"epoch": 0.8020390824129142,
"grad_norm": 0.6516419649124146,
"learning_rate": 1.736649823783779e-05,
"loss": 0.2788,
"step": 118
},
{
"epoch": 0.8088360237892949,
"grad_norm": 0.6238332986831665,
"learning_rate": 1.7316069089930007e-05,
"loss": 0.2789,
"step": 119
},
{
"epoch": 0.8156329651656754,
"grad_norm": 0.5984514355659485,
"learning_rate": 1.7265236394381634e-05,
"loss": 0.3,
"step": 120
},
{
"epoch": 0.822429906542056,
"grad_norm": 0.6230698823928833,
"learning_rate": 1.7214002955077394e-05,
"loss": 0.2579,
"step": 121
},
{
"epoch": 0.8292268479184367,
"grad_norm": 0.6018764972686768,
"learning_rate": 1.7162371598006668e-05,
"loss": 0.3065,
"step": 122
},
{
"epoch": 0.8360237892948174,
"grad_norm": 0.6184946298599243,
"learning_rate": 1.711034517110761e-05,
"loss": 0.2699,
"step": 123
},
{
"epoch": 0.842820730671198,
"grad_norm": 0.5912549495697021,
"learning_rate": 1.705792654411007e-05,
"loss": 0.2656,
"step": 124
},
{
"epoch": 0.8496176720475785,
"grad_norm": 0.6374722123146057,
"learning_rate": 1.7005118608377288e-05,
"loss": 0.3074,
"step": 125
},
{
"epoch": 0.8564146134239592,
"grad_norm": 0.6522223353385925,
"learning_rate": 1.6951924276746425e-05,
"loss": 0.2842,
"step": 126
},
{
"epoch": 0.8632115548003398,
"grad_norm": 0.6337326765060425,
"learning_rate": 1.689834648336787e-05,
"loss": 0.3009,
"step": 127
},
{
"epoch": 0.8700084961767205,
"grad_norm": 0.6591935753822327,
"learning_rate": 1.6844388183543418e-05,
"loss": 0.2758,
"step": 128
},
{
"epoch": 0.8768054375531011,
"grad_norm": 0.71869957447052,
"learning_rate": 1.6790052353563254e-05,
"loss": 0.3036,
"step": 129
},
{
"epoch": 0.8836023789294817,
"grad_norm": 0.6463243961334229,
"learning_rate": 1.6735341990541766e-05,
"loss": 0.2749,
"step": 130
},
{
"epoch": 0.8903993203058623,
"grad_norm": 0.5988742709159851,
"learning_rate": 1.6680260112252253e-05,
"loss": 0.2857,
"step": 131
},
{
"epoch": 0.897196261682243,
"grad_norm": 0.5640173554420471,
"learning_rate": 1.6624809756960445e-05,
"loss": 0.265,
"step": 132
},
{
"epoch": 0.9039932030586236,
"grad_norm": 0.6231476068496704,
"learning_rate": 1.656899398325693e-05,
"loss": 0.2837,
"step": 133
},
{
"epoch": 0.9107901444350043,
"grad_norm": 0.644237220287323,
"learning_rate": 1.651281586988844e-05,
"loss": 0.2911,
"step": 134
},
{
"epoch": 0.9175870858113849,
"grad_norm": 0.6296971440315247,
"learning_rate": 1.6456278515588023e-05,
"loss": 0.3025,
"step": 135
},
{
"epoch": 0.9243840271877655,
"grad_norm": 0.6154587864875793,
"learning_rate": 1.639938503890414e-05,
"loss": 0.2427,
"step": 136
},
{
"epoch": 0.9311809685641461,
"grad_norm": 0.6521453261375427,
"learning_rate": 1.6342138578028613e-05,
"loss": 0.2742,
"step": 137
},
{
"epoch": 0.9379779099405268,
"grad_norm": 0.7209259271621704,
"learning_rate": 1.6284542290623568e-05,
"loss": 0.3017,
"step": 138
},
{
"epoch": 0.9447748513169074,
"grad_norm": 0.6163744330406189,
"learning_rate": 1.622659935364723e-05,
"loss": 0.2779,
"step": 139
},
{
"epoch": 0.9515717926932881,
"grad_norm": 0.622027575969696,
"learning_rate": 1.61683129631787e-05,
"loss": 0.2767,
"step": 140
},
{
"epoch": 0.9583687340696686,
"grad_norm": 0.6272425055503845,
"learning_rate": 1.6109686334241655e-05,
"loss": 0.2644,
"step": 141
},
{
"epoch": 0.9651656754460493,
"grad_norm": 0.5646295547485352,
"learning_rate": 1.605072270062701e-05,
"loss": 0.2589,
"step": 142
},
{
"epoch": 0.9719626168224299,
"grad_norm": 0.6147757768630981,
"learning_rate": 1.599142531471456e-05,
"loss": 0.29,
"step": 143
},
{
"epoch": 0.9787595581988106,
"grad_norm": 0.610895574092865,
"learning_rate": 1.5931797447293553e-05,
"loss": 0.262,
"step": 144
},
{
"epoch": 0.9855564995751912,
"grad_norm": 0.5855327248573303,
"learning_rate": 1.5871842387382307e-05,
"loss": 0.2435,
"step": 145
},
{
"epoch": 0.9855564995751912,
"eval_loss": 0.27577897906303406,
"eval_runtime": 24.6998,
"eval_samples_per_second": 20.243,
"eval_steps_per_second": 0.648,
"step": 145
},
{
"epoch": 0.9923534409515717,
"grad_norm": 0.6426906585693359,
"learning_rate": 1.5811563442046768e-05,
"loss": 0.2755,
"step": 146
},
{
"epoch": 0.9991503823279524,
"grad_norm": 0.5879192352294922,
"learning_rate": 1.5750963936218104e-05,
"loss": 0.2515,
"step": 147
}
],
"logging_steps": 1.0,
"max_steps": 441,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10.0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.877564828072018e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}