{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.46448087431694,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0546448087431694,
"grad_norm": 2.1467645168304443,
"learning_rate": 0.0004973224043715847,
"loss": 5.4078,
"step": 50
},
{
"epoch": 0.1092896174863388,
"grad_norm": 0.8721267580986023,
"learning_rate": 0.0004945901639344262,
"loss": 1.2654,
"step": 100
},
{
"epoch": 0.16393442622950818,
"grad_norm": 0.8103071451187134,
"learning_rate": 0.0004918579234972678,
"loss": 1.0538,
"step": 150
},
{
"epoch": 0.2185792349726776,
"grad_norm": 1.3929318189620972,
"learning_rate": 0.0004891256830601093,
"loss": 1.0236,
"step": 200
},
{
"epoch": 0.273224043715847,
"grad_norm": 0.970960259437561,
"learning_rate": 0.00048639344262295083,
"loss": 1.075,
"step": 250
},
{
"epoch": 0.32786885245901637,
"grad_norm": 0.9727946519851685,
"learning_rate": 0.00048366120218579234,
"loss": 1.0434,
"step": 300
},
{
"epoch": 0.3825136612021858,
"grad_norm": 0.6411604285240173,
"learning_rate": 0.0004809289617486339,
"loss": 0.9285,
"step": 350
},
{
"epoch": 0.4371584699453552,
"grad_norm": 0.8864787817001343,
"learning_rate": 0.0004781967213114754,
"loss": 0.9315,
"step": 400
},
{
"epoch": 0.4918032786885246,
"grad_norm": 0.8886504173278809,
"learning_rate": 0.0004754644808743169,
"loss": 0.8818,
"step": 450
},
{
"epoch": 0.546448087431694,
"grad_norm": 0.7289233803749084,
"learning_rate": 0.0004727322404371585,
"loss": 0.92,
"step": 500
},
{
"epoch": 0.6010928961748634,
"grad_norm": 1.0301029682159424,
"learning_rate": 0.00047,
"loss": 0.8222,
"step": 550
},
{
"epoch": 0.6557377049180327,
"grad_norm": 0.808475136756897,
"learning_rate": 0.00046726775956284155,
"loss": 0.8767,
"step": 600
},
{
"epoch": 0.7103825136612022,
"grad_norm": 0.8232861161231995,
"learning_rate": 0.0004645355191256831,
"loss": 0.835,
"step": 650
},
{
"epoch": 0.7650273224043715,
"grad_norm": 0.9552908539772034,
"learning_rate": 0.0004618032786885246,
"loss": 0.8203,
"step": 700
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.7888686656951904,
"learning_rate": 0.00045907103825136613,
"loss": 0.8296,
"step": 750
},
{
"epoch": 0.8743169398907104,
"grad_norm": 0.7473599910736084,
"learning_rate": 0.0004563387978142077,
"loss": 0.7801,
"step": 800
},
{
"epoch": 0.9289617486338798,
"grad_norm": 0.8543986678123474,
"learning_rate": 0.0004536065573770492,
"loss": 0.9248,
"step": 850
},
{
"epoch": 0.9836065573770492,
"grad_norm": 0.7910193204879761,
"learning_rate": 0.0004508743169398907,
"loss": 0.8386,
"step": 900
},
{
"epoch": 1.0,
"eval_loss": 0.6989373564720154,
"eval_runtime": 35.1236,
"eval_samples_per_second": 23.175,
"eval_steps_per_second": 2.904,
"step": 915
},
{
"epoch": 1.0382513661202186,
"grad_norm": 1.1451815366744995,
"learning_rate": 0.00044814207650273227,
"loss": 0.7198,
"step": 950
},
{
"epoch": 1.092896174863388,
"grad_norm": 0.7096536755561829,
"learning_rate": 0.0004454098360655738,
"loss": 0.7374,
"step": 1000
},
{
"epoch": 1.1475409836065573,
"grad_norm": 0.8436596393585205,
"learning_rate": 0.0004426775956284153,
"loss": 0.7471,
"step": 1050
},
{
"epoch": 1.2021857923497268,
"grad_norm": 0.8844044208526611,
"learning_rate": 0.00043994535519125685,
"loss": 0.7072,
"step": 1100
},
{
"epoch": 1.2568306010928962,
"grad_norm": 0.9547187089920044,
"learning_rate": 0.00043721311475409836,
"loss": 0.7618,
"step": 1150
},
{
"epoch": 1.3114754098360657,
"grad_norm": 0.8470160961151123,
"learning_rate": 0.00043448087431693987,
"loss": 0.7719,
"step": 1200
},
{
"epoch": 1.366120218579235,
"grad_norm": 0.8797179460525513,
"learning_rate": 0.00043174863387978143,
"loss": 0.7172,
"step": 1250
},
{
"epoch": 1.4207650273224044,
"grad_norm": 0.6831291317939758,
"learning_rate": 0.00042901639344262294,
"loss": 0.7254,
"step": 1300
},
{
"epoch": 1.4754098360655736,
"grad_norm": 0.7458399534225464,
"learning_rate": 0.00042628415300546445,
"loss": 0.7219,
"step": 1350
},
{
"epoch": 1.530054644808743,
"grad_norm": 0.7829724550247192,
"learning_rate": 0.000423551912568306,
"loss": 0.6969,
"step": 1400
},
{
"epoch": 1.5846994535519126,
"grad_norm": 0.914277970790863,
"learning_rate": 0.0004208196721311475,
"loss": 0.7088,
"step": 1450
},
{
"epoch": 1.639344262295082,
"grad_norm": 0.9189252853393555,
"learning_rate": 0.00041808743169398913,
"loss": 0.7574,
"step": 1500
},
{
"epoch": 1.6939890710382515,
"grad_norm": 0.8233757019042969,
"learning_rate": 0.00041535519125683064,
"loss": 0.7248,
"step": 1550
},
{
"epoch": 1.748633879781421,
"grad_norm": 0.5294966101646423,
"learning_rate": 0.00041262295081967215,
"loss": 0.7173,
"step": 1600
},
{
"epoch": 1.8032786885245902,
"grad_norm": 0.7432788014411926,
"learning_rate": 0.0004098907103825137,
"loss": 0.731,
"step": 1650
},
{
"epoch": 1.8579234972677594,
"grad_norm": 0.9326347708702087,
"learning_rate": 0.0004071584699453552,
"loss": 0.6559,
"step": 1700
},
{
"epoch": 1.9125683060109289,
"grad_norm": 0.6357129216194153,
"learning_rate": 0.00040442622950819673,
"loss": 0.7079,
"step": 1750
},
{
"epoch": 1.9672131147540983,
"grad_norm": 0.6454396843910217,
"learning_rate": 0.0004016939890710383,
"loss": 0.6284,
"step": 1800
},
{
"epoch": 2.0,
"eval_loss": 0.6172024607658386,
"eval_runtime": 34.4665,
"eval_samples_per_second": 23.617,
"eval_steps_per_second": 2.959,
"step": 1830
},
{
"epoch": 2.021857923497268,
"grad_norm": 0.9404008388519287,
"learning_rate": 0.0003989617486338798,
"loss": 0.6996,
"step": 1850
},
{
"epoch": 2.0765027322404372,
"grad_norm": 0.7585016489028931,
"learning_rate": 0.0003962295081967213,
"loss": 0.6634,
"step": 1900
},
{
"epoch": 2.1311475409836067,
"grad_norm": 0.9319397807121277,
"learning_rate": 0.00039349726775956287,
"loss": 0.6439,
"step": 1950
},
{
"epoch": 2.185792349726776,
"grad_norm": 0.7812663912773132,
"learning_rate": 0.0003907650273224044,
"loss": 0.6807,
"step": 2000
},
{
"epoch": 2.240437158469945,
"grad_norm": 0.8016160726547241,
"learning_rate": 0.0003880327868852459,
"loss": 0.6121,
"step": 2050
},
{
"epoch": 2.2950819672131146,
"grad_norm": 0.5846936106681824,
"learning_rate": 0.00038530054644808745,
"loss": 0.6501,
"step": 2100
},
{
"epoch": 2.349726775956284,
"grad_norm": 0.6993207931518555,
"learning_rate": 0.00038256830601092896,
"loss": 0.6242,
"step": 2150
},
{
"epoch": 2.4043715846994536,
"grad_norm": 0.5866222381591797,
"learning_rate": 0.00037983606557377047,
"loss": 0.5766,
"step": 2200
},
{
"epoch": 2.459016393442623,
"grad_norm": 0.8375122547149658,
"learning_rate": 0.00037710382513661203,
"loss": 0.6395,
"step": 2250
},
{
"epoch": 2.5136612021857925,
"grad_norm": 0.9567583799362183,
"learning_rate": 0.00037437158469945354,
"loss": 0.6829,
"step": 2300
},
{
"epoch": 2.5683060109289615,
"grad_norm": 0.829088032245636,
"learning_rate": 0.00037163934426229505,
"loss": 0.6138,
"step": 2350
},
{
"epoch": 2.6229508196721314,
"grad_norm": 0.7738655805587769,
"learning_rate": 0.00036890710382513666,
"loss": 0.5961,
"step": 2400
},
{
"epoch": 2.6775956284153004,
"grad_norm": 0.6849051117897034,
"learning_rate": 0.00036617486338797817,
"loss": 0.6158,
"step": 2450
},
{
"epoch": 2.73224043715847,
"grad_norm": 0.6353682279586792,
"learning_rate": 0.0003634426229508197,
"loss": 0.6077,
"step": 2500
},
{
"epoch": 2.7868852459016393,
"grad_norm": 0.6507243514060974,
"learning_rate": 0.00036071038251366124,
"loss": 0.5881,
"step": 2550
},
{
"epoch": 2.841530054644809,
"grad_norm": 0.7680765390396118,
"learning_rate": 0.00035797814207650275,
"loss": 0.64,
"step": 2600
},
{
"epoch": 2.8961748633879782,
"grad_norm": 0.8768549561500549,
"learning_rate": 0.00035524590163934426,
"loss": 0.5946,
"step": 2650
},
{
"epoch": 2.9508196721311473,
"grad_norm": 0.9345018267631531,
"learning_rate": 0.0003525136612021858,
"loss": 0.5986,
"step": 2700
},
{
"epoch": 3.0,
"eval_loss": 0.5772798657417297,
"eval_runtime": 34.1537,
"eval_samples_per_second": 23.833,
"eval_steps_per_second": 2.986,
"step": 2745
},
{
"epoch": 3.0054644808743167,
"grad_norm": 0.8109590411186218,
"learning_rate": 0.00034978142076502733,
"loss": 0.6056,
"step": 2750
},
{
"epoch": 3.060109289617486,
"grad_norm": 1.1651564836502075,
"learning_rate": 0.00034704918032786884,
"loss": 0.5321,
"step": 2800
},
{
"epoch": 3.1147540983606556,
"grad_norm": 0.9508790969848633,
"learning_rate": 0.0003443169398907104,
"loss": 0.5405,
"step": 2850
},
{
"epoch": 3.169398907103825,
"grad_norm": 0.7765358686447144,
"learning_rate": 0.0003415846994535519,
"loss": 0.6053,
"step": 2900
},
{
"epoch": 3.2240437158469946,
"grad_norm": 1.0186572074890137,
"learning_rate": 0.0003388524590163934,
"loss": 0.6171,
"step": 2950
},
{
"epoch": 3.278688524590164,
"grad_norm": 0.9198819398880005,
"learning_rate": 0.000336120218579235,
"loss": 0.5755,
"step": 3000
},
{
"epoch": 3.3333333333333335,
"grad_norm": 1.0580278635025024,
"learning_rate": 0.0003333879781420765,
"loss": 0.5521,
"step": 3050
},
{
"epoch": 3.387978142076503,
"grad_norm": 0.637541651725769,
"learning_rate": 0.00033065573770491805,
"loss": 0.5288,
"step": 3100
},
{
"epoch": 3.442622950819672,
"grad_norm": 1.3321665525436401,
"learning_rate": 0.00032792349726775956,
"loss": 0.57,
"step": 3150
},
{
"epoch": 3.4972677595628414,
"grad_norm": 1.1390025615692139,
"learning_rate": 0.00032519125683060107,
"loss": 0.588,
"step": 3200
},
{
"epoch": 3.551912568306011,
"grad_norm": 0.6929836273193359,
"learning_rate": 0.00032245901639344263,
"loss": 0.5508,
"step": 3250
},
{
"epoch": 3.6065573770491803,
"grad_norm": 0.6794707179069519,
"learning_rate": 0.00031972677595628414,
"loss": 0.5284,
"step": 3300
},
{
"epoch": 3.66120218579235,
"grad_norm": 0.7672021985054016,
"learning_rate": 0.0003169945355191257,
"loss": 0.5968,
"step": 3350
},
{
"epoch": 3.7158469945355193,
"grad_norm": 1.0557798147201538,
"learning_rate": 0.00031426229508196726,
"loss": 0.5586,
"step": 3400
},
{
"epoch": 3.7704918032786887,
"grad_norm": 0.7945201992988586,
"learning_rate": 0.00031153005464480877,
"loss": 0.5042,
"step": 3450
},
{
"epoch": 3.8251366120218577,
"grad_norm": 0.49171268939971924,
"learning_rate": 0.0003087978142076503,
"loss": 0.5695,
"step": 3500
},
{
"epoch": 3.879781420765027,
"grad_norm": 1.0232176780700684,
"learning_rate": 0.00030606557377049184,
"loss": 0.5511,
"step": 3550
},
{
"epoch": 3.9344262295081966,
"grad_norm": 0.6561440229415894,
"learning_rate": 0.00030333333333333335,
"loss": 0.5256,
"step": 3600
},
{
"epoch": 3.989071038251366,
"grad_norm": 0.6772050261497498,
"learning_rate": 0.00030060109289617486,
"loss": 0.5752,
"step": 3650
},
{
"epoch": 4.0,
"eval_loss": 0.5453636050224304,
"eval_runtime": 18.8015,
"eval_samples_per_second": 43.294,
"eval_steps_per_second": 5.425,
"step": 3660
},
{
"epoch": 4.043715846994536,
"grad_norm": 0.9626933336257935,
"learning_rate": 0.0002978688524590164,
"loss": 0.5659,
"step": 3700
},
{
"epoch": 4.098360655737705,
"grad_norm": 0.6350908875465393,
"learning_rate": 0.00029513661202185793,
"loss": 0.4663,
"step": 3750
},
{
"epoch": 4.1530054644808745,
"grad_norm": 0.862783670425415,
"learning_rate": 0.00029240437158469944,
"loss": 0.5265,
"step": 3800
},
{
"epoch": 4.2076502732240435,
"grad_norm": 0.845670223236084,
"learning_rate": 0.000289672131147541,
"loss": 0.5029,
"step": 3850
},
{
"epoch": 4.262295081967213,
"grad_norm": 0.5551162958145142,
"learning_rate": 0.0002869398907103825,
"loss": 0.473,
"step": 3900
},
{
"epoch": 4.316939890710382,
"grad_norm": 0.8691427707672119,
"learning_rate": 0.000284207650273224,
"loss": 0.5257,
"step": 3950
},
{
"epoch": 4.371584699453552,
"grad_norm": 0.7771849036216736,
"learning_rate": 0.0002814754098360656,
"loss": 0.4816,
"step": 4000
},
{
"epoch": 4.426229508196721,
"grad_norm": 0.8042870163917542,
"learning_rate": 0.0002787431693989071,
"loss": 0.5424,
"step": 4050
},
{
"epoch": 4.48087431693989,
"grad_norm": 0.9805220365524292,
"learning_rate": 0.0002760109289617486,
"loss": 0.5323,
"step": 4100
},
{
"epoch": 4.53551912568306,
"grad_norm": 0.7407246828079224,
"learning_rate": 0.00027327868852459016,
"loss": 0.5581,
"step": 4150
},
{
"epoch": 4.590163934426229,
"grad_norm": 0.7939064502716064,
"learning_rate": 0.00027054644808743167,
"loss": 0.4608,
"step": 4200
},
{
"epoch": 4.644808743169399,
"grad_norm": 0.8826588988304138,
"learning_rate": 0.00026781420765027323,
"loss": 0.5058,
"step": 4250
},
{
"epoch": 4.699453551912568,
"grad_norm": 0.8143342137336731,
"learning_rate": 0.0002650819672131148,
"loss": 0.5476,
"step": 4300
},
{
"epoch": 4.754098360655737,
"grad_norm": 0.6828013062477112,
"learning_rate": 0.0002623497267759563,
"loss": 0.5289,
"step": 4350
},
{
"epoch": 4.808743169398907,
"grad_norm": 0.8838549256324768,
"learning_rate": 0.0002596174863387978,
"loss": 0.4789,
"step": 4400
},
{
"epoch": 4.863387978142076,
"grad_norm": 1.089988350868225,
"learning_rate": 0.0002568852459016394,
"loss": 0.5388,
"step": 4450
},
{
"epoch": 4.918032786885246,
"grad_norm": 0.7895328998565674,
"learning_rate": 0.0002541530054644809,
"loss": 0.4863,
"step": 4500
},
{
"epoch": 4.972677595628415,
"grad_norm": 0.7219722270965576,
"learning_rate": 0.00025142076502732244,
"loss": 0.4884,
"step": 4550
},
{
"epoch": 5.0,
"eval_loss": 0.5269655585289001,
"eval_runtime": 82.4268,
"eval_samples_per_second": 9.875,
"eval_steps_per_second": 1.237,
"step": 4575
},
{
"epoch": 5.027322404371585,
"grad_norm": 0.611998975276947,
"learning_rate": 0.00024868852459016395,
"loss": 0.5115,
"step": 4600
},
{
"epoch": 5.081967213114754,
"grad_norm": 0.8524764776229858,
"learning_rate": 0.00024595628415300546,
"loss": 0.4502,
"step": 4650
},
{
"epoch": 5.136612021857924,
"grad_norm": 1.2595255374908447,
"learning_rate": 0.000243224043715847,
"loss": 0.4866,
"step": 4700
},
{
"epoch": 5.191256830601093,
"grad_norm": 0.887760579586029,
"learning_rate": 0.00024049180327868853,
"loss": 0.4813,
"step": 4750
},
{
"epoch": 5.245901639344262,
"grad_norm": 0.6199231147766113,
"learning_rate": 0.00023775956284153004,
"loss": 0.476,
"step": 4800
},
{
"epoch": 5.300546448087432,
"grad_norm": 0.8007299304008484,
"learning_rate": 0.00023502732240437158,
"loss": 0.4773,
"step": 4850
},
{
"epoch": 5.355191256830601,
"grad_norm": 0.5639681816101074,
"learning_rate": 0.0002322950819672131,
"loss": 0.4903,
"step": 4900
},
{
"epoch": 5.409836065573771,
"grad_norm": 0.980188250541687,
"learning_rate": 0.00022956284153005467,
"loss": 0.5212,
"step": 4950
},
{
"epoch": 5.46448087431694,
"grad_norm": 0.8411896228790283,
"learning_rate": 0.00022683060109289618,
"loss": 0.4216,
"step": 5000
}
],
"logging_steps": 50,
"max_steps": 9150,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7435620188160000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}