{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 50,
"global_step": 2089,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09573958831977022,
"grad_norm": 0.8935234546661377,
"learning_rate": 2.3467432950191573e-06,
"loss": 0.7777,
"step": 50
},
{
"epoch": 0.19147917663954045,
"grad_norm": 1.103339433670044,
"learning_rate": 4.741379310344828e-06,
"loss": 0.5384,
"step": 100
},
{
"epoch": 0.2872187649593107,
"grad_norm": 0.6603535413742065,
"learning_rate": 7.088122605363985e-06,
"loss": 0.3699,
"step": 150
},
{
"epoch": 0.3829583532790809,
"grad_norm": 0.4637402892112732,
"learning_rate": 9.434865900383142e-06,
"loss": 0.3241,
"step": 200
},
{
"epoch": 0.47869794159885115,
"grad_norm": 0.5532308220863342,
"learning_rate": 1.1829501915708814e-05,
"loss": 0.2858,
"step": 250
},
{
"epoch": 0.5744375299186214,
"grad_norm": 0.7322471141815186,
"learning_rate": 1.4224137931034485e-05,
"loss": 0.2893,
"step": 300
},
{
"epoch": 0.6701771182383915,
"grad_norm": 0.6479516625404358,
"learning_rate": 1.6618773946360154e-05,
"loss": 0.2783,
"step": 350
},
{
"epoch": 0.7659167065581618,
"grad_norm": 0.8326407074928284,
"learning_rate": 1.9013409961685824e-05,
"loss": 0.2604,
"step": 400
},
{
"epoch": 0.861656294877932,
"grad_norm": 0.49058330059051514,
"learning_rate": 2.1408045977011497e-05,
"loss": 0.275,
"step": 450
},
{
"epoch": 0.9573958831977023,
"grad_norm": 0.7281334400177002,
"learning_rate": 2.3802681992337166e-05,
"loss": 0.2373,
"step": 500
},
{
"epoch": 0.9995213020584012,
"eval_loss": 0.4093719720840454,
"eval_runtime": 193.3529,
"eval_samples_per_second": 1.205,
"eval_steps_per_second": 0.155,
"step": 522
},
{
"epoch": 1.0531354715174726,
"grad_norm": 0.5162340402603149,
"learning_rate": 2.6197318007662836e-05,
"loss": 0.2351,
"step": 550
},
{
"epoch": 1.1488750598372426,
"grad_norm": 0.5875126719474792,
"learning_rate": 2.859195402298851e-05,
"loss": 0.2431,
"step": 600
},
{
"epoch": 1.2446146481570128,
"grad_norm": 0.6982198357582092,
"learning_rate": 3.098659003831418e-05,
"loss": 0.2489,
"step": 650
},
{
"epoch": 1.340354236476783,
"grad_norm": 0.4876953065395355,
"learning_rate": 3.338122605363985e-05,
"loss": 0.2226,
"step": 700
},
{
"epoch": 1.4360938247965533,
"grad_norm": 0.5332165360450745,
"learning_rate": 3.5775862068965524e-05,
"loss": 0.2378,
"step": 750
},
{
"epoch": 1.5318334131163236,
"grad_norm": 0.7943192720413208,
"learning_rate": 3.817049808429119e-05,
"loss": 0.2367,
"step": 800
},
{
"epoch": 1.6275730014360938,
"grad_norm": 0.8393548727035522,
"learning_rate": 4.056513409961686e-05,
"loss": 0.2482,
"step": 850
},
{
"epoch": 1.723312589755864,
"grad_norm": 0.5985046625137329,
"learning_rate": 4.295977011494253e-05,
"loss": 0.2241,
"step": 900
},
{
"epoch": 1.8190521780756344,
"grad_norm": 0.4653654992580414,
"learning_rate": 4.53544061302682e-05,
"loss": 0.2273,
"step": 950
},
{
"epoch": 1.9147917663954046,
"grad_norm": 0.6886143088340759,
"learning_rate": 4.774904214559387e-05,
"loss": 0.2302,
"step": 1000
},
{
"epoch": 1.9990426041168023,
"eval_loss": 0.38943055272102356,
"eval_runtime": 192.1581,
"eval_samples_per_second": 1.213,
"eval_steps_per_second": 0.156,
"step": 1044
},
{
"epoch": 2.010531354715175,
"grad_norm": 0.385065495967865,
"learning_rate": 4.9999936330549235e-05,
"loss": 0.2268,
"step": 1050
},
{
"epoch": 2.106270943034945,
"grad_norm": 0.5849937796592712,
"learning_rate": 4.998013068103311e-05,
"loss": 0.2052,
"step": 1100
},
{
"epoch": 2.2020105313547154,
"grad_norm": 0.7700575590133667,
"learning_rate": 4.992498537912657e-05,
"loss": 0.2325,
"step": 1150
},
{
"epoch": 2.297750119674485,
"grad_norm": 0.5966218113899231,
"learning_rate": 4.983457843946554e-05,
"loss": 0.2196,
"step": 1200
},
{
"epoch": 2.393489707994256,
"grad_norm": 0.7738291025161743,
"learning_rate": 4.970903776169402e-05,
"loss": 0.2132,
"step": 1250
},
{
"epoch": 2.4892292963140257,
"grad_norm": 0.4848026931285858,
"learning_rate": 4.954854094952314e-05,
"loss": 0.2125,
"step": 1300
},
{
"epoch": 2.584968884633796,
"grad_norm": 0.42147791385650635,
"learning_rate": 4.9353315059473295e-05,
"loss": 0.1996,
"step": 1350
},
{
"epoch": 2.680708472953566,
"grad_norm": 0.823914647102356,
"learning_rate": 4.912363627965497e-05,
"loss": 0.2075,
"step": 1400
},
{
"epoch": 2.7764480612733364,
"grad_norm": 0.4025530517101288,
"learning_rate": 4.8859829539042466e-05,
"loss": 0.2073,
"step": 1450
},
{
"epoch": 2.8721876495931067,
"grad_norm": 0.4118387699127197,
"learning_rate": 4.85622680477935e-05,
"loss": 0.1986,
"step": 1500
},
{
"epoch": 2.967927237912877,
"grad_norm": 0.47056856751441956,
"learning_rate": 4.823137276926492e-05,
"loss": 0.2151,
"step": 1550
},
{
"epoch": 2.998563906175203,
"eval_loss": 0.3755970001220703,
"eval_runtime": 192.1914,
"eval_samples_per_second": 1.212,
"eval_steps_per_second": 0.156,
"step": 1566
},
{
"epoch": 3.063666826232647,
"grad_norm": 0.3440151512622833,
"learning_rate": 4.786761182447138e-05,
"loss": 0.1825,
"step": 1600
},
{
"epoch": 3.1594064145524174,
"grad_norm": 0.47479385137557983,
"learning_rate": 4.7471499829829666e-05,
"loss": 0.222,
"step": 1650
},
{
"epoch": 3.2551460028721877,
"grad_norm": 0.44426199793815613,
"learning_rate": 4.704359716912549e-05,
"loss": 0.1952,
"step": 1700
},
{
"epoch": 3.350885591191958,
"grad_norm": 0.3928620219230652,
"learning_rate": 4.6584509200732645e-05,
"loss": 0.2082,
"step": 1750
},
{
"epoch": 3.446625179511728,
"grad_norm": 0.476755827665329,
"learning_rate": 4.609488540120619e-05,
"loss": 0.1815,
"step": 1800
},
{
"epoch": 3.5423647678314985,
"grad_norm": 0.8411477208137512,
"learning_rate": 4.557541844646116e-05,
"loss": 0.17,
"step": 1850
},
{
"epoch": 3.6381043561512687,
"grad_norm": 0.687340259552002,
"learning_rate": 4.502684323183672e-05,
"loss": 0.1834,
"step": 1900
},
{
"epoch": 3.7338439444710385,
"grad_norm": 0.5409902334213257,
"learning_rate": 4.4449935832432e-05,
"loss": 0.1825,
"step": 1950
},
{
"epoch": 3.829583532790809,
"grad_norm": 0.7994140982627869,
"learning_rate": 4.3845512405184565e-05,
"loss": 0.2147,
"step": 2000
},
{
"epoch": 3.925323121110579,
"grad_norm": 0.40214937925338745,
"learning_rate": 4.321442803424463e-05,
"loss": 0.1716,
"step": 2050
},
{
"epoch": 4.0,
"eval_loss": 0.3735233545303345,
"eval_runtime": 192.2095,
"eval_samples_per_second": 1.212,
"eval_steps_per_second": 0.156,
"step": 2089
}
],
"logging_steps": 50,
"max_steps": 5220,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 8.606064626851185e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}