{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1920,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.026041666666666668,
"grad_norm": 368.0,
"learning_rate": 4.9e-05,
"loss": 22.6338,
"step": 50
},
{
"epoch": 0.052083333333333336,
"grad_norm": 428.0,
"learning_rate": 9.900000000000001e-05,
"loss": 20.7782,
"step": 100
},
{
"epoch": 0.078125,
"grad_norm": 203.0,
"learning_rate": 9.730769230769232e-05,
"loss": 16.2125,
"step": 150
},
{
"epoch": 0.10416666666666667,
"grad_norm": 86.5,
"learning_rate": 9.456043956043956e-05,
"loss": 12.6787,
"step": 200
},
{
"epoch": 0.13020833333333334,
"grad_norm": 50.5,
"learning_rate": 9.181318681318681e-05,
"loss": 10.1977,
"step": 250
},
{
"epoch": 0.15625,
"grad_norm": 47.0,
"learning_rate": 8.906593406593407e-05,
"loss": 8.4288,
"step": 300
},
{
"epoch": 0.18229166666666666,
"grad_norm": 40.25,
"learning_rate": 8.631868131868133e-05,
"loss": 7.3367,
"step": 350
},
{
"epoch": 0.20833333333333334,
"grad_norm": 39.75,
"learning_rate": 8.357142857142858e-05,
"loss": 6.6945,
"step": 400
},
{
"epoch": 0.234375,
"grad_norm": 58.5,
"learning_rate": 8.082417582417583e-05,
"loss": 6.1483,
"step": 450
},
{
"epoch": 0.2604166666666667,
"grad_norm": 28.25,
"learning_rate": 7.807692307692307e-05,
"loss": 5.5996,
"step": 500
},
{
"epoch": 0.2864583333333333,
"grad_norm": 29.875,
"learning_rate": 7.532967032967034e-05,
"loss": 5.2421,
"step": 550
},
{
"epoch": 0.3125,
"grad_norm": 41.75,
"learning_rate": 7.258241758241758e-05,
"loss": 4.9438,
"step": 600
},
{
"epoch": 0.3385416666666667,
"grad_norm": 30.125,
"learning_rate": 6.983516483516483e-05,
"loss": 4.6639,
"step": 650
},
{
"epoch": 0.3645833333333333,
"grad_norm": 43.75,
"learning_rate": 6.708791208791209e-05,
"loss": 4.4483,
"step": 700
},
{
"epoch": 0.390625,
"grad_norm": 40.0,
"learning_rate": 6.434065934065935e-05,
"loss": 4.2564,
"step": 750
},
{
"epoch": 0.4166666666666667,
"grad_norm": 45.5,
"learning_rate": 6.15934065934066e-05,
"loss": 4.039,
"step": 800
},
{
"epoch": 0.4427083333333333,
"grad_norm": 34.25,
"learning_rate": 5.884615384615385e-05,
"loss": 3.8383,
"step": 850
},
{
"epoch": 0.46875,
"grad_norm": 34.5,
"learning_rate": 5.60989010989011e-05,
"loss": 3.7334,
"step": 900
},
{
"epoch": 0.4947916666666667,
"grad_norm": 29.25,
"learning_rate": 5.3351648351648354e-05,
"loss": 3.7124,
"step": 950
},
{
"epoch": 0.5208333333333334,
"grad_norm": 20.125,
"learning_rate": 5.06043956043956e-05,
"loss": 3.5392,
"step": 1000
},
{
"epoch": 0.546875,
"grad_norm": 36.25,
"learning_rate": 4.785714285714286e-05,
"loss": 3.6442,
"step": 1050
},
{
"epoch": 0.5729166666666666,
"grad_norm": 16.25,
"learning_rate": 4.510989010989011e-05,
"loss": 3.3756,
"step": 1100
},
{
"epoch": 0.5989583333333334,
"grad_norm": 21.375,
"learning_rate": 4.2362637362637364e-05,
"loss": 3.3847,
"step": 1150
},
{
"epoch": 0.625,
"grad_norm": 29.875,
"learning_rate": 3.961538461538462e-05,
"loss": 3.3237,
"step": 1200
},
{
"epoch": 0.6510416666666666,
"grad_norm": 45.0,
"learning_rate": 3.686813186813187e-05,
"loss": 3.3797,
"step": 1250
},
{
"epoch": 0.6770833333333334,
"grad_norm": 20.25,
"learning_rate": 3.4120879120879126e-05,
"loss": 3.2698,
"step": 1300
},
{
"epoch": 0.703125,
"grad_norm": 25.125,
"learning_rate": 3.1373626373626374e-05,
"loss": 3.295,
"step": 1350
},
{
"epoch": 0.7291666666666666,
"grad_norm": 36.25,
"learning_rate": 2.8626373626373624e-05,
"loss": 3.2116,
"step": 1400
},
{
"epoch": 0.7552083333333334,
"grad_norm": 44.5,
"learning_rate": 2.5879120879120882e-05,
"loss": 3.3309,
"step": 1450
},
{
"epoch": 0.78125,
"grad_norm": 32.5,
"learning_rate": 2.3131868131868133e-05,
"loss": 3.206,
"step": 1500
},
{
"epoch": 0.8072916666666666,
"grad_norm": 128.0,
"learning_rate": 2.0384615384615387e-05,
"loss": 3.2975,
"step": 1550
},
{
"epoch": 0.8333333333333334,
"grad_norm": 34.75,
"learning_rate": 1.763736263736264e-05,
"loss": 3.3141,
"step": 1600
},
{
"epoch": 0.859375,
"grad_norm": 22.875,
"learning_rate": 1.489010989010989e-05,
"loss": 3.2336,
"step": 1650
},
{
"epoch": 0.8854166666666666,
"grad_norm": 24.625,
"learning_rate": 1.2142857142857144e-05,
"loss": 3.2556,
"step": 1700
},
{
"epoch": 0.9114583333333334,
"grad_norm": 19.375,
"learning_rate": 9.395604395604396e-06,
"loss": 3.249,
"step": 1750
},
{
"epoch": 0.9375,
"grad_norm": 40.75,
"learning_rate": 6.648351648351649e-06,
"loss": 3.219,
"step": 1800
},
{
"epoch": 0.9635416666666666,
"grad_norm": 39.75,
"learning_rate": 3.901098901098901e-06,
"loss": 3.3161,
"step": 1850
},
{
"epoch": 0.9895833333333334,
"grad_norm": 18.0,
"learning_rate": 1.153846153846154e-06,
"loss": 3.2349,
"step": 1900
}
],
"logging_steps": 50,
"max_steps": 1920,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.075946360284369e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}