{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 20,
"global_step": 695,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07194244604316546,
"grad_norm": 0.7139045596122742,
"learning_rate": 0.0002,
"loss": 2.0811,
"step": 10
},
{
"epoch": 0.14388489208633093,
"grad_norm": 0.5728554129600525,
"learning_rate": 0.0002,
"loss": 1.567,
"step": 20
},
{
"epoch": 0.2158273381294964,
"grad_norm": 0.28863629698753357,
"learning_rate": 0.0002,
"loss": 1.357,
"step": 30
},
{
"epoch": 0.28776978417266186,
"grad_norm": 0.3229255974292755,
"learning_rate": 0.0002,
"loss": 1.2521,
"step": 40
},
{
"epoch": 0.3597122302158273,
"grad_norm": 0.33988726139068604,
"learning_rate": 0.0002,
"loss": 1.2164,
"step": 50
},
{
"epoch": 0.4316546762589928,
"grad_norm": 0.40238669514656067,
"learning_rate": 0.0002,
"loss": 1.1385,
"step": 60
},
{
"epoch": 0.5035971223021583,
"grad_norm": 0.38314664363861084,
"learning_rate": 0.0002,
"loss": 1.0973,
"step": 70
},
{
"epoch": 0.5755395683453237,
"grad_norm": 0.3884238302707672,
"learning_rate": 0.0002,
"loss": 1.0836,
"step": 80
},
{
"epoch": 0.6474820143884892,
"grad_norm": 0.3774580657482147,
"learning_rate": 0.0002,
"loss": 1.0409,
"step": 90
},
{
"epoch": 0.7194244604316546,
"grad_norm": 0.43280744552612305,
"learning_rate": 0.0002,
"loss": 1.0462,
"step": 100
},
{
"epoch": 0.7913669064748201,
"grad_norm": 0.3758571445941925,
"learning_rate": 0.0002,
"loss": 1.0206,
"step": 110
},
{
"epoch": 0.8633093525179856,
"grad_norm": 0.3923165798187256,
"learning_rate": 0.0002,
"loss": 1.0219,
"step": 120
},
{
"epoch": 0.935251798561151,
"grad_norm": 0.4213840663433075,
"learning_rate": 0.0002,
"loss": 0.9971,
"step": 130
},
{
"epoch": 1.0,
"eval_loss": 0.9775540828704834,
"eval_runtime": 21.5909,
"eval_samples_per_second": 4.771,
"eval_steps_per_second": 0.602,
"step": 139
},
{
"epoch": 1.0071942446043165,
"grad_norm": 0.40076276659965515,
"learning_rate": 0.0002,
"loss": 0.9885,
"step": 140
},
{
"epoch": 1.079136690647482,
"grad_norm": 0.3987070918083191,
"learning_rate": 0.0002,
"loss": 0.9555,
"step": 150
},
{
"epoch": 1.1510791366906474,
"grad_norm": 0.41527315974235535,
"learning_rate": 0.0002,
"loss": 0.9391,
"step": 160
},
{
"epoch": 1.223021582733813,
"grad_norm": 0.42107248306274414,
"learning_rate": 0.0002,
"loss": 0.9332,
"step": 170
},
{
"epoch": 1.2949640287769784,
"grad_norm": 0.4587080776691437,
"learning_rate": 0.0002,
"loss": 0.9163,
"step": 180
},
{
"epoch": 1.3669064748201438,
"grad_norm": 0.3977225124835968,
"learning_rate": 0.0002,
"loss": 0.9142,
"step": 190
},
{
"epoch": 1.4388489208633093,
"grad_norm": 0.41111239790916443,
"learning_rate": 0.0002,
"loss": 0.9088,
"step": 200
},
{
"epoch": 1.5107913669064748,
"grad_norm": 0.4326966404914856,
"learning_rate": 0.0002,
"loss": 0.9082,
"step": 210
},
{
"epoch": 1.5827338129496402,
"grad_norm": 0.3831544816493988,
"learning_rate": 0.0002,
"loss": 0.908,
"step": 220
},
{
"epoch": 1.6546762589928057,
"grad_norm": 0.39992555975914,
"learning_rate": 0.0002,
"loss": 0.9076,
"step": 230
},
{
"epoch": 1.7266187050359711,
"grad_norm": 0.39961057901382446,
"learning_rate": 0.0002,
"loss": 0.9059,
"step": 240
},
{
"epoch": 1.7985611510791366,
"grad_norm": 0.3854500949382782,
"learning_rate": 0.0002,
"loss": 0.8903,
"step": 250
},
{
"epoch": 1.870503597122302,
"grad_norm": 0.4092749357223511,
"learning_rate": 0.0002,
"loss": 0.8904,
"step": 260
},
{
"epoch": 1.9424460431654675,
"grad_norm": 0.40900877118110657,
"learning_rate": 0.0002,
"loss": 0.8679,
"step": 270
},
{
"epoch": 2.0,
"eval_loss": 0.8940790891647339,
"eval_runtime": 21.5978,
"eval_samples_per_second": 4.769,
"eval_steps_per_second": 0.602,
"step": 278
},
{
"epoch": 2.014388489208633,
"grad_norm": 0.3746669888496399,
"learning_rate": 0.0002,
"loss": 0.8669,
"step": 280
},
{
"epoch": 2.0863309352517985,
"grad_norm": 0.4724111258983612,
"learning_rate": 0.0002,
"loss": 0.8304,
"step": 290
},
{
"epoch": 2.158273381294964,
"grad_norm": 0.39437365531921387,
"learning_rate": 0.0002,
"loss": 0.8318,
"step": 300
},
{
"epoch": 2.2302158273381294,
"grad_norm": 0.4238971471786499,
"learning_rate": 0.0002,
"loss": 0.829,
"step": 310
},
{
"epoch": 2.302158273381295,
"grad_norm": 0.37740206718444824,
"learning_rate": 0.0002,
"loss": 0.8235,
"step": 320
},
{
"epoch": 2.3741007194244603,
"grad_norm": 0.40223780274391174,
"learning_rate": 0.0002,
"loss": 0.8328,
"step": 330
},
{
"epoch": 2.446043165467626,
"grad_norm": 0.4160473048686981,
"learning_rate": 0.0002,
"loss": 0.8252,
"step": 340
},
{
"epoch": 2.5179856115107913,
"grad_norm": 0.4427769184112549,
"learning_rate": 0.0002,
"loss": 0.8215,
"step": 350
},
{
"epoch": 2.5899280575539567,
"grad_norm": 0.47273996472358704,
"learning_rate": 0.0002,
"loss": 0.819,
"step": 360
},
{
"epoch": 2.661870503597122,
"grad_norm": 0.38981807231903076,
"learning_rate": 0.0002,
"loss": 0.8225,
"step": 370
},
{
"epoch": 2.7338129496402876,
"grad_norm": 0.385079562664032,
"learning_rate": 0.0002,
"loss": 0.8045,
"step": 380
},
{
"epoch": 2.805755395683453,
"grad_norm": 0.3796544075012207,
"learning_rate": 0.0002,
"loss": 0.8234,
"step": 390
},
{
"epoch": 2.8776978417266186,
"grad_norm": 0.3638385236263275,
"learning_rate": 0.0002,
"loss": 0.8195,
"step": 400
},
{
"epoch": 2.949640287769784,
"grad_norm": 0.37602856755256653,
"learning_rate": 0.0002,
"loss": 0.831,
"step": 410
},
{
"epoch": 3.0,
"eval_loss": 0.8596920967102051,
"eval_runtime": 21.5962,
"eval_samples_per_second": 4.769,
"eval_steps_per_second": 0.602,
"step": 417
},
{
"epoch": 3.0215827338129495,
"grad_norm": 0.3858413100242615,
"learning_rate": 0.0002,
"loss": 0.8076,
"step": 420
},
{
"epoch": 3.093525179856115,
"grad_norm": 0.39952272176742554,
"learning_rate": 0.0002,
"loss": 0.7777,
"step": 430
},
{
"epoch": 3.1654676258992804,
"grad_norm": 0.42352095246315,
"learning_rate": 0.0002,
"loss": 0.7704,
"step": 440
},
{
"epoch": 3.237410071942446,
"grad_norm": 0.4132436513900757,
"learning_rate": 0.0002,
"loss": 0.768,
"step": 450
},
{
"epoch": 3.3093525179856114,
"grad_norm": 0.414110392332077,
"learning_rate": 0.0002,
"loss": 0.7631,
"step": 460
},
{
"epoch": 3.381294964028777,
"grad_norm": 0.43551069498062134,
"learning_rate": 0.0002,
"loss": 0.7673,
"step": 470
},
{
"epoch": 3.4532374100719423,
"grad_norm": 0.4042975604534149,
"learning_rate": 0.0002,
"loss": 0.7552,
"step": 480
},
{
"epoch": 3.5251798561151078,
"grad_norm": 0.4289880394935608,
"learning_rate": 0.0002,
"loss": 0.7559,
"step": 490
},
{
"epoch": 3.597122302158273,
"grad_norm": 0.39530348777770996,
"learning_rate": 0.0002,
"loss": 0.7661,
"step": 500
},
{
"epoch": 3.6690647482014387,
"grad_norm": 0.4326631724834442,
"learning_rate": 0.0002,
"loss": 0.7605,
"step": 510
},
{
"epoch": 3.741007194244604,
"grad_norm": 0.3833424150943756,
"learning_rate": 0.0002,
"loss": 0.7549,
"step": 520
},
{
"epoch": 3.81294964028777,
"grad_norm": 0.40698808431625366,
"learning_rate": 0.0002,
"loss": 0.764,
"step": 530
},
{
"epoch": 3.884892086330935,
"grad_norm": 0.39018431305885315,
"learning_rate": 0.0002,
"loss": 0.7653,
"step": 540
},
{
"epoch": 3.956834532374101,
"grad_norm": 0.37253016233444214,
"learning_rate": 0.0002,
"loss": 0.7677,
"step": 550
},
{
"epoch": 4.0,
"eval_loss": 0.8444326519966125,
"eval_runtime": 21.5712,
"eval_samples_per_second": 4.775,
"eval_steps_per_second": 0.603,
"step": 556
},
{
"epoch": 4.028776978417266,
"grad_norm": 0.4029178023338318,
"learning_rate": 0.0002,
"loss": 0.7467,
"step": 560
},
{
"epoch": 4.100719424460432,
"grad_norm": 0.382028192281723,
"learning_rate": 0.0002,
"loss": 0.7116,
"step": 570
},
{
"epoch": 4.172661870503597,
"grad_norm": 0.42502614855766296,
"learning_rate": 0.0002,
"loss": 0.7298,
"step": 580
},
{
"epoch": 4.244604316546763,
"grad_norm": 0.4241486191749573,
"learning_rate": 0.0002,
"loss": 0.7073,
"step": 590
},
{
"epoch": 4.316546762589928,
"grad_norm": 0.4571862816810608,
"learning_rate": 0.0002,
"loss": 0.7106,
"step": 600
},
{
"epoch": 4.388489208633094,
"grad_norm": 0.43121734261512756,
"learning_rate": 0.0002,
"loss": 0.7031,
"step": 610
},
{
"epoch": 4.460431654676259,
"grad_norm": 0.40107443928718567,
"learning_rate": 0.0002,
"loss": 0.693,
"step": 620
},
{
"epoch": 4.532374100719425,
"grad_norm": 0.4040583670139313,
"learning_rate": 0.0002,
"loss": 0.707,
"step": 630
},
{
"epoch": 4.60431654676259,
"grad_norm": 0.4380245506763458,
"learning_rate": 0.0002,
"loss": 0.7066,
"step": 640
},
{
"epoch": 4.676258992805756,
"grad_norm": 0.4263726472854614,
"learning_rate": 0.0002,
"loss": 0.7216,
"step": 650
},
{
"epoch": 4.748201438848921,
"grad_norm": 0.4532300531864166,
"learning_rate": 0.0002,
"loss": 0.7337,
"step": 660
},
{
"epoch": 4.820143884892087,
"grad_norm": 0.4051191210746765,
"learning_rate": 0.0002,
"loss": 0.7133,
"step": 670
},
{
"epoch": 4.892086330935252,
"grad_norm": 0.434962660074234,
"learning_rate": 0.0002,
"loss": 0.7144,
"step": 680
},
{
"epoch": 4.9640287769784175,
"grad_norm": 0.4131017029285431,
"learning_rate": 0.0002,
"loss": 0.7167,
"step": 690
},
{
"epoch": 5.0,
"eval_loss": 0.8359549641609192,
"eval_runtime": 21.598,
"eval_samples_per_second": 4.769,
"eval_steps_per_second": 0.602,
"step": 695
}
],
"logging_steps": 10,
"max_steps": 695,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8287701530771456e+17,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}