{
"best_metric": 5.877429066458717e-05,
"best_model_checkpoint": "ProtChem_ESM2_MolGen_Decoder/checkpoint-27805",
"epoch": 6.999811190131537,
"eval_steps": 500,
"global_step": 27805,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12587324564163888,
"grad_norm": 0.044921875,
"learning_rate": 5e-06,
"loss": 0.0025,
"step": 500
},
{
"epoch": 0.25174649128327775,
"grad_norm": 0.01129150390625,
"learning_rate": 1e-05,
"loss": 0.0013,
"step": 1000
},
{
"epoch": 0.3776197369249166,
"grad_norm": 0.01416015625,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.0007,
"step": 1500
},
{
"epoch": 0.5034929825665555,
"grad_norm": 0.0133056640625,
"learning_rate": 2e-05,
"loss": 0.0005,
"step": 2000
},
{
"epoch": 0.6293662282081943,
"grad_norm": 0.00872802734375,
"learning_rate": 1.9991330308838565e-05,
"loss": 0.0003,
"step": 2500
},
{
"epoch": 0.7552394738498333,
"grad_norm": 0.020263671875,
"learning_rate": 1.996533626806322e-05,
"loss": 0.0003,
"step": 3000
},
{
"epoch": 0.8811127194914721,
"grad_norm": 0.0024261474609375,
"learning_rate": 1.992206294973508e-05,
"loss": 0.0002,
"step": 3500
},
{
"epoch": 0.9999370633771791,
"eval_loss": 9.923595644067973e-05,
"eval_runtime": 16805.4557,
"eval_samples_per_second": 15.226,
"eval_steps_per_second": 0.952,
"step": 3972
},
{
"epoch": 1.006985965133111,
"grad_norm": 0.0023651123046875,
"learning_rate": 1.9861585387115228e-05,
"loss": 0.0002,
"step": 4000
},
{
"epoch": 1.13285921077475,
"grad_norm": 0.005157470703125,
"learning_rate": 1.9784008444561692e-05,
"loss": 0.0002,
"step": 4500
},
{
"epoch": 1.2587324564163886,
"grad_norm": 0.009765625,
"learning_rate": 1.9689466635701106e-05,
"loss": 0.0002,
"step": 5000
},
{
"epoch": 1.3846057020580276,
"grad_norm": 0.006011962890625,
"learning_rate": 1.9578123890190405e-05,
"loss": 0.0002,
"step": 5500
},
{
"epoch": 1.5104789476996663,
"grad_norm": 0.01397705078125,
"learning_rate": 1.9450173269472915e-05,
"loss": 0.0002,
"step": 6000
},
{
"epoch": 1.6363521933413052,
"grad_norm": 0.0035247802734375,
"learning_rate": 1.9305836632021744e-05,
"loss": 0.0002,
"step": 6500
},
{
"epoch": 1.7622254389829441,
"grad_norm": 0.004669189453125,
"learning_rate": 1.9145364248650892e-05,
"loss": 0.0002,
"step": 7000
},
{
"epoch": 1.888098684624583,
"grad_norm": 0.0034332275390625,
"learning_rate": 1.8969034368561105e-05,
"loss": 0.0002,
"step": 7500
},
{
"epoch": 1.9998741267543583,
"eval_loss": 6.921035674167797e-05,
"eval_runtime": 16790.6132,
"eval_samples_per_second": 15.24,
"eval_steps_per_second": 0.952,
"step": 7944
},
{
"epoch": 2.013971930266222,
"grad_norm": 0.003448486328125,
"learning_rate": 1.877715273687297e-05,
"loss": 0.0002,
"step": 8000
},
{
"epoch": 2.139845175907861,
"grad_norm": 0.01324462890625,
"learning_rate": 1.857005206448375e-05,
"loss": 0.0002,
"step": 8500
},
{
"epoch": 2.2657184215495,
"grad_norm": 0.0032806396484375,
"learning_rate": 1.8348091451167224e-05,
"loss": 0.0002,
"step": 9000
},
{
"epoch": 2.3915916671911384,
"grad_norm": 0.00182342529296875,
"learning_rate": 1.8111655762916885e-05,
"loss": 0.0002,
"step": 9500
},
{
"epoch": 2.5174649128327773,
"grad_norm": 0.007781982421875,
"learning_rate": 1.786115496461207e-05,
"loss": 0.0002,
"step": 10000
},
{
"epoch": 2.643338158474416,
"grad_norm": 0.00250244140625,
"learning_rate": 1.759702340916418e-05,
"loss": 0.0002,
"step": 10500
},
{
"epoch": 2.769211404116055,
"grad_norm": 0.00185394287109375,
"learning_rate": 1.7319719084375556e-05,
"loss": 0.0001,
"step": 11000
},
{
"epoch": 2.895084649757694,
"grad_norm": 0.00445556640625,
"learning_rate": 1.702972281881693e-05,
"loss": 0.0001,
"step": 11500
},
{
"epoch": 2.9998111901315374,
"eval_loss": 6.317481165751815e-05,
"eval_runtime": 16782.274,
"eval_samples_per_second": 15.248,
"eval_steps_per_second": 0.953,
"step": 11916
},
{
"epoch": 3.020957895399333,
"grad_norm": 0.0028076171875,
"learning_rate": 1.672753744810037e-05,
"loss": 0.0002,
"step": 12000
},
{
"epoch": 3.146831141040972,
"grad_norm": 0.00225830078125,
"learning_rate": 1.6413686942993405e-05,
"loss": 0.0001,
"step": 12500
},
{
"epoch": 3.2727043866826104,
"grad_norm": 0.00579833984375,
"learning_rate": 1.608871550088606e-05,
"loss": 0.0002,
"step": 13000
},
{
"epoch": 3.3985776323242494,
"grad_norm": 0.0086669921875,
"learning_rate": 1.5753186602186207e-05,
"loss": 0.0002,
"step": 13500
},
{
"epoch": 3.5244508779658883,
"grad_norm": 0.0022735595703125,
"learning_rate": 1.540768203327934e-05,
"loss": 0.0001,
"step": 14000
},
{
"epoch": 3.6503241236075272,
"grad_norm": 0.01202392578125,
"learning_rate": 1.5052800877746915e-05,
"loss": 0.0001,
"step": 14500
},
{
"epoch": 3.776197369249166,
"grad_norm": 0.01190185546875,
"learning_rate": 1.4689158477592433e-05,
"loss": 0.0001,
"step": 15000
},
{
"epoch": 3.902070614890805,
"grad_norm": 0.017822265625,
"learning_rate": 1.4317385366276393e-05,
"loss": 0.0001,
"step": 15500
},
{
"epoch": 4.0,
"eval_loss": 6.0006157582392916e-05,
"eval_runtime": 16792.4882,
"eval_samples_per_second": 15.238,
"eval_steps_per_second": 0.952,
"step": 15889
},
{
"epoch": 4.027943860532444,
"grad_norm": 0.007171630859375,
"learning_rate": 1.393812617541025e-05,
"loss": 0.0001,
"step": 16000
},
{
"epoch": 4.153817106174083,
"grad_norm": 0.001190185546875,
"learning_rate": 1.3552038517004991e-05,
"loss": 0.0001,
"step": 16500
},
{
"epoch": 4.279690351815722,
"grad_norm": 0.003082275390625,
"learning_rate": 1.3159791843212542e-05,
"loss": 0.0001,
"step": 17000
},
{
"epoch": 4.405563597457361,
"grad_norm": 0.002593994140625,
"learning_rate": 1.2762066285537071e-05,
"loss": 0.0001,
"step": 17500
},
{
"epoch": 4.531436843099,
"grad_norm": 0.01129150390625,
"learning_rate": 1.2359551475529e-05,
"loss": 0.0001,
"step": 18000
},
{
"epoch": 4.657310088740639,
"grad_norm": 0.002166748046875,
"learning_rate": 1.1952945349006455e-05,
"loss": 0.0001,
"step": 18500
},
{
"epoch": 4.783183334382277,
"grad_norm": 0.006622314453125,
"learning_rate": 1.1542952935877703e-05,
"loss": 0.0001,
"step": 19000
},
{
"epoch": 4.909056580023916,
"grad_norm": 0.0029449462890625,
"learning_rate": 1.1130285137662811e-05,
"loss": 0.0001,
"step": 19500
},
{
"epoch": 4.999937063377179,
"eval_loss": 5.9128127759322524e-05,
"eval_runtime": 16790.9197,
"eval_samples_per_second": 15.24,
"eval_steps_per_second": 0.952,
"step": 19861
},
{
"epoch": 5.034929825665555,
"grad_norm": 0.0125732421875,
"learning_rate": 1.071565749483434e-05,
"loss": 0.0002,
"step": 20000
},
{
"epoch": 5.1608030713071935,
"grad_norm": 0.00118255615234375,
"learning_rate": 1.0299788946114352e-05,
"loss": 0.0001,
"step": 20500
},
{
"epoch": 5.286676316948832,
"grad_norm": 0.002471923828125,
"learning_rate": 9.883400581879077e-06,
"loss": 0.0001,
"step": 21000
},
{
"epoch": 5.412549562590471,
"grad_norm": 0.0062255859375,
"learning_rate": 9.467214393832746e-06,
"loss": 0.0002,
"step": 21500
},
{
"epoch": 5.53842280823211,
"grad_norm": 0.00531005859375,
"learning_rate": 9.051952023118563e-06,
"loss": 0.0001,
"step": 22000
},
{
"epoch": 5.664296053873749,
"grad_norm": 0.006683349609375,
"learning_rate": 8.638333509037537e-06,
"loss": 0.0001,
"step": 22500
},
{
"epoch": 5.790169299515388,
"grad_norm": 0.003631591796875,
"learning_rate": 8.227076040544813e-06,
"loss": 0.0002,
"step": 23000
},
{
"epoch": 5.916042545157027,
"grad_norm": 0.0093994140625,
"learning_rate": 7.818892712688328e-06,
"loss": 0.0001,
"step": 23500
},
{
"epoch": 5.999874126754358,
"eval_loss": 5.881577453692444e-05,
"eval_runtime": 16791.1194,
"eval_samples_per_second": 15.239,
"eval_steps_per_second": 0.952,
"step": 23833
},
{
"epoch": 6.041915790798666,
"grad_norm": 0.008056640625,
"learning_rate": 7.4144912901460355e-06,
"loss": 0.0001,
"step": 24000
},
{
"epoch": 6.167789036440305,
"grad_norm": 0.0050048828125,
"learning_rate": 7.014572980005667e-06,
"loss": 0.0001,
"step": 24500
},
{
"epoch": 6.293662282081944,
"grad_norm": 0.0023956298828125,
"learning_rate": 6.619831215914974e-06,
"loss": 0.0001,
"step": 25000
},
{
"epoch": 6.419535527723582,
"grad_norm": 0.00182342529296875,
"learning_rate": 6.230950455710592e-06,
"loss": 0.0001,
"step": 25500
},
{
"epoch": 6.545408773365221,
"grad_norm": 0.00457763671875,
"learning_rate": 5.848604994610434e-06,
"loss": 0.0002,
"step": 26000
},
{
"epoch": 6.67128201900686,
"grad_norm": 0.002960205078125,
"learning_rate": 5.4734577960274515e-06,
"loss": 0.0001,
"step": 26500
},
{
"epoch": 6.797155264648499,
"grad_norm": 0.0128173828125,
"learning_rate": 5.106159342032e-06,
"loss": 0.0001,
"step": 27000
},
{
"epoch": 6.923028510290138,
"grad_norm": 0.0020294189453125,
"learning_rate": 4.747346505456123e-06,
"loss": 0.0001,
"step": 27500
},
{
"epoch": 6.999811190131537,
"eval_loss": 5.877429066458717e-05,
"eval_runtime": 16781.6197,
"eval_samples_per_second": 15.248,
"eval_steps_per_second": 0.953,
"step": 27805
}
],
"logging_steps": 500,
"max_steps": 39720,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.8671338999791317e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}