{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 590,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.033927056827820185,
"grad_norm": 0.5479967594146729,
"learning_rate": 0.0001977324263038549,
"loss": 1.8868,
"step": 10
},
{
"epoch": 0.06785411365564037,
"grad_norm": 0.224782794713974,
"learning_rate": 0.00019546485260770976,
"loss": 1.2591,
"step": 20
},
{
"epoch": 0.10178117048346055,
"grad_norm": 0.24415747821331024,
"learning_rate": 0.00019319727891156462,
"loss": 1.0807,
"step": 30
},
{
"epoch": 0.13570822731128074,
"grad_norm": 0.22518040239810944,
"learning_rate": 0.0001909297052154195,
"loss": 1.0525,
"step": 40
},
{
"epoch": 0.16963528413910092,
"grad_norm": 0.2071247398853302,
"learning_rate": 0.0001886621315192744,
"loss": 1.0399,
"step": 50
},
{
"epoch": 0.2035623409669211,
"grad_norm": 0.21717332303524017,
"learning_rate": 0.00018639455782312926,
"loss": 0.9676,
"step": 60
},
{
"epoch": 0.23748939779474132,
"grad_norm": 0.23291213810443878,
"learning_rate": 0.00018412698412698412,
"loss": 0.9593,
"step": 70
},
{
"epoch": 0.2714164546225615,
"grad_norm": 0.24413667619228363,
"learning_rate": 0.000181859410430839,
"loss": 0.9712,
"step": 80
},
{
"epoch": 0.3053435114503817,
"grad_norm": 0.24343140423297882,
"learning_rate": 0.0001795918367346939,
"loss": 0.9398,
"step": 90
},
{
"epoch": 0.33927056827820185,
"grad_norm": 0.2566365897655487,
"learning_rate": 0.00017732426303854876,
"loss": 0.9026,
"step": 100
},
{
"epoch": 0.37319762510602206,
"grad_norm": 0.2497139573097229,
"learning_rate": 0.00017505668934240365,
"loss": 0.8716,
"step": 110
},
{
"epoch": 0.4071246819338422,
"grad_norm": 0.2669306993484497,
"learning_rate": 0.0001727891156462585,
"loss": 0.8351,
"step": 120
},
{
"epoch": 0.4410517387616624,
"grad_norm": 0.27545222640037537,
"learning_rate": 0.0001705215419501134,
"loss": 0.8784,
"step": 130
},
{
"epoch": 0.47497879558948264,
"grad_norm": 0.2573504149913788,
"learning_rate": 0.00016825396825396826,
"loss": 0.8346,
"step": 140
},
{
"epoch": 0.5089058524173028,
"grad_norm": 0.2542634904384613,
"learning_rate": 0.00016598639455782315,
"loss": 0.8113,
"step": 150
},
{
"epoch": 0.542832909245123,
"grad_norm": 0.314864844083786,
"learning_rate": 0.000163718820861678,
"loss": 0.8093,
"step": 160
},
{
"epoch": 0.5767599660729432,
"grad_norm": 0.2655967175960541,
"learning_rate": 0.00016145124716553287,
"loss": 0.8426,
"step": 170
},
{
"epoch": 0.6106870229007634,
"grad_norm": 0.27555230259895325,
"learning_rate": 0.00015918367346938776,
"loss": 0.8129,
"step": 180
},
{
"epoch": 0.6446140797285835,
"grad_norm": 0.28799182176589966,
"learning_rate": 0.00015691609977324265,
"loss": 0.7833,
"step": 190
},
{
"epoch": 0.6785411365564037,
"grad_norm": 0.27187833189964294,
"learning_rate": 0.00015464852607709753,
"loss": 0.7795,
"step": 200
},
{
"epoch": 0.712468193384224,
"grad_norm": 0.28134599328041077,
"learning_rate": 0.00015238095238095237,
"loss": 0.8008,
"step": 210
},
{
"epoch": 0.7463952502120441,
"grad_norm": 0.29679593443870544,
"learning_rate": 0.00015011337868480726,
"loss": 0.7823,
"step": 220
},
{
"epoch": 0.7803223070398643,
"grad_norm": 0.30081549286842346,
"learning_rate": 0.00014784580498866215,
"loss": 0.7772,
"step": 230
},
{
"epoch": 0.8142493638676844,
"grad_norm": 0.28066059947013855,
"learning_rate": 0.000145578231292517,
"loss": 0.7776,
"step": 240
},
{
"epoch": 0.8481764206955047,
"grad_norm": 0.2732291519641876,
"learning_rate": 0.0001433106575963719,
"loss": 0.7719,
"step": 250
},
{
"epoch": 0.8821034775233249,
"grad_norm": 0.2929159104824066,
"learning_rate": 0.00014104308390022676,
"loss": 0.778,
"step": 260
},
{
"epoch": 0.916030534351145,
"grad_norm": 0.29763197898864746,
"learning_rate": 0.00013877551020408165,
"loss": 0.8044,
"step": 270
},
{
"epoch": 0.9499575911789653,
"grad_norm": 0.28522127866744995,
"learning_rate": 0.0001365079365079365,
"loss": 0.7877,
"step": 280
},
{
"epoch": 0.9838846480067854,
"grad_norm": 0.3052780032157898,
"learning_rate": 0.0001342403628117914,
"loss": 0.7463,
"step": 290
},
{
"epoch": 1.01696352841391,
"grad_norm": 0.323665976524353,
"learning_rate": 0.00013197278911564626,
"loss": 0.7519,
"step": 300
},
{
"epoch": 1.0508905852417303,
"grad_norm": 0.3049149513244629,
"learning_rate": 0.00012970521541950114,
"loss": 0.7254,
"step": 310
},
{
"epoch": 1.0848176420695506,
"grad_norm": 0.36289119720458984,
"learning_rate": 0.000127437641723356,
"loss": 0.7228,
"step": 320
},
{
"epoch": 1.1187446988973706,
"grad_norm": 0.32274919748306274,
"learning_rate": 0.0001251700680272109,
"loss": 0.7348,
"step": 330
},
{
"epoch": 1.1526717557251909,
"grad_norm": 0.3286229372024536,
"learning_rate": 0.00012290249433106578,
"loss": 0.736,
"step": 340
},
{
"epoch": 1.1865988125530111,
"grad_norm": 0.3304899036884308,
"learning_rate": 0.00012063492063492063,
"loss": 0.7248,
"step": 350
},
{
"epoch": 1.2205258693808312,
"grad_norm": 0.30971524119377136,
"learning_rate": 0.00011836734693877552,
"loss": 0.726,
"step": 360
},
{
"epoch": 1.2544529262086515,
"grad_norm": 0.3118181526660919,
"learning_rate": 0.0001160997732426304,
"loss": 0.715,
"step": 370
},
{
"epoch": 1.2883799830364717,
"grad_norm": 0.35270431637763977,
"learning_rate": 0.00011383219954648527,
"loss": 0.7117,
"step": 380
},
{
"epoch": 1.3223070398642918,
"grad_norm": 0.3350945711135864,
"learning_rate": 0.00011156462585034013,
"loss": 0.7161,
"step": 390
},
{
"epoch": 1.356234096692112,
"grad_norm": 0.3284754455089569,
"learning_rate": 0.000109297052154195,
"loss": 0.7131,
"step": 400
},
{
"epoch": 1.390161153519932,
"grad_norm": 0.31873300671577454,
"learning_rate": 0.0001070294784580499,
"loss": 0.6879,
"step": 410
},
{
"epoch": 1.4240882103477523,
"grad_norm": 0.32634538412094116,
"learning_rate": 0.00010476190476190477,
"loss": 0.7141,
"step": 420
},
{
"epoch": 1.4580152671755724,
"grad_norm": 0.3236243724822998,
"learning_rate": 0.00010249433106575966,
"loss": 0.7128,
"step": 430
},
{
"epoch": 1.4919423240033927,
"grad_norm": 0.34859582781791687,
"learning_rate": 0.0001002267573696145,
"loss": 0.7329,
"step": 440
},
{
"epoch": 1.525869380831213,
"grad_norm": 0.3041052222251892,
"learning_rate": 9.79591836734694e-05,
"loss": 0.7001,
"step": 450
},
{
"epoch": 1.559796437659033,
"grad_norm": 0.33919453620910645,
"learning_rate": 9.569160997732427e-05,
"loss": 0.6973,
"step": 460
},
{
"epoch": 1.5937234944868532,
"grad_norm": 0.333812952041626,
"learning_rate": 9.342403628117914e-05,
"loss": 0.7114,
"step": 470
},
{
"epoch": 1.6276505513146735,
"grad_norm": 0.32383838295936584,
"learning_rate": 9.115646258503402e-05,
"loss": 0.712,
"step": 480
},
{
"epoch": 1.6615776081424936,
"grad_norm": 0.29588553309440613,
"learning_rate": 8.888888888888889e-05,
"loss": 0.6894,
"step": 490
},
{
"epoch": 1.6955046649703138,
"grad_norm": 0.3401544392108917,
"learning_rate": 8.662131519274377e-05,
"loss": 0.7074,
"step": 500
},
{
"epoch": 1.729431721798134,
"grad_norm": 0.3124183714389801,
"learning_rate": 8.435374149659864e-05,
"loss": 0.7054,
"step": 510
},
{
"epoch": 1.7633587786259541,
"grad_norm": 0.31472259759902954,
"learning_rate": 8.208616780045352e-05,
"loss": 0.6761,
"step": 520
},
{
"epoch": 1.7972858354537744,
"grad_norm": 0.31297537684440613,
"learning_rate": 7.981859410430839e-05,
"loss": 0.6715,
"step": 530
},
{
"epoch": 1.8312128922815947,
"grad_norm": 0.33949580788612366,
"learning_rate": 7.755102040816327e-05,
"loss": 0.6883,
"step": 540
},
{
"epoch": 1.8651399491094147,
"grad_norm": 0.34998491406440735,
"learning_rate": 7.528344671201814e-05,
"loss": 0.6878,
"step": 550
},
{
"epoch": 1.899067005937235,
"grad_norm": 0.3444700241088867,
"learning_rate": 7.301587301587302e-05,
"loss": 0.7044,
"step": 560
},
{
"epoch": 1.9329940627650553,
"grad_norm": 0.34009718894958496,
"learning_rate": 7.074829931972789e-05,
"loss": 0.6753,
"step": 570
},
{
"epoch": 1.9669211195928753,
"grad_norm": 0.2923238277435303,
"learning_rate": 6.848072562358277e-05,
"loss": 0.6843,
"step": 580
},
{
"epoch": 2.0,
"grad_norm": 0.43473541736602783,
"learning_rate": 6.621315192743764e-05,
"loss": 0.6944,
"step": 590
}
],
"logging_steps": 10,
"max_steps": 882,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.726134597059871e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}