{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.751971954425942,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.035056967572304996,
"grad_norm": 12.451324462890625,
"learning_rate": 1.8446601941747574e-06,
"loss": 3.237,
"step": 20
},
{
"epoch": 0.07011393514460999,
"grad_norm": 4.131791114807129,
"learning_rate": 3.7864077669902915e-06,
"loss": 2.5266,
"step": 40
},
{
"epoch": 0.10517090271691498,
"grad_norm": 2.2451958656311035,
"learning_rate": 5.728155339805825e-06,
"loss": 1.8755,
"step": 60
},
{
"epoch": 0.14022787028921999,
"grad_norm": 1.4421552419662476,
"learning_rate": 7.66990291262136e-06,
"loss": 1.3649,
"step": 80
},
{
"epoch": 0.175284837861525,
"grad_norm": 0.9530179500579834,
"learning_rate": 9.611650485436894e-06,
"loss": 1.0674,
"step": 100
},
{
"epoch": 0.21034180543382996,
"grad_norm": 0.7194477319717407,
"learning_rate": 9.99942798060303e-06,
"loss": 0.9241,
"step": 120
},
{
"epoch": 0.24539877300613497,
"grad_norm": 0.6556061506271362,
"learning_rate": 9.997104376116195e-06,
"loss": 0.8575,
"step": 140
},
{
"epoch": 0.28045574057843997,
"grad_norm": 0.5718048810958862,
"learning_rate": 9.992994265395959e-06,
"loss": 0.829,
"step": 160
},
{
"epoch": 0.31551270815074495,
"grad_norm": 0.4922148287296295,
"learning_rate": 9.987099117840969e-06,
"loss": 0.8034,
"step": 180
},
{
"epoch": 0.35056967572305,
"grad_norm": 0.47302234172821045,
"learning_rate": 9.979421041015336e-06,
"loss": 0.7839,
"step": 200
},
{
"epoch": 0.38562664329535495,
"grad_norm": 0.49009189009666443,
"learning_rate": 9.969962779895172e-06,
"loss": 0.768,
"step": 220
},
{
"epoch": 0.42068361086765993,
"grad_norm": 0.4963654577732086,
"learning_rate": 9.958727715887218e-06,
"loss": 0.7628,
"step": 240
},
{
"epoch": 0.45574057843996496,
"grad_norm": 0.5206854343414307,
"learning_rate": 9.94571986561998e-06,
"loss": 0.7488,
"step": 260
},
{
"epoch": 0.49079754601226994,
"grad_norm": 0.48924869298934937,
"learning_rate": 9.930943879507748e-06,
"loss": 0.7436,
"step": 280
},
{
"epoch": 0.5258545135845749,
"grad_norm": 0.43540337681770325,
"learning_rate": 9.914405040088026e-06,
"loss": 0.7375,
"step": 300
},
{
"epoch": 0.5609114811568799,
"grad_norm": 0.44258421659469604,
"learning_rate": 9.896109260132993e-06,
"loss": 0.7277,
"step": 320
},
{
"epoch": 0.595968448729185,
"grad_norm": 0.4955386519432068,
"learning_rate": 9.876063080535627e-06,
"loss": 0.7284,
"step": 340
},
{
"epoch": 0.6310254163014899,
"grad_norm": 0.5027541518211365,
"learning_rate": 9.85427366797129e-06,
"loss": 0.7231,
"step": 360
},
{
"epoch": 0.6660823838737949,
"grad_norm": 0.4675957262516022,
"learning_rate": 9.830748812335576e-06,
"loss": 0.7212,
"step": 380
},
{
"epoch": 0.7011393514461,
"grad_norm": 0.4283595383167267,
"learning_rate": 9.805496923959363e-06,
"loss": 0.7164,
"step": 400
},
{
"epoch": 0.7361963190184049,
"grad_norm": 0.452084481716156,
"learning_rate": 9.778527030602049e-06,
"loss": 0.711,
"step": 420
},
{
"epoch": 0.7712532865907099,
"grad_norm": 0.4737929105758667,
"learning_rate": 9.74984877422405e-06,
"loss": 0.7084,
"step": 440
},
{
"epoch": 0.8063102541630149,
"grad_norm": 0.4964485466480255,
"learning_rate": 9.719472407539725e-06,
"loss": 0.7028,
"step": 460
},
{
"epoch": 0.8413672217353199,
"grad_norm": 0.44363030791282654,
"learning_rate": 9.68740879035194e-06,
"loss": 0.7045,
"step": 480
},
{
"epoch": 0.8764241893076249,
"grad_norm": 0.5004998445510864,
"learning_rate": 9.6536693856696e-06,
"loss": 0.6937,
"step": 500
},
{
"epoch": 0.9114811568799299,
"grad_norm": 0.4564264118671417,
"learning_rate": 9.618266255609533e-06,
"loss": 0.699,
"step": 520
},
{
"epoch": 0.9465381244522348,
"grad_norm": 0.4558616280555725,
"learning_rate": 9.58121205708418e-06,
"loss": 0.691,
"step": 540
},
{
"epoch": 0.9815950920245399,
"grad_norm": 0.413114458322525,
"learning_rate": 9.542520037276636e-06,
"loss": 0.6891,
"step": 560
},
{
"epoch": 1.0157756354075373,
"grad_norm": 0.403679758310318,
"learning_rate": 9.502204028904687e-06,
"loss": 0.6812,
"step": 580
},
{
"epoch": 1.0508326029798423,
"grad_norm": 0.40308722853660583,
"learning_rate": 9.46027844527549e-06,
"loss": 0.6791,
"step": 600
},
{
"epoch": 1.0858895705521472,
"grad_norm": 0.4085083603858948,
"learning_rate": 9.416758275132693e-06,
"loss": 0.6803,
"step": 620
},
{
"epoch": 1.1209465381244523,
"grad_norm": 0.4475920796394348,
"learning_rate": 9.371659077297843e-06,
"loss": 0.6789,
"step": 640
},
{
"epoch": 1.1560035056967572,
"grad_norm": 0.4604188799858093,
"learning_rate": 9.324996975107978e-06,
"loss": 0.674,
"step": 660
},
{
"epoch": 1.1910604732690622,
"grad_norm": 0.4190482795238495,
"learning_rate": 9.276788650651392e-06,
"loss": 0.6746,
"step": 680
},
{
"epoch": 1.2261174408413673,
"grad_norm": 0.420953631401062,
"learning_rate": 9.227051338803656e-06,
"loss": 0.6692,
"step": 700
},
{
"epoch": 1.2611744084136722,
"grad_norm": 0.4463854432106018,
"learning_rate": 9.175802821066009e-06,
"loss": 0.6737,
"step": 720
},
{
"epoch": 1.2962313759859772,
"grad_norm": 0.44004735350608826,
"learning_rate": 9.12306141920832e-06,
"loss": 0.6673,
"step": 740
},
{
"epoch": 1.331288343558282,
"grad_norm": 0.42015475034713745,
"learning_rate": 9.068845988718906e-06,
"loss": 0.6676,
"step": 760
},
{
"epoch": 1.3663453111305872,
"grad_norm": 0.43683475255966187,
"learning_rate": 9.013175912063534e-06,
"loss": 0.6649,
"step": 780
},
{
"epoch": 1.4014022787028921,
"grad_norm": 0.4281805753707886,
"learning_rate": 8.956071091756036e-06,
"loss": 0.6658,
"step": 800
},
{
"epoch": 1.4364592462751973,
"grad_norm": 0.4270734190940857,
"learning_rate": 8.89755194324299e-06,
"loss": 0.6646,
"step": 820
},
{
"epoch": 1.4715162138475022,
"grad_norm": 0.4163481593132019,
"learning_rate": 8.837639387605031e-06,
"loss": 0.6658,
"step": 840
},
{
"epoch": 1.5065731814198071,
"grad_norm": 0.45280900597572327,
"learning_rate": 8.776354844077389e-06,
"loss": 0.6592,
"step": 860
},
{
"epoch": 1.541630148992112,
"grad_norm": 0.40485361218452454,
"learning_rate": 8.713720222392338e-06,
"loss": 0.6579,
"step": 880
},
{
"epoch": 1.5766871165644172,
"grad_norm": 0.42039763927459717,
"learning_rate": 8.649757914946284e-06,
"loss": 0.6616,
"step": 900
},
{
"epoch": 1.6117440841367223,
"grad_norm": 0.4760454595088959,
"learning_rate": 8.584490788794296e-06,
"loss": 0.6572,
"step": 920
},
{
"epoch": 1.6468010517090272,
"grad_norm": 0.43802690505981445,
"learning_rate": 8.517942177474943e-06,
"loss": 0.6548,
"step": 940
},
{
"epoch": 1.6818580192813322,
"grad_norm": 0.5002708435058594,
"learning_rate": 8.450135872668369e-06,
"loss": 0.6557,
"step": 960
},
{
"epoch": 1.716914986853637,
"grad_norm": 0.4160609543323517,
"learning_rate": 8.38109611569056e-06,
"loss": 0.6529,
"step": 980
},
{
"epoch": 1.751971954425942,
"grad_norm": 0.43179649114608765,
"learning_rate": 8.310847588826876e-06,
"loss": 0.6529,
"step": 1000
}
],
"logging_steps": 20,
"max_steps": 3426,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.769796028046508e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}