{
"best_metric": 2.1521201133728027,
"best_model_checkpoint": "./checkpoints/ultrafeedback_binarized/phi-2-ultrafeedback_binarized-lambda0.15-ORPO-1-14-26/checkpoint-2994",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 2994,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016700066800267203,
"grad_norm": 120.0,
"learning_rate": 1e-07,
"loss": 3.6046,
"step": 50
},
{
"epoch": 0.033400133600534405,
"grad_norm": 42.5,
"learning_rate": 2e-07,
"loss": 3.5223,
"step": 100
},
{
"epoch": 0.050100200400801605,
"grad_norm": 34.0,
"learning_rate": 3e-07,
"loss": 3.4339,
"step": 150
},
{
"epoch": 0.06680026720106881,
"grad_norm": 38.0,
"learning_rate": 4e-07,
"loss": 3.4468,
"step": 200
},
{
"epoch": 0.08350033400133601,
"grad_norm": 39.25,
"learning_rate": 5e-07,
"loss": 3.3893,
"step": 250
},
{
"epoch": 0.10020040080160321,
"grad_norm": 49.75,
"learning_rate": 6e-07,
"loss": 3.3252,
"step": 300
},
{
"epoch": 0.11690046760187041,
"grad_norm": 111.5,
"learning_rate": 7e-07,
"loss": 3.2598,
"step": 350
},
{
"epoch": 0.13360053440213762,
"grad_norm": 31.0,
"learning_rate": 8e-07,
"loss": 3.2579,
"step": 400
},
{
"epoch": 0.15030060120240482,
"grad_norm": 111.0,
"learning_rate": 9e-07,
"loss": 3.2124,
"step": 450
},
{
"epoch": 0.16700066800267202,
"grad_norm": 356.0,
"learning_rate": 1e-06,
"loss": 3.1667,
"step": 500
},
{
"epoch": 0.18370073480293922,
"grad_norm": 22.875,
"learning_rate": 9.996127778044774e-07,
"loss": 3.1074,
"step": 550
},
{
"epoch": 0.20040080160320642,
"grad_norm": 154.0,
"learning_rate": 9.984517109820244e-07,
"loss": 3.0856,
"step": 600
},
{
"epoch": 0.21710086840347362,
"grad_norm": 197.0,
"learning_rate": 9.965185978960174e-07,
"loss": 3.0459,
"step": 650
},
{
"epoch": 0.23380093520374082,
"grad_norm": 83.0,
"learning_rate": 9.9381643272363e-07,
"loss": 2.9253,
"step": 700
},
{
"epoch": 0.250501002004008,
"grad_norm": 125.0,
"learning_rate": 9.90349400818185e-07,
"loss": 2.9436,
"step": 750
},
{
"epoch": 0.26720106880427524,
"grad_norm": 64.5,
"learning_rate": 9.86122872226508e-07,
"loss": 2.9872,
"step": 800
},
{
"epoch": 0.28390113560454244,
"grad_norm": 37.5,
"learning_rate": 9.811433933713217e-07,
"loss": 2.9256,
"step": 850
},
{
"epoch": 0.30060120240480964,
"grad_norm": 51.0,
"learning_rate": 9.754186769115654e-07,
"loss": 2.8895,
"step": 900
},
{
"epoch": 0.31730126920507684,
"grad_norm": 46.0,
"learning_rate": 9.689575897963448e-07,
"loss": 2.8348,
"step": 950
},
{
"epoch": 0.33400133600534404,
"grad_norm": 115.0,
"learning_rate": 9.617701395310122e-07,
"loss": 2.83,
"step": 1000
},
{
"epoch": 0.35070140280561124,
"grad_norm": 29.625,
"learning_rate": 9.538674586766557e-07,
"loss": 2.7793,
"step": 1050
},
{
"epoch": 0.36740146960587844,
"grad_norm": 286.0,
"learning_rate": 9.45261787606999e-07,
"loss": 2.7292,
"step": 1100
},
{
"epoch": 0.38410153640614564,
"grad_norm": 64.5,
"learning_rate": 9.359664555494242e-07,
"loss": 2.7686,
"step": 1150
},
{
"epoch": 0.40080160320641284,
"grad_norm": 117.5,
"learning_rate": 9.259958599394812e-07,
"loss": 2.7036,
"step": 1200
},
{
"epoch": 0.41750167000668004,
"grad_norm": 27.25,
"learning_rate": 9.153654441208611e-07,
"loss": 2.6712,
"step": 1250
},
{
"epoch": 0.43420173680694724,
"grad_norm": 99.5,
"learning_rate": 9.040916734253739e-07,
"loss": 2.6642,
"step": 1300
},
{
"epoch": 0.45090180360721444,
"grad_norm": 255.0,
"learning_rate": 8.921920096699821e-07,
"loss": 2.6795,
"step": 1350
},
{
"epoch": 0.46760187040748163,
"grad_norm": 143.0,
"learning_rate": 8.796848841103867e-07,
"loss": 2.603,
"step": 1400
},
{
"epoch": 0.48430193720774883,
"grad_norm": 286.0,
"learning_rate": 8.665896688930638e-07,
"loss": 2.6033,
"step": 1450
},
{
"epoch": 0.501002004008016,
"grad_norm": 25.25,
"learning_rate": 8.529266470499618e-07,
"loss": 2.5379,
"step": 1500
},
{
"epoch": 0.5177020708082832,
"grad_norm": 564.0,
"learning_rate": 8.387169810823436e-07,
"loss": 2.5032,
"step": 1550
},
{
"epoch": 0.5344021376085505,
"grad_norm": 45.25,
"learning_rate": 8.239826801824232e-07,
"loss": 2.4736,
"step": 1600
},
{
"epoch": 0.5511022044088176,
"grad_norm": 53.5,
"learning_rate": 8.087465661435767e-07,
"loss": 2.5829,
"step": 1650
},
{
"epoch": 0.5678022712090849,
"grad_norm": 66.0,
"learning_rate": 7.930322380119213e-07,
"loss": 2.4656,
"step": 1700
},
{
"epoch": 0.584502338009352,
"grad_norm": 22.125,
"learning_rate": 7.768640355340184e-07,
"loss": 2.4649,
"step": 1750
},
{
"epoch": 0.6012024048096193,
"grad_norm": 84.0,
"learning_rate": 7.602670014573127e-07,
"loss": 2.3869,
"step": 1800
},
{
"epoch": 0.6179024716098864,
"grad_norm": 26.5,
"learning_rate": 7.432668427417013e-07,
"loss": 2.4197,
"step": 1850
},
{
"epoch": 0.6346025384101537,
"grad_norm": 68.0,
"learning_rate": 7.258898907423128e-07,
"loss": 2.38,
"step": 1900
},
{
"epoch": 0.6513026052104208,
"grad_norm": 75.0,
"learning_rate": 7.081630604251657e-07,
"loss": 2.4333,
"step": 1950
},
{
"epoch": 0.6680026720106881,
"grad_norm": 29.5,
"learning_rate": 6.901138086788807e-07,
"loss": 2.3658,
"step": 2000
},
{
"epoch": 0.6847027388109552,
"grad_norm": 27.375,
"learning_rate": 6.717700917870123e-07,
"loss": 2.3418,
"step": 2050
},
{
"epoch": 0.7014028056112225,
"grad_norm": 20.125,
"learning_rate": 6.531603221268764e-07,
"loss": 2.3305,
"step": 2100
},
{
"epoch": 0.7181028724114896,
"grad_norm": 82.0,
"learning_rate": 6.343133241619368e-07,
"loss": 2.2667,
"step": 2150
},
{
"epoch": 0.7348029392117569,
"grad_norm": 63.25,
"learning_rate": 6.152582897959174e-07,
"loss": 2.2144,
"step": 2200
},
{
"epoch": 0.751503006012024,
"grad_norm": 91.0,
"learning_rate": 5.960247331577903e-07,
"loss": 2.3461,
"step": 2250
},
{
"epoch": 0.7682030728122913,
"grad_norm": 40.5,
"learning_rate": 5.766424448876717e-07,
"loss": 2.3372,
"step": 2300
},
{
"epoch": 0.7849031396125584,
"grad_norm": 112.5,
"learning_rate": 5.571414459944348e-07,
"loss": 2.2002,
"step": 2350
},
{
"epoch": 0.8016032064128257,
"grad_norm": 30.375,
"learning_rate": 5.375519413565045e-07,
"loss": 2.3136,
"step": 2400
},
{
"epoch": 0.8183032732130928,
"grad_norm": 98.0,
"learning_rate": 5.179042729378615e-07,
"loss": 2.2905,
"step": 2450
},
{
"epoch": 0.8350033400133601,
"grad_norm": 34.0,
"learning_rate": 4.982288727917136e-07,
"loss": 2.2557,
"step": 2500
},
{
"epoch": 0.8517034068136272,
"grad_norm": 35.25,
"learning_rate": 4.785562159246301e-07,
"loss": 2.2342,
"step": 2550
},
{
"epoch": 0.8684034736138945,
"grad_norm": 80.0,
"learning_rate": 4.5891677309414674e-07,
"loss": 2.2967,
"step": 2600
},
{
"epoch": 0.8851035404141616,
"grad_norm": 162.0,
"learning_rate": 4.3934096361294974e-07,
"loss": 2.2667,
"step": 2650
},
{
"epoch": 0.9018036072144289,
"grad_norm": 208.0,
"learning_rate": 4.198591082327453e-07,
"loss": 2.2909,
"step": 2700
},
{
"epoch": 0.918503674014696,
"grad_norm": 149.0,
"learning_rate": 4.0050138218078553e-07,
"loss": 2.2519,
"step": 2750
},
{
"epoch": 0.9352037408149633,
"grad_norm": 53.25,
"learning_rate": 3.812977684217997e-07,
"loss": 2.1819,
"step": 2800
},
{
"epoch": 0.9519038076152304,
"grad_norm": 21.875,
"learning_rate": 3.622780112177145e-07,
"loss": 2.1872,
"step": 2850
},
{
"epoch": 0.9686038744154977,
"grad_norm": 79.5,
"learning_rate": 3.4347157005710127e-07,
"loss": 2.254,
"step": 2900
},
{
"epoch": 0.9853039412157648,
"grad_norm": 73.5,
"learning_rate": 3.249075740257048e-07,
"loss": 2.1445,
"step": 2950
},
{
"epoch": 1.0,
"eval_loss": 2.1521201133728027,
"eval_runtime": 175.2067,
"eval_samples_per_second": 9.201,
"eval_steps_per_second": 1.153,
"step": 2994
}
],
"logging_steps": 50,
"max_steps": 4491,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.795126539858739e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}