{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8795316493966963,
"eval_steps": 1000,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04397658246983481,
"grad_norm": 1.0395877361297607,
"learning_rate": 0.00019138082673702725,
"loss": 1.3457,
"step": 50
},
{
"epoch": 0.08795316493966962,
"grad_norm": 1.0277217626571655,
"learning_rate": 0.00018258575197889184,
"loss": 1.0388,
"step": 100
},
{
"epoch": 0.13192974740950444,
"grad_norm": 0.9229634404182434,
"learning_rate": 0.0001737906772207564,
"loss": 0.9245,
"step": 150
},
{
"epoch": 0.17590632987933924,
"grad_norm": 0.8828799724578857,
"learning_rate": 0.00016499560246262094,
"loss": 0.8399,
"step": 200
},
{
"epoch": 0.21988291234917406,
"grad_norm": 1.018609642982483,
"learning_rate": 0.0001562005277044855,
"loss": 0.8015,
"step": 250
},
{
"epoch": 0.2638594948190089,
"grad_norm": 0.9031912684440613,
"learning_rate": 0.00014740545294635005,
"loss": 0.7248,
"step": 300
},
{
"epoch": 0.3078360772888437,
"grad_norm": 0.8781272768974304,
"learning_rate": 0.0001386103781882146,
"loss": 0.684,
"step": 350
},
{
"epoch": 0.3518126597586785,
"grad_norm": 0.8193464279174805,
"learning_rate": 0.00012981530343007916,
"loss": 0.6587,
"step": 400
},
{
"epoch": 0.3957892422285133,
"grad_norm": 0.84972083568573,
"learning_rate": 0.00012102022867194372,
"loss": 0.6357,
"step": 450
},
{
"epoch": 0.43976582469834813,
"grad_norm": 0.9738919734954834,
"learning_rate": 0.00011222515391380828,
"loss": 0.6107,
"step": 500
},
{
"epoch": 0.48374240716818295,
"grad_norm": 0.9879764318466187,
"learning_rate": 0.00010343007915567282,
"loss": 0.573,
"step": 550
},
{
"epoch": 0.5277189896380178,
"grad_norm": 0.9049842953681946,
"learning_rate": 9.463500439753739e-05,
"loss": 0.5274,
"step": 600
},
{
"epoch": 0.5716955721078526,
"grad_norm": 0.9793794751167297,
"learning_rate": 8.583992963940193e-05,
"loss": 0.527,
"step": 650
},
{
"epoch": 0.6156721545776874,
"grad_norm": 1.096916913986206,
"learning_rate": 7.704485488126649e-05,
"loss": 0.5072,
"step": 700
},
{
"epoch": 0.6596487370475222,
"grad_norm": 0.9264512062072754,
"learning_rate": 6.824978012313104e-05,
"loss": 0.4914,
"step": 750
},
{
"epoch": 0.703625319517357,
"grad_norm": 0.8605396151542664,
"learning_rate": 5.94547053649956e-05,
"loss": 0.4893,
"step": 800
},
{
"epoch": 0.7476019019871918,
"grad_norm": 0.9258248209953308,
"learning_rate": 5.0659630606860164e-05,
"loss": 0.4682,
"step": 850
},
{
"epoch": 0.7915784844570266,
"grad_norm": 0.962844729423523,
"learning_rate": 4.186455584872472e-05,
"loss": 0.4536,
"step": 900
},
{
"epoch": 0.8355550669268614,
"grad_norm": 0.933445930480957,
"learning_rate": 3.306948109058927e-05,
"loss": 0.4425,
"step": 950
},
{
"epoch": 0.8795316493966963,
"grad_norm": 1.0119765996932983,
"learning_rate": 2.4274406332453827e-05,
"loss": 0.4259,
"step": 1000
},
{
"epoch": 0.8795316493966963,
"eval_loss": 0.40038564801216125,
"eval_runtime": 2826.8322,
"eval_samples_per_second": 6.156,
"eval_steps_per_second": 0.77,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 1137,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.440889920408453e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}