{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 162,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"entropy": 2.4490234375,
"epoch": 0.18518518518518517,
"grad_norm": 1.605711817741394,
"learning_rate": 0.00019997919281892067,
"loss": 2.744666862487793,
"mean_token_accuracy": 0.4455879807472229,
"num_tokens": 35176.0,
"step": 10
},
{
"entropy": 2.14208984375,
"epoch": 0.37037037037037035,
"grad_norm": 1.9346004724502563,
"learning_rate": 0.00019749279121818235,
"loss": 1.9460367202758788,
"mean_token_accuracy": 0.5691858988255263,
"num_tokens": 70631.0,
"step": 20
},
{
"entropy": 1.50927734375,
"epoch": 0.5555555555555556,
"grad_norm": 0.859683096408844,
"learning_rate": 0.00019096319953545185,
"loss": 1.5292683601379395,
"mean_token_accuracy": 0.6657284118235112,
"num_tokens": 106615.0,
"step": 30
},
{
"entropy": 1.45556640625,
"epoch": 0.7407407407407407,
"grad_norm": 0.7568148970603943,
"learning_rate": 0.000180661210923753,
"loss": 1.4439837455749511,
"mean_token_accuracy": 0.6813905350863934,
"num_tokens": 142274.0,
"step": 40
},
{
"entropy": 1.4376953125,
"epoch": 0.9259259259259259,
"grad_norm": 0.7695605158805847,
"learning_rate": 0.00016701406618375596,
"loss": 1.434541606903076,
"mean_token_accuracy": 0.6835584975779057,
"num_tokens": 177779.0,
"step": 50
},
{
"entropy": 1.4240234375,
"epoch": 1.1111111111111112,
"grad_norm": 0.7485360503196716,
"learning_rate": 0.00015058773536894685,
"loss": 1.398463821411133,
"mean_token_accuracy": 0.6880027234554291,
"num_tokens": 213250.0,
"step": 60
},
{
"entropy": 1.3787109375,
"epoch": 1.2962962962962963,
"grad_norm": 0.8382265567779541,
"learning_rate": 0.00013206344605527355,
"loss": 1.3820528030395507,
"mean_token_accuracy": 0.6910028986632824,
"num_tokens": 249008.0,
"step": 70
},
{
"entropy": 1.39111328125,
"epoch": 1.4814814814814814,
"grad_norm": 0.8756011128425598,
"learning_rate": 0.000112209431687416,
"loss": 1.3737930297851562,
"mean_token_accuracy": 0.6952961266040802,
"num_tokens": 284762.0,
"step": 80
},
{
"entropy": 1.3533203125,
"epoch": 1.6666666666666665,
"grad_norm": 0.8062230348587036,
"learning_rate": 9.184907164529368e-05,
"loss": 1.3459887504577637,
"mean_token_accuracy": 0.6956925392150879,
"num_tokens": 320323.0,
"step": 90
},
{
"entropy": 1.3333984375,
"epoch": 1.8518518518518519,
"grad_norm": 0.7905425429344177,
"learning_rate": 7.182674431585704e-05,
"loss": 1.3476751327514649,
"mean_token_accuracy": 0.6990618519484997,
"num_tokens": 355791.0,
"step": 100
},
{
"entropy": 1.35595703125,
"epoch": 2.037037037037037,
"grad_norm": 0.8151571154594421,
"learning_rate": 5.297280930072632e-05,
"loss": 1.3445645332336427,
"mean_token_accuracy": 0.6957452893257141,
"num_tokens": 391407.0,
"step": 110
},
{
"entropy": 1.36494140625,
"epoch": 2.2222222222222223,
"grad_norm": 0.77378249168396,
"learning_rate": 3.606917100644487e-05,
"loss": 1.3390249252319335,
"mean_token_accuracy": 0.698172252625227,
"num_tokens": 427190.0,
"step": 120
},
{
"entropy": 1.342578125,
"epoch": 2.4074074074074074,
"grad_norm": 0.893750786781311,
"learning_rate": 2.1816851753197032e-05,
"loss": 1.322931671142578,
"mean_token_accuracy": 0.6984936438500882,
"num_tokens": 462696.0,
"step": 130
},
{
"entropy": 1.339453125,
"epoch": 2.5925925925925926,
"grad_norm": 0.82007896900177,
"learning_rate": 1.0806919199730615e-05,
"loss": 1.3379673957824707,
"mean_token_accuracy": 0.702365966886282,
"num_tokens": 498412.0,
"step": 140
},
{
"entropy": 1.33486328125,
"epoch": 2.7777777777777777,
"grad_norm": 0.8532996773719788,
"learning_rate": 3.495973773086014e-06,
"loss": 1.3067991256713867,
"mean_token_accuracy": 0.701677817851305,
"num_tokens": 533710.0,
"step": 150
},
{
"entropy": 1.347265625,
"epoch": 2.962962962962963,
"grad_norm": 0.8509896397590637,
"learning_rate": 1.8721268066330676e-07,
"loss": 1.3350863456726074,
"mean_token_accuracy": 0.7003593638539314,
"num_tokens": 569320.0,
"step": 160
}
],
"logging_steps": 10,
"max_steps": 162,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1796759427633408.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}