{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 196,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05118362124120281,
"grad_norm": 2.529698610305786,
"learning_rate": 4.5e-06,
"loss": 1.1138,
"step": 10
},
{
"epoch": 0.10236724248240563,
"grad_norm": 0.9076588153839111,
"learning_rate": 9.5e-06,
"loss": 1.0129,
"step": 20
},
{
"epoch": 0.15355086372360843,
"grad_norm": 0.6361640095710754,
"learning_rate": 9.935617890443557e-06,
"loss": 0.9279,
"step": 30
},
{
"epoch": 0.20473448496481125,
"grad_norm": 0.3602381944656372,
"learning_rate": 9.715190263989562e-06,
"loss": 0.8757,
"step": 40
},
{
"epoch": 0.2559181062060141,
"grad_norm": 0.38964927196502686,
"learning_rate": 9.344925248293837e-06,
"loss": 0.8633,
"step": 50
},
{
"epoch": 0.30710172744721687,
"grad_norm": 0.3243643343448639,
"learning_rate": 8.836588973708129e-06,
"loss": 0.8523,
"step": 60
},
{
"epoch": 0.3582853486884197,
"grad_norm": 0.3164478540420532,
"learning_rate": 8.206335142623305e-06,
"loss": 0.8351,
"step": 70
},
{
"epoch": 0.4094689699296225,
"grad_norm": 0.3379630446434021,
"learning_rate": 7.474191703716339e-06,
"loss": 0.8287,
"step": 80
},
{
"epoch": 0.46065259117082535,
"grad_norm": 0.3585202395915985,
"learning_rate": 6.663424411982121e-06,
"loss": 0.8378,
"step": 90
},
{
"epoch": 0.5118362124120281,
"grad_norm": 0.2979970872402191,
"learning_rate": 5.799797499079301e-06,
"loss": 0.8304,
"step": 100
},
{
"epoch": 0.5630198336532309,
"grad_norm": 0.3242439329624176,
"learning_rate": 4.91075494810577e-06,
"loss": 0.8161,
"step": 110
},
{
"epoch": 0.6142034548944337,
"grad_norm": 0.3153151273727417,
"learning_rate": 4.02454838991936e-06,
"loss": 0.809,
"step": 120
},
{
"epoch": 0.6653870761356366,
"grad_norm": 0.27772825956344604,
"learning_rate": 3.169339334358105e-06,
"loss": 0.8139,
"step": 130
},
{
"epoch": 0.7165706973768394,
"grad_norm": 0.2900350093841553,
"learning_rate": 2.372304265289436e-06,
"loss": 0.8131,
"step": 140
},
{
"epoch": 0.7677543186180422,
"grad_norm": 0.2812083661556244,
"learning_rate": 1.6587710374121203e-06,
"loss": 0.8024,
"step": 150
},
{
"epoch": 0.818937939859245,
"grad_norm": 0.2891785204410553,
"learning_rate": 1.0514140180404202e-06,
"loss": 0.8153,
"step": 160
},
{
"epoch": 0.8701215611004478,
"grad_norm": 0.2761625051498413,
"learning_rate": 5.69533550325988e-07,
"loss": 0.8039,
"step": 170
},
{
"epoch": 0.9213051823416507,
"grad_norm": 0.2586818337440491,
"learning_rate": 2.2844263484068097e-07,
"loss": 0.8165,
"step": 180
},
{
"epoch": 0.9724888035828535,
"grad_norm": 0.2864827811717987,
"learning_rate": 3.8980319302407976e-08,
"loss": 0.8128,
"step": 190
},
{
"epoch": 1.0,
"step": 196,
"total_flos": 3.2415631213991035e+18,
"train_loss": 0.8530918797668146,
"train_runtime": 3528.357,
"train_samples_per_second": 7.085,
"train_steps_per_second": 0.056
}
],
"logging_steps": 10,
"max_steps": 196,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 196,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.2415631213991035e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}