{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9994772608468374,
"eval_steps": 500,
"global_step": 239,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04181913225300575,
"grad_norm": 0.8873776197433472,
"learning_rate": 2e-05,
"loss": 1.8208,
"step": 10
},
{
"epoch": 0.0836382645060115,
"grad_norm": 0.5825883150100708,
"learning_rate": 4e-05,
"loss": 1.7048,
"step": 20
},
{
"epoch": 0.12545739675901724,
"grad_norm": 0.24893978238105774,
"learning_rate": 6e-05,
"loss": 1.4799,
"step": 30
},
{
"epoch": 0.167276529012023,
"grad_norm": 0.2235335111618042,
"learning_rate": 8e-05,
"loss": 1.3812,
"step": 40
},
{
"epoch": 0.20909566126502874,
"grad_norm": 0.18197111785411835,
"learning_rate": 0.0001,
"loss": 1.2497,
"step": 50
},
{
"epoch": 0.2509147935180345,
"grad_norm": 0.1728171855211258,
"learning_rate": 0.00012,
"loss": 1.1161,
"step": 60
},
{
"epoch": 0.29273392577104024,
"grad_norm": 0.1883663833141327,
"learning_rate": 0.00014,
"loss": 1.0589,
"step": 70
},
{
"epoch": 0.334553058024046,
"grad_norm": 0.19224515557289124,
"learning_rate": 0.00016,
"loss": 0.9908,
"step": 80
},
{
"epoch": 0.37637219027705177,
"grad_norm": 0.20248346030712128,
"learning_rate": 0.00018,
"loss": 0.9601,
"step": 90
},
{
"epoch": 0.4181913225300575,
"grad_norm": 0.2133675217628479,
"learning_rate": 0.0002,
"loss": 0.9457,
"step": 100
},
{
"epoch": 0.46001045478306324,
"grad_norm": 0.2351628541946411,
"learning_rate": 0.00019965482753212156,
"loss": 0.8952,
"step": 110
},
{
"epoch": 0.501829587036069,
"grad_norm": 0.22406116127967834,
"learning_rate": 0.00019862169300913785,
"loss": 0.8748,
"step": 120
},
{
"epoch": 0.5436487192890748,
"grad_norm": 0.2309526801109314,
"learning_rate": 0.0001969077286229078,
"loss": 0.9075,
"step": 130
},
{
"epoch": 0.5854678515420805,
"grad_norm": 0.25324976444244385,
"learning_rate": 0.00019452476663977248,
"loss": 0.8719,
"step": 140
},
{
"epoch": 0.6272869837950863,
"grad_norm": 0.22996799647808075,
"learning_rate": 0.00019148925771710347,
"loss": 0.8529,
"step": 150
},
{
"epoch": 0.669106116048092,
"grad_norm": 0.23832310736179352,
"learning_rate": 0.00018782215733702286,
"loss": 0.8631,
"step": 160
},
{
"epoch": 0.7109252483010977,
"grad_norm": 0.23019501566886902,
"learning_rate": 0.00018354878114129367,
"loss": 0.8408,
"step": 170
},
{
"epoch": 0.7527443805541035,
"grad_norm": 0.26359736919403076,
"learning_rate": 0.0001786986301660689,
"loss": 0.835,
"step": 180
},
{
"epoch": 0.7945635128071092,
"grad_norm": 0.24976789951324463,
"learning_rate": 0.00017330518718298264,
"loss": 0.8232,
"step": 190
},
{
"epoch": 0.836382645060115,
"grad_norm": 0.26566267013549805,
"learning_rate": 0.00016740568555253155,
"loss": 0.8125,
"step": 200
},
{
"epoch": 0.8782017773131208,
"grad_norm": 0.24996772408485413,
"learning_rate": 0.00016104085218545633,
"loss": 0.8277,
"step": 210
},
{
"epoch": 0.9200209095661265,
"grad_norm": 0.22901593148708344,
"learning_rate": 0.00015425462638657595,
"loss": 0.8178,
"step": 220
},
{
"epoch": 0.9618400418191323,
"grad_norm": 0.25155940651893616,
"learning_rate": 0.00014709385652202203,
"loss": 0.7874,
"step": 230
}
],
"logging_steps": 10,
"max_steps": 478,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.32804778098688e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}