{
"best_global_step": 100,
"best_metric": 0.010165676474571228,
"best_model_checkpoint": "/teamspace/studios/this_studio/DATN/output/medgemma_finetuned/checkpoint-100",
"epoch": 5.778588807785888,
"eval_steps": 25,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.38929440389294406,
"grad_norm": 3.8712122440338135,
"learning_rate": 9.473684210526316e-05,
"loss": 0.4851,
"step": 10
},
{
"epoch": 0.7785888077858881,
"grad_norm": 1.850216269493103,
"learning_rate": 0.0002,
"loss": 0.0256,
"step": 20
},
{
"epoch": 0.9732360097323601,
"eval_loss": 0.015751386061310768,
"eval_runtime": 123.684,
"eval_samples_per_second": 0.412,
"eval_steps_per_second": 0.21,
"step": 25
},
{
"epoch": 1.1557177615571776,
"grad_norm": 0.2004503309726715,
"learning_rate": 0.00019814838910603481,
"loss": 0.0148,
"step": 30
},
{
"epoch": 1.5450121654501217,
"grad_norm": 0.07818326354026794,
"learning_rate": 0.0001926621256821922,
"loss": 0.0094,
"step": 40
},
{
"epoch": 1.9343065693430657,
"grad_norm": 0.0575609989464283,
"learning_rate": 0.00018374437823092724,
"loss": 0.009,
"step": 50
},
{
"epoch": 1.9343065693430657,
"eval_loss": 0.010756579227745533,
"eval_runtime": 101.2153,
"eval_samples_per_second": 0.504,
"eval_steps_per_second": 0.257,
"step": 50
},
{
"epoch": 2.3114355231143553,
"grad_norm": 0.03460681438446045,
"learning_rate": 0.0001717253907188477,
"loss": 0.0083,
"step": 60
},
{
"epoch": 2.7007299270072993,
"grad_norm": 0.03570393845438957,
"learning_rate": 0.0001570502529101896,
"loss": 0.008,
"step": 70
},
{
"epoch": 2.895377128953771,
"eval_loss": 0.011468109674751759,
"eval_runtime": 97.9517,
"eval_samples_per_second": 0.521,
"eval_steps_per_second": 0.265,
"step": 75
},
{
"epoch": 3.0778588807785887,
"grad_norm": 0.0510949082672596,
"learning_rate": 0.00014026241770569197,
"loss": 0.0078,
"step": 80
},
{
"epoch": 3.4671532846715327,
"grad_norm": 0.04285643249750137,
"learning_rate": 0.00012198357587636957,
"loss": 0.0073,
"step": 90
},
{
"epoch": 3.8564476885644767,
"grad_norm": 0.034959107637405396,
"learning_rate": 0.00010289063347542726,
"loss": 0.0075,
"step": 100
},
{
"epoch": 3.8564476885644767,
"eval_loss": 0.010165676474571228,
"eval_runtime": 98.2376,
"eval_samples_per_second": 0.519,
"eval_steps_per_second": 0.265,
"step": 100
},
{
"epoch": 4.233576642335766,
"grad_norm": 0.048688922077417374,
"learning_rate": 8.369064450581373e-05,
"loss": 0.0069,
"step": 110
},
{
"epoch": 4.622871046228711,
"grad_norm": 0.032025065273046494,
"learning_rate": 6.509462714233195e-05,
"loss": 0.0069,
"step": 120
},
{
"epoch": 4.817518248175182,
"eval_loss": 0.01066823210567236,
"eval_runtime": 98.7276,
"eval_samples_per_second": 0.517,
"eval_steps_per_second": 0.263,
"step": 125
},
{
"epoch": 5.0,
"grad_norm": 0.04177581146359444,
"learning_rate": 4.779123315167362e-05,
"loss": 0.0073,
"step": 130
},
{
"epoch": 5.3892944038929445,
"grad_norm": 0.021832605823874474,
"learning_rate": 3.242124559015234e-05,
"loss": 0.0068,
"step": 140
},
{
"epoch": 5.778588807785888,
"grad_norm": 0.025266585871577263,
"learning_rate": 1.9553849185948512e-05,
"loss": 0.0068,
"step": 150
},
{
"epoch": 5.778588807785888,
"eval_loss": 0.010451124049723148,
"eval_runtime": 97.7624,
"eval_samples_per_second": 0.522,
"eval_steps_per_second": 0.266,
"step": 150
}
],
"logging_steps": 10,
"max_steps": 182,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 50,
"early_stopping_threshold": 0.0001
},
"attributes": {
"early_stopping_patience_counter": 2
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.2357930956632294e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}