{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 0.03799356892704964,
"learning_rate": 0.00019783783783783784,
"loss": 0.0301,
"step": 10
},
{
"epoch": 0.16,
"grad_norm": 0.02971748076379299,
"learning_rate": 0.00019243243243243245,
"loss": 0.0302,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 0.05119084566831589,
"learning_rate": 0.00018702702702702703,
"loss": 0.0307,
"step": 30
},
{
"epoch": 0.32,
"grad_norm": 0.05611675605177879,
"learning_rate": 0.00018162162162162164,
"loss": 0.0304,
"step": 40
},
{
"epoch": 0.4,
"grad_norm": 0.045043572783470154,
"learning_rate": 0.00017621621621621622,
"loss": 0.0327,
"step": 50
},
{
"epoch": 0.48,
"grad_norm": 0.06347468495368958,
"learning_rate": 0.00017081081081081083,
"loss": 0.0325,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 0.04858771339058876,
"learning_rate": 0.0001654054054054054,
"loss": 0.0304,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 0.047175925225019455,
"learning_rate": 0.00016,
"loss": 0.032,
"step": 80
},
{
"epoch": 0.72,
"grad_norm": 0.04721968621015549,
"learning_rate": 0.0001545945945945946,
"loss": 0.0306,
"step": 90
},
{
"epoch": 0.8,
"grad_norm": 0.0320628359913826,
"learning_rate": 0.0001491891891891892,
"loss": 0.0318,
"step": 100
},
{
"epoch": 0.88,
"grad_norm": 0.030629610642790794,
"learning_rate": 0.00014378378378378378,
"loss": 0.0311,
"step": 110
},
{
"epoch": 0.96,
"grad_norm": 0.02791382186114788,
"learning_rate": 0.0001383783783783784,
"loss": 0.031,
"step": 120
},
{
"epoch": 1.04,
"grad_norm": 0.03312250226736069,
"learning_rate": 0.00013297297297297297,
"loss": 0.0302,
"step": 130
},
{
"epoch": 1.12,
"grad_norm": 0.04184908792376518,
"learning_rate": 0.00012756756756756758,
"loss": 0.0304,
"step": 140
},
{
"epoch": 1.2,
"grad_norm": 0.038243941962718964,
"learning_rate": 0.00012216216216216216,
"loss": 0.0305,
"step": 150
},
{
"epoch": 1.28,
"grad_norm": 0.024454083293676376,
"learning_rate": 0.00011675675675675676,
"loss": 0.0307,
"step": 160
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.021402811631560326,
"learning_rate": 0.00011135135135135135,
"loss": 0.0303,
"step": 170
},
{
"epoch": 1.44,
"grad_norm": 0.030341940000653267,
"learning_rate": 0.00010594594594594595,
"loss": 0.0309,
"step": 180
},
{
"epoch": 1.52,
"grad_norm": 0.030703404918313026,
"learning_rate": 0.00010054054054054053,
"loss": 0.0299,
"step": 190
},
{
"epoch": 1.6,
"grad_norm": 0.03214750811457634,
"learning_rate": 9.513513513513514e-05,
"loss": 0.0308,
"step": 200
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.04272659495472908,
"learning_rate": 8.972972972972973e-05,
"loss": 0.0295,
"step": 210
},
{
"epoch": 1.76,
"grad_norm": 0.024962130934000015,
"learning_rate": 8.432432432432433e-05,
"loss": 0.0302,
"step": 220
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.03459068015217781,
"learning_rate": 7.891891891891892e-05,
"loss": 0.0296,
"step": 230
},
{
"epoch": 1.92,
"grad_norm": 0.033658791333436966,
"learning_rate": 7.351351351351352e-05,
"loss": 0.0308,
"step": 240
},
{
"epoch": 2.0,
"grad_norm": 0.026565387845039368,
"learning_rate": 6.810810810810811e-05,
"loss": 0.0309,
"step": 250
},
{
"epoch": 2.08,
"grad_norm": 0.028347592800855637,
"learning_rate": 6.27027027027027e-05,
"loss": 0.0305,
"step": 260
},
{
"epoch": 2.16,
"grad_norm": 0.02861287258565426,
"learning_rate": 5.7297297297297305e-05,
"loss": 0.0298,
"step": 270
},
{
"epoch": 2.24,
"grad_norm": 0.01953195221722126,
"learning_rate": 5.18918918918919e-05,
"loss": 0.0302,
"step": 280
},
{
"epoch": 2.32,
"grad_norm": 0.025148887187242508,
"learning_rate": 4.648648648648649e-05,
"loss": 0.0297,
"step": 290
},
{
"epoch": 2.4,
"grad_norm": 0.021128997206687927,
"learning_rate": 4.108108108108109e-05,
"loss": 0.0296,
"step": 300
},
{
"epoch": 2.48,
"grad_norm": 0.020117945969104767,
"learning_rate": 3.567567567567568e-05,
"loss": 0.0295,
"step": 310
},
{
"epoch": 2.56,
"grad_norm": 0.021842550486326218,
"learning_rate": 3.0270270270270272e-05,
"loss": 0.0295,
"step": 320
},
{
"epoch": 2.64,
"grad_norm": 0.028773466125130653,
"learning_rate": 2.486486486486487e-05,
"loss": 0.03,
"step": 330
},
{
"epoch": 2.7199999999999998,
"grad_norm": 0.024690190330147743,
"learning_rate": 1.9459459459459463e-05,
"loss": 0.0301,
"step": 340
},
{
"epoch": 2.8,
"grad_norm": 0.020363792777061462,
"learning_rate": 1.4054054054054055e-05,
"loss": 0.0291,
"step": 350
},
{
"epoch": 2.88,
"grad_norm": 0.027392864227294922,
"learning_rate": 8.64864864864865e-06,
"loss": 0.0293,
"step": 360
},
{
"epoch": 2.96,
"grad_norm": 0.02251037396490574,
"learning_rate": 3.2432432432432437e-06,
"loss": 0.0303,
"step": 370
}
],
"logging_steps": 10,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3833317953683456e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}