{
"best_metric": 1.6572628021240234,
"best_model_checkpoint": "./modele-socratique-sft\\checkpoint-171",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 171,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11695906432748537,
"grad_norm": 4.373384475708008,
"learning_rate": 3.3333333333333335e-05,
"loss": 5.2688,
"step": 10
},
{
"epoch": 0.23391812865497075,
"grad_norm": 7.579962253570557,
"learning_rate": 6.666666666666667e-05,
"loss": 4.2978,
"step": 20
},
{
"epoch": 0.3508771929824561,
"grad_norm": 3.2114369869232178,
"learning_rate": 0.0001,
"loss": 2.864,
"step": 30
},
{
"epoch": 0.4678362573099415,
"grad_norm": 2.231703758239746,
"learning_rate": 0.00013333333333333334,
"loss": 2.2745,
"step": 40
},
{
"epoch": 0.5847953216374269,
"grad_norm": 1.8353674411773682,
"learning_rate": 0.0001666666666666667,
"loss": 2.0056,
"step": 50
},
{
"epoch": 0.7017543859649122,
"grad_norm": 3.654597282409668,
"learning_rate": 0.0002,
"loss": 1.9087,
"step": 60
},
{
"epoch": 0.8187134502923976,
"grad_norm": 1.4905436038970947,
"learning_rate": 0.00019626168224299065,
"loss": 1.751,
"step": 70
},
{
"epoch": 0.935672514619883,
"grad_norm": 1.4564584493637085,
"learning_rate": 0.00019252336448598133,
"loss": 1.7465,
"step": 80
},
{
"epoch": 0.9941520467836257,
"eval_loss": 1.743960976600647,
"eval_runtime": 1.1773,
"eval_samples_per_second": 64.554,
"eval_steps_per_second": 8.494,
"step": 85
},
{
"epoch": 1.0526315789473684,
"grad_norm": 1.3438069820404053,
"learning_rate": 0.00018878504672897197,
"loss": 1.6509,
"step": 90
},
{
"epoch": 1.1695906432748537,
"grad_norm": 1.4461097717285156,
"learning_rate": 0.00018504672897196262,
"loss": 1.5477,
"step": 100
},
{
"epoch": 1.286549707602339,
"grad_norm": 1.7722139358520508,
"learning_rate": 0.0001813084112149533,
"loss": 1.6173,
"step": 110
},
{
"epoch": 1.4035087719298245,
"grad_norm": 1.3352079391479492,
"learning_rate": 0.00017757009345794393,
"loss": 1.6119,
"step": 120
},
{
"epoch": 1.52046783625731,
"grad_norm": 1.5055865049362183,
"learning_rate": 0.00017383177570093458,
"loss": 1.5867,
"step": 130
},
{
"epoch": 1.6374269005847952,
"grad_norm": 1.6256341934204102,
"learning_rate": 0.00017009345794392523,
"loss": 1.5639,
"step": 140
},
{
"epoch": 1.7543859649122808,
"grad_norm": 1.4370089769363403,
"learning_rate": 0.0001663551401869159,
"loss": 1.526,
"step": 150
},
{
"epoch": 1.871345029239766,
"grad_norm": 1.7688274383544922,
"learning_rate": 0.00016261682242990654,
"loss": 1.5438,
"step": 160
},
{
"epoch": 1.9883040935672516,
"grad_norm": 1.3985792398452759,
"learning_rate": 0.0001588785046728972,
"loss": 1.5231,
"step": 170
},
{
"epoch": 2.0,
"eval_loss": 1.6572628021240234,
"eval_runtime": 1.1323,
"eval_samples_per_second": 67.121,
"eval_steps_per_second": 8.832,
"step": 171
}
],
"logging_steps": 10,
"max_steps": 595,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 935836976234496.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}