{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7238683524756298,
"eval_steps": 10000,
"global_step": 30000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.412894508252099e-05,
"grad_norm": 4.8172993659973145,
"learning_rate": 0.0,
"loss": 3.8384,
"step": 1
},
{
"epoch": 0.24128945082520992,
"grad_norm": 2.6814990043640137,
"learning_rate": 3.6183432666554063e-06,
"loss": 2.9764,
"step": 10000
},
{
"epoch": 0.24128945082520992,
"eval_cosine_accuracy": 0.9525219202041626,
"eval_loss": 1.2816005945205688,
"eval_runtime": 25.5253,
"eval_samples_per_second": 370.495,
"eval_steps_per_second": 0.744,
"step": 10000
},
{
"epoch": 0.48257890165041983,
"grad_norm": 2.516390323638916,
"learning_rate": 7.235962647491736e-06,
"loss": 2.3257,
"step": 20000
},
{
"epoch": 0.48257890165041983,
"eval_cosine_accuracy": 0.9563286304473877,
"eval_loss": 1.2216517925262451,
"eval_runtime": 27.1816,
"eval_samples_per_second": 347.919,
"eval_steps_per_second": 0.699,
"step": 20000
},
{
"epoch": 0.7238683524756298,
"grad_norm": 2.6789209842681885,
"learning_rate": 1.0854305914147143e-05,
"loss": 2.1003,
"step": 30000
},
{
"epoch": 0.7238683524756298,
"eval_cosine_accuracy": 0.9602410793304443,
"eval_loss": 1.1404365301132202,
"eval_runtime": 29.5686,
"eval_samples_per_second": 319.833,
"eval_steps_per_second": 0.643,
"step": 30000
}
],
"logging_steps": 10000,
"max_steps": 165776,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 30000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 512,
"trial_name": null,
"trial_params": null
}