{
"best_global_step": 100,
"best_metric": 1.053038239479065,
"best_model_checkpoint": "./luau-model/checkpoint-100",
"epoch": 0.6944444444444444,
"eval_steps": 100,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.034722222222222224,
"grad_norm": 0.19553522765636444,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.5215,
"step": 5
},
{
"epoch": 0.06944444444444445,
"grad_norm": 0.18851090967655182,
"learning_rate": 3.6e-05,
"loss": 1.4318,
"step": 10
},
{
"epoch": 0.10416666666666667,
"grad_norm": 0.2147289365530014,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.4315,
"step": 15
},
{
"epoch": 0.1388888888888889,
"grad_norm": 0.22880084812641144,
"learning_rate": 7.6e-05,
"loss": 1.3475,
"step": 20
},
{
"epoch": 0.1736111111111111,
"grad_norm": 0.27205583453178406,
"learning_rate": 9.6e-05,
"loss": 1.3244,
"step": 25
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.2366049736738205,
"learning_rate": 0.000116,
"loss": 1.3784,
"step": 30
},
{
"epoch": 0.24305555555555555,
"grad_norm": 0.2528875768184662,
"learning_rate": 0.00013600000000000003,
"loss": 1.3578,
"step": 35
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.2789267897605896,
"learning_rate": 0.00015600000000000002,
"loss": 1.2761,
"step": 40
},
{
"epoch": 0.3125,
"grad_norm": 0.36334967613220215,
"learning_rate": 0.00017600000000000002,
"loss": 1.3073,
"step": 45
},
{
"epoch": 0.3472222222222222,
"grad_norm": 0.3194734454154968,
"learning_rate": 0.000196,
"loss": 1.2374,
"step": 50
},
{
"epoch": 0.3819444444444444,
"grad_norm": 0.4422107934951782,
"learning_rate": 0.00019790575916230367,
"loss": 1.2243,
"step": 55
},
{
"epoch": 0.4166666666666667,
"grad_norm": 0.36031872034072876,
"learning_rate": 0.00019528795811518326,
"loss": 1.236,
"step": 60
},
{
"epoch": 0.4513888888888889,
"grad_norm": 0.40430501103401184,
"learning_rate": 0.00019267015706806283,
"loss": 1.1484,
"step": 65
},
{
"epoch": 0.4861111111111111,
"grad_norm": 0.39032667875289917,
"learning_rate": 0.00019005235602094243,
"loss": 1.1352,
"step": 70
},
{
"epoch": 0.5208333333333334,
"grad_norm": 0.3691488206386566,
"learning_rate": 0.00018743455497382202,
"loss": 1.0341,
"step": 75
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.3820245563983917,
"learning_rate": 0.00018481675392670156,
"loss": 0.9911,
"step": 80
},
{
"epoch": 0.5902777777777778,
"grad_norm": 0.40899762511253357,
"learning_rate": 0.00018219895287958115,
"loss": 1.077,
"step": 85
},
{
"epoch": 0.625,
"grad_norm": 0.3846423029899597,
"learning_rate": 0.00017958115183246075,
"loss": 1.1241,
"step": 90
},
{
"epoch": 0.6597222222222222,
"grad_norm": 0.43537914752960205,
"learning_rate": 0.00017696335078534032,
"loss": 1.0522,
"step": 95
},
{
"epoch": 0.6944444444444444,
"grad_norm": 0.40991654992103577,
"learning_rate": 0.0001743455497382199,
"loss": 1.0116,
"step": 100
},
{
"epoch": 0.6944444444444444,
"eval_loss": 1.053038239479065,
"eval_runtime": 19.8507,
"eval_samples_per_second": 9.622,
"eval_steps_per_second": 1.612,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 432,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.46522952286208e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}