{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 82,
"global_step": 82,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012195121951219513,
"grad_norm": 0.6484375,
"learning_rate": 1e-05,
"loss": 2.3233,
"step": 1
},
{
"epoch": 0.024390243902439025,
"grad_norm": 0.62109375,
"learning_rate": 9.878048780487805e-06,
"loss": 2.4552,
"step": 2
},
{
"epoch": 0.036585365853658534,
"grad_norm": 0.640625,
"learning_rate": 9.756097560975611e-06,
"loss": 2.4145,
"step": 3
},
{
"epoch": 0.04878048780487805,
"grad_norm": 0.59765625,
"learning_rate": 9.634146341463415e-06,
"loss": 2.3799,
"step": 4
},
{
"epoch": 0.06097560975609756,
"grad_norm": 0.5625,
"learning_rate": 9.51219512195122e-06,
"loss": 2.3369,
"step": 5
},
{
"epoch": 0.07317073170731707,
"grad_norm": 0.54296875,
"learning_rate": 9.390243902439025e-06,
"loss": 2.3515,
"step": 6
},
{
"epoch": 0.08536585365853659,
"grad_norm": 0.498046875,
"learning_rate": 9.268292682926831e-06,
"loss": 2.177,
"step": 7
},
{
"epoch": 0.0975609756097561,
"grad_norm": 0.48828125,
"learning_rate": 9.146341463414635e-06,
"loss": 2.2079,
"step": 8
},
{
"epoch": 0.10975609756097561,
"grad_norm": 0.4921875,
"learning_rate": 9.02439024390244e-06,
"loss": 2.2779,
"step": 9
},
{
"epoch": 0.12195121951219512,
"grad_norm": 0.451171875,
"learning_rate": 8.902439024390244e-06,
"loss": 2.1717,
"step": 10
},
{
"epoch": 0.13414634146341464,
"grad_norm": 0.48828125,
"learning_rate": 8.78048780487805e-06,
"loss": 2.2514,
"step": 11
},
{
"epoch": 0.14634146341463414,
"grad_norm": 0.4609375,
"learning_rate": 8.658536585365854e-06,
"loss": 2.2022,
"step": 12
},
{
"epoch": 0.15853658536585366,
"grad_norm": 0.431640625,
"learning_rate": 8.536585365853658e-06,
"loss": 2.0803,
"step": 13
},
{
"epoch": 0.17073170731707318,
"grad_norm": 0.4453125,
"learning_rate": 8.414634146341464e-06,
"loss": 2.1038,
"step": 14
},
{
"epoch": 0.18292682926829268,
"grad_norm": 0.451171875,
"learning_rate": 8.292682926829268e-06,
"loss": 2.0937,
"step": 15
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.44921875,
"learning_rate": 8.170731707317073e-06,
"loss": 2.0558,
"step": 16
},
{
"epoch": 0.2073170731707317,
"grad_norm": 0.431640625,
"learning_rate": 8.048780487804879e-06,
"loss": 2.0218,
"step": 17
},
{
"epoch": 0.21951219512195122,
"grad_norm": 0.435546875,
"learning_rate": 7.926829268292685e-06,
"loss": 2.0406,
"step": 18
},
{
"epoch": 0.23170731707317074,
"grad_norm": 0.416015625,
"learning_rate": 7.804878048780489e-06,
"loss": 1.9861,
"step": 19
},
{
"epoch": 0.24390243902439024,
"grad_norm": 0.439453125,
"learning_rate": 7.682926829268293e-06,
"loss": 2.0237,
"step": 20
},
{
"epoch": 0.25609756097560976,
"grad_norm": 0.3984375,
"learning_rate": 7.560975609756098e-06,
"loss": 1.9977,
"step": 21
},
{
"epoch": 0.2682926829268293,
"grad_norm": 0.3828125,
"learning_rate": 7.439024390243903e-06,
"loss": 1.972,
"step": 22
},
{
"epoch": 0.2804878048780488,
"grad_norm": 0.375,
"learning_rate": 7.317073170731707e-06,
"loss": 1.9552,
"step": 23
},
{
"epoch": 0.2926829268292683,
"grad_norm": 0.384765625,
"learning_rate": 7.1951219512195125e-06,
"loss": 1.9581,
"step": 24
},
{
"epoch": 0.3048780487804878,
"grad_norm": 0.3671875,
"learning_rate": 7.0731707317073175e-06,
"loss": 1.9023,
"step": 25
},
{
"epoch": 0.3170731707317073,
"grad_norm": 0.400390625,
"learning_rate": 6.951219512195122e-06,
"loss": 1.9077,
"step": 26
},
{
"epoch": 0.32926829268292684,
"grad_norm": 0.353515625,
"learning_rate": 6.829268292682928e-06,
"loss": 1.8831,
"step": 27
},
{
"epoch": 0.34146341463414637,
"grad_norm": 0.361328125,
"learning_rate": 6.707317073170733e-06,
"loss": 1.8655,
"step": 28
},
{
"epoch": 0.35365853658536583,
"grad_norm": 0.345703125,
"learning_rate": 6.585365853658538e-06,
"loss": 1.796,
"step": 29
},
{
"epoch": 0.36585365853658536,
"grad_norm": 0.328125,
"learning_rate": 6.463414634146342e-06,
"loss": 1.8275,
"step": 30
},
{
"epoch": 0.3780487804878049,
"grad_norm": 0.3203125,
"learning_rate": 6.341463414634147e-06,
"loss": 1.7658,
"step": 31
},
{
"epoch": 0.3902439024390244,
"grad_norm": 0.33203125,
"learning_rate": 6.219512195121951e-06,
"loss": 1.8462,
"step": 32
},
{
"epoch": 0.4024390243902439,
"grad_norm": 0.5390625,
"learning_rate": 6.0975609756097564e-06,
"loss": 1.7359,
"step": 33
},
{
"epoch": 0.4146341463414634,
"grad_norm": 0.326171875,
"learning_rate": 5.9756097560975615e-06,
"loss": 1.7696,
"step": 34
},
{
"epoch": 0.4268292682926829,
"grad_norm": 0.3125,
"learning_rate": 5.853658536585366e-06,
"loss": 1.7683,
"step": 35
},
{
"epoch": 0.43902439024390244,
"grad_norm": 0.33203125,
"learning_rate": 5.731707317073171e-06,
"loss": 1.8454,
"step": 36
},
{
"epoch": 0.45121951219512196,
"grad_norm": 0.318359375,
"learning_rate": 5.609756097560977e-06,
"loss": 1.7914,
"step": 37
},
{
"epoch": 0.4634146341463415,
"grad_norm": 0.29296875,
"learning_rate": 5.487804878048781e-06,
"loss": 1.7627,
"step": 38
},
{
"epoch": 0.47560975609756095,
"grad_norm": 0.30078125,
"learning_rate": 5.365853658536586e-06,
"loss": 1.7751,
"step": 39
},
{
"epoch": 0.4878048780487805,
"grad_norm": 0.298828125,
"learning_rate": 5.243902439024391e-06,
"loss": 1.7449,
"step": 40
},
{
"epoch": 0.5,
"grad_norm": 0.3046875,
"learning_rate": 5.121951219512195e-06,
"loss": 1.7489,
"step": 41
},
{
"epoch": 0.5121951219512195,
"grad_norm": 0.298828125,
"learning_rate": 5e-06,
"loss": 1.7532,
"step": 42
},
{
"epoch": 0.524390243902439,
"grad_norm": 0.271484375,
"learning_rate": 4.8780487804878055e-06,
"loss": 1.7089,
"step": 43
},
{
"epoch": 0.5365853658536586,
"grad_norm": 0.296875,
"learning_rate": 4.75609756097561e-06,
"loss": 1.784,
"step": 44
},
{
"epoch": 0.5487804878048781,
"grad_norm": 0.283203125,
"learning_rate": 4.634146341463416e-06,
"loss": 1.6915,
"step": 45
},
{
"epoch": 0.5609756097560976,
"grad_norm": 0.283203125,
"learning_rate": 4.51219512195122e-06,
"loss": 1.6932,
"step": 46
},
{
"epoch": 0.573170731707317,
"grad_norm": 0.26953125,
"learning_rate": 4.390243902439025e-06,
"loss": 1.6824,
"step": 47
},
{
"epoch": 0.5853658536585366,
"grad_norm": 0.28125,
"learning_rate": 4.268292682926829e-06,
"loss": 1.7119,
"step": 48
},
{
"epoch": 0.5975609756097561,
"grad_norm": 0.2890625,
"learning_rate": 4.146341463414634e-06,
"loss": 1.7439,
"step": 49
},
{
"epoch": 0.6097560975609756,
"grad_norm": 0.265625,
"learning_rate": 4.024390243902439e-06,
"loss": 1.6766,
"step": 50
},
{
"epoch": 0.6219512195121951,
"grad_norm": 0.28515625,
"learning_rate": 3.902439024390244e-06,
"loss": 1.7051,
"step": 51
},
{
"epoch": 0.6341463414634146,
"grad_norm": 0.267578125,
"learning_rate": 3.780487804878049e-06,
"loss": 1.6532,
"step": 52
},
{
"epoch": 0.6463414634146342,
"grad_norm": 0.26953125,
"learning_rate": 3.6585365853658537e-06,
"loss": 1.6673,
"step": 53
},
{
"epoch": 0.6585365853658537,
"grad_norm": 0.255859375,
"learning_rate": 3.5365853658536588e-06,
"loss": 1.6357,
"step": 54
},
{
"epoch": 0.6707317073170732,
"grad_norm": 0.26171875,
"learning_rate": 3.414634146341464e-06,
"loss": 1.6375,
"step": 55
},
{
"epoch": 0.6829268292682927,
"grad_norm": 0.248046875,
"learning_rate": 3.292682926829269e-06,
"loss": 1.5942,
"step": 56
},
{
"epoch": 0.6951219512195121,
"grad_norm": 0.2578125,
"learning_rate": 3.1707317073170736e-06,
"loss": 1.6206,
"step": 57
},
{
"epoch": 0.7073170731707317,
"grad_norm": 0.25,
"learning_rate": 3.0487804878048782e-06,
"loss": 1.6198,
"step": 58
},
{
"epoch": 0.7195121951219512,
"grad_norm": 0.2578125,
"learning_rate": 2.926829268292683e-06,
"loss": 1.6647,
"step": 59
},
{
"epoch": 0.7317073170731707,
"grad_norm": 0.25,
"learning_rate": 2.8048780487804884e-06,
"loss": 1.6164,
"step": 60
},
{
"epoch": 0.7439024390243902,
"grad_norm": 0.26171875,
"learning_rate": 2.682926829268293e-06,
"loss": 1.6352,
"step": 61
},
{
"epoch": 0.7560975609756098,
"grad_norm": 0.2490234375,
"learning_rate": 2.5609756097560977e-06,
"loss": 1.6524,
"step": 62
},
{
"epoch": 0.7682926829268293,
"grad_norm": 0.26171875,
"learning_rate": 2.4390243902439027e-06,
"loss": 1.6427,
"step": 63
},
{
"epoch": 0.7804878048780488,
"grad_norm": 0.267578125,
"learning_rate": 2.317073170731708e-06,
"loss": 1.6446,
"step": 64
},
{
"epoch": 0.7926829268292683,
"grad_norm": 0.25,
"learning_rate": 2.1951219512195125e-06,
"loss": 1.62,
"step": 65
},
{
"epoch": 0.8048780487804879,
"grad_norm": 0.259765625,
"learning_rate": 2.073170731707317e-06,
"loss": 1.6486,
"step": 66
},
{
"epoch": 0.8170731707317073,
"grad_norm": 0.26171875,
"learning_rate": 1.951219512195122e-06,
"loss": 1.6394,
"step": 67
},
{
"epoch": 0.8292682926829268,
"grad_norm": 0.259765625,
"learning_rate": 1.8292682926829268e-06,
"loss": 1.6132,
"step": 68
},
{
"epoch": 0.8414634146341463,
"grad_norm": 0.2578125,
"learning_rate": 1.707317073170732e-06,
"loss": 1.665,
"step": 69
},
{
"epoch": 0.8536585365853658,
"grad_norm": 0.259765625,
"learning_rate": 1.5853658536585368e-06,
"loss": 1.6227,
"step": 70
},
{
"epoch": 0.8658536585365854,
"grad_norm": 0.23828125,
"learning_rate": 1.4634146341463414e-06,
"loss": 1.6064,
"step": 71
},
{
"epoch": 0.8780487804878049,
"grad_norm": 0.251953125,
"learning_rate": 1.3414634146341465e-06,
"loss": 1.6649,
"step": 72
},
{
"epoch": 0.8902439024390244,
"grad_norm": 0.2412109375,
"learning_rate": 1.2195121951219514e-06,
"loss": 1.5657,
"step": 73
},
{
"epoch": 0.9024390243902439,
"grad_norm": 0.251953125,
"learning_rate": 1.0975609756097562e-06,
"loss": 1.6191,
"step": 74
},
{
"epoch": 0.9146341463414634,
"grad_norm": 0.24609375,
"learning_rate": 9.75609756097561e-07,
"loss": 1.618,
"step": 75
},
{
"epoch": 0.926829268292683,
"grad_norm": 0.2470703125,
"learning_rate": 8.53658536585366e-07,
"loss": 1.593,
"step": 76
},
{
"epoch": 0.9390243902439024,
"grad_norm": 0.255859375,
"learning_rate": 7.317073170731707e-07,
"loss": 1.6636,
"step": 77
},
{
"epoch": 0.9512195121951219,
"grad_norm": 0.259765625,
"learning_rate": 6.097560975609757e-07,
"loss": 1.6529,
"step": 78
},
{
"epoch": 0.9634146341463414,
"grad_norm": 0.259765625,
"learning_rate": 4.878048780487805e-07,
"loss": 1.6672,
"step": 79
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.25390625,
"learning_rate": 3.6585365853658536e-07,
"loss": 1.6409,
"step": 80
},
{
"epoch": 0.9878048780487805,
"grad_norm": 0.267578125,
"learning_rate": 2.439024390243903e-07,
"loss": 1.6251,
"step": 81
},
{
"epoch": 1.0,
"grad_norm": 0.2421875,
"learning_rate": 1.2195121951219514e-07,
"loss": 1.5768,
"step": 82
},
{
"epoch": 1.0,
"eval_loss": 1.609056830406189,
"eval_runtime": 12.2057,
"eval_samples_per_second": 2.868,
"eval_steps_per_second": 0.41,
"step": 82
}
],
"logging_steps": 1.0,
"max_steps": 82,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.805261781696512e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}