{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 8400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023809523809523808,
"grad_norm": 0.09493794292211533,
"learning_rate": 5e-05,
"loss": 0.2812,
"step": 10
},
{
"epoch": 0.047619047619047616,
"grad_norm": 0.06558936089277267,
"learning_rate": 5e-05,
"loss": 0.0724,
"step": 20
},
{
"epoch": 0.07142857142857142,
"grad_norm": 0.05934833735227585,
"learning_rate": 5e-05,
"loss": 0.0328,
"step": 30
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.0350530743598938,
"learning_rate": 5e-05,
"loss": 0.0267,
"step": 40
},
{
"epoch": 0.11904761904761904,
"grad_norm": 0.028712034225463867,
"learning_rate": 5e-05,
"loss": 0.0236,
"step": 50
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.024718789383769035,
"learning_rate": 5e-05,
"loss": 0.023,
"step": 60
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.022061526775360107,
"learning_rate": 5e-05,
"loss": 0.0226,
"step": 70
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.025670066475868225,
"learning_rate": 5e-05,
"loss": 0.0218,
"step": 80
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.02997678704559803,
"learning_rate": 5e-05,
"loss": 0.0209,
"step": 90
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.0373968742787838,
"learning_rate": 5e-05,
"loss": 0.0221,
"step": 100
},
{
"epoch": 0.2619047619047619,
"grad_norm": 0.02778824232518673,
"learning_rate": 5e-05,
"loss": 0.0207,
"step": 110
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.018951816484332085,
"learning_rate": 5e-05,
"loss": 0.0212,
"step": 120
},
{
"epoch": 0.30952380952380953,
"grad_norm": 0.03219084069132805,
"learning_rate": 5e-05,
"loss": 0.0197,
"step": 130
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.035099539905786514,
"learning_rate": 5e-05,
"loss": 0.0212,
"step": 140
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.022762970998883247,
"learning_rate": 5e-05,
"loss": 0.0205,
"step": 150
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.021571127697825432,
"learning_rate": 5e-05,
"loss": 0.0192,
"step": 160
},
{
"epoch": 0.40476190476190477,
"grad_norm": 0.028216874226927757,
"learning_rate": 5e-05,
"loss": 0.0194,
"step": 170
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.018433215096592903,
"learning_rate": 5e-05,
"loss": 0.0201,
"step": 180
},
{
"epoch": 0.4523809523809524,
"grad_norm": 0.03548741340637207,
"learning_rate": 5e-05,
"loss": 0.0193,
"step": 190
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.027879556640982628,
"learning_rate": 5e-05,
"loss": 0.0187,
"step": 200
},
{
"epoch": 0.5,
"grad_norm": 0.022470738738775253,
"learning_rate": 5e-05,
"loss": 0.0186,
"step": 210
},
{
"epoch": 0.5238095238095238,
"grad_norm": 0.02206714078783989,
"learning_rate": 5e-05,
"loss": 0.0184,
"step": 220
},
{
"epoch": 0.5476190476190477,
"grad_norm": 0.024714656174182892,
"learning_rate": 5e-05,
"loss": 0.0188,
"step": 230
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.03632698208093643,
"learning_rate": 5e-05,
"loss": 0.0188,
"step": 240
},
{
"epoch": 0.5952380952380952,
"grad_norm": 0.017954664304852486,
"learning_rate": 5e-05,
"loss": 0.0177,
"step": 250
},
{
"epoch": 0.6190476190476191,
"grad_norm": 0.026338977739214897,
"learning_rate": 5e-05,
"loss": 0.0166,
"step": 260
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.028515340760350227,
"learning_rate": 5e-05,
"loss": 0.0177,
"step": 270
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.0308682881295681,
"learning_rate": 5e-05,
"loss": 0.0176,
"step": 280
},
{
"epoch": 0.6904761904761905,
"grad_norm": 0.018636852502822876,
"learning_rate": 5e-05,
"loss": 0.0167,
"step": 290
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.024639299139380455,
"learning_rate": 5e-05,
"loss": 0.0171,
"step": 300
},
{
"epoch": 0.7380952380952381,
"grad_norm": 0.02328549325466156,
"learning_rate": 5e-05,
"loss": 0.018,
"step": 310
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.02998117357492447,
"learning_rate": 5e-05,
"loss": 0.0166,
"step": 320
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.022752949967980385,
"learning_rate": 5e-05,
"loss": 0.017,
"step": 330
},
{
"epoch": 0.8095238095238095,
"grad_norm": 0.02922181785106659,
"learning_rate": 5e-05,
"loss": 0.0176,
"step": 340
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.026469197124242783,
"learning_rate": 5e-05,
"loss": 0.0183,
"step": 350
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.03358051925897598,
"learning_rate": 5e-05,
"loss": 0.0188,
"step": 360
},
{
"epoch": 0.8809523809523809,
"grad_norm": 0.0185205340385437,
"learning_rate": 5e-05,
"loss": 0.0173,
"step": 370
},
{
"epoch": 0.9047619047619048,
"grad_norm": 0.018949517980217934,
"learning_rate": 5e-05,
"loss": 0.0169,
"step": 380
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.022263336926698685,
"learning_rate": 5e-05,
"loss": 0.017,
"step": 390
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.023463713005185127,
"learning_rate": 5e-05,
"loss": 0.0172,
"step": 400
},
{
"epoch": 0.9761904761904762,
"grad_norm": 0.023527899757027626,
"learning_rate": 5e-05,
"loss": 0.0156,
"step": 410
},
{
"epoch": 1.0,
"grad_norm": 0.026089975610375404,
"learning_rate": 5e-05,
"loss": 0.0154,
"step": 420
},
{
"epoch": 1.0238095238095237,
"grad_norm": 0.017531510442495346,
"learning_rate": 5e-05,
"loss": 0.0175,
"step": 430
},
{
"epoch": 1.0476190476190477,
"grad_norm": 0.017702311277389526,
"learning_rate": 5e-05,
"loss": 0.0151,
"step": 440
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.018016375601291656,
"learning_rate": 5e-05,
"loss": 0.0168,
"step": 450
},
{
"epoch": 1.0952380952380953,
"grad_norm": 0.028256898745894432,
"learning_rate": 5e-05,
"loss": 0.0164,
"step": 460
},
{
"epoch": 1.119047619047619,
"grad_norm": 0.017876937985420227,
"learning_rate": 5e-05,
"loss": 0.0148,
"step": 470
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.018716128543019295,
"learning_rate": 5e-05,
"loss": 0.0144,
"step": 480
},
{
"epoch": 1.1666666666666667,
"grad_norm": 0.02162998355925083,
"learning_rate": 5e-05,
"loss": 0.0148,
"step": 490
},
{
"epoch": 1.1904761904761905,
"grad_norm": 0.023180391639471054,
"learning_rate": 5e-05,
"loss": 0.0156,
"step": 500
},
{
"epoch": 1.2142857142857142,
"grad_norm": 0.022495009005069733,
"learning_rate": 5e-05,
"loss": 0.0149,
"step": 510
},
{
"epoch": 1.2380952380952381,
"grad_norm": 0.024750931188464165,
"learning_rate": 5e-05,
"loss": 0.0158,
"step": 520
},
{
"epoch": 1.2619047619047619,
"grad_norm": 0.027094174176454544,
"learning_rate": 5e-05,
"loss": 0.0149,
"step": 530
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.021923832595348358,
"learning_rate": 5e-05,
"loss": 0.0146,
"step": 540
},
{
"epoch": 1.3095238095238095,
"grad_norm": 0.025097332894802094,
"learning_rate": 5e-05,
"loss": 0.0136,
"step": 550
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.034609246999025345,
"learning_rate": 5e-05,
"loss": 0.0152,
"step": 560
},
{
"epoch": 1.3571428571428572,
"grad_norm": 0.02195250615477562,
"learning_rate": 5e-05,
"loss": 0.0141,
"step": 570
},
{
"epoch": 1.380952380952381,
"grad_norm": 0.01835169643163681,
"learning_rate": 5e-05,
"loss": 0.0148,
"step": 580
},
{
"epoch": 1.4047619047619047,
"grad_norm": 0.026779156178236008,
"learning_rate": 5e-05,
"loss": 0.0154,
"step": 590
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.025482457131147385,
"learning_rate": 5e-05,
"loss": 0.0142,
"step": 600
},
{
"epoch": 1.4523809523809523,
"grad_norm": 0.022315753623843193,
"learning_rate": 5e-05,
"loss": 0.0137,
"step": 610
},
{
"epoch": 1.4761904761904763,
"grad_norm": 0.02147059701383114,
"learning_rate": 5e-05,
"loss": 0.0137,
"step": 620
},
{
"epoch": 1.5,
"grad_norm": 0.021188456565141678,
"learning_rate": 5e-05,
"loss": 0.0135,
"step": 630
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.018250903114676476,
"learning_rate": 5e-05,
"loss": 0.0137,
"step": 640
},
{
"epoch": 1.5476190476190477,
"grad_norm": 0.02014111913740635,
"learning_rate": 5e-05,
"loss": 0.0141,
"step": 650
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.031997792422771454,
"learning_rate": 5e-05,
"loss": 0.0145,
"step": 660
},
{
"epoch": 1.5952380952380953,
"grad_norm": 0.02311546355485916,
"learning_rate": 5e-05,
"loss": 0.0139,
"step": 670
},
{
"epoch": 1.619047619047619,
"grad_norm": 0.022159090265631676,
"learning_rate": 5e-05,
"loss": 0.0127,
"step": 680
},
{
"epoch": 1.6428571428571428,
"grad_norm": 0.023639395833015442,
"learning_rate": 5e-05,
"loss": 0.0139,
"step": 690
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.025874590501189232,
"learning_rate": 5e-05,
"loss": 0.0142,
"step": 700
},
{
"epoch": 1.6904761904761905,
"grad_norm": 0.015242703258991241,
"learning_rate": 5e-05,
"loss": 0.0131,
"step": 710
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.01896982081234455,
"learning_rate": 5e-05,
"loss": 0.0129,
"step": 720
},
{
"epoch": 1.7380952380952381,
"grad_norm": 0.019098645076155663,
"learning_rate": 5e-05,
"loss": 0.014,
"step": 730
},
{
"epoch": 1.7619047619047619,
"grad_norm": 0.020913679152727127,
"learning_rate": 5e-05,
"loss": 0.0139,
"step": 740
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.02301224134862423,
"learning_rate": 5e-05,
"loss": 0.0134,
"step": 750
},
{
"epoch": 1.8095238095238095,
"grad_norm": 0.020251275971531868,
"learning_rate": 5e-05,
"loss": 0.0126,
"step": 760
},
{
"epoch": 1.8333333333333335,
"grad_norm": 0.023471834138035774,
"learning_rate": 5e-05,
"loss": 0.0136,
"step": 770
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.024632664397358894,
"learning_rate": 5e-05,
"loss": 0.0145,
"step": 780
},
{
"epoch": 1.880952380952381,
"grad_norm": 0.021192265674471855,
"learning_rate": 5e-05,
"loss": 0.0138,
"step": 790
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.021170521154999733,
"learning_rate": 5e-05,
"loss": 0.0136,
"step": 800
},
{
"epoch": 1.9285714285714286,
"grad_norm": 0.018560606986284256,
"learning_rate": 5e-05,
"loss": 0.0129,
"step": 810
},
{
"epoch": 1.9523809523809523,
"grad_norm": 0.020409613847732544,
"learning_rate": 5e-05,
"loss": 0.0138,
"step": 820
},
{
"epoch": 1.9761904761904763,
"grad_norm": 0.02413373813033104,
"learning_rate": 5e-05,
"loss": 0.0128,
"step": 830
},
{
"epoch": 2.0,
"grad_norm": 0.022253163158893585,
"learning_rate": 5e-05,
"loss": 0.0127,
"step": 840
},
{
"epoch": 2.0238095238095237,
"grad_norm": 0.017652267590165138,
"learning_rate": 5e-05,
"loss": 0.0143,
"step": 850
},
{
"epoch": 2.0476190476190474,
"grad_norm": 0.018153948709368706,
"learning_rate": 5e-05,
"loss": 0.0128,
"step": 860
},
{
"epoch": 2.0714285714285716,
"grad_norm": 0.01955491490662098,
"learning_rate": 5e-05,
"loss": 0.0132,
"step": 870
},
{
"epoch": 2.0952380952380953,
"grad_norm": 0.021146738901734352,
"learning_rate": 5e-05,
"loss": 0.0124,
"step": 880
},
{
"epoch": 2.119047619047619,
"grad_norm": 0.016339635476469994,
"learning_rate": 5e-05,
"loss": 0.0122,
"step": 890
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.017886130139231682,
"learning_rate": 5e-05,
"loss": 0.0113,
"step": 900
},
{
"epoch": 2.1666666666666665,
"grad_norm": 0.02060827612876892,
"learning_rate": 5e-05,
"loss": 0.0117,
"step": 910
},
{
"epoch": 2.1904761904761907,
"grad_norm": 0.016803547739982605,
"learning_rate": 5e-05,
"loss": 0.0132,
"step": 920
},
{
"epoch": 2.2142857142857144,
"grad_norm": 0.018075264990329742,
"learning_rate": 5e-05,
"loss": 0.0119,
"step": 930
},
{
"epoch": 2.238095238095238,
"grad_norm": 0.023032259196043015,
"learning_rate": 5e-05,
"loss": 0.0121,
"step": 940
},
{
"epoch": 2.261904761904762,
"grad_norm": 0.028263242915272713,
"learning_rate": 5e-05,
"loss": 0.0114,
"step": 950
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.02737823873758316,
"learning_rate": 5e-05,
"loss": 0.0112,
"step": 960
},
{
"epoch": 2.3095238095238093,
"grad_norm": 0.021527152508497238,
"learning_rate": 5e-05,
"loss": 0.0111,
"step": 970
},
{
"epoch": 2.3333333333333335,
"grad_norm": 0.027928918600082397,
"learning_rate": 5e-05,
"loss": 0.0123,
"step": 980
},
{
"epoch": 2.357142857142857,
"grad_norm": 0.024291587993502617,
"learning_rate": 5e-05,
"loss": 0.0112,
"step": 990
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.024933788925409317,
"learning_rate": 5e-05,
"loss": 0.0122,
"step": 1000
},
{
"epoch": 2.4047619047619047,
"grad_norm": 0.025925207883119583,
"learning_rate": 5e-05,
"loss": 0.0126,
"step": 1010
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.021988635882735252,
"learning_rate": 5e-05,
"loss": 0.0111,
"step": 1020
},
{
"epoch": 2.4523809523809526,
"grad_norm": 0.029331982135772705,
"learning_rate": 5e-05,
"loss": 0.0113,
"step": 1030
},
{
"epoch": 2.4761904761904763,
"grad_norm": 0.023319706320762634,
"learning_rate": 5e-05,
"loss": 0.0111,
"step": 1040
},
{
"epoch": 2.5,
"grad_norm": 0.016594231128692627,
"learning_rate": 5e-05,
"loss": 0.0106,
"step": 1050
},
{
"epoch": 2.5238095238095237,
"grad_norm": 0.022670956328511238,
"learning_rate": 5e-05,
"loss": 0.0109,
"step": 1060
},
{
"epoch": 2.5476190476190474,
"grad_norm": 0.02389667183160782,
"learning_rate": 5e-05,
"loss": 0.0116,
"step": 1070
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.03487304970622063,
"learning_rate": 5e-05,
"loss": 0.0114,
"step": 1080
},
{
"epoch": 2.5952380952380953,
"grad_norm": 0.02180272527039051,
"learning_rate": 5e-05,
"loss": 0.0116,
"step": 1090
},
{
"epoch": 2.619047619047619,
"grad_norm": 0.021106529980897903,
"learning_rate": 5e-05,
"loss": 0.0105,
"step": 1100
},
{
"epoch": 2.642857142857143,
"grad_norm": 0.023611431941390038,
"learning_rate": 5e-05,
"loss": 0.0119,
"step": 1110
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.024628831073641777,
"learning_rate": 5e-05,
"loss": 0.0114,
"step": 1120
},
{
"epoch": 2.6904761904761907,
"grad_norm": 0.014470246620476246,
"learning_rate": 5e-05,
"loss": 0.0102,
"step": 1130
},
{
"epoch": 2.7142857142857144,
"grad_norm": 0.022718733176589012,
"learning_rate": 5e-05,
"loss": 0.0103,
"step": 1140
},
{
"epoch": 2.738095238095238,
"grad_norm": 0.020995931699872017,
"learning_rate": 5e-05,
"loss": 0.0106,
"step": 1150
},
{
"epoch": 2.761904761904762,
"grad_norm": 0.024527721107006073,
"learning_rate": 5e-05,
"loss": 0.0107,
"step": 1160
},
{
"epoch": 2.7857142857142856,
"grad_norm": 0.021473698318004608,
"learning_rate": 5e-05,
"loss": 0.0104,
"step": 1170
},
{
"epoch": 2.8095238095238093,
"grad_norm": 0.018530316650867462,
"learning_rate": 5e-05,
"loss": 0.0101,
"step": 1180
},
{
"epoch": 2.8333333333333335,
"grad_norm": 0.02302338182926178,
"learning_rate": 5e-05,
"loss": 0.0105,
"step": 1190
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.025742432102560997,
"learning_rate": 5e-05,
"loss": 0.0114,
"step": 1200
},
{
"epoch": 2.880952380952381,
"grad_norm": 0.027784395962953568,
"learning_rate": 5e-05,
"loss": 0.0111,
"step": 1210
},
{
"epoch": 2.9047619047619047,
"grad_norm": 0.021398941054940224,
"learning_rate": 5e-05,
"loss": 0.0102,
"step": 1220
},
{
"epoch": 2.928571428571429,
"grad_norm": 0.027173219248652458,
"learning_rate": 5e-05,
"loss": 0.0101,
"step": 1230
},
{
"epoch": 2.9523809523809526,
"grad_norm": 0.02305479347705841,
"learning_rate": 5e-05,
"loss": 0.0113,
"step": 1240
},
{
"epoch": 2.9761904761904763,
"grad_norm": 0.024614600464701653,
"learning_rate": 5e-05,
"loss": 0.0105,
"step": 1250
},
{
"epoch": 3.0,
"grad_norm": 0.01538562960922718,
"learning_rate": 5e-05,
"loss": 0.0103,
"step": 1260
},
{
"epoch": 3.0238095238095237,
"grad_norm": 0.018940571695566177,
"learning_rate": 5e-05,
"loss": 0.0117,
"step": 1270
},
{
"epoch": 3.0476190476190474,
"grad_norm": 0.014765114523470402,
"learning_rate": 5e-05,
"loss": 0.0104,
"step": 1280
},
{
"epoch": 3.0714285714285716,
"grad_norm": 0.02444819174706936,
"learning_rate": 5e-05,
"loss": 0.0115,
"step": 1290
},
{
"epoch": 3.0952380952380953,
"grad_norm": 0.02040857821702957,
"learning_rate": 5e-05,
"loss": 0.0102,
"step": 1300
},
{
"epoch": 3.119047619047619,
"grad_norm": 0.01789598912000656,
"learning_rate": 5e-05,
"loss": 0.0095,
"step": 1310
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.020275596529245377,
"learning_rate": 5e-05,
"loss": 0.0084,
"step": 1320
},
{
"epoch": 3.1666666666666665,
"grad_norm": 0.02351970411837101,
"learning_rate": 5e-05,
"loss": 0.0098,
"step": 1330
},
{
"epoch": 3.1904761904761907,
"grad_norm": 0.017525261268019676,
"learning_rate": 5e-05,
"loss": 0.0103,
"step": 1340
},
{
"epoch": 3.2142857142857144,
"grad_norm": 0.019003944471478462,
"learning_rate": 5e-05,
"loss": 0.0094,
"step": 1350
},
{
"epoch": 3.238095238095238,
"grad_norm": 0.02266862988471985,
"learning_rate": 5e-05,
"loss": 0.0094,
"step": 1360
},
{
"epoch": 3.261904761904762,
"grad_norm": 0.019711384549736977,
"learning_rate": 5e-05,
"loss": 0.0093,
"step": 1370
},
{
"epoch": 3.2857142857142856,
"grad_norm": 0.02152351662516594,
"learning_rate": 5e-05,
"loss": 0.0082,
"step": 1380
},
{
"epoch": 3.3095238095238093,
"grad_norm": 0.018401287496089935,
"learning_rate": 5e-05,
"loss": 0.0082,
"step": 1390
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.030380433425307274,
"learning_rate": 5e-05,
"loss": 0.0095,
"step": 1400
},
{
"epoch": 3.357142857142857,
"grad_norm": 0.018700776621699333,
"learning_rate": 5e-05,
"loss": 0.0087,
"step": 1410
},
{
"epoch": 3.380952380952381,
"grad_norm": 0.01758432574570179,
"learning_rate": 5e-05,
"loss": 0.0092,
"step": 1420
},
{
"epoch": 3.4047619047619047,
"grad_norm": 0.027525080367922783,
"learning_rate": 5e-05,
"loss": 0.0096,
"step": 1430
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.023578746244311333,
"learning_rate": 5e-05,
"loss": 0.0087,
"step": 1440
},
{
"epoch": 3.4523809523809526,
"grad_norm": 0.020561877638101578,
"learning_rate": 5e-05,
"loss": 0.0086,
"step": 1450
},
{
"epoch": 3.4761904761904763,
"grad_norm": 0.024523900821805,
"learning_rate": 5e-05,
"loss": 0.0089,
"step": 1460
},
{
"epoch": 3.5,
"grad_norm": 0.017780443653464317,
"learning_rate": 5e-05,
"loss": 0.0082,
"step": 1470
},
{
"epoch": 3.5238095238095237,
"grad_norm": 0.025551529601216316,
"learning_rate": 5e-05,
"loss": 0.0083,
"step": 1480
},
{
"epoch": 3.5476190476190474,
"grad_norm": 0.024370500817894936,
"learning_rate": 5e-05,
"loss": 0.0091,
"step": 1490
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.03639150410890579,
"learning_rate": 5e-05,
"loss": 0.0092,
"step": 1500
},
{
"epoch": 3.5952380952380953,
"grad_norm": 0.02289510704576969,
"learning_rate": 5e-05,
"loss": 0.009,
"step": 1510
},
{
"epoch": 3.619047619047619,
"grad_norm": 0.019606849178671837,
"learning_rate": 5e-05,
"loss": 0.0088,
"step": 1520
},
{
"epoch": 3.642857142857143,
"grad_norm": 0.037268321961164474,
"learning_rate": 5e-05,
"loss": 0.0088,
"step": 1530
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.028575578704476357,
"learning_rate": 5e-05,
"loss": 0.0099,
"step": 1540
},
{
"epoch": 3.6904761904761907,
"grad_norm": 0.013424446806311607,
"learning_rate": 5e-05,
"loss": 0.0076,
"step": 1550
},
{
"epoch": 3.7142857142857144,
"grad_norm": 0.01701328158378601,
"learning_rate": 5e-05,
"loss": 0.0084,
"step": 1560
},
{
"epoch": 3.738095238095238,
"grad_norm": 0.025259124115109444,
"learning_rate": 5e-05,
"loss": 0.0081,
"step": 1570
},
{
"epoch": 3.761904761904762,
"grad_norm": 0.024821562692523003,
"learning_rate": 5e-05,
"loss": 0.0086,
"step": 1580
},
{
"epoch": 3.7857142857142856,
"grad_norm": 0.024605993181467056,
"learning_rate": 5e-05,
"loss": 0.0084,
"step": 1590
},
{
"epoch": 3.8095238095238093,
"grad_norm": 0.02455594390630722,
"learning_rate": 5e-05,
"loss": 0.0075,
"step": 1600
},
{
"epoch": 3.8333333333333335,
"grad_norm": 0.025864994153380394,
"learning_rate": 5e-05,
"loss": 0.008,
"step": 1610
},
{
"epoch": 3.857142857142857,
"grad_norm": 0.02395598590373993,
"learning_rate": 5e-05,
"loss": 0.0087,
"step": 1620
},
{
"epoch": 3.880952380952381,
"grad_norm": 0.015691502019762993,
"learning_rate": 5e-05,
"loss": 0.0084,
"step": 1630
},
{
"epoch": 3.9047619047619047,
"grad_norm": 0.0223532821983099,
"learning_rate": 5e-05,
"loss": 0.0077,
"step": 1640
},
{
"epoch": 3.928571428571429,
"grad_norm": 0.028256218880414963,
"learning_rate": 5e-05,
"loss": 0.0076,
"step": 1650
},
{
"epoch": 3.9523809523809526,
"grad_norm": 0.023097924888134003,
"learning_rate": 5e-05,
"loss": 0.0091,
"step": 1660
},
{
"epoch": 3.9761904761904763,
"grad_norm": 0.02783946879208088,
"learning_rate": 5e-05,
"loss": 0.0076,
"step": 1670
},
{
"epoch": 4.0,
"grad_norm": 0.013011032715439796,
"learning_rate": 5e-05,
"loss": 0.008,
"step": 1680
},
{
"epoch": 4.023809523809524,
"grad_norm": 0.02342393808066845,
"learning_rate": 5e-05,
"loss": 0.0092,
"step": 1690
},
{
"epoch": 4.0476190476190474,
"grad_norm": 0.021954068914055824,
"learning_rate": 5e-05,
"loss": 0.009,
"step": 1700
},
{
"epoch": 4.071428571428571,
"grad_norm": 0.0207274928689003,
"learning_rate": 5e-05,
"loss": 0.0092,
"step": 1710
},
{
"epoch": 4.095238095238095,
"grad_norm": 0.03118881769478321,
"learning_rate": 5e-05,
"loss": 0.008,
"step": 1720
},
{
"epoch": 4.119047619047619,
"grad_norm": 0.015900803729891777,
"learning_rate": 5e-05,
"loss": 0.0074,
"step": 1730
},
{
"epoch": 4.142857142857143,
"grad_norm": 0.013303752988576889,
"learning_rate": 5e-05,
"loss": 0.0069,
"step": 1740
},
{
"epoch": 4.166666666666667,
"grad_norm": 0.022115593776106834,
"learning_rate": 5e-05,
"loss": 0.0078,
"step": 1750
},
{
"epoch": 4.190476190476191,
"grad_norm": 0.021266015246510506,
"learning_rate": 5e-05,
"loss": 0.008,
"step": 1760
},
{
"epoch": 4.214285714285714,
"grad_norm": 0.023192718625068665,
"learning_rate": 5e-05,
"loss": 0.0075,
"step": 1770
},
{
"epoch": 4.238095238095238,
"grad_norm": 0.02218669466674328,
"learning_rate": 5e-05,
"loss": 0.007,
"step": 1780
},
{
"epoch": 4.261904761904762,
"grad_norm": 0.024710968136787415,
"learning_rate": 5e-05,
"loss": 0.0071,
"step": 1790
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.022398030385375023,
"learning_rate": 5e-05,
"loss": 0.0057,
"step": 1800
},
{
"epoch": 4.309523809523809,
"grad_norm": 0.025463633239269257,
"learning_rate": 5e-05,
"loss": 0.0066,
"step": 1810
},
{
"epoch": 4.333333333333333,
"grad_norm": 0.024359669536352158,
"learning_rate": 5e-05,
"loss": 0.0072,
"step": 1820
},
{
"epoch": 4.357142857142857,
"grad_norm": 0.015460989437997341,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 1830
},
{
"epoch": 4.380952380952381,
"grad_norm": 0.028968673199415207,
"learning_rate": 5e-05,
"loss": 0.0075,
"step": 1840
},
{
"epoch": 4.404761904761905,
"grad_norm": 0.021456323564052582,
"learning_rate": 5e-05,
"loss": 0.0074,
"step": 1850
},
{
"epoch": 4.428571428571429,
"grad_norm": 0.016484368592500687,
"learning_rate": 5e-05,
"loss": 0.0069,
"step": 1860
},
{
"epoch": 4.4523809523809526,
"grad_norm": 0.03759278357028961,
"learning_rate": 5e-05,
"loss": 0.0073,
"step": 1870
},
{
"epoch": 4.476190476190476,
"grad_norm": 0.02179703302681446,
"learning_rate": 5e-05,
"loss": 0.007,
"step": 1880
},
{
"epoch": 4.5,
"grad_norm": 0.023886868730187416,
"learning_rate": 5e-05,
"loss": 0.0068,
"step": 1890
},
{
"epoch": 4.523809523809524,
"grad_norm": 0.020535631105303764,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 1900
},
{
"epoch": 4.5476190476190474,
"grad_norm": 0.024215754121541977,
"learning_rate": 5e-05,
"loss": 0.0065,
"step": 1910
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.02726290002465248,
"learning_rate": 5e-05,
"loss": 0.0068,
"step": 1920
},
{
"epoch": 4.595238095238095,
"grad_norm": 0.022762540727853775,
"learning_rate": 5e-05,
"loss": 0.0066,
"step": 1930
},
{
"epoch": 4.619047619047619,
"grad_norm": 0.03385463356971741,
"learning_rate": 5e-05,
"loss": 0.0066,
"step": 1940
},
{
"epoch": 4.642857142857143,
"grad_norm": 0.029403358697891235,
"learning_rate": 5e-05,
"loss": 0.0065,
"step": 1950
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.03025682643055916,
"learning_rate": 5e-05,
"loss": 0.0078,
"step": 1960
},
{
"epoch": 4.690476190476191,
"grad_norm": 0.012874845415353775,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 1970
},
{
"epoch": 4.714285714285714,
"grad_norm": 0.021130386739969254,
"learning_rate": 5e-05,
"loss": 0.007,
"step": 1980
},
{
"epoch": 4.738095238095238,
"grad_norm": 0.02132946252822876,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 1990
},
{
"epoch": 4.761904761904762,
"grad_norm": 0.03074180707335472,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2000
},
{
"epoch": 4.785714285714286,
"grad_norm": 0.02292218618094921,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 2010
},
{
"epoch": 4.809523809523809,
"grad_norm": 0.01976919360458851,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2020
},
{
"epoch": 4.833333333333333,
"grad_norm": 0.030889755114912987,
"learning_rate": 5e-05,
"loss": 0.0064,
"step": 2030
},
{
"epoch": 4.857142857142857,
"grad_norm": 0.02626785635948181,
"learning_rate": 5e-05,
"loss": 0.0071,
"step": 2040
},
{
"epoch": 4.880952380952381,
"grad_norm": 0.016065414994955063,
"learning_rate": 5e-05,
"loss": 0.0067,
"step": 2050
},
{
"epoch": 4.904761904761905,
"grad_norm": 0.022564269602298737,
"learning_rate": 5e-05,
"loss": 0.0057,
"step": 2060
},
{
"epoch": 4.928571428571429,
"grad_norm": 0.03417288884520531,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 2070
},
{
"epoch": 4.9523809523809526,
"grad_norm": 0.02042423002421856,
"learning_rate": 5e-05,
"loss": 0.0067,
"step": 2080
},
{
"epoch": 4.976190476190476,
"grad_norm": 0.038521867245435715,
"learning_rate": 5e-05,
"loss": 0.0064,
"step": 2090
},
{
"epoch": 5.0,
"grad_norm": 0.026785830035805702,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2100
},
{
"epoch": 5.023809523809524,
"grad_norm": 0.01977149210870266,
"learning_rate": 5e-05,
"loss": 0.0067,
"step": 2110
},
{
"epoch": 5.0476190476190474,
"grad_norm": 0.028995908796787262,
"learning_rate": 5e-05,
"loss": 0.0065,
"step": 2120
},
{
"epoch": 5.071428571428571,
"grad_norm": 0.027958383783698082,
"learning_rate": 5e-05,
"loss": 0.0067,
"step": 2130
},
{
"epoch": 5.095238095238095,
"grad_norm": 0.024838177487254143,
"learning_rate": 5e-05,
"loss": 0.0062,
"step": 2140
},
{
"epoch": 5.119047619047619,
"grad_norm": 0.019940726459026337,
"learning_rate": 5e-05,
"loss": 0.0056,
"step": 2150
},
{
"epoch": 5.142857142857143,
"grad_norm": 0.023204155266284943,
"learning_rate": 5e-05,
"loss": 0.0055,
"step": 2160
},
{
"epoch": 5.166666666666667,
"grad_norm": 0.02349969372153282,
"learning_rate": 5e-05,
"loss": 0.006,
"step": 2170
},
{
"epoch": 5.190476190476191,
"grad_norm": 0.030205531045794487,
"learning_rate": 5e-05,
"loss": 0.0065,
"step": 2180
},
{
"epoch": 5.214285714285714,
"grad_norm": 0.0321480929851532,
"learning_rate": 5e-05,
"loss": 0.0074,
"step": 2190
},
{
"epoch": 5.238095238095238,
"grad_norm": 0.020824981853365898,
"learning_rate": 5e-05,
"loss": 0.006,
"step": 2200
},
{
"epoch": 5.261904761904762,
"grad_norm": 0.0227160956710577,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 2210
},
{
"epoch": 5.285714285714286,
"grad_norm": 0.029231227934360504,
"learning_rate": 5e-05,
"loss": 0.005,
"step": 2220
},
{
"epoch": 5.309523809523809,
"grad_norm": 0.02282622829079628,
"learning_rate": 5e-05,
"loss": 0.0045,
"step": 2230
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.03982138633728027,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2240
},
{
"epoch": 5.357142857142857,
"grad_norm": 0.018466891720891,
"learning_rate": 5e-05,
"loss": 0.0048,
"step": 2250
},
{
"epoch": 5.380952380952381,
"grad_norm": 0.024992188438773155,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 2260
},
{
"epoch": 5.404761904761905,
"grad_norm": 0.029194045811891556,
"learning_rate": 5e-05,
"loss": 0.0062,
"step": 2270
},
{
"epoch": 5.428571428571429,
"grad_norm": 0.027634933590888977,
"learning_rate": 5e-05,
"loss": 0.0056,
"step": 2280
},
{
"epoch": 5.4523809523809526,
"grad_norm": 0.03338102251291275,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2290
},
{
"epoch": 5.476190476190476,
"grad_norm": 0.020240414887666702,
"learning_rate": 5e-05,
"loss": 0.0053,
"step": 2300
},
{
"epoch": 5.5,
"grad_norm": 0.01747560314834118,
"learning_rate": 5e-05,
"loss": 0.0053,
"step": 2310
},
{
"epoch": 5.523809523809524,
"grad_norm": 0.02589523047208786,
"learning_rate": 5e-05,
"loss": 0.0052,
"step": 2320
},
{
"epoch": 5.5476190476190474,
"grad_norm": 0.03360776975750923,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2330
},
{
"epoch": 5.571428571428571,
"grad_norm": 0.0385347418487072,
"learning_rate": 5e-05,
"loss": 0.0053,
"step": 2340
},
{
"epoch": 5.595238095238095,
"grad_norm": 0.02222476154565811,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2350
},
{
"epoch": 5.619047619047619,
"grad_norm": 0.021084798499941826,
"learning_rate": 5e-05,
"loss": 0.0058,
"step": 2360
},
{
"epoch": 5.642857142857143,
"grad_norm": 0.02974940650165081,
"learning_rate": 5e-05,
"loss": 0.0052,
"step": 2370
},
{
"epoch": 5.666666666666667,
"grad_norm": 0.01862935908138752,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2380
},
{
"epoch": 5.690476190476191,
"grad_norm": 0.01248252298682928,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2390
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.023803608492016792,
"learning_rate": 5e-05,
"loss": 0.0054,
"step": 2400
},
{
"epoch": 5.738095238095238,
"grad_norm": 0.01642240583896637,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2410
},
{
"epoch": 5.761904761904762,
"grad_norm": 0.027454275637865067,
"learning_rate": 5e-05,
"loss": 0.0048,
"step": 2420
},
{
"epoch": 5.785714285714286,
"grad_norm": 0.03252597153186798,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2430
},
{
"epoch": 5.809523809523809,
"grad_norm": 0.031316157430410385,
"learning_rate": 5e-05,
"loss": 0.0045,
"step": 2440
},
{
"epoch": 5.833333333333333,
"grad_norm": 0.022797273471951485,
"learning_rate": 5e-05,
"loss": 0.0057,
"step": 2450
},
{
"epoch": 5.857142857142857,
"grad_norm": 0.03269220516085625,
"learning_rate": 5e-05,
"loss": 0.0061,
"step": 2460
},
{
"epoch": 5.880952380952381,
"grad_norm": 0.02592204324901104,
"learning_rate": 5e-05,
"loss": 0.0063,
"step": 2470
},
{
"epoch": 5.904761904761905,
"grad_norm": 0.020954346284270287,
"learning_rate": 5e-05,
"loss": 0.005,
"step": 2480
},
{
"epoch": 5.928571428571429,
"grad_norm": 0.03389604389667511,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2490
},
{
"epoch": 5.9523809523809526,
"grad_norm": 0.022789567708969116,
"learning_rate": 5e-05,
"loss": 0.005,
"step": 2500
},
{
"epoch": 5.976190476190476,
"grad_norm": 0.021143564954400063,
"learning_rate": 5e-05,
"loss": 0.0052,
"step": 2510
},
{
"epoch": 6.0,
"grad_norm": 0.01593031920492649,
"learning_rate": 5e-05,
"loss": 0.0048,
"step": 2520
},
{
"epoch": 6.023809523809524,
"grad_norm": 0.024208232760429382,
"learning_rate": 5e-05,
"loss": 0.0054,
"step": 2530
},
{
"epoch": 6.0476190476190474,
"grad_norm": 0.020461222156882286,
"learning_rate": 5e-05,
"loss": 0.0052,
"step": 2540
},
{
"epoch": 6.071428571428571,
"grad_norm": 0.027278559282422066,
"learning_rate": 5e-05,
"loss": 0.0057,
"step": 2550
},
{
"epoch": 6.095238095238095,
"grad_norm": 0.02183406613767147,
"learning_rate": 5e-05,
"loss": 0.005,
"step": 2560
},
{
"epoch": 6.119047619047619,
"grad_norm": 0.024204745888710022,
"learning_rate": 5e-05,
"loss": 0.0047,
"step": 2570
},
{
"epoch": 6.142857142857143,
"grad_norm": 0.020112114027142525,
"learning_rate": 5e-05,
"loss": 0.0042,
"step": 2580
},
{
"epoch": 6.166666666666667,
"grad_norm": 0.03133062273263931,
"learning_rate": 5e-05,
"loss": 0.0043,
"step": 2590
},
{
"epoch": 6.190476190476191,
"grad_norm": 0.016416160389780998,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2600
},
{
"epoch": 6.214285714285714,
"grad_norm": 0.02232821099460125,
"learning_rate": 5e-05,
"loss": 0.0049,
"step": 2610
},
{
"epoch": 6.238095238095238,
"grad_norm": 0.023397671058773994,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2620
},
{
"epoch": 6.261904761904762,
"grad_norm": 0.021031634882092476,
"learning_rate": 5e-05,
"loss": 0.0048,
"step": 2630
},
{
"epoch": 6.285714285714286,
"grad_norm": 0.05748249217867851,
"learning_rate": 5e-05,
"loss": 0.0043,
"step": 2640
},
{
"epoch": 6.309523809523809,
"grad_norm": 0.030488435178995132,
"learning_rate": 5e-05,
"loss": 0.0039,
"step": 2650
},
{
"epoch": 6.333333333333333,
"grad_norm": 0.019017908722162247,
"learning_rate": 5e-05,
"loss": 0.0041,
"step": 2660
},
{
"epoch": 6.357142857142857,
"grad_norm": 0.021291224285960197,
"learning_rate": 5e-05,
"loss": 0.0046,
"step": 2670
},
{
"epoch": 6.380952380952381,
"grad_norm": 0.02494419366121292,
"learning_rate": 5e-05,
"loss": 0.0047,
"step": 2680
},
{
"epoch": 6.404761904761905,
"grad_norm": 0.02740258164703846,
"learning_rate": 5e-05,
"loss": 0.0051,
"step": 2690
},
{
"epoch": 6.428571428571429,
"grad_norm": 0.02902950905263424,
"learning_rate": 5e-05,
"loss": 0.0046,
"step": 2700
},
{
"epoch": 6.4523809523809526,
"grad_norm": 0.020637964829802513,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2710
},
{
"epoch": 6.476190476190476,
"grad_norm": 0.020837081596255302,
"learning_rate": 5e-05,
"loss": 0.0038,
"step": 2720
},
{
"epoch": 6.5,
"grad_norm": 0.020413970574736595,
"learning_rate": 5e-05,
"loss": 0.0035,
"step": 2730
},
{
"epoch": 6.523809523809524,
"grad_norm": 0.01466563530266285,
"learning_rate": 5e-05,
"loss": 0.004,
"step": 2740
},
{
"epoch": 6.5476190476190474,
"grad_norm": 0.02238455042243004,
"learning_rate": 5e-05,
"loss": 0.0035,
"step": 2750
},
{
"epoch": 6.571428571428571,
"grad_norm": 0.027074338868260384,
"learning_rate": 5e-05,
"loss": 0.0038,
"step": 2760
},
{
"epoch": 6.595238095238095,
"grad_norm": 0.03330984711647034,
"learning_rate": 5e-05,
"loss": 0.0042,
"step": 2770
},
{
"epoch": 6.619047619047619,
"grad_norm": 0.025337036699056625,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2780
},
{
"epoch": 6.642857142857143,
"grad_norm": 0.034126244485378265,
"learning_rate": 5e-05,
"loss": 0.0047,
"step": 2790
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.02013178914785385,
"learning_rate": 5e-05,
"loss": 0.0053,
"step": 2800
},
{
"epoch": 6.690476190476191,
"grad_norm": 0.012780356220901012,
"learning_rate": 5e-05,
"loss": 0.0041,
"step": 2810
},
{
"epoch": 6.714285714285714,
"grad_norm": 0.01535839308053255,
"learning_rate": 5e-05,
"loss": 0.0042,
"step": 2820
},
{
"epoch": 6.738095238095238,
"grad_norm": 0.018024824559688568,
"learning_rate": 5e-05,
"loss": 0.0039,
"step": 2830
},
{
"epoch": 6.761904761904762,
"grad_norm": 0.02146134339272976,
"learning_rate": 5e-05,
"loss": 0.0041,
"step": 2840
},
{
"epoch": 6.785714285714286,
"grad_norm": 0.015317169018089771,
"learning_rate": 5e-05,
"loss": 0.0034,
"step": 2850
},
{
"epoch": 6.809523809523809,
"grad_norm": 0.029330266639590263,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 2860
},
{
"epoch": 6.833333333333333,
"grad_norm": 0.032067060470581055,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2870
},
{
"epoch": 6.857142857142857,
"grad_norm": 0.016328303143382072,
"learning_rate": 5e-05,
"loss": 0.0045,
"step": 2880
},
{
"epoch": 6.880952380952381,
"grad_norm": 0.021490612998604774,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2890
},
{
"epoch": 6.904761904761905,
"grad_norm": 0.014762201346457005,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 2900
},
{
"epoch": 6.928571428571429,
"grad_norm": 0.02523043379187584,
"learning_rate": 5e-05,
"loss": 0.0035,
"step": 2910
},
{
"epoch": 6.9523809523809526,
"grad_norm": 0.01821116730570793,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 2920
},
{
"epoch": 6.976190476190476,
"grad_norm": 0.025133155286312103,
"learning_rate": 5e-05,
"loss": 0.0039,
"step": 2930
},
{
"epoch": 7.0,
"grad_norm": 0.01947784051299095,
"learning_rate": 5e-05,
"loss": 0.004,
"step": 2940
},
{
"epoch": 7.023809523809524,
"grad_norm": 0.019346952438354492,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2950
},
{
"epoch": 7.0476190476190474,
"grad_norm": 0.02163790725171566,
"learning_rate": 5e-05,
"loss": 0.0038,
"step": 2960
},
{
"epoch": 7.071428571428571,
"grad_norm": 0.015155022032558918,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 2970
},
{
"epoch": 7.095238095238095,
"grad_norm": 0.02857312373816967,
"learning_rate": 5e-05,
"loss": 0.004,
"step": 2980
},
{
"epoch": 7.119047619047619,
"grad_norm": 0.015818042680621147,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 2990
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.01129070483148098,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3000
},
{
"epoch": 7.166666666666667,
"grad_norm": 0.040307771414518356,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 3010
},
{
"epoch": 7.190476190476191,
"grad_norm": 0.02898922935128212,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 3020
},
{
"epoch": 7.214285714285714,
"grad_norm": 0.022514350712299347,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 3030
},
{
"epoch": 7.238095238095238,
"grad_norm": 0.013335715979337692,
"learning_rate": 5e-05,
"loss": 0.0035,
"step": 3040
},
{
"epoch": 7.261904761904762,
"grad_norm": 0.031238842755556107,
"learning_rate": 5e-05,
"loss": 0.0039,
"step": 3050
},
{
"epoch": 7.285714285714286,
"grad_norm": 0.025638382881879807,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3060
},
{
"epoch": 7.309523809523809,
"grad_norm": 0.046518560498952866,
"learning_rate": 5e-05,
"loss": 0.003,
"step": 3070
},
{
"epoch": 7.333333333333333,
"grad_norm": 0.024228127673268318,
"learning_rate": 5e-05,
"loss": 0.0038,
"step": 3080
},
{
"epoch": 7.357142857142857,
"grad_norm": 0.02967258170247078,
"learning_rate": 5e-05,
"loss": 0.003,
"step": 3090
},
{
"epoch": 7.380952380952381,
"grad_norm": 0.019939076155424118,
"learning_rate": 5e-05,
"loss": 0.004,
"step": 3100
},
{
"epoch": 7.404761904761905,
"grad_norm": 0.02764073945581913,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 3110
},
{
"epoch": 7.428571428571429,
"grad_norm": 0.02370736561715603,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3120
},
{
"epoch": 7.4523809523809526,
"grad_norm": 0.020530644804239273,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3130
},
{
"epoch": 7.476190476190476,
"grad_norm": 0.019800107926130295,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3140
},
{
"epoch": 7.5,
"grad_norm": 0.018785228952765465,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 3150
},
{
"epoch": 7.523809523809524,
"grad_norm": 0.02633030340075493,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3160
},
{
"epoch": 7.5476190476190474,
"grad_norm": 0.08342000097036362,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3170
},
{
"epoch": 7.571428571428571,
"grad_norm": 0.008880950510501862,
"learning_rate": 5e-05,
"loss": 0.0026,
"step": 3180
},
{
"epoch": 7.595238095238095,
"grad_norm": 0.02733513154089451,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3190
},
{
"epoch": 7.619047619047619,
"grad_norm": 0.03516070544719696,
"learning_rate": 5e-05,
"loss": 0.0032,
"step": 3200
},
{
"epoch": 7.642857142857143,
"grad_norm": 0.03665764257311821,
"learning_rate": 5e-05,
"loss": 0.0035,
"step": 3210
},
{
"epoch": 7.666666666666667,
"grad_norm": 0.03748982027173042,
"learning_rate": 5e-05,
"loss": 0.0039,
"step": 3220
},
{
"epoch": 7.690476190476191,
"grad_norm": 0.014199526980519295,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 3230
},
{
"epoch": 7.714285714285714,
"grad_norm": 0.01877681352198124,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 3240
},
{
"epoch": 7.738095238095238,
"grad_norm": 0.013927435502409935,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3250
},
{
"epoch": 7.761904761904762,
"grad_norm": 0.012296963483095169,
"learning_rate": 5e-05,
"loss": 0.003,
"step": 3260
},
{
"epoch": 7.785714285714286,
"grad_norm": 0.022298045456409454,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3270
},
{
"epoch": 7.809523809523809,
"grad_norm": 0.04318477213382721,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 3280
},
{
"epoch": 7.833333333333333,
"grad_norm": 0.03864269703626633,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3290
},
{
"epoch": 7.857142857142857,
"grad_norm": 0.02180645242333412,
"learning_rate": 5e-05,
"loss": 0.0038,
"step": 3300
},
{
"epoch": 7.880952380952381,
"grad_norm": 0.03274507075548172,
"learning_rate": 5e-05,
"loss": 0.0039,
"step": 3310
},
{
"epoch": 7.904761904761905,
"grad_norm": 0.015056164003908634,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3320
},
{
"epoch": 7.928571428571429,
"grad_norm": 0.024232054129242897,
"learning_rate": 5e-05,
"loss": 0.003,
"step": 3330
},
{
"epoch": 7.9523809523809526,
"grad_norm": 0.01742437854409218,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3340
},
{
"epoch": 7.976190476190476,
"grad_norm": 0.024546030908823013,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3350
},
{
"epoch": 8.0,
"grad_norm": 0.020465753972530365,
"learning_rate": 5e-05,
"loss": 0.0037,
"step": 3360
},
{
"epoch": 8.023809523809524,
"grad_norm": 0.02927527017891407,
"learning_rate": 5e-05,
"loss": 0.0041,
"step": 3370
},
{
"epoch": 8.047619047619047,
"grad_norm": 0.01250431314110756,
"learning_rate": 5e-05,
"loss": 0.0032,
"step": 3380
},
{
"epoch": 8.071428571428571,
"grad_norm": 0.014103593304753304,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3390
},
{
"epoch": 8.095238095238095,
"grad_norm": 0.018405349925160408,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3400
},
{
"epoch": 8.119047619047619,
"grad_norm": 0.020473845303058624,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 3410
},
{
"epoch": 8.142857142857142,
"grad_norm": 0.035039015114307404,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 3420
},
{
"epoch": 8.166666666666666,
"grad_norm": 0.032077498733997345,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3430
},
{
"epoch": 8.19047619047619,
"grad_norm": 0.037181805819272995,
"learning_rate": 5e-05,
"loss": 0.003,
"step": 3440
},
{
"epoch": 8.214285714285714,
"grad_norm": 0.018256695941090584,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 3450
},
{
"epoch": 8.238095238095237,
"grad_norm": 0.01342601515352726,
"learning_rate": 5e-05,
"loss": 0.0034,
"step": 3460
},
{
"epoch": 8.261904761904763,
"grad_norm": 0.03634987398982048,
"learning_rate": 5e-05,
"loss": 0.0032,
"step": 3470
},
{
"epoch": 8.285714285714286,
"grad_norm": 0.050673890858888626,
"learning_rate": 5e-05,
"loss": 0.0021,
"step": 3480
},
{
"epoch": 8.30952380952381,
"grad_norm": 0.013780270703136921,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 3490
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.028823478147387505,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 3500
},
{
"epoch": 8.357142857142858,
"grad_norm": 0.024434154853224754,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3510
},
{
"epoch": 8.380952380952381,
"grad_norm": 0.025538841262459755,
"learning_rate": 5e-05,
"loss": 0.0024,
"step": 3520
},
{
"epoch": 8.404761904761905,
"grad_norm": 0.02040352113544941,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 3530
},
{
"epoch": 8.428571428571429,
"grad_norm": 0.031944241374731064,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 3540
},
{
"epoch": 8.452380952380953,
"grad_norm": 0.017712410539388657,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 3550
},
{
"epoch": 8.476190476190476,
"grad_norm": 0.030166227370500565,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 3560
},
{
"epoch": 8.5,
"grad_norm": 0.027017861604690552,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 3570
},
{
"epoch": 8.523809523809524,
"grad_norm": 0.025174567475914955,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3580
},
{
"epoch": 8.547619047619047,
"grad_norm": 0.02370358444750309,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 3590
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.019355561584234238,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 3600
},
{
"epoch": 8.595238095238095,
"grad_norm": 0.07427547127008438,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 3610
},
{
"epoch": 8.619047619047619,
"grad_norm": 0.011759583838284016,
"learning_rate": 5e-05,
"loss": 0.0021,
"step": 3620
},
{
"epoch": 8.642857142857142,
"grad_norm": 0.02567645162343979,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3630
},
{
"epoch": 8.666666666666666,
"grad_norm": 0.02148609049618244,
"learning_rate": 5e-05,
"loss": 0.0044,
"step": 3640
},
{
"epoch": 8.69047619047619,
"grad_norm": 0.021046146750450134,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 3650
},
{
"epoch": 8.714285714285714,
"grad_norm": 0.022530779242515564,
"learning_rate": 5e-05,
"loss": 0.0041,
"step": 3660
},
{
"epoch": 8.738095238095237,
"grad_norm": 0.008970026858150959,
"learning_rate": 5e-05,
"loss": 0.0034,
"step": 3670
},
{
"epoch": 8.761904761904763,
"grad_norm": 0.018136192113161087,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3680
},
{
"epoch": 8.785714285714286,
"grad_norm": 0.015567361377179623,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 3690
},
{
"epoch": 8.80952380952381,
"grad_norm": 0.0197913758456707,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 3700
},
{
"epoch": 8.833333333333334,
"grad_norm": 0.03988071531057358,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 3710
},
{
"epoch": 8.857142857142858,
"grad_norm": 0.03059978410601616,
"learning_rate": 5e-05,
"loss": 0.0034,
"step": 3720
},
{
"epoch": 8.880952380952381,
"grad_norm": 0.014758502133190632,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3730
},
{
"epoch": 8.904761904761905,
"grad_norm": 0.017123445868492126,
"learning_rate": 5e-05,
"loss": 0.0033,
"step": 3740
},
{
"epoch": 8.928571428571429,
"grad_norm": 0.02551579289138317,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3750
},
{
"epoch": 8.952380952380953,
"grad_norm": 0.014496712014079094,
"learning_rate": 5e-05,
"loss": 0.0031,
"step": 3760
},
{
"epoch": 8.976190476190476,
"grad_norm": 0.02045934647321701,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 3770
},
{
"epoch": 9.0,
"grad_norm": 0.02440367080271244,
"learning_rate": 5e-05,
"loss": 0.0035,
"step": 3780
},
{
"epoch": 9.023809523809524,
"grad_norm": 0.01835225522518158,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 3790
},
{
"epoch": 9.047619047619047,
"grad_norm": 0.007453398313373327,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 3800
},
{
"epoch": 9.071428571428571,
"grad_norm": 0.02707696706056595,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 3810
},
{
"epoch": 9.095238095238095,
"grad_norm": 0.020089933648705482,
"learning_rate": 5e-05,
"loss": 0.0022,
"step": 3820
},
{
"epoch": 9.119047619047619,
"grad_norm": 0.02362995594739914,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 3830
},
{
"epoch": 9.142857142857142,
"grad_norm": 0.00633473414927721,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 3840
},
{
"epoch": 9.166666666666666,
"grad_norm": 0.013043406419456005,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 3850
},
{
"epoch": 9.19047619047619,
"grad_norm": 0.010314088314771652,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 3860
},
{
"epoch": 9.214285714285714,
"grad_norm": 0.024023326113820076,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 3870
},
{
"epoch": 9.238095238095237,
"grad_norm": 0.033713992685079575,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 3880
},
{
"epoch": 9.261904761904763,
"grad_norm": 0.024672770872712135,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 3890
},
{
"epoch": 9.285714285714286,
"grad_norm": 0.017616739496588707,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 3900
},
{
"epoch": 9.30952380952381,
"grad_norm": 0.023528527468442917,
"learning_rate": 5e-05,
"loss": 0.0021,
"step": 3910
},
{
"epoch": 9.333333333333334,
"grad_norm": 0.04894119128584862,
"learning_rate": 5e-05,
"loss": 0.0026,
"step": 3920
},
{
"epoch": 9.357142857142858,
"grad_norm": 0.019267156720161438,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 3930
},
{
"epoch": 9.380952380952381,
"grad_norm": 0.020611826330423355,
"learning_rate": 5e-05,
"loss": 0.0026,
"step": 3940
},
{
"epoch": 9.404761904761905,
"grad_norm": 0.020003391429781914,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 3950
},
{
"epoch": 9.428571428571429,
"grad_norm": 0.017253611236810684,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 3960
},
{
"epoch": 9.452380952380953,
"grad_norm": 0.045779161155223846,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 3970
},
{
"epoch": 9.476190476190476,
"grad_norm": 0.024241678416728973,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 3980
},
{
"epoch": 9.5,
"grad_norm": 0.017647111788392067,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 3990
},
{
"epoch": 9.523809523809524,
"grad_norm": 0.028641676530241966,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4000
},
{
"epoch": 9.547619047619047,
"grad_norm": 0.047451332211494446,
"learning_rate": 5e-05,
"loss": 0.0016,
"step": 4010
},
{
"epoch": 9.571428571428571,
"grad_norm": 0.01619701273739338,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4020
},
{
"epoch": 9.595238095238095,
"grad_norm": 0.020237745717167854,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4030
},
{
"epoch": 9.619047619047619,
"grad_norm": 0.040860798209905624,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4040
},
{
"epoch": 9.642857142857142,
"grad_norm": 0.01412968896329403,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 4050
},
{
"epoch": 9.666666666666666,
"grad_norm": 0.05102560669183731,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4060
},
{
"epoch": 9.69047619047619,
"grad_norm": 0.04887241870164871,
"learning_rate": 5e-05,
"loss": 0.0029,
"step": 4070
},
{
"epoch": 9.714285714285714,
"grad_norm": 0.014414262026548386,
"learning_rate": 5e-05,
"loss": 0.0026,
"step": 4080
},
{
"epoch": 9.738095238095237,
"grad_norm": 0.011788238771259785,
"learning_rate": 5e-05,
"loss": 0.0024,
"step": 4090
},
{
"epoch": 9.761904761904763,
"grad_norm": 0.01252928376197815,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 4100
},
{
"epoch": 9.785714285714286,
"grad_norm": 0.02384638413786888,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4110
},
{
"epoch": 9.80952380952381,
"grad_norm": 0.02536243200302124,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4120
},
{
"epoch": 9.833333333333334,
"grad_norm": 0.015807705000042915,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4130
},
{
"epoch": 9.857142857142858,
"grad_norm": 0.01973266527056694,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4140
},
{
"epoch": 9.880952380952381,
"grad_norm": 0.030758731067180634,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 4150
},
{
"epoch": 9.904761904761905,
"grad_norm": 0.01584693230688572,
"learning_rate": 5e-05,
"loss": 0.0024,
"step": 4160
},
{
"epoch": 9.928571428571429,
"grad_norm": 0.014015723019838333,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 4170
},
{
"epoch": 9.952380952380953,
"grad_norm": 0.03584891930222511,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 4180
},
{
"epoch": 9.976190476190476,
"grad_norm": 0.047324661165475845,
"learning_rate": 5e-05,
"loss": 0.0028,
"step": 4190
},
{
"epoch": 10.0,
"grad_norm": 0.06452364474534988,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 4200
},
{
"epoch": 10.023809523809524,
"grad_norm": 0.044528111815452576,
"learning_rate": 5e-05,
"loss": 0.0025,
"step": 4210
},
{
"epoch": 10.047619047619047,
"grad_norm": 0.02596374787390232,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 4220
},
{
"epoch": 10.071428571428571,
"grad_norm": 0.01841367408633232,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 4230
},
{
"epoch": 10.095238095238095,
"grad_norm": 0.03294382989406586,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 4240
},
{
"epoch": 10.119047619047619,
"grad_norm": 0.026590291410684586,
"learning_rate": 5e-05,
"loss": 0.0021,
"step": 4250
},
{
"epoch": 10.142857142857142,
"grad_norm": 0.019535573199391365,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4260
},
{
"epoch": 10.166666666666666,
"grad_norm": 0.04901567101478577,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4270
},
{
"epoch": 10.19047619047619,
"grad_norm": 0.023335624486207962,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 4280
},
{
"epoch": 10.214285714285714,
"grad_norm": 0.02412569336593151,
"learning_rate": 5e-05,
"loss": 0.0021,
"step": 4290
},
{
"epoch": 10.238095238095237,
"grad_norm": 0.015281541272997856,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4300
},
{
"epoch": 10.261904761904763,
"grad_norm": 0.037770889699459076,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 4310
},
{
"epoch": 10.285714285714286,
"grad_norm": 0.02363361231982708,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 4320
},
{
"epoch": 10.30952380952381,
"grad_norm": 0.019674105569720268,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4330
},
{
"epoch": 10.333333333333334,
"grad_norm": 0.02023392915725708,
"learning_rate": 5e-05,
"loss": 0.0032,
"step": 4340
},
{
"epoch": 10.357142857142858,
"grad_norm": 0.01189426425844431,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 4350
},
{
"epoch": 10.380952380952381,
"grad_norm": 0.041460879147052765,
"learning_rate": 5e-05,
"loss": 0.0024,
"step": 4360
},
{
"epoch": 10.404761904761905,
"grad_norm": 0.0068189771845936775,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4370
},
{
"epoch": 10.428571428571429,
"grad_norm": 0.019572466611862183,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 4380
},
{
"epoch": 10.452380952380953,
"grad_norm": 0.01345143560320139,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4390
},
{
"epoch": 10.476190476190476,
"grad_norm": 0.009295144118368626,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4400
},
{
"epoch": 10.5,
"grad_norm": 0.02156994678080082,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4410
},
{
"epoch": 10.523809523809524,
"grad_norm": 0.020932510495185852,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4420
},
{
"epoch": 10.547619047619047,
"grad_norm": 0.01585063897073269,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4430
},
{
"epoch": 10.571428571428571,
"grad_norm": 0.009516189806163311,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 4440
},
{
"epoch": 10.595238095238095,
"grad_norm": 0.026152120903134346,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4450
},
{
"epoch": 10.619047619047619,
"grad_norm": 0.0138620026409626,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4460
},
{
"epoch": 10.642857142857142,
"grad_norm": 0.03089461848139763,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4470
},
{
"epoch": 10.666666666666666,
"grad_norm": 0.03294903039932251,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4480
},
{
"epoch": 10.69047619047619,
"grad_norm": 0.00928263645619154,
"learning_rate": 5e-05,
"loss": 0.002,
"step": 4490
},
{
"epoch": 10.714285714285714,
"grad_norm": 0.029545562341809273,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 4500
},
{
"epoch": 10.738095238095237,
"grad_norm": 0.009242654778063297,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4510
},
{
"epoch": 10.761904761904763,
"grad_norm": 0.014272899366915226,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4520
},
{
"epoch": 10.785714285714286,
"grad_norm": 0.02279002219438553,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 4530
},
{
"epoch": 10.80952380952381,
"grad_norm": 0.03252875804901123,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4540
},
{
"epoch": 10.833333333333334,
"grad_norm": 0.009702946059405804,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4550
},
{
"epoch": 10.857142857142858,
"grad_norm": 0.03491836413741112,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4560
},
{
"epoch": 10.880952380952381,
"grad_norm": 0.03947118669748306,
"learning_rate": 5e-05,
"loss": 0.0024,
"step": 4570
},
{
"epoch": 10.904761904761905,
"grad_norm": 0.025778034701943398,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4580
},
{
"epoch": 10.928571428571429,
"grad_norm": 0.02018180675804615,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4590
},
{
"epoch": 10.952380952380953,
"grad_norm": 0.005483316257596016,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4600
},
{
"epoch": 10.976190476190476,
"grad_norm": 0.017275342717766762,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4610
},
{
"epoch": 11.0,
"grad_norm": 0.019064828753471375,
"learning_rate": 5e-05,
"loss": 0.0022,
"step": 4620
},
{
"epoch": 11.023809523809524,
"grad_norm": 0.013529101386666298,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4630
},
{
"epoch": 11.047619047619047,
"grad_norm": 0.01693836972117424,
"learning_rate": 5e-05,
"loss": 0.0022,
"step": 4640
},
{
"epoch": 11.071428571428571,
"grad_norm": 0.02852066047489643,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4650
},
{
"epoch": 11.095238095238095,
"grad_norm": 0.018933508545160294,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4660
},
{
"epoch": 11.119047619047619,
"grad_norm": 0.021359335631132126,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4670
},
{
"epoch": 11.142857142857142,
"grad_norm": 0.019422901794314384,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4680
},
{
"epoch": 11.166666666666666,
"grad_norm": 0.013169987127184868,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4690
},
{
"epoch": 11.19047619047619,
"grad_norm": 0.03702198714017868,
"learning_rate": 5e-05,
"loss": 0.0018,
"step": 4700
},
{
"epoch": 11.214285714285714,
"grad_norm": 0.030341317877173424,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4710
},
{
"epoch": 11.238095238095237,
"grad_norm": 0.020110273733735085,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4720
},
{
"epoch": 11.261904761904763,
"grad_norm": 0.02008882723748684,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 4730
},
{
"epoch": 11.285714285714286,
"grad_norm": 0.06150029972195625,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 4740
},
{
"epoch": 11.30952380952381,
"grad_norm": 0.0267238300293684,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4750
},
{
"epoch": 11.333333333333334,
"grad_norm": 0.008372998796403408,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 4760
},
{
"epoch": 11.357142857142858,
"grad_norm": 0.03507944568991661,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4770
},
{
"epoch": 11.380952380952381,
"grad_norm": 0.018310125917196274,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 4780
},
{
"epoch": 11.404761904761905,
"grad_norm": 0.009987925179302692,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4790
},
{
"epoch": 11.428571428571429,
"grad_norm": 0.0077149407006800175,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4800
},
{
"epoch": 11.452380952380953,
"grad_norm": 0.013563682325184345,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4810
},
{
"epoch": 11.476190476190476,
"grad_norm": 0.026644017547369003,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 4820
},
{
"epoch": 11.5,
"grad_norm": 0.00553143722936511,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 4830
},
{
"epoch": 11.523809523809524,
"grad_norm": 0.014055385254323483,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 4840
},
{
"epoch": 11.547619047619047,
"grad_norm": 0.02219008095562458,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4850
},
{
"epoch": 11.571428571428571,
"grad_norm": 0.021185655146837234,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4860
},
{
"epoch": 11.595238095238095,
"grad_norm": 0.012521096505224705,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 4870
},
{
"epoch": 11.619047619047619,
"grad_norm": 0.027117032557725906,
"learning_rate": 5e-05,
"loss": 0.0021,
"step": 4880
},
{
"epoch": 11.642857142857142,
"grad_norm": 0.03251611068844795,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4890
},
{
"epoch": 11.666666666666666,
"grad_norm": 0.014405048452317715,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 4900
},
{
"epoch": 11.69047619047619,
"grad_norm": 0.037754759192466736,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 4910
},
{
"epoch": 11.714285714285714,
"grad_norm": 0.008099643513560295,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 4920
},
{
"epoch": 11.738095238095237,
"grad_norm": 0.01963508315384388,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4930
},
{
"epoch": 11.761904761904763,
"grad_norm": 0.017585638910531998,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 4940
},
{
"epoch": 11.785714285714286,
"grad_norm": 0.030090732499957085,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4950
},
{
"epoch": 11.80952380952381,
"grad_norm": 0.018853867426514626,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 4960
},
{
"epoch": 11.833333333333334,
"grad_norm": 0.009620056487619877,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 4970
},
{
"epoch": 11.857142857142858,
"grad_norm": 0.05379125103354454,
"learning_rate": 5e-05,
"loss": 0.0016,
"step": 4980
},
{
"epoch": 11.880952380952381,
"grad_norm": 0.022303573787212372,
"learning_rate": 5e-05,
"loss": 0.0019,
"step": 4990
},
{
"epoch": 11.904761904761905,
"grad_norm": 0.008755918592214584,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5000
},
{
"epoch": 11.928571428571429,
"grad_norm": 0.009491810575127602,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 5010
},
{
"epoch": 11.952380952380953,
"grad_norm": 0.02040296606719494,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 5020
},
{
"epoch": 11.976190476190476,
"grad_norm": 0.012556660920381546,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5030
},
{
"epoch": 12.0,
"grad_norm": 0.008164563216269016,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5040
},
{
"epoch": 12.023809523809524,
"grad_norm": 0.02336413785815239,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 5050
},
{
"epoch": 12.047619047619047,
"grad_norm": 0.0061961328610777855,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5060
},
{
"epoch": 12.071428571428571,
"grad_norm": 0.012313502840697765,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5070
},
{
"epoch": 12.095238095238095,
"grad_norm": 0.010624333284795284,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5080
},
{
"epoch": 12.119047619047619,
"grad_norm": 0.01863931119441986,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5090
},
{
"epoch": 12.142857142857142,
"grad_norm": 0.01563103497028351,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 5100
},
{
"epoch": 12.166666666666666,
"grad_norm": 0.014355426654219627,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5110
},
{
"epoch": 12.19047619047619,
"grad_norm": 0.011460881680250168,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5120
},
{
"epoch": 12.214285714285714,
"grad_norm": 0.01539954636245966,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5130
},
{
"epoch": 12.238095238095237,
"grad_norm": 0.022988399490714073,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 5140
},
{
"epoch": 12.261904761904763,
"grad_norm": 0.04906224086880684,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5150
},
{
"epoch": 12.285714285714286,
"grad_norm": 0.020332837477326393,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5160
},
{
"epoch": 12.30952380952381,
"grad_norm": 0.008848396129906178,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 5170
},
{
"epoch": 12.333333333333334,
"grad_norm": 0.016721338033676147,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 5180
},
{
"epoch": 12.357142857142858,
"grad_norm": 0.024787932634353638,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5190
},
{
"epoch": 12.380952380952381,
"grad_norm": 0.016609011217951775,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5200
},
{
"epoch": 12.404761904761905,
"grad_norm": 0.02383277751505375,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5210
},
{
"epoch": 12.428571428571429,
"grad_norm": 0.009788766503334045,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5220
},
{
"epoch": 12.452380952380953,
"grad_norm": 0.008273842744529247,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5230
},
{
"epoch": 12.476190476190476,
"grad_norm": 0.0200930405408144,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5240
},
{
"epoch": 12.5,
"grad_norm": 0.004213349893689156,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5250
},
{
"epoch": 12.523809523809524,
"grad_norm": 0.010746212676167488,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5260
},
{
"epoch": 12.547619047619047,
"grad_norm": 0.009187485091388226,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 5270
},
{
"epoch": 12.571428571428571,
"grad_norm": 0.011904279701411724,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5280
},
{
"epoch": 12.595238095238095,
"grad_norm": 0.011605814099311829,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5290
},
{
"epoch": 12.619047619047619,
"grad_norm": 0.0053769913502037525,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 5300
},
{
"epoch": 12.642857142857142,
"grad_norm": 0.028224363923072815,
"learning_rate": 5e-05,
"loss": 0.0015,
"step": 5310
},
{
"epoch": 12.666666666666666,
"grad_norm": 0.009584328159689903,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5320
},
{
"epoch": 12.69047619047619,
"grad_norm": 0.012837843038141727,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5330
},
{
"epoch": 12.714285714285714,
"grad_norm": 0.013618317432701588,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5340
},
{
"epoch": 12.738095238095237,
"grad_norm": 0.010568618774414062,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5350
},
{
"epoch": 12.761904761904763,
"grad_norm": 0.015289129689335823,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 5360
},
{
"epoch": 12.785714285714286,
"grad_norm": 0.015249863266944885,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5370
},
{
"epoch": 12.80952380952381,
"grad_norm": 0.012764991261065006,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5380
},
{
"epoch": 12.833333333333334,
"grad_norm": 0.018289143219590187,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5390
},
{
"epoch": 12.857142857142858,
"grad_norm": 0.006316890940070152,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5400
},
{
"epoch": 12.880952380952381,
"grad_norm": 0.017579294741153717,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5410
},
{
"epoch": 12.904761904761905,
"grad_norm": 0.01720363274216652,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5420
},
{
"epoch": 12.928571428571429,
"grad_norm": 0.012609842233359814,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5430
},
{
"epoch": 12.952380952380953,
"grad_norm": 0.006522722542285919,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5440
},
{
"epoch": 12.976190476190476,
"grad_norm": 0.015605674125254154,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5450
},
{
"epoch": 13.0,
"grad_norm": 0.009054956026375294,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5460
},
{
"epoch": 13.023809523809524,
"grad_norm": 0.016910897567868233,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5470
},
{
"epoch": 13.047619047619047,
"grad_norm": 0.012559573166072369,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5480
},
{
"epoch": 13.071428571428571,
"grad_norm": 0.01342256274074316,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5490
},
{
"epoch": 13.095238095238095,
"grad_norm": 0.008237732574343681,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5500
},
{
"epoch": 13.119047619047619,
"grad_norm": 0.01400575041770935,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5510
},
{
"epoch": 13.142857142857142,
"grad_norm": 0.019757332280278206,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5520
},
{
"epoch": 13.166666666666666,
"grad_norm": 0.018210873007774353,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5530
},
{
"epoch": 13.19047619047619,
"grad_norm": 0.0070494236424565315,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5540
},
{
"epoch": 13.214285714285714,
"grad_norm": 0.0030856772791594267,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5550
},
{
"epoch": 13.238095238095237,
"grad_norm": 0.0075521948747336864,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5560
},
{
"epoch": 13.261904761904763,
"grad_norm": 0.01915762387216091,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5570
},
{
"epoch": 13.285714285714286,
"grad_norm": 0.007310226559638977,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5580
},
{
"epoch": 13.30952380952381,
"grad_norm": 0.023431280627846718,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5590
},
{
"epoch": 13.333333333333334,
"grad_norm": 0.0055434429086744785,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5600
},
{
"epoch": 13.357142857142858,
"grad_norm": 0.051787495613098145,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5610
},
{
"epoch": 13.380952380952381,
"grad_norm": 0.010900055058300495,
"learning_rate": 5e-05,
"loss": 0.0017,
"step": 5620
},
{
"epoch": 13.404761904761905,
"grad_norm": 0.01772812381386757,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5630
},
{
"epoch": 13.428571428571429,
"grad_norm": 0.003741204971447587,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5640
},
{
"epoch": 13.452380952380953,
"grad_norm": 0.009689460508525372,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5650
},
{
"epoch": 13.476190476190476,
"grad_norm": 0.004325892776250839,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5660
},
{
"epoch": 13.5,
"grad_norm": 0.01864994689822197,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 5670
},
{
"epoch": 13.523809523809524,
"grad_norm": 0.011471171863377094,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5680
},
{
"epoch": 13.547619047619047,
"grad_norm": 0.022726034745573997,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5690
},
{
"epoch": 13.571428571428571,
"grad_norm": 0.0025451872497797012,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 5700
},
{
"epoch": 13.595238095238095,
"grad_norm": 0.0019215985666960478,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 5710
},
{
"epoch": 13.619047619047619,
"grad_norm": 0.03238370642066002,
"learning_rate": 5e-05,
"loss": 0.0012,
"step": 5720
},
{
"epoch": 13.642857142857142,
"grad_norm": 0.042818158864974976,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5730
},
{
"epoch": 13.666666666666666,
"grad_norm": 0.019736241549253464,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5740
},
{
"epoch": 13.69047619047619,
"grad_norm": 0.009512594901025295,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 5750
},
{
"epoch": 13.714285714285714,
"grad_norm": 0.01680351048707962,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5760
},
{
"epoch": 13.738095238095237,
"grad_norm": 0.02108100987970829,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5770
},
{
"epoch": 13.761904761904763,
"grad_norm": 0.01012613344937563,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5780
},
{
"epoch": 13.785714285714286,
"grad_norm": 0.012497921474277973,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 5790
},
{
"epoch": 13.80952380952381,
"grad_norm": 0.006895515602082014,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 5800
},
{
"epoch": 13.833333333333334,
"grad_norm": 0.026326004415750504,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5810
},
{
"epoch": 13.857142857142858,
"grad_norm": 0.02858036197721958,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5820
},
{
"epoch": 13.880952380952381,
"grad_norm": 0.014894322492182255,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 5830
},
{
"epoch": 13.904761904761905,
"grad_norm": 0.006153046153485775,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5840
},
{
"epoch": 13.928571428571429,
"grad_norm": 0.02046913094818592,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5850
},
{
"epoch": 13.952380952380953,
"grad_norm": 0.05420629680156708,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5860
},
{
"epoch": 13.976190476190476,
"grad_norm": 0.01419808715581894,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 5870
},
{
"epoch": 14.0,
"grad_norm": 0.00912909209728241,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5880
},
{
"epoch": 14.023809523809524,
"grad_norm": 0.017824770882725716,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 5890
},
{
"epoch": 14.047619047619047,
"grad_norm": 0.008159978315234184,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5900
},
{
"epoch": 14.071428571428571,
"grad_norm": 0.02015644870698452,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 5910
},
{
"epoch": 14.095238095238095,
"grad_norm": 0.020154140889644623,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5920
},
{
"epoch": 14.119047619047619,
"grad_norm": 0.008028664626181126,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 5930
},
{
"epoch": 14.142857142857142,
"grad_norm": 0.01114033441990614,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 5940
},
{
"epoch": 14.166666666666666,
"grad_norm": 0.009640121832489967,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 5950
},
{
"epoch": 14.19047619047619,
"grad_norm": 0.009514660574495792,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 5960
},
{
"epoch": 14.214285714285714,
"grad_norm": 0.010505247861146927,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 5970
},
{
"epoch": 14.238095238095237,
"grad_norm": 0.0075341821648180485,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 5980
},
{
"epoch": 14.261904761904763,
"grad_norm": 0.007736085448414087,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 5990
},
{
"epoch": 14.285714285714286,
"grad_norm": 0.006500266492366791,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6000
},
{
"epoch": 14.30952380952381,
"grad_norm": 0.003755223471671343,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6010
},
{
"epoch": 14.333333333333334,
"grad_norm": 0.03438568115234375,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 6020
},
{
"epoch": 14.357142857142858,
"grad_norm": 0.003875867696478963,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 6030
},
{
"epoch": 14.380952380952381,
"grad_norm": 0.041189443320035934,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6040
},
{
"epoch": 14.404761904761905,
"grad_norm": 0.044477108865976334,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6050
},
{
"epoch": 14.428571428571429,
"grad_norm": 0.021848198026418686,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 6060
},
{
"epoch": 14.452380952380953,
"grad_norm": 0.009198046289384365,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6070
},
{
"epoch": 14.476190476190476,
"grad_norm": 0.00906557496637106,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6080
},
{
"epoch": 14.5,
"grad_norm": 0.027908943593502045,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6090
},
{
"epoch": 14.523809523809524,
"grad_norm": 0.010970670729875565,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6100
},
{
"epoch": 14.547619047619047,
"grad_norm": 0.02569267526268959,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6110
},
{
"epoch": 14.571428571428571,
"grad_norm": 0.00839224737137556,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 6120
},
{
"epoch": 14.595238095238095,
"grad_norm": 0.00571605795994401,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6130
},
{
"epoch": 14.619047619047619,
"grad_norm": 0.01305799838155508,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6140
},
{
"epoch": 14.642857142857142,
"grad_norm": 0.033176738768815994,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6150
},
{
"epoch": 14.666666666666666,
"grad_norm": 0.08488498628139496,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 6160
},
{
"epoch": 14.69047619047619,
"grad_norm": 0.008058450184762478,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6170
},
{
"epoch": 14.714285714285714,
"grad_norm": 0.028126804158091545,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6180
},
{
"epoch": 14.738095238095237,
"grad_norm": 0.01536962017416954,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6190
},
{
"epoch": 14.761904761904763,
"grad_norm": 0.007554737385362387,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6200
},
{
"epoch": 14.785714285714286,
"grad_norm": 0.009313613176345825,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6210
},
{
"epoch": 14.80952380952381,
"grad_norm": 0.03818364813923836,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6220
},
{
"epoch": 14.833333333333334,
"grad_norm": 0.004172751680016518,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6230
},
{
"epoch": 14.857142857142858,
"grad_norm": 0.02367938496172428,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6240
},
{
"epoch": 14.880952380952381,
"grad_norm": 0.029346376657485962,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 6250
},
{
"epoch": 14.904761904761905,
"grad_norm": 0.008894797414541245,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6260
},
{
"epoch": 14.928571428571429,
"grad_norm": 0.0210129227489233,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6270
},
{
"epoch": 14.952380952380953,
"grad_norm": 0.006997206248342991,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6280
},
{
"epoch": 14.976190476190476,
"grad_norm": 0.027477620169520378,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 6290
},
{
"epoch": 15.0,
"grad_norm": 0.01882118359208107,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 6300
},
{
"epoch": 15.023809523809524,
"grad_norm": 0.004850170575082302,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6310
},
{
"epoch": 15.047619047619047,
"grad_norm": 0.03450813889503479,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 6320
},
{
"epoch": 15.071428571428571,
"grad_norm": 0.012980910018086433,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6330
},
{
"epoch": 15.095238095238095,
"grad_norm": 0.01641819067299366,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6340
},
{
"epoch": 15.119047619047619,
"grad_norm": 0.0036320008803159,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6350
},
{
"epoch": 15.142857142857142,
"grad_norm": 0.005455750040709972,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6360
},
{
"epoch": 15.166666666666666,
"grad_norm": 0.007276155520230532,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6370
},
{
"epoch": 15.19047619047619,
"grad_norm": 0.02196861244738102,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 6380
},
{
"epoch": 15.214285714285714,
"grad_norm": 0.004664530977606773,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 6390
},
{
"epoch": 15.238095238095237,
"grad_norm": 0.008527121506631374,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6400
},
{
"epoch": 15.261904761904763,
"grad_norm": 0.015898073092103004,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6410
},
{
"epoch": 15.285714285714286,
"grad_norm": 0.007485649082809687,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6420
},
{
"epoch": 15.30952380952381,
"grad_norm": 0.011031672358512878,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6430
},
{
"epoch": 15.333333333333334,
"grad_norm": 0.017634835094213486,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6440
},
{
"epoch": 15.357142857142858,
"grad_norm": 0.010040491819381714,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6450
},
{
"epoch": 15.380952380952381,
"grad_norm": 0.00803917832672596,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6460
},
{
"epoch": 15.404761904761905,
"grad_norm": 0.002537789987400174,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6470
},
{
"epoch": 15.428571428571429,
"grad_norm": 0.004426058381795883,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 6480
},
{
"epoch": 15.452380952380953,
"grad_norm": 0.004302819259464741,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6490
},
{
"epoch": 15.476190476190476,
"grad_norm": 0.006018012762069702,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6500
},
{
"epoch": 15.5,
"grad_norm": 0.00840548425912857,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6510
},
{
"epoch": 15.523809523809524,
"grad_norm": 0.011578329838812351,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6520
},
{
"epoch": 15.547619047619047,
"grad_norm": 0.013048551976680756,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 6530
},
{
"epoch": 15.571428571428571,
"grad_norm": 0.004024627152830362,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 6540
},
{
"epoch": 15.595238095238095,
"grad_norm": 0.007286364212632179,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6550
},
{
"epoch": 15.619047619047619,
"grad_norm": 0.014821533113718033,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6560
},
{
"epoch": 15.642857142857142,
"grad_norm": 0.002453850582242012,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6570
},
{
"epoch": 15.666666666666666,
"grad_norm": 0.03152019903063774,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 6580
},
{
"epoch": 15.69047619047619,
"grad_norm": 0.046064406633377075,
"learning_rate": 5e-05,
"loss": 0.0014,
"step": 6590
},
{
"epoch": 15.714285714285714,
"grad_norm": 0.019682571291923523,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6600
},
{
"epoch": 15.738095238095237,
"grad_norm": 0.0024483215529471636,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6610
},
{
"epoch": 15.761904761904763,
"grad_norm": 0.008708986453711987,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6620
},
{
"epoch": 15.785714285714286,
"grad_norm": 0.006394381169229746,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6630
},
{
"epoch": 15.80952380952381,
"grad_norm": 0.03494114428758621,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6640
},
{
"epoch": 15.833333333333334,
"grad_norm": 0.02163984254002571,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6650
},
{
"epoch": 15.857142857142858,
"grad_norm": 0.00985416304320097,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6660
},
{
"epoch": 15.880952380952381,
"grad_norm": 0.008056841790676117,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 6670
},
{
"epoch": 15.904761904761905,
"grad_norm": 0.004157788585871458,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6680
},
{
"epoch": 15.928571428571429,
"grad_norm": 0.00597544526681304,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6690
},
{
"epoch": 15.952380952380953,
"grad_norm": 0.023443806916475296,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6700
},
{
"epoch": 15.976190476190476,
"grad_norm": 0.012778760865330696,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 6710
},
{
"epoch": 16.0,
"grad_norm": 0.015170056372880936,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6720
},
{
"epoch": 16.023809523809526,
"grad_norm": 0.01816493459045887,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 6730
},
{
"epoch": 16.047619047619047,
"grad_norm": 0.00630978075787425,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6740
},
{
"epoch": 16.071428571428573,
"grad_norm": 0.004600324667990208,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 6750
},
{
"epoch": 16.095238095238095,
"grad_norm": 0.03235204890370369,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6760
},
{
"epoch": 16.11904761904762,
"grad_norm": 0.012307102791965008,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6770
},
{
"epoch": 16.142857142857142,
"grad_norm": 0.0052837226539850235,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6780
},
{
"epoch": 16.166666666666668,
"grad_norm": 0.02251107059419155,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6790
},
{
"epoch": 16.19047619047619,
"grad_norm": 0.011189977638423443,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6800
},
{
"epoch": 16.214285714285715,
"grad_norm": 0.05799144506454468,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 6810
},
{
"epoch": 16.238095238095237,
"grad_norm": 0.023233134299516678,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6820
},
{
"epoch": 16.261904761904763,
"grad_norm": 0.005226644221693277,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6830
},
{
"epoch": 16.285714285714285,
"grad_norm": 0.022896727547049522,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6840
},
{
"epoch": 16.30952380952381,
"grad_norm": 0.009060421027243137,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 6850
},
{
"epoch": 16.333333333333332,
"grad_norm": 0.019654234871268272,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 6860
},
{
"epoch": 16.357142857142858,
"grad_norm": 0.01879570446908474,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6870
},
{
"epoch": 16.38095238095238,
"grad_norm": 0.015871930867433548,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 6880
},
{
"epoch": 16.404761904761905,
"grad_norm": 0.005459831561893225,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6890
},
{
"epoch": 16.428571428571427,
"grad_norm": 0.002002556109800935,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 6900
},
{
"epoch": 16.452380952380953,
"grad_norm": 0.00361634767614305,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6910
},
{
"epoch": 16.476190476190474,
"grad_norm": 0.008024908602237701,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6920
},
{
"epoch": 16.5,
"grad_norm": 0.0015706607373431325,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6930
},
{
"epoch": 16.523809523809526,
"grad_norm": 0.015266403555870056,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 6940
},
{
"epoch": 16.547619047619047,
"grad_norm": 0.03144041448831558,
"learning_rate": 5e-05,
"loss": 0.0013,
"step": 6950
},
{
"epoch": 16.571428571428573,
"grad_norm": 0.0020375235471874475,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6960
},
{
"epoch": 16.595238095238095,
"grad_norm": 0.03112536109983921,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 6970
},
{
"epoch": 16.61904761904762,
"grad_norm": 0.005854692310094833,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 6980
},
{
"epoch": 16.642857142857142,
"grad_norm": 0.01317546609789133,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 6990
},
{
"epoch": 16.666666666666668,
"grad_norm": 0.008511519059538841,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7000
},
{
"epoch": 16.69047619047619,
"grad_norm": 0.006545855663716793,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7010
},
{
"epoch": 16.714285714285715,
"grad_norm": 0.010415365919470787,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7020
},
{
"epoch": 16.738095238095237,
"grad_norm": 0.01626661792397499,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7030
},
{
"epoch": 16.761904761904763,
"grad_norm": 0.005767187103629112,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7040
},
{
"epoch": 16.785714285714285,
"grad_norm": 0.01930147223174572,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7050
},
{
"epoch": 16.80952380952381,
"grad_norm": 0.022985486313700676,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 7060
},
{
"epoch": 16.833333333333332,
"grad_norm": 0.015567641705274582,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7070
},
{
"epoch": 16.857142857142858,
"grad_norm": 0.023370876908302307,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7080
},
{
"epoch": 16.88095238095238,
"grad_norm": 0.007470586337149143,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7090
},
{
"epoch": 16.904761904761905,
"grad_norm": 0.006150704808533192,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7100
},
{
"epoch": 16.928571428571427,
"grad_norm": 0.0042198821902275085,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7110
},
{
"epoch": 16.952380952380953,
"grad_norm": 0.017077218741178513,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7120
},
{
"epoch": 16.976190476190474,
"grad_norm": 0.023657051846385002,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7130
},
{
"epoch": 17.0,
"grad_norm": 0.016245121136307716,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7140
},
{
"epoch": 17.023809523809526,
"grad_norm": 0.0030179405584931374,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7150
},
{
"epoch": 17.047619047619047,
"grad_norm": 0.025457536801695824,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7160
},
{
"epoch": 17.071428571428573,
"grad_norm": 0.03168899938464165,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7170
},
{
"epoch": 17.095238095238095,
"grad_norm": 0.036503903567790985,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 7180
},
{
"epoch": 17.11904761904762,
"grad_norm": 0.0242981668561697,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 7190
},
{
"epoch": 17.142857142857142,
"grad_norm": 0.014298885129392147,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7200
},
{
"epoch": 17.166666666666668,
"grad_norm": 0.02301887236535549,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7210
},
{
"epoch": 17.19047619047619,
"grad_norm": 0.03424462303519249,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 7220
},
{
"epoch": 17.214285714285715,
"grad_norm": 0.023424234241247177,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7230
},
{
"epoch": 17.238095238095237,
"grad_norm": 0.023134572431445122,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 7240
},
{
"epoch": 17.261904761904763,
"grad_norm": 0.025860372930765152,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 7250
},
{
"epoch": 17.285714285714285,
"grad_norm": 0.01248745433986187,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7260
},
{
"epoch": 17.30952380952381,
"grad_norm": 0.018757270649075508,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7270
},
{
"epoch": 17.333333333333332,
"grad_norm": 0.011798521503806114,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7280
},
{
"epoch": 17.357142857142858,
"grad_norm": 0.020112033933401108,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7290
},
{
"epoch": 17.38095238095238,
"grad_norm": 0.009371660649776459,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7300
},
{
"epoch": 17.404761904761905,
"grad_norm": 0.010503299534320831,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7310
},
{
"epoch": 17.428571428571427,
"grad_norm": 0.02050066739320755,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7320
},
{
"epoch": 17.452380952380953,
"grad_norm": 0.034579671919345856,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 7330
},
{
"epoch": 17.476190476190474,
"grad_norm": 0.02491345815360546,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7340
},
{
"epoch": 17.5,
"grad_norm": 0.0013771721860393882,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7350
},
{
"epoch": 17.523809523809526,
"grad_norm": 0.012594480067491531,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7360
},
{
"epoch": 17.547619047619047,
"grad_norm": 0.025901375338435173,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7370
},
{
"epoch": 17.571428571428573,
"grad_norm": 0.004503455013036728,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7380
},
{
"epoch": 17.595238095238095,
"grad_norm": 0.01678163930773735,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7390
},
{
"epoch": 17.61904761904762,
"grad_norm": 0.01972806081175804,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7400
},
{
"epoch": 17.642857142857142,
"grad_norm": 0.005659343674778938,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7410
},
{
"epoch": 17.666666666666668,
"grad_norm": 0.010966219939291477,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7420
},
{
"epoch": 17.69047619047619,
"grad_norm": 0.01521807536482811,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7430
},
{
"epoch": 17.714285714285715,
"grad_norm": 0.0045644501224160194,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7440
},
{
"epoch": 17.738095238095237,
"grad_norm": 0.0009657703340053558,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7450
},
{
"epoch": 17.761904761904763,
"grad_norm": 0.03967520594596863,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7460
},
{
"epoch": 17.785714285714285,
"grad_norm": 0.003580782562494278,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 7470
},
{
"epoch": 17.80952380952381,
"grad_norm": 0.02645096555352211,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7480
},
{
"epoch": 17.833333333333332,
"grad_norm": 0.004722685553133488,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7490
},
{
"epoch": 17.857142857142858,
"grad_norm": 0.026108304038643837,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7500
},
{
"epoch": 17.88095238095238,
"grad_norm": 0.008777507580816746,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7510
},
{
"epoch": 17.904761904761905,
"grad_norm": 0.011482530273497105,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7520
},
{
"epoch": 17.928571428571427,
"grad_norm": 0.003905653487890959,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 7530
},
{
"epoch": 17.952380952380953,
"grad_norm": 0.020037874579429626,
"learning_rate": 5e-05,
"loss": 0.001,
"step": 7540
},
{
"epoch": 17.976190476190474,
"grad_norm": 0.017041349783539772,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7550
},
{
"epoch": 18.0,
"grad_norm": 0.0016616099746897817,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7560
},
{
"epoch": 18.023809523809526,
"grad_norm": 0.021135887131094933,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7570
},
{
"epoch": 18.047619047619047,
"grad_norm": 0.010796349495649338,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7580
},
{
"epoch": 18.071428571428573,
"grad_norm": 0.018032172694802284,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7590
},
{
"epoch": 18.095238095238095,
"grad_norm": 0.028129780665040016,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 7600
},
{
"epoch": 18.11904761904762,
"grad_norm": 0.0026430708821862936,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7610
},
{
"epoch": 18.142857142857142,
"grad_norm": 0.00862652063369751,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7620
},
{
"epoch": 18.166666666666668,
"grad_norm": 0.0017266141949221492,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 7630
},
{
"epoch": 18.19047619047619,
"grad_norm": 0.002414742251858115,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7640
},
{
"epoch": 18.214285714285715,
"grad_norm": 0.002050422364845872,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7650
},
{
"epoch": 18.238095238095237,
"grad_norm": 0.0194852314889431,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7660
},
{
"epoch": 18.261904761904763,
"grad_norm": 0.01717439480125904,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7670
},
{
"epoch": 18.285714285714285,
"grad_norm": 0.011901612393558025,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7680
},
{
"epoch": 18.30952380952381,
"grad_norm": 0.029567129909992218,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 7690
},
{
"epoch": 18.333333333333332,
"grad_norm": 0.006910389289259911,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 7700
},
{
"epoch": 18.357142857142858,
"grad_norm": 0.007212989963591099,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7710
},
{
"epoch": 18.38095238095238,
"grad_norm": 0.017439018934965134,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7720
},
{
"epoch": 18.404761904761905,
"grad_norm": 0.0020984190050512552,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7730
},
{
"epoch": 18.428571428571427,
"grad_norm": 0.011482183821499348,
"learning_rate": 5e-05,
"loss": 0.0009,
"step": 7740
},
{
"epoch": 18.452380952380953,
"grad_norm": 0.006130084861069918,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7750
},
{
"epoch": 18.476190476190474,
"grad_norm": 0.018064623698592186,
"learning_rate": 5e-05,
"loss": 0.0011,
"step": 7760
},
{
"epoch": 18.5,
"grad_norm": 0.007949289865791798,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7770
},
{
"epoch": 18.523809523809526,
"grad_norm": 0.0036398067604750395,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7780
},
{
"epoch": 18.547619047619047,
"grad_norm": 0.021670356392860413,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7790
},
{
"epoch": 18.571428571428573,
"grad_norm": 0.0020873206667602062,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7800
},
{
"epoch": 18.595238095238095,
"grad_norm": 0.007899454794824123,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7810
},
{
"epoch": 18.61904761904762,
"grad_norm": 0.03517717495560646,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7820
},
{
"epoch": 18.642857142857142,
"grad_norm": 0.010151904076337814,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 7830
},
{
"epoch": 18.666666666666668,
"grad_norm": 0.001768286689184606,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7840
},
{
"epoch": 18.69047619047619,
"grad_norm": 0.005438578315079212,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7850
},
{
"epoch": 18.714285714285715,
"grad_norm": 0.01367899402976036,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7860
},
{
"epoch": 18.738095238095237,
"grad_norm": 0.005496785510331392,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7870
},
{
"epoch": 18.761904761904763,
"grad_norm": 0.010219060815870762,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7880
},
{
"epoch": 18.785714285714285,
"grad_norm": 0.005748805124312639,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7890
},
{
"epoch": 18.80952380952381,
"grad_norm": 0.0068986667320132256,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7900
},
{
"epoch": 18.833333333333332,
"grad_norm": 0.008341987617313862,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 7910
},
{
"epoch": 18.857142857142858,
"grad_norm": 0.015715043991804123,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7920
},
{
"epoch": 18.88095238095238,
"grad_norm": 0.009235808625817299,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7930
},
{
"epoch": 18.904761904761905,
"grad_norm": 0.004791698418557644,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 7940
},
{
"epoch": 18.928571428571427,
"grad_norm": 0.020587580278515816,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 7950
},
{
"epoch": 18.952380952380953,
"grad_norm": 0.0022148280404508114,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7960
},
{
"epoch": 18.976190476190474,
"grad_norm": 0.0040055266581475735,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7970
},
{
"epoch": 19.0,
"grad_norm": 0.0041480157524347305,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 7980
},
{
"epoch": 19.023809523809526,
"grad_norm": 0.009851422160863876,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 7990
},
{
"epoch": 19.047619047619047,
"grad_norm": 0.029928863048553467,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8000
},
{
"epoch": 19.071428571428573,
"grad_norm": 0.005271303467452526,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8010
},
{
"epoch": 19.095238095238095,
"grad_norm": 0.011121473275125027,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8020
},
{
"epoch": 19.11904761904762,
"grad_norm": 0.002328911330550909,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8030
},
{
"epoch": 19.142857142857142,
"grad_norm": 0.0005802198429591954,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8040
},
{
"epoch": 19.166666666666668,
"grad_norm": 0.003331305691972375,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8050
},
{
"epoch": 19.19047619047619,
"grad_norm": 0.0036068561021238565,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8060
},
{
"epoch": 19.214285714285715,
"grad_norm": 0.008253911510109901,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8070
},
{
"epoch": 19.238095238095237,
"grad_norm": 0.023871315643191338,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8080
},
{
"epoch": 19.261904761904763,
"grad_norm": 0.004133463837206364,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8090
},
{
"epoch": 19.285714285714285,
"grad_norm": 0.0016440710751339793,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8100
},
{
"epoch": 19.30952380952381,
"grad_norm": 0.004808527417480946,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8110
},
{
"epoch": 19.333333333333332,
"grad_norm": 0.03961317241191864,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8120
},
{
"epoch": 19.357142857142858,
"grad_norm": 0.021009011194109917,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8130
},
{
"epoch": 19.38095238095238,
"grad_norm": 0.009781712666153908,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8140
},
{
"epoch": 19.404761904761905,
"grad_norm": 0.0019496449967846274,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 8150
},
{
"epoch": 19.428571428571427,
"grad_norm": 0.00960745383054018,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 8160
},
{
"epoch": 19.452380952380953,
"grad_norm": 0.008466621860861778,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8170
},
{
"epoch": 19.476190476190474,
"grad_norm": 0.014924910850822926,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 8180
},
{
"epoch": 19.5,
"grad_norm": 0.006802048999816179,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 8190
},
{
"epoch": 19.523809523809526,
"grad_norm": 0.0138113833963871,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8200
},
{
"epoch": 19.547619047619047,
"grad_norm": 0.01139379758387804,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8210
},
{
"epoch": 19.571428571428573,
"grad_norm": 0.03176883980631828,
"learning_rate": 5e-05,
"loss": 0.0008,
"step": 8220
},
{
"epoch": 19.595238095238095,
"grad_norm": 0.007066241931170225,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8230
},
{
"epoch": 19.61904761904762,
"grad_norm": 0.001747833681292832,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 8240
},
{
"epoch": 19.642857142857142,
"grad_norm": 0.004946041852235794,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8250
},
{
"epoch": 19.666666666666668,
"grad_norm": 0.0037244544364511967,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 8260
},
{
"epoch": 19.69047619047619,
"grad_norm": 0.007341946009546518,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8270
},
{
"epoch": 19.714285714285715,
"grad_norm": 0.002960850251838565,
"learning_rate": 5e-05,
"loss": 0.0007,
"step": 8280
},
{
"epoch": 19.738095238095237,
"grad_norm": 0.0019498078618198633,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8290
},
{
"epoch": 19.761904761904763,
"grad_norm": 0.017022594809532166,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 8300
},
{
"epoch": 19.785714285714285,
"grad_norm": 0.001329585094936192,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8310
},
{
"epoch": 19.80952380952381,
"grad_norm": 0.003296843497082591,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8320
},
{
"epoch": 19.833333333333332,
"grad_norm": 0.015899814665317535,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8330
},
{
"epoch": 19.857142857142858,
"grad_norm": 0.010661507025361061,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8340
},
{
"epoch": 19.88095238095238,
"grad_norm": 0.0018181676277890801,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8350
},
{
"epoch": 19.904761904761905,
"grad_norm": 0.029869144782423973,
"learning_rate": 5e-05,
"loss": 0.0006,
"step": 8360
},
{
"epoch": 19.928571428571427,
"grad_norm": 0.016890952363610268,
"learning_rate": 5e-05,
"loss": 0.0002,
"step": 8370
},
{
"epoch": 19.952380952380953,
"grad_norm": 0.001414968748576939,
"learning_rate": 5e-05,
"loss": 0.0003,
"step": 8380
},
{
"epoch": 19.976190476190474,
"grad_norm": 0.0023363372310996056,
"learning_rate": 5e-05,
"loss": 0.0004,
"step": 8390
},
{
"epoch": 20.0,
"grad_norm": 0.001184182707220316,
"learning_rate": 5e-05,
"loss": 0.0005,
"step": 8400
}
],
"logging_steps": 10,
"max_steps": 8400,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2637663864989696e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}