paperstack_document_data_retrieval/summarizer/tmp_multilingual_results/checkpoint-7500/trainer_state.json
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.19672131147541,
  "eval_steps": 500,
  "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0546448087431694,
      "grad_norm": 2.1467645168304443,
      "learning_rate": 0.0004973224043715847,
      "loss": 5.4078,
      "step": 50
    },
    {
      "epoch": 0.1092896174863388,
      "grad_norm": 0.8721267580986023,
      "learning_rate": 0.0004945901639344262,
      "loss": 1.2654,
      "step": 100
    },
    {
      "epoch": 0.16393442622950818,
      "grad_norm": 0.8103071451187134,
      "learning_rate": 0.0004918579234972678,
      "loss": 1.0538,
      "step": 150
    },
    {
      "epoch": 0.2185792349726776,
      "grad_norm": 1.3929318189620972,
      "learning_rate": 0.0004891256830601093,
      "loss": 1.0236,
      "step": 200
    },
    {
      "epoch": 0.273224043715847,
      "grad_norm": 0.970960259437561,
      "learning_rate": 0.00048639344262295083,
      "loss": 1.075,
      "step": 250
    },
    {
      "epoch": 0.32786885245901637,
      "grad_norm": 0.9727946519851685,
      "learning_rate": 0.00048366120218579234,
      "loss": 1.0434,
      "step": 300
    },
    {
      "epoch": 0.3825136612021858,
      "grad_norm": 0.6411604285240173,
      "learning_rate": 0.0004809289617486339,
      "loss": 0.9285,
      "step": 350
    },
    {
      "epoch": 0.4371584699453552,
      "grad_norm": 0.8864787817001343,
      "learning_rate": 0.0004781967213114754,
      "loss": 0.9315,
      "step": 400
    },
    {
      "epoch": 0.4918032786885246,
      "grad_norm": 0.8886504173278809,
      "learning_rate": 0.0004754644808743169,
      "loss": 0.8818,
      "step": 450
    },
    {
      "epoch": 0.546448087431694,
      "grad_norm": 0.7289233803749084,
      "learning_rate": 0.0004727322404371585,
      "loss": 0.92,
      "step": 500
    },
    {
      "epoch": 0.6010928961748634,
      "grad_norm": 1.0301029682159424,
      "learning_rate": 0.00047,
      "loss": 0.8222,
      "step": 550
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 0.808475136756897,
      "learning_rate": 0.00046726775956284155,
      "loss": 0.8767,
      "step": 600
    },
    {
      "epoch": 0.7103825136612022,
      "grad_norm": 0.8232861161231995,
      "learning_rate": 0.0004645355191256831,
      "loss": 0.835,
      "step": 650
    },
    {
      "epoch": 0.7650273224043715,
      "grad_norm": 0.9552908539772034,
      "learning_rate": 0.0004618032786885246,
      "loss": 0.8203,
      "step": 700
    },
    {
      "epoch": 0.819672131147541,
      "grad_norm": 0.7888686656951904,
      "learning_rate": 0.00045907103825136613,
      "loss": 0.8296,
      "step": 750
    },
    {
      "epoch": 0.8743169398907104,
      "grad_norm": 0.7473599910736084,
      "learning_rate": 0.0004563387978142077,
      "loss": 0.7801,
      "step": 800
    },
    {
      "epoch": 0.9289617486338798,
      "grad_norm": 0.8543986678123474,
      "learning_rate": 0.0004536065573770492,
      "loss": 0.9248,
      "step": 850
    },
    {
      "epoch": 0.9836065573770492,
      "grad_norm": 0.7910193204879761,
      "learning_rate": 0.0004508743169398907,
      "loss": 0.8386,
      "step": 900
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6989373564720154,
      "eval_runtime": 35.1236,
      "eval_samples_per_second": 23.175,
      "eval_steps_per_second": 2.904,
      "step": 915
    },
    {
      "epoch": 1.0382513661202186,
      "grad_norm": 1.1451815366744995,
      "learning_rate": 0.00044814207650273227,
      "loss": 0.7198,
      "step": 950
    },
    {
      "epoch": 1.092896174863388,
      "grad_norm": 0.7096536755561829,
      "learning_rate": 0.0004454098360655738,
      "loss": 0.7374,
      "step": 1000
    },
    {
      "epoch": 1.1475409836065573,
      "grad_norm": 0.8436596393585205,
      "learning_rate": 0.0004426775956284153,
      "loss": 0.7471,
      "step": 1050
    },
    {
      "epoch": 1.2021857923497268,
      "grad_norm": 0.8844044208526611,
      "learning_rate": 0.00043994535519125685,
      "loss": 0.7072,
      "step": 1100
    },
    {
      "epoch": 1.2568306010928962,
      "grad_norm": 0.9547187089920044,
      "learning_rate": 0.00043721311475409836,
      "loss": 0.7618,
      "step": 1150
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 0.8470160961151123,
      "learning_rate": 0.00043448087431693987,
      "loss": 0.7719,
      "step": 1200
    },
    {
      "epoch": 1.366120218579235,
      "grad_norm": 0.8797179460525513,
      "learning_rate": 0.00043174863387978143,
      "loss": 0.7172,
      "step": 1250
    },
    {
      "epoch": 1.4207650273224044,
      "grad_norm": 0.6831291317939758,
      "learning_rate": 0.00042901639344262294,
      "loss": 0.7254,
      "step": 1300
    },
    {
      "epoch": 1.4754098360655736,
      "grad_norm": 0.7458399534225464,
      "learning_rate": 0.00042628415300546445,
      "loss": 0.7219,
      "step": 1350
    },
    {
      "epoch": 1.530054644808743,
      "grad_norm": 0.7829724550247192,
      "learning_rate": 0.000423551912568306,
      "loss": 0.6969,
      "step": 1400
    },
    {
      "epoch": 1.5846994535519126,
      "grad_norm": 0.914277970790863,
      "learning_rate": 0.0004208196721311475,
      "loss": 0.7088,
      "step": 1450
    },
    {
      "epoch": 1.639344262295082,
      "grad_norm": 0.9189252853393555,
      "learning_rate": 0.00041808743169398913,
      "loss": 0.7574,
      "step": 1500
    },
    {
      "epoch": 1.6939890710382515,
      "grad_norm": 0.8233757019042969,
      "learning_rate": 0.00041535519125683064,
      "loss": 0.7248,
      "step": 1550
    },
    {
      "epoch": 1.748633879781421,
      "grad_norm": 0.5294966101646423,
      "learning_rate": 0.00041262295081967215,
      "loss": 0.7173,
      "step": 1600
    },
    {
      "epoch": 1.8032786885245902,
      "grad_norm": 0.7432788014411926,
      "learning_rate": 0.0004098907103825137,
      "loss": 0.731,
      "step": 1650
    },
    {
      "epoch": 1.8579234972677594,
      "grad_norm": 0.9326347708702087,
      "learning_rate": 0.0004071584699453552,
      "loss": 0.6559,
      "step": 1700
    },
    {
      "epoch": 1.9125683060109289,
      "grad_norm": 0.6357129216194153,
      "learning_rate": 0.00040442622950819673,
      "loss": 0.7079,
      "step": 1750
    },
    {
      "epoch": 1.9672131147540983,
      "grad_norm": 0.6454396843910217,
      "learning_rate": 0.0004016939890710383,
      "loss": 0.6284,
      "step": 1800
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6172024607658386,
      "eval_runtime": 34.4665,
      "eval_samples_per_second": 23.617,
      "eval_steps_per_second": 2.959,
      "step": 1830
    },
    {
      "epoch": 2.021857923497268,
      "grad_norm": 0.9404008388519287,
      "learning_rate": 0.0003989617486338798,
      "loss": 0.6996,
      "step": 1850
    },
    {
      "epoch": 2.0765027322404372,
      "grad_norm": 0.7585016489028931,
      "learning_rate": 0.0003962295081967213,
      "loss": 0.6634,
      "step": 1900
    },
    {
      "epoch": 2.1311475409836067,
      "grad_norm": 0.9319397807121277,
      "learning_rate": 0.00039349726775956287,
      "loss": 0.6439,
      "step": 1950
    },
    {
      "epoch": 2.185792349726776,
      "grad_norm": 0.7812663912773132,
      "learning_rate": 0.0003907650273224044,
      "loss": 0.6807,
      "step": 2000
    },
    {
      "epoch": 2.240437158469945,
      "grad_norm": 0.8016160726547241,
      "learning_rate": 0.0003880327868852459,
      "loss": 0.6121,
      "step": 2050
    },
    {
      "epoch": 2.2950819672131146,
      "grad_norm": 0.5846936106681824,
      "learning_rate": 0.00038530054644808745,
      "loss": 0.6501,
      "step": 2100
    },
    {
      "epoch": 2.349726775956284,
      "grad_norm": 0.6993207931518555,
      "learning_rate": 0.00038256830601092896,
      "loss": 0.6242,
      "step": 2150
    },
    {
      "epoch": 2.4043715846994536,
      "grad_norm": 0.5866222381591797,
      "learning_rate": 0.00037983606557377047,
      "loss": 0.5766,
      "step": 2200
    },
    {
      "epoch": 2.459016393442623,
      "grad_norm": 0.8375122547149658,
      "learning_rate": 0.00037710382513661203,
      "loss": 0.6395,
      "step": 2250
    },
    {
      "epoch": 2.5136612021857925,
      "grad_norm": 0.9567583799362183,
      "learning_rate": 0.00037437158469945354,
      "loss": 0.6829,
      "step": 2300
    },
    {
      "epoch": 2.5683060109289615,
      "grad_norm": 0.829088032245636,
      "learning_rate": 0.00037163934426229505,
      "loss": 0.6138,
      "step": 2350
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 0.7738655805587769,
      "learning_rate": 0.00036890710382513666,
      "loss": 0.5961,
      "step": 2400
    },
    {
      "epoch": 2.6775956284153004,
      "grad_norm": 0.6849051117897034,
      "learning_rate": 0.00036617486338797817,
      "loss": 0.6158,
      "step": 2450
    },
    {
      "epoch": 2.73224043715847,
      "grad_norm": 0.6353682279586792,
      "learning_rate": 0.0003634426229508197,
      "loss": 0.6077,
      "step": 2500
    },
    {
      "epoch": 2.7868852459016393,
      "grad_norm": 0.6507243514060974,
      "learning_rate": 0.00036071038251366124,
      "loss": 0.5881,
      "step": 2550
    },
    {
      "epoch": 2.841530054644809,
      "grad_norm": 0.7680765390396118,
      "learning_rate": 0.00035797814207650275,
      "loss": 0.64,
      "step": 2600
    },
    {
      "epoch": 2.8961748633879782,
      "grad_norm": 0.8768549561500549,
      "learning_rate": 0.00035524590163934426,
      "loss": 0.5946,
      "step": 2650
    },
    {
      "epoch": 2.9508196721311473,
      "grad_norm": 0.9345018267631531,
      "learning_rate": 0.0003525136612021858,
      "loss": 0.5986,
      "step": 2700
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.5772798657417297,
      "eval_runtime": 34.1537,
      "eval_samples_per_second": 23.833,
      "eval_steps_per_second": 2.986,
      "step": 2745
    },
    {
      "epoch": 3.0054644808743167,
      "grad_norm": 0.8109590411186218,
      "learning_rate": 0.00034978142076502733,
      "loss": 0.6056,
      "step": 2750
    },
    {
      "epoch": 3.060109289617486,
      "grad_norm": 1.1651564836502075,
      "learning_rate": 0.00034704918032786884,
      "loss": 0.5321,
      "step": 2800
    },
    {
      "epoch": 3.1147540983606556,
      "grad_norm": 0.9508790969848633,
      "learning_rate": 0.0003443169398907104,
      "loss": 0.5405,
      "step": 2850
    },
    {
      "epoch": 3.169398907103825,
      "grad_norm": 0.7765358686447144,
      "learning_rate": 0.0003415846994535519,
      "loss": 0.6053,
      "step": 2900
    },
    {
      "epoch": 3.2240437158469946,
      "grad_norm": 1.0186572074890137,
      "learning_rate": 0.0003388524590163934,
      "loss": 0.6171,
      "step": 2950
    },
    {
      "epoch": 3.278688524590164,
      "grad_norm": 0.9198819398880005,
      "learning_rate": 0.000336120218579235,
      "loss": 0.5755,
      "step": 3000
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 1.0580278635025024,
      "learning_rate": 0.0003333879781420765,
      "loss": 0.5521,
      "step": 3050
    },
    {
      "epoch": 3.387978142076503,
      "grad_norm": 0.637541651725769,
      "learning_rate": 0.00033065573770491805,
      "loss": 0.5288,
      "step": 3100
    },
    {
      "epoch": 3.442622950819672,
      "grad_norm": 1.3321665525436401,
      "learning_rate": 0.00032792349726775956,
      "loss": 0.57,
      "step": 3150
    },
    {
      "epoch": 3.4972677595628414,
      "grad_norm": 1.1390025615692139,
      "learning_rate": 0.00032519125683060107,
      "loss": 0.588,
      "step": 3200
    },
    {
      "epoch": 3.551912568306011,
      "grad_norm": 0.6929836273193359,
      "learning_rate": 0.00032245901639344263,
      "loss": 0.5508,
      "step": 3250
    },
    {
      "epoch": 3.6065573770491803,
      "grad_norm": 0.6794707179069519,
      "learning_rate": 0.00031972677595628414,
      "loss": 0.5284,
      "step": 3300
    },
    {
      "epoch": 3.66120218579235,
      "grad_norm": 0.7672021985054016,
      "learning_rate": 0.0003169945355191257,
      "loss": 0.5968,
      "step": 3350
    },
    {
      "epoch": 3.7158469945355193,
      "grad_norm": 1.0557798147201538,
      "learning_rate": 0.00031426229508196726,
      "loss": 0.5586,
      "step": 3400
    },
    {
      "epoch": 3.7704918032786887,
      "grad_norm": 0.7945201992988586,
      "learning_rate": 0.00031153005464480877,
      "loss": 0.5042,
      "step": 3450
    },
    {
      "epoch": 3.8251366120218577,
      "grad_norm": 0.49171268939971924,
      "learning_rate": 0.0003087978142076503,
      "loss": 0.5695,
      "step": 3500
    },
    {
      "epoch": 3.879781420765027,
      "grad_norm": 1.0232176780700684,
      "learning_rate": 0.00030606557377049184,
      "loss": 0.5511,
      "step": 3550
    },
    {
      "epoch": 3.9344262295081966,
      "grad_norm": 0.6561440229415894,
      "learning_rate": 0.00030333333333333335,
      "loss": 0.5256,
      "step": 3600
    },
    {
      "epoch": 3.989071038251366,
      "grad_norm": 0.6772050261497498,
      "learning_rate": 0.00030060109289617486,
      "loss": 0.5752,
      "step": 3650
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.5453636050224304,
      "eval_runtime": 18.8015,
      "eval_samples_per_second": 43.294,
      "eval_steps_per_second": 5.425,
      "step": 3660
    },
    {
      "epoch": 4.043715846994536,
      "grad_norm": 0.9626933336257935,
      "learning_rate": 0.0002978688524590164,
      "loss": 0.5659,
      "step": 3700
    },
    {
      "epoch": 4.098360655737705,
      "grad_norm": 0.6350908875465393,
      "learning_rate": 0.00029513661202185793,
      "loss": 0.4663,
      "step": 3750
    },
    {
      "epoch": 4.1530054644808745,
      "grad_norm": 0.862783670425415,
      "learning_rate": 0.00029240437158469944,
      "loss": 0.5265,
      "step": 3800
    },
    {
      "epoch": 4.2076502732240435,
      "grad_norm": 0.845670223236084,
      "learning_rate": 0.000289672131147541,
      "loss": 0.5029,
      "step": 3850
    },
    {
      "epoch": 4.262295081967213,
      "grad_norm": 0.5551162958145142,
      "learning_rate": 0.0002869398907103825,
      "loss": 0.473,
      "step": 3900
    },
    {
      "epoch": 4.316939890710382,
      "grad_norm": 0.8691427707672119,
      "learning_rate": 0.000284207650273224,
      "loss": 0.5257,
      "step": 3950
    },
    {
      "epoch": 4.371584699453552,
      "grad_norm": 0.7771849036216736,
      "learning_rate": 0.0002814754098360656,
      "loss": 0.4816,
      "step": 4000
    },
    {
      "epoch": 4.426229508196721,
      "grad_norm": 0.8042870163917542,
      "learning_rate": 0.0002787431693989071,
      "loss": 0.5424,
      "step": 4050
    },
    {
      "epoch": 4.48087431693989,
      "grad_norm": 0.9805220365524292,
      "learning_rate": 0.0002760109289617486,
      "loss": 0.5323,
      "step": 4100
    },
    {
      "epoch": 4.53551912568306,
      "grad_norm": 0.7407246828079224,
      "learning_rate": 0.00027327868852459016,
      "loss": 0.5581,
      "step": 4150
    },
    {
      "epoch": 4.590163934426229,
      "grad_norm": 0.7939064502716064,
      "learning_rate": 0.00027054644808743167,
      "loss": 0.4608,
      "step": 4200
    },
    {
      "epoch": 4.644808743169399,
      "grad_norm": 0.8826588988304138,
      "learning_rate": 0.00026781420765027323,
      "loss": 0.5058,
      "step": 4250
    },
    {
      "epoch": 4.699453551912568,
      "grad_norm": 0.8143342137336731,
      "learning_rate": 0.0002650819672131148,
      "loss": 0.5476,
      "step": 4300
    },
    {
      "epoch": 4.754098360655737,
      "grad_norm": 0.6828013062477112,
      "learning_rate": 0.0002623497267759563,
      "loss": 0.5289,
      "step": 4350
    },
    {
      "epoch": 4.808743169398907,
      "grad_norm": 0.8838549256324768,
      "learning_rate": 0.0002596174863387978,
      "loss": 0.4789,
      "step": 4400
    },
    {
      "epoch": 4.863387978142076,
      "grad_norm": 1.089988350868225,
      "learning_rate": 0.0002568852459016394,
      "loss": 0.5388,
      "step": 4450
    },
    {
      "epoch": 4.918032786885246,
      "grad_norm": 0.7895328998565674,
      "learning_rate": 0.0002541530054644809,
      "loss": 0.4863,
      "step": 4500
    },
    {
      "epoch": 4.972677595628415,
      "grad_norm": 0.7219722270965576,
      "learning_rate": 0.00025142076502732244,
      "loss": 0.4884,
      "step": 4550
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.5269655585289001,
      "eval_runtime": 82.4268,
      "eval_samples_per_second": 9.875,
      "eval_steps_per_second": 1.237,
      "step": 4575
    },
    {
      "epoch": 5.027322404371585,
      "grad_norm": 0.611998975276947,
      "learning_rate": 0.00024868852459016395,
      "loss": 0.5115,
      "step": 4600
    },
    {
      "epoch": 5.081967213114754,
      "grad_norm": 0.8524764776229858,
      "learning_rate": 0.00024595628415300546,
      "loss": 0.4502,
      "step": 4650
    },
    {
      "epoch": 5.136612021857924,
      "grad_norm": 1.2595255374908447,
      "learning_rate": 0.000243224043715847,
      "loss": 0.4866,
      "step": 4700
    },
    {
      "epoch": 5.191256830601093,
      "grad_norm": 0.887760579586029,
      "learning_rate": 0.00024049180327868853,
      "loss": 0.4813,
      "step": 4750
    },
    {
      "epoch": 5.245901639344262,
      "grad_norm": 0.6199231147766113,
      "learning_rate": 0.00023775956284153004,
      "loss": 0.476,
      "step": 4800
    },
    {
      "epoch": 5.300546448087432,
      "grad_norm": 0.8007299304008484,
      "learning_rate": 0.00023502732240437158,
      "loss": 0.4773,
      "step": 4850
    },
    {
      "epoch": 5.355191256830601,
      "grad_norm": 0.5639681816101074,
      "learning_rate": 0.0002322950819672131,
      "loss": 0.4903,
      "step": 4900
    },
    {
      "epoch": 5.409836065573771,
      "grad_norm": 0.980188250541687,
      "learning_rate": 0.00022956284153005467,
      "loss": 0.5212,
      "step": 4950
    },
    {
      "epoch": 5.46448087431694,
      "grad_norm": 0.8411896228790283,
      "learning_rate": 0.00022683060109289618,
      "loss": 0.4216,
      "step": 5000
    },
    {
      "epoch": 5.51912568306011,
      "grad_norm": 0.9759379029273987,
      "learning_rate": 0.00022409836065573772,
      "loss": 0.47,
      "step": 5050
    },
    {
      "epoch": 5.573770491803279,
      "grad_norm": 0.7654640674591064,
      "learning_rate": 0.00022136612021857925,
      "loss": 0.4756,
      "step": 5100
    },
    {
      "epoch": 5.628415300546449,
      "grad_norm": 0.6740959882736206,
      "learning_rate": 0.00021863387978142076,
      "loss": 0.4836,
      "step": 5150
    },
    {
      "epoch": 5.683060109289618,
      "grad_norm": 0.702583909034729,
      "learning_rate": 0.0002159016393442623,
      "loss": 0.4388,
      "step": 5200
    },
    {
      "epoch": 5.737704918032787,
      "grad_norm": 0.5787151455879211,
      "learning_rate": 0.00021316939890710383,
      "loss": 0.4364,
      "step": 5250
    },
    {
      "epoch": 5.7923497267759565,
      "grad_norm": 0.8957023620605469,
      "learning_rate": 0.00021043715846994534,
      "loss": 0.4574,
      "step": 5300
    },
    {
      "epoch": 5.8469945355191255,
      "grad_norm": 0.6572920680046082,
      "learning_rate": 0.00020770491803278688,
      "loss": 0.5049,
      "step": 5350
    },
    {
      "epoch": 5.901639344262295,
      "grad_norm": 0.7359984517097473,
      "learning_rate": 0.00020497267759562844,
      "loss": 0.4999,
      "step": 5400
    },
    {
      "epoch": 5.956284153005464,
      "grad_norm": 1.1582518815994263,
      "learning_rate": 0.00020224043715846995,
      "loss": 0.4899,
      "step": 5450
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.512095034122467,
      "eval_runtime": 18.8094,
      "eval_samples_per_second": 43.276,
      "eval_steps_per_second": 5.423,
      "step": 5490
    },
    {
      "epoch": 6.0109289617486334,
      "grad_norm": 0.8027153611183167,
      "learning_rate": 0.00019950819672131148,
      "loss": 0.4413,
      "step": 5500
    },
    {
      "epoch": 6.065573770491803,
      "grad_norm": 1.0127980709075928,
      "learning_rate": 0.00019677595628415302,
      "loss": 0.4161,
      "step": 5550
    },
    {
      "epoch": 6.120218579234972,
      "grad_norm": 0.6213232278823853,
      "learning_rate": 0.00019404371584699453,
      "loss": 0.4174,
      "step": 5600
    },
    {
      "epoch": 6.174863387978142,
      "grad_norm": 0.910073459148407,
      "learning_rate": 0.00019131147540983606,
      "loss": 0.4577,
      "step": 5650
    },
    {
      "epoch": 6.229508196721311,
      "grad_norm": 0.6484264135360718,
      "learning_rate": 0.0001885792349726776,
      "loss": 0.4764,
      "step": 5700
    },
    {
      "epoch": 6.284153005464481,
      "grad_norm": 0.6599116325378418,
      "learning_rate": 0.00018584699453551913,
      "loss": 0.4607,
      "step": 5750
    },
    {
      "epoch": 6.33879781420765,
      "grad_norm": 0.6737315058708191,
      "learning_rate": 0.00018311475409836064,
      "loss": 0.4512,
      "step": 5800
    },
    {
      "epoch": 6.39344262295082,
      "grad_norm": 0.798819363117218,
      "learning_rate": 0.0001803825136612022,
      "loss": 0.4477,
      "step": 5850
    },
    {
      "epoch": 6.448087431693989,
      "grad_norm": 0.8825194835662842,
      "learning_rate": 0.00017765027322404374,
      "loss": 0.4602,
      "step": 5900
    },
    {
      "epoch": 6.502732240437158,
      "grad_norm": 0.9620506167411804,
      "learning_rate": 0.00017491803278688525,
      "loss": 0.4387,
      "step": 5950
    },
    {
      "epoch": 6.557377049180328,
      "grad_norm": 0.6066147089004517,
      "learning_rate": 0.00017218579234972678,
      "loss": 0.4325,
      "step": 6000
    },
    {
      "epoch": 6.612021857923497,
      "grad_norm": 1.0289280414581299,
      "learning_rate": 0.00016945355191256832,
      "loss": 0.4678,
      "step": 6050
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 0.849380612373352,
      "learning_rate": 0.00016672131147540983,
      "loss": 0.4462,
      "step": 6100
    },
    {
      "epoch": 6.721311475409836,
      "grad_norm": 0.9880459904670715,
      "learning_rate": 0.00016398907103825136,
      "loss": 0.4425,
      "step": 6150
    },
    {
      "epoch": 6.775956284153006,
      "grad_norm": 0.5747489333152771,
      "learning_rate": 0.0001612568306010929,
      "loss": 0.4123,
      "step": 6200
    },
    {
      "epoch": 6.830601092896175,
      "grad_norm": 0.9202123880386353,
      "learning_rate": 0.0001585245901639344,
      "loss": 0.4367,
      "step": 6250
    },
    {
      "epoch": 6.885245901639344,
      "grad_norm": 0.812148928642273,
      "learning_rate": 0.00015579234972677597,
      "loss": 0.4147,
      "step": 6300
    },
    {
      "epoch": 6.939890710382514,
      "grad_norm": 0.8769338130950928,
      "learning_rate": 0.0001530601092896175,
      "loss": 0.4192,
      "step": 6350
    },
    {
      "epoch": 6.994535519125683,
      "grad_norm": 1.2912615537643433,
      "learning_rate": 0.000150327868852459,
      "loss": 0.4471,
      "step": 6400
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.5004224181175232,
      "eval_runtime": 20.3806,
      "eval_samples_per_second": 39.94,
      "eval_steps_per_second": 5.005,
      "step": 6405
    },
    {
      "epoch": 7.049180327868853,
      "grad_norm": 0.5414749979972839,
      "learning_rate": 0.00014759562841530055,
      "loss": 0.4265,
      "step": 6450
    },
    {
      "epoch": 7.103825136612022,
      "grad_norm": 0.7735195159912109,
      "learning_rate": 0.00014486338797814208,
      "loss": 0.4124,
      "step": 6500
    },
    {
      "epoch": 7.158469945355192,
      "grad_norm": 0.7762187123298645,
      "learning_rate": 0.00014213114754098362,
      "loss": 0.4004,
      "step": 6550
    },
    {
      "epoch": 7.213114754098361,
      "grad_norm": 0.6879094243049622,
      "learning_rate": 0.00013939890710382513,
      "loss": 0.4414,
      "step": 6600
    },
    {
      "epoch": 7.26775956284153,
      "grad_norm": 0.8285422921180725,
      "learning_rate": 0.00013666666666666666,
      "loss": 0.4629,
      "step": 6650
    },
    {
      "epoch": 7.3224043715847,
      "grad_norm": 0.8115780353546143,
      "learning_rate": 0.0001339344262295082,
      "loss": 0.3816,
      "step": 6700
    },
    {
      "epoch": 7.377049180327869,
      "grad_norm": 0.8859063386917114,
      "learning_rate": 0.00013120218579234973,
      "loss": 0.3816,
      "step": 6750
    },
    {
      "epoch": 7.4316939890710385,
      "grad_norm": 0.6960242986679077,
      "learning_rate": 0.00012846994535519127,
      "loss": 0.3909,
      "step": 6800
    },
    {
      "epoch": 7.4863387978142075,
      "grad_norm": 0.8247169852256775,
      "learning_rate": 0.0001257377049180328,
      "loss": 0.4111,
      "step": 6850
    },
    {
      "epoch": 7.540983606557377,
      "grad_norm": 0.5891773700714111,
      "learning_rate": 0.00012300546448087431,
      "loss": 0.4645,
      "step": 6900
    },
    {
      "epoch": 7.595628415300546,
      "grad_norm": 0.6889060139656067,
      "learning_rate": 0.00012027322404371585,
      "loss": 0.3781,
      "step": 6950
    },
    {
      "epoch": 7.6502732240437155,
      "grad_norm": 0.7287388443946838,
      "learning_rate": 0.00011754098360655737,
      "loss": 0.4184,
      "step": 7000
    },
    {
      "epoch": 7.704918032786885,
      "grad_norm": 0.5292376279830933,
      "learning_rate": 0.00011480874316939891,
      "loss": 0.4042,
      "step": 7050
    },
    {
      "epoch": 7.759562841530054,
      "grad_norm": 1.0296603441238403,
      "learning_rate": 0.00011207650273224044,
      "loss": 0.4092,
      "step": 7100
    },
    {
      "epoch": 7.814207650273224,
      "grad_norm": 0.7451539039611816,
      "learning_rate": 0.00010934426229508198,
      "loss": 0.4379,
      "step": 7150
    },
    {
      "epoch": 7.868852459016393,
      "grad_norm": 0.6021469831466675,
      "learning_rate": 0.0001066120218579235,
      "loss": 0.4015,
      "step": 7200
    },
    {
      "epoch": 7.923497267759563,
      "grad_norm": 0.6649101376533508,
      "learning_rate": 0.00010387978142076502,
      "loss": 0.4119,
      "step": 7250
    },
    {
      "epoch": 7.978142076502732,
      "grad_norm": 0.8819006681442261,
      "learning_rate": 0.00010114754098360656,
      "loss": 0.4503,
      "step": 7300
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.4996373653411865,
      "eval_runtime": 38.3302,
      "eval_samples_per_second": 21.236,
      "eval_steps_per_second": 2.661,
      "step": 7320
    },
    {
      "epoch": 8.032786885245901,
      "grad_norm": 0.974883496761322,
      "learning_rate": 9.841530054644809e-05,
      "loss": 0.399,
      "step": 7350
    },
    {
      "epoch": 8.087431693989071,
      "grad_norm": 0.4739116132259369,
      "learning_rate": 9.568306010928961e-05,
      "loss": 0.4358,
      "step": 7400
    },
    {
      "epoch": 8.142076502732241,
      "grad_norm": 0.618143618106842,
      "learning_rate": 9.295081967213115e-05,
      "loss": 0.4221,
      "step": 7450
    },
    {
      "epoch": 8.19672131147541,
      "grad_norm": 0.9425492286682129,
      "learning_rate": 9.021857923497267e-05,
      "loss": 0.4056,
      "step": 7500
    }
  ],
  "logging_steps": 50,
  "max_steps": 9150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.115343028224e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}