{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.2609637479559113,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.054367447490814874,
      "grad_norm": 2.0497019290924072,
      "learning_rate": 2.2727272727272728e-06,
      "loss": 0.3348,
      "mean_token_accuracy": 0.036216020046958876,
      "step": 10
    },
    {
      "epoch": 0.10873489498162975,
      "grad_norm": 3.0014805793762207,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.2845,
      "mean_token_accuracy": 0.03815823743243527,
      "step": 20
    },
    {
      "epoch": 0.16310234247244462,
      "grad_norm": 2.813481569290161,
      "learning_rate": 4.998433870444026e-06,
      "loss": 0.2715,
      "mean_token_accuracy": 0.03921769789994869,
      "step": 30
    },
    {
      "epoch": 0.2174697899632595,
      "grad_norm": 1.4481842517852783,
      "learning_rate": 4.992074831939997e-06,
      "loss": 0.2675,
      "mean_token_accuracy": 0.03833619062224898,
      "step": 40
    },
    {
      "epoch": 0.2718372374540744,
      "grad_norm": 1.9870078563690186,
      "learning_rate": 4.980837439924479e-06,
      "loss": 0.2632,
      "mean_token_accuracy": 0.039046796466209344,
      "step": 50
    },
    {
      "epoch": 0.32620468494488924,
      "grad_norm": 1.224491834640503,
      "learning_rate": 4.9647436921200514e-06,
      "loss": 0.2593,
      "mean_token_accuracy": 0.04083767885422276,
      "step": 60
    },
    {
      "epoch": 0.38057213243570415,
      "grad_norm": 1.2895762920379639,
      "learning_rate": 4.943825092793806e-06,
      "loss": 0.2574,
      "mean_token_accuracy": 0.040451897433194973,
      "step": 70
    },
    {
      "epoch": 0.434939579926519,
      "grad_norm": 1.4011473655700684,
      "learning_rate": 4.91812259108626e-06,
      "loss": 0.2545,
      "mean_token_accuracy": 0.041019549200973414,
      "step": 80
    },
    {
      "epoch": 0.4893070274173339,
      "grad_norm": 1.6724004745483398,
      "learning_rate": 4.887686500851499e-06,
      "loss": 0.2487,
      "mean_token_accuracy": 0.042838540498996734,
      "step": 90
    },
    {
      "epoch": 0.5436744749081488,
      "grad_norm": 1.4560455083847046,
      "learning_rate": 4.852576402165436e-06,
      "loss": 0.2506,
      "mean_token_accuracy": 0.042806161533553676,
      "step": 100
    },
    {
      "epoch": 0.5980419223989636,
      "grad_norm": 1.4531595706939697,
      "learning_rate": 4.812861024695024e-06,
      "loss": 0.2459,
      "mean_token_accuracy": 0.04187717779204832,
      "step": 110
    },
    {
      "epoch": 0.6524093698897785,
      "grad_norm": 1.185799241065979,
      "learning_rate": 4.768618113156695e-06,
      "loss": 0.2475,
      "mean_token_accuracy": 0.04067943783993542,
      "step": 120
    },
    {
      "epoch": 0.7067768173805934,
      "grad_norm": 1.970852017402649,
      "learning_rate": 4.719934275127435e-06,
      "loss": 0.247,
      "mean_token_accuracy": 0.04101934070331481,
      "step": 130
    },
    {
      "epoch": 0.7611442648714083,
      "grad_norm": 1.789883017539978,
      "learning_rate": 4.666904811506382e-06,
      "loss": 0.2442,
      "mean_token_accuracy": 0.04314688319063862,
      "step": 140
    },
    {
      "epoch": 0.8155117123622231,
      "grad_norm": 1.707210659980774,
      "learning_rate": 4.609633529958841e-06,
      "loss": 0.2453,
      "mean_token_accuracy": 0.04226606881111365,
      "step": 150
    },
    {
      "epoch": 0.869879159853038,
      "grad_norm": 1.4981927871704102,
      "learning_rate": 4.5482325417079045e-06,
      "loss": 0.2455,
      "mean_token_accuracy": 0.04208922391790111,
      "step": 160
    },
    {
      "epoch": 0.9242466073438529,
      "grad_norm": 1.4780975580215454,
      "learning_rate": 4.482822042071466e-06,
      "loss": 0.242,
      "mean_token_accuracy": 0.043558270454741435,
      "step": 170
    },
    {
      "epoch": 0.9786140548346678,
      "grad_norm": 1.8650013208389282,
      "learning_rate": 4.413530075174245e-06,
      "loss": 0.2445,
      "mean_token_accuracy": 0.04371535095342551,
      "step": 180
    },
    {
      "epoch": 1.0326204684944889,
      "grad_norm": 1.2570964097976685,
      "learning_rate": 4.340492283295396e-06,
      "loss": 0.2383,
      "mean_token_accuracy": 0.04298217990664554,
      "step": 190
    },
    {
      "epoch": 1.0869879159853038,
      "grad_norm": 1.0197902917861938,
      "learning_rate": 4.263851641342383e-06,
      "loss": 0.236,
      "mean_token_accuracy": 0.04432974819592346,
      "step": 200
    },
    {
      "epoch": 1.1413553634761187,
      "grad_norm": 1.1943126916885376,
      "learning_rate": 4.1837581769708755e-06,
      "loss": 0.238,
      "mean_token_accuracy": 0.043376582934797625,
      "step": 210
    },
    {
      "epoch": 1.1957228109669336,
      "grad_norm": 1.7524161338806152,
      "learning_rate": 4.100368676898575e-06,
      "loss": 0.2374,
      "mean_token_accuracy": 0.04293899511212658,
      "step": 220
    },
    {
      "epoch": 1.2500902584577485,
      "grad_norm": 1.563820719718933,
      "learning_rate": 4.013846379987847e-06,
      "loss": 0.2371,
      "mean_token_accuracy": 0.043539136235267506,
      "step": 230
    },
    {
      "epoch": 1.3044577059485634,
      "grad_norm": 1.222346544265747,
      "learning_rate": 3.924360657697987e-06,
      "loss": 0.2388,
      "mean_token_accuracy": 0.043110936877928906,
      "step": 240
    },
    {
      "epoch": 1.358825153439378,
      "grad_norm": 1.827516794204712,
      "learning_rate": 3.832086682532633e-06,
      "loss": 0.2366,
      "mean_token_accuracy": 0.04413067772566137,
      "step": 250
    },
    {
      "epoch": 1.4131926009301932,
      "grad_norm": 1.1535050868988037,
      "learning_rate": 3.7372050851313597e-06,
      "loss": 0.2317,
      "mean_token_accuracy": 0.04412070568432682,
      "step": 260
    },
    {
      "epoch": 1.4675600484210078,
      "grad_norm": 2.219189405441284,
      "learning_rate": 3.639901600676725e-06,
      "loss": 0.2293,
      "mean_token_accuracy": 0.043295259247861394,
      "step": 270
    },
    {
      "epoch": 1.5219274959118227,
      "grad_norm": 1.609122633934021,
      "learning_rate": 3.5403667053089263e-06,
      "loss": 0.2279,
      "mean_token_accuracy": 0.046050211629699335,
      "step": 280
    },
    {
      "epoch": 1.5762949434026376,
      "grad_norm": 1.0812249183654785,
      "learning_rate": 3.4387952432598102e-06,
      "loss": 0.2327,
      "mean_token_accuracy": 0.04309764919016743,
      "step": 290
    },
    {
      "epoch": 1.6306623908934526,
      "grad_norm": 1.4412078857421875,
      "learning_rate": 3.3353860454361398e-06,
      "loss": 0.2345,
      "mean_token_accuracy": 0.042526886053747145,
      "step": 300
    },
    {
      "epoch": 1.6850298383842675,
      "grad_norm": 1.4374982118606567,
      "learning_rate": 3.2303415401987543e-06,
      "loss": 0.2326,
      "mean_token_accuracy": 0.042373882613537715,
      "step": 310
    },
    {
      "epoch": 1.7393972858750821,
      "grad_norm": 1.3343548774719238,
      "learning_rate": 3.1238673570995526e-06,
      "loss": 0.2308,
      "mean_token_accuracy": 0.04209477992808388,
      "step": 320
    },
    {
      "epoch": 1.7937647333658973,
      "grad_norm": 1.3959535360336304,
      "learning_rate": 3.0161719243519848e-06,
      "loss": 0.2265,
      "mean_token_accuracy": 0.04400216135891242,
      "step": 330
    },
    {
      "epoch": 1.848132180856712,
      "grad_norm": 1.9636735916137695,
      "learning_rate": 2.907466060823037e-06,
      "loss": 0.227,
      "mean_token_accuracy": 0.044824491128019874,
      "step": 340
    },
    {
      "epoch": 1.902499628347527,
      "grad_norm": 1.432839274406433,
      "learning_rate": 2.7979625633454005e-06,
      "loss": 0.2281,
      "mean_token_accuracy": 0.043323908064667196,
      "step": 350
    },
    {
      "epoch": 1.9568670758383417,
      "grad_norm": 1.2604482173919678,
      "learning_rate": 2.6878757901576775e-06,
      "loss": 0.2254,
      "mean_token_accuracy": 0.04304394573555328,
      "step": 360
    },
    {
      "epoch": 2.010873489498163,
      "grad_norm": 1.0908658504486084,
      "learning_rate": 2.5774212412880636e-06,
      "loss": 0.2256,
      "mean_token_accuracy": 0.04459014333798497,
      "step": 370
    },
    {
      "epoch": 2.0652409369889777,
      "grad_norm": 1.6308084726333618,
      "learning_rate": 2.4668151367029235e-06,
      "loss": 0.2245,
      "mean_token_accuracy": 0.044322372208444,
      "step": 380
    },
    {
      "epoch": 2.119608384479793,
      "grad_norm": 1.3369026184082031,
      "learning_rate": 2.35627399304605e-06,
      "loss": 0.2188,
      "mean_token_accuracy": 0.0456739331535573,
      "step": 390
    },
    {
      "epoch": 2.1739758319706075,
      "grad_norm": 1.295975685119629,
      "learning_rate": 2.2460141997971695e-06,
      "loss": 0.2193,
      "mean_token_accuracy": 0.04523389639671223,
      "step": 400
    },
    {
      "epoch": 2.2283432794614226,
      "grad_norm": 1.357338309288025,
      "learning_rate": 2.1362515956793717e-06,
      "loss": 0.2192,
      "mean_token_accuracy": 0.04524684594507562,
      "step": 410
    },
    {
      "epoch": 2.2827107269522373,
      "grad_norm": 1.4117406606674194,
      "learning_rate": 2.027201046144677e-06,
      "loss": 0.2191,
      "mean_token_accuracy": 0.044970535605898476,
      "step": 420
    },
    {
      "epoch": 2.337078174443052,
      "grad_norm": 1.1702640056610107,
      "learning_rate": 1.9190760227648183e-06,
      "loss": 0.2199,
      "mean_token_accuracy": 0.04270703578586108,
      "step": 430
    },
    {
      "epoch": 2.391445621933867,
      "grad_norm": 1.1582852602005005,
      "learning_rate": 1.8120881853506179e-06,
      "loss": 0.2177,
      "mean_token_accuracy": 0.04548128471906239,
      "step": 440
    },
    {
      "epoch": 2.445813069424682,
      "grad_norm": 1.424065351486206,
      "learning_rate": 1.7064469676179682e-06,
      "loss": 0.219,
      "mean_token_accuracy": 0.04496145398898079,
      "step": 450
    },
    {
      "epoch": 2.500180516915497,
      "grad_norm": 1.851121425628662,
      "learning_rate": 1.6023591672114992e-06,
      "loss": 0.2163,
      "mean_token_accuracy": 0.04483195860047999,
      "step": 460
    },
    {
      "epoch": 2.5545479644063116,
      "grad_norm": 1.0778027772903442,
      "learning_rate": 1.5000285408884734e-06,
      "loss": 0.2163,
      "mean_token_accuracy": 0.04510914630936895,
      "step": 470
    },
    {
      "epoch": 2.6089154118971267,
      "grad_norm": 1.2224993705749512,
      "learning_rate": 1.3996554056553723e-06,
      "loss": 0.2141,
      "mean_token_accuracy": 0.04724993375784834,
      "step": 480
    },
    {
      "epoch": 2.6632828593879414,
      "grad_norm": 1.496194839477539,
      "learning_rate": 1.3014362466379407e-06,
      "loss": 0.2193,
      "mean_token_accuracy": 0.045281845046338275,
      "step": 490
    },
    {
      "epoch": 2.717650306878756,
      "grad_norm": 1.2077394723892212,
      "learning_rate": 1.2055633324523324e-06,
      "loss": 0.2149,
      "mean_token_accuracy": 0.044335345691433756,
      "step": 500
    },
    {
      "epoch": 2.772017754369571,
      "grad_norm": 1.171938180923462,
      "learning_rate": 1.1122243388302622e-06,
      "loss": 0.2174,
      "mean_token_accuracy": 0.04475436422835628,
      "step": 510
    },
    {
      "epoch": 2.8263852018603863,
      "grad_norm": 1.2405449151992798,
      "learning_rate": 1.0216019812349508e-06,
      "loss": 0.2163,
      "mean_token_accuracy": 0.04440462300572108,
      "step": 520
    },
    {
      "epoch": 2.880752649351201,
      "grad_norm": 1.1590036153793335,
      "learning_rate": 9.338736571870205e-07,
      "loss": 0.2153,
      "mean_token_accuracy": 0.043993269855855034,
      "step": 530
    },
    {
      "epoch": 2.9351200968420157,
      "grad_norm": 1.6510144472122192,
      "learning_rate": 8.492110990005228e-07,
      "loss": 0.2127,
      "mean_token_accuracy": 0.045991620956010594,
      "step": 540
    },
    {
      "epoch": 2.989487544332831,
      "grad_norm": 1.291462779045105,
      "learning_rate": 7.677800376088657e-07,
      "loss": 0.2109,
      "mean_token_accuracy": 0.045755728109907065,
      "step": 550
    },
    {
      "epoch": 3.043493957992652,
      "grad_norm": 1.295513391494751,
      "learning_rate": 6.897398781387299e-07,
      "loss": 0.209,
      "mean_token_accuracy": 0.04398122681887118,
      "step": 560
    },
    {
      "epoch": 3.097861405483467,
      "grad_norm": 1.6088683605194092,
      "learning_rate": 6.152433878670485e-07,
      "loss": 0.2053,
      "mean_token_accuracy": 0.04704667659716506,
      "step": 570
    },
    {
      "epoch": 3.1522288529742815,
      "grad_norm": 1.117344617843628,
      "learning_rate": 5.444363971718875e-07,
      "loss": 0.2127,
      "mean_token_accuracy": 0.04461234014015645,
      "step": 580
    },
    {
      "epoch": 3.2065963004650966,
      "grad_norm": 1.4021047353744507,
      "learning_rate": 4.774575140626317e-07,
      "loss": 0.2132,
      "mean_token_accuracy": 0.044966605161243935,
      "step": 590
    },
    {
      "epoch": 3.2609637479559113,
      "grad_norm": 1.2901127338409424,
      "learning_rate": 4.144378528483009e-07,
      "loss": 0.2096,
      "mean_token_accuracy": 0.04288893376269698,
      "step": 600
    }
  ],
  "logging_steps": 10,
  "max_steps": 732,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.169216325675168e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}