{
  "best_global_step": 100,
  "best_metric": 0.5112941265106201,
  "best_model_checkpoint": "/root/autodl-tmp/model/lora-textui/stage0_caption-v2/checkpoint-100",
  "epoch": 3.458149779735683,
  "eval_steps": 10,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03524229074889868,
      "grad_norm": 1.2463780641555786,
      "learning_rate": 0.0,
      "loss": 1.9336,
      "step": 1
    },
    {
      "epoch": 0.07048458149779736,
      "grad_norm": 1.3275448083877563,
      "learning_rate": 1e-05,
      "loss": 2.048,
      "step": 2
    },
    {
      "epoch": 0.10572687224669604,
      "grad_norm": 1.2619495391845703,
      "learning_rate": 2e-05,
      "loss": 1.9311,
      "step": 3
    },
    {
      "epoch": 0.14096916299559473,
      "grad_norm": 1.2719473838806152,
      "learning_rate": 3e-05,
      "loss": 1.9814,
      "step": 4
    },
    {
      "epoch": 0.1762114537444934,
      "grad_norm": 1.3939034938812256,
      "learning_rate": 4e-05,
      "loss": 1.9955,
      "step": 5
    },
    {
      "epoch": 0.21145374449339208,
      "grad_norm": 1.3475849628448486,
      "learning_rate": 5e-05,
      "loss": 1.874,
      "step": 6
    },
    {
      "epoch": 0.24669603524229075,
      "grad_norm": 1.2599456310272217,
      "learning_rate": 6e-05,
      "loss": 1.7726,
      "step": 7
    },
    {
      "epoch": 0.28193832599118945,
      "grad_norm": 1.1231255531311035,
      "learning_rate": 7e-05,
      "loss": 1.6056,
      "step": 8
    },
    {
      "epoch": 0.31718061674008813,
      "grad_norm": 0.926201581954956,
      "learning_rate": 8e-05,
      "loss": 1.4883,
      "step": 9
    },
    {
      "epoch": 0.3524229074889868,
      "grad_norm": 0.8478774428367615,
      "learning_rate": 9e-05,
      "loss": 1.3543,
      "step": 10
    },
    {
      "epoch": 0.3524229074889868,
      "eval_loss": 1.2461069822311401,
      "eval_runtime": 25.3282,
      "eval_samples_per_second": 7.975,
      "eval_steps_per_second": 0.513,
      "step": 10
    },
    {
      "epoch": 0.3876651982378855,
      "grad_norm": 0.8932197093963623,
      "learning_rate": 0.0001,
      "loss": 1.253,
      "step": 11
    },
    {
      "epoch": 0.42290748898678415,
      "grad_norm": 0.9408664703369141,
      "learning_rate": 0.00011000000000000002,
      "loss": 1.1504,
      "step": 12
    },
    {
      "epoch": 0.4581497797356828,
      "grad_norm": 0.7685560584068298,
      "learning_rate": 0.00012,
      "loss": 1.1904,
      "step": 13
    },
    {
      "epoch": 0.4933920704845815,
      "grad_norm": 1.0110777616500854,
      "learning_rate": 0.00013000000000000002,
      "loss": 1.065,
      "step": 14
    },
    {
      "epoch": 0.5286343612334802,
      "grad_norm": 1.0698988437652588,
      "learning_rate": 0.00014,
      "loss": 1.0323,
      "step": 15
    },
    {
      "epoch": 0.5638766519823789,
      "grad_norm": 0.9592034220695496,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.967,
      "step": 16
    },
    {
      "epoch": 0.5991189427312775,
      "grad_norm": 0.515692412853241,
      "learning_rate": 0.00016,
      "loss": 0.9886,
      "step": 17
    },
    {
      "epoch": 0.6343612334801763,
      "grad_norm": 0.4879838824272156,
      "learning_rate": 0.00017,
      "loss": 0.9052,
      "step": 18
    },
    {
      "epoch": 0.6696035242290749,
      "grad_norm": 0.5619950294494629,
      "learning_rate": 0.00018,
      "loss": 0.8916,
      "step": 19
    },
    {
      "epoch": 0.7048458149779736,
      "grad_norm": 0.5306786298751831,
      "learning_rate": 0.00019,
      "loss": 0.8508,
      "step": 20
    },
    {
      "epoch": 0.7048458149779736,
      "eval_loss": 0.8256819844245911,
      "eval_runtime": 25.3172,
      "eval_samples_per_second": 7.979,
      "eval_steps_per_second": 0.513,
      "step": 20
    },
    {
      "epoch": 0.7400881057268722,
      "grad_norm": 0.45645713806152344,
      "learning_rate": 0.0002,
      "loss": 0.837,
      "step": 21
    },
    {
      "epoch": 0.775330396475771,
      "grad_norm": 0.3715068995952606,
      "learning_rate": 0.0001999229036240723,
      "loss": 0.8872,
      "step": 22
    },
    {
      "epoch": 0.8105726872246696,
      "grad_norm": 0.3798660337924957,
      "learning_rate": 0.0001996917333733128,
      "loss": 0.8076,
      "step": 23
    },
    {
      "epoch": 0.8458149779735683,
      "grad_norm": 0.4133865237236023,
      "learning_rate": 0.00019930684569549264,
      "loss": 0.8181,
      "step": 24
    },
    {
      "epoch": 0.8810572687224669,
      "grad_norm": 0.3884609043598175,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.8081,
      "step": 25
    },
    {
      "epoch": 0.9162995594713657,
      "grad_norm": 0.3344547748565674,
      "learning_rate": 0.00019807852804032305,
      "loss": 0.7206,
      "step": 26
    },
    {
      "epoch": 0.9515418502202643,
      "grad_norm": 0.2715914845466614,
      "learning_rate": 0.00019723699203976766,
      "loss": 0.7821,
      "step": 27
    },
    {
      "epoch": 0.986784140969163,
      "grad_norm": 0.3191257417201996,
      "learning_rate": 0.00019624552364536473,
      "loss": 0.7356,
      "step": 28
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.43808606266975403,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.7774,
      "step": 29
    },
    {
      "epoch": 1.0352422907488987,
      "grad_norm": 0.32706010341644287,
      "learning_rate": 0.00019381913359224842,
      "loss": 0.7069,
      "step": 30
    },
    {
      "epoch": 1.0352422907488987,
      "eval_loss": 0.6948859691619873,
      "eval_runtime": 25.3163,
      "eval_samples_per_second": 7.979,
      "eval_steps_per_second": 0.514,
      "step": 30
    },
    {
      "epoch": 1.0704845814977975,
      "grad_norm": 0.2936725616455078,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.7228,
      "step": 31
    },
    {
      "epoch": 1.105726872246696,
      "grad_norm": 0.3039261996746063,
      "learning_rate": 0.00019081431738250814,
      "loss": 0.6692,
      "step": 32
    },
    {
      "epoch": 1.1409691629955947,
      "grad_norm": 0.29901978373527527,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.6742,
      "step": 33
    },
    {
      "epoch": 1.1762114537444934,
      "grad_norm": 0.32121536135673523,
      "learning_rate": 0.00018724960070727972,
      "loss": 0.7012,
      "step": 34
    },
    {
      "epoch": 1.2114537444933922,
      "grad_norm": 0.3143290579319,
      "learning_rate": 0.00018526401643540922,
      "loss": 0.6594,
      "step": 35
    },
    {
      "epoch": 1.2466960352422907,
      "grad_norm": 0.3040238320827484,
      "learning_rate": 0.00018314696123025454,
      "loss": 0.6723,
      "step": 36
    },
    {
      "epoch": 1.2819383259911894,
      "grad_norm": 0.2974918782711029,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.6514,
      "step": 37
    },
    {
      "epoch": 1.3171806167400881,
      "grad_norm": 0.2963806390762329,
      "learning_rate": 0.00017853169308807448,
      "loss": 0.6585,
      "step": 38
    },
    {
      "epoch": 1.3524229074889869,
      "grad_norm": 0.2951265871524811,
      "learning_rate": 0.0001760405965600031,
      "loss": 0.6339,
      "step": 39
    },
    {
      "epoch": 1.3876651982378854,
      "grad_norm": 0.3203594386577606,
      "learning_rate": 0.00017343225094356855,
      "loss": 0.655,
      "step": 40
    },
    {
      "epoch": 1.3876651982378854,
      "eval_loss": 0.6199031472206116,
      "eval_runtime": 25.3133,
      "eval_samples_per_second": 7.98,
      "eval_steps_per_second": 0.514,
      "step": 40
    },
    {
      "epoch": 1.422907488986784,
      "grad_norm": 0.29003453254699707,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.6334,
      "step": 41
    },
    {
      "epoch": 1.4581497797356828,
      "grad_norm": 0.29403063654899597,
      "learning_rate": 0.0001678800745532942,
      "loss": 0.644,
      "step": 42
    },
    {
      "epoch": 1.4933920704845816,
      "grad_norm": 0.30763620138168335,
      "learning_rate": 0.00016494480483301836,
      "loss": 0.5925,
      "step": 43
    },
    {
      "epoch": 1.5286343612334803,
      "grad_norm": 0.3372487425804138,
      "learning_rate": 0.00016190939493098344,
      "loss": 0.6486,
      "step": 44
    },
    {
      "epoch": 1.5638766519823788,
      "grad_norm": 0.2898256480693817,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.6039,
      "step": 45
    },
    {
      "epoch": 1.5991189427312775,
      "grad_norm": 0.2907792627811432,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.6244,
      "step": 46
    },
    {
      "epoch": 1.6343612334801763,
      "grad_norm": 0.3023645579814911,
      "learning_rate": 0.0001522498564715949,
      "loss": 0.5672,
      "step": 47
    },
    {
      "epoch": 1.6696035242290748,
      "grad_norm": 0.3130245804786682,
      "learning_rate": 0.00014886212414969553,
      "loss": 0.6356,
      "step": 48
    },
    {
      "epoch": 1.7048458149779737,
      "grad_norm": 0.3192823529243469,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.6105,
      "step": 49
    },
    {
      "epoch": 1.7400881057268722,
      "grad_norm": 0.3187403082847595,
      "learning_rate": 0.0001418659737537428,
      "loss": 0.6023,
      "step": 50
    },
    {
      "epoch": 1.7400881057268722,
      "eval_loss": 0.5753047466278076,
      "eval_runtime": 25.3636,
      "eval_samples_per_second": 7.964,
      "eval_steps_per_second": 0.513,
      "step": 50
    },
    {
      "epoch": 1.775330396475771,
      "grad_norm": 0.3186057507991791,
      "learning_rate": 0.000138268343236509,
      "loss": 0.6216,
      "step": 51
    },
    {
      "epoch": 1.8105726872246697,
      "grad_norm": 0.3059786856174469,
      "learning_rate": 0.0001346117057077493,
      "loss": 0.6124,
      "step": 52
    },
    {
      "epoch": 1.8458149779735682,
      "grad_norm": 0.2928261160850525,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.5445,
      "step": 53
    },
    {
      "epoch": 1.881057268722467,
      "grad_norm": 0.2955411374568939,
      "learning_rate": 0.00012714404498650743,
      "loss": 0.5508,
      "step": 54
    },
    {
      "epoch": 1.9162995594713657,
      "grad_norm": 0.30531296133995056,
      "learning_rate": 0.00012334453638559057,
      "loss": 0.5522,
      "step": 55
    },
    {
      "epoch": 1.9515418502202642,
      "grad_norm": 0.3257448375225067,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.5569,
      "step": 56
    },
    {
      "epoch": 1.9867841409691631,
      "grad_norm": 0.29654088616371155,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.5307,
      "step": 57
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.46664145588874817,
      "learning_rate": 0.00011175373974578378,
      "loss": 0.6131,
      "step": 58
    },
    {
      "epoch": 2.0352422907488985,
      "grad_norm": 0.3071623742580414,
      "learning_rate": 0.0001078459095727845,
      "loss": 0.4965,
      "step": 59
    },
    {
      "epoch": 2.0704845814977975,
      "grad_norm": 0.3435165286064148,
      "learning_rate": 0.00010392598157590688,
      "loss": 0.5086,
      "step": 60
    },
    {
      "epoch": 2.0704845814977975,
      "eval_loss": 0.5449034571647644,
      "eval_runtime": 25.365,
      "eval_samples_per_second": 7.964,
      "eval_steps_per_second": 0.513,
      "step": 60
    },
    {
      "epoch": 2.105726872246696,
      "grad_norm": 0.3053547739982605,
      "learning_rate": 0.0001,
      "loss": 0.5208,
      "step": 61
    },
    {
      "epoch": 2.140969162995595,
      "grad_norm": 0.3197723627090454,
      "learning_rate": 9.607401842409317e-05,
      "loss": 0.5298,
      "step": 62
    },
    {
      "epoch": 2.1762114537444934,
      "grad_norm": 0.31069523096084595,
      "learning_rate": 9.215409042721552e-05,
      "loss": 0.5025,
      "step": 63
    },
    {
      "epoch": 2.211453744493392,
      "grad_norm": 0.3025587797164917,
      "learning_rate": 8.824626025421626e-05,
      "loss": 0.4856,
      "step": 64
    },
    {
      "epoch": 2.246696035242291,
      "grad_norm": 0.32704848051071167,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.5118,
      "step": 65
    },
    {
      "epoch": 2.2819383259911894,
      "grad_norm": 0.3421045243740082,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.5256,
      "step": 66
    },
    {
      "epoch": 2.317180616740088,
      "grad_norm": 0.3493560552597046,
      "learning_rate": 7.66554636144095e-05,
      "loss": 0.5396,
      "step": 67
    },
    {
      "epoch": 2.352422907488987,
      "grad_norm": 0.34267452359199524,
      "learning_rate": 7.285595501349258e-05,
      "loss": 0.5633,
      "step": 68
    },
    {
      "epoch": 2.3876651982378854,
      "grad_norm": 0.34627947211265564,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.5233,
      "step": 69
    },
    {
      "epoch": 2.4229074889867843,
      "grad_norm": 0.3640250265598297,
      "learning_rate": 6.538829429225069e-05,
      "loss": 0.5638,
      "step": 70
    },
    {
      "epoch": 2.4229074889867843,
      "eval_loss": 0.5274443030357361,
      "eval_runtime": 25.3671,
      "eval_samples_per_second": 7.963,
      "eval_steps_per_second": 0.512,
      "step": 70
    },
    {
      "epoch": 2.458149779735683,
      "grad_norm": 0.3336215913295746,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.5003,
      "step": 71
    },
    {
      "epoch": 2.4933920704845813,
      "grad_norm": 0.3200637102127075,
      "learning_rate": 5.8134026246257225e-05,
      "loss": 0.4974,
      "step": 72
    },
    {
      "epoch": 2.5286343612334803,
      "grad_norm": 0.3232054114341736,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.5325,
      "step": 73
    },
    {
      "epoch": 2.563876651982379,
      "grad_norm": 0.327750027179718,
      "learning_rate": 5.113787585030454e-05,
      "loss": 0.4323,
      "step": 74
    },
    {
      "epoch": 2.5991189427312777,
      "grad_norm": 0.325334757566452,
      "learning_rate": 4.7750143528405126e-05,
      "loss": 0.4905,
      "step": 75
    },
    {
      "epoch": 2.6343612334801763,
      "grad_norm": 0.3405950665473938,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.4649,
      "step": 76
    },
    {
      "epoch": 2.6696035242290748,
      "grad_norm": 0.35329973697662354,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.5379,
      "step": 77
    },
    {
      "epoch": 2.7048458149779737,
      "grad_norm": 0.34212884306907654,
      "learning_rate": 3.8090605069016595e-05,
      "loss": 0.4885,
      "step": 78
    },
    {
      "epoch": 2.7400881057268722,
      "grad_norm": 0.33281591534614563,
      "learning_rate": 3.5055195166981645e-05,
      "loss": 0.4923,
      "step": 79
    },
    {
      "epoch": 2.7753303964757707,
      "grad_norm": 0.32896408438682556,
      "learning_rate": 3.211992544670582e-05,
      "loss": 0.4747,
      "step": 80
    },
    {
      "epoch": 2.7753303964757707,
      "eval_loss": 0.5167238712310791,
      "eval_runtime": 25.36,
      "eval_samples_per_second": 7.965,
      "eval_steps_per_second": 0.513,
      "step": 80
    },
    {
      "epoch": 2.8105726872246697,
      "grad_norm": 0.34730497002601624,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.4887,
      "step": 81
    },
    {
      "epoch": 2.845814977973568,
      "grad_norm": 0.35023897886276245,
      "learning_rate": 2.6567749056431467e-05,
      "loss": 0.4812,
      "step": 82
    },
    {
      "epoch": 2.8810572687224667,
      "grad_norm": 0.34714555740356445,
      "learning_rate": 2.3959403439996907e-05,
      "loss": 0.4947,
      "step": 83
    },
    {
      "epoch": 2.9162995594713657,
      "grad_norm": 0.3485592007637024,
      "learning_rate": 2.146830691192553e-05,
      "loss": 0.5061,
      "step": 84
    },
    {
      "epoch": 2.951541850220264,
      "grad_norm": 0.36132875084877014,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.4976,
      "step": 85
    },
    {
      "epoch": 2.986784140969163,
      "grad_norm": 0.3450870215892792,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.459,
      "step": 86
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.5119443535804749,
      "learning_rate": 1.4735983564590783e-05,
      "loss": 0.4275,
      "step": 87
    },
    {
      "epoch": 3.0352422907488985,
      "grad_norm": 0.325406551361084,
      "learning_rate": 1.2750399292720283e-05,
      "loss": 0.4549,
      "step": 88
    },
    {
      "epoch": 3.0704845814977975,
      "grad_norm": 0.33169472217559814,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.4918,
      "step": 89
    },
    {
      "epoch": 3.105726872246696,
      "grad_norm": 0.3156157433986664,
      "learning_rate": 9.185682617491863e-06,
      "loss": 0.458,
      "step": 90
    },
    {
      "epoch": 3.105726872246696,
      "eval_loss": 0.5118579268455505,
      "eval_runtime": 25.3571,
      "eval_samples_per_second": 7.966,
      "eval_steps_per_second": 0.513,
      "step": 90
    },
    {
      "epoch": 3.140969162995595,
      "grad_norm": 0.34081166982650757,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.4861,
      "step": 91
    },
    {
      "epoch": 3.1762114537444934,
      "grad_norm": 0.32250353693962097,
      "learning_rate": 6.180866407751595e-06,
      "loss": 0.4881,
      "step": 92
    },
    {
      "epoch": 3.211453744493392,
      "grad_norm": 0.3292059600353241,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.4524,
      "step": 93
    },
    {
      "epoch": 3.246696035242291,
      "grad_norm": 0.3330952525138855,
      "learning_rate": 3.7544763546352834e-06,
      "loss": 0.4678,
      "step": 94
    },
    {
      "epoch": 3.2819383259911894,
      "grad_norm": 0.3274575173854828,
      "learning_rate": 2.7630079602323442e-06,
      "loss": 0.5017,
      "step": 95
    },
    {
      "epoch": 3.317180616740088,
      "grad_norm": 0.3240121006965637,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.4615,
      "step": 96
    },
    {
      "epoch": 3.352422907488987,
      "grad_norm": 0.34071552753448486,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.4972,
      "step": 97
    },
    {
      "epoch": 3.3876651982378854,
      "grad_norm": 0.3293885588645935,
      "learning_rate": 6.931543045073708e-07,
      "loss": 0.4188,
      "step": 98
    },
    {
      "epoch": 3.4229074889867843,
      "grad_norm": 0.32528501749038696,
      "learning_rate": 3.0826662668720364e-07,
      "loss": 0.4577,
      "step": 99
    },
    {
      "epoch": 3.458149779735683,
      "grad_norm": 0.35076647996902466,
      "learning_rate": 7.709637592770991e-08,
      "loss": 0.4505,
      "step": 100
    },
    {
      "epoch": 3.458149779735683,
      "eval_loss": 0.5112941265106201,
      "eval_runtime": 25.3907,
      "eval_samples_per_second": 7.956,
      "eval_steps_per_second": 0.512,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 30,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.237513300119388e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}