{
  "best_global_step": 3000,
  "best_metric": 0.8622779005977237,
  "best_model_checkpoint": "./model_checkpoint\\checkpoint-3000",
  "epoch": 3.927308447937132,
  "eval_steps": 100,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06548788474132286,
      "grad_norm": 0.932673454284668,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 0.6777,
      "step": 50
    },
    {
      "epoch": 0.13097576948264572,
      "grad_norm": 1.4263807535171509,
      "learning_rate": 9.900000000000002e-06,
      "loss": 0.584,
      "step": 100
    },
    {
      "epoch": 0.13097576948264572,
      "eval_accuracy": 0.7383116351428806,
      "eval_loss": 0.4755142033100128,
      "eval_runtime": 16.8317,
      "eval_samples_per_second": 725.593,
      "eval_steps_per_second": 45.39,
      "step": 100
    },
    {
      "epoch": 0.19646365422396855,
      "grad_norm": 2.0286386013031006,
      "learning_rate": 1.49e-05,
      "loss": 0.4077,
      "step": 150
    },
    {
      "epoch": 0.26195153896529144,
      "grad_norm": 1.7711361646652222,
      "learning_rate": 1.9900000000000003e-05,
      "loss": 0.2967,
      "step": 200
    },
    {
      "epoch": 0.26195153896529144,
      "eval_accuracy": 0.8327192336035372,
      "eval_loss": 0.2789490818977356,
      "eval_runtime": 17.0163,
      "eval_samples_per_second": 717.724,
      "eval_steps_per_second": 44.898,
      "step": 200
    },
    {
      "epoch": 0.3274394237066143,
      "grad_norm": 1.2721681594848633,
      "learning_rate": 2.4900000000000002e-05,
      "loss": 0.2954,
      "step": 250
    },
    {
      "epoch": 0.3929273084479371,
      "grad_norm": 0.7673302292823792,
      "learning_rate": 2.9900000000000002e-05,
      "loss": 0.2724,
      "step": 300
    },
    {
      "epoch": 0.3929273084479371,
      "eval_accuracy": 0.8397609105051994,
      "eval_loss": 0.2580530345439911,
      "eval_runtime": 17.1319,
      "eval_samples_per_second": 712.879,
      "eval_steps_per_second": 44.595,
      "step": 300
    },
    {
      "epoch": 0.45841519318926,
      "grad_norm": 0.6606993079185486,
      "learning_rate": 3.49e-05,
      "loss": 0.2524,
      "step": 350
    },
    {
      "epoch": 0.5239030779305829,
      "grad_norm": 0.9855170249938965,
      "learning_rate": 3.9800000000000005e-05,
      "loss": 0.246,
      "step": 400
    },
    {
      "epoch": 0.5239030779305829,
      "eval_accuracy": 0.8489314664701547,
      "eval_loss": 0.24741230905056,
      "eval_runtime": 17.2565,
      "eval_samples_per_second": 707.732,
      "eval_steps_per_second": 44.273,
      "step": 400
    },
    {
      "epoch": 0.5893909626719057,
      "grad_norm": 0.863400936126709,
      "learning_rate": 4.4800000000000005e-05,
      "loss": 0.2509,
      "step": 450
    },
    {
      "epoch": 0.6548788474132285,
      "grad_norm": 0.2728968858718872,
      "learning_rate": 4.9800000000000004e-05,
      "loss": 0.2502,
      "step": 500
    },
    {
      "epoch": 0.6548788474132285,
      "eval_accuracy": 0.8481126668304266,
      "eval_loss": 0.24498052895069122,
      "eval_runtime": 17.4414,
      "eval_samples_per_second": 700.229,
      "eval_steps_per_second": 43.804,
      "step": 500
    },
    {
      "epoch": 0.7203667321545514,
      "grad_norm": 0.5378761291503906,
      "learning_rate": 4.906103286384977e-05,
      "loss": 0.2579,
      "step": 550
    },
    {
      "epoch": 0.7858546168958742,
      "grad_norm": 0.2830002009868622,
      "learning_rate": 4.808294209702661e-05,
      "loss": 0.2367,
      "step": 600
    },
    {
      "epoch": 0.7858546168958742,
      "eval_accuracy": 0.8522066650290674,
      "eval_loss": 0.23902325332164764,
      "eval_runtime": 17.3908,
      "eval_samples_per_second": 702.27,
      "eval_steps_per_second": 43.931,
      "step": 600
    },
    {
      "epoch": 0.8513425016371972,
      "grad_norm": 0.5070816874504089,
      "learning_rate": 4.710485133020345e-05,
      "loss": 0.2405,
      "step": 650
    },
    {
      "epoch": 0.91683038637852,
      "grad_norm": 6.335970401763916,
      "learning_rate": 4.6126760563380286e-05,
      "loss": 0.2453,
      "step": 700
    },
    {
      "epoch": 0.91683038637852,
      "eval_accuracy": 0.850159665929747,
      "eval_loss": 0.2547371983528137,
      "eval_runtime": 17.4528,
      "eval_samples_per_second": 699.773,
      "eval_steps_per_second": 43.775,
      "step": 700
    },
    {
      "epoch": 0.9823182711198428,
      "grad_norm": 0.29032933712005615,
      "learning_rate": 4.514866979655713e-05,
      "loss": 0.2473,
      "step": 750
    },
    {
      "epoch": 1.0471512770137525,
      "grad_norm": 0.278637558221817,
      "learning_rate": 4.417057902973396e-05,
      "loss": 0.2596,
      "step": 800
    },
    {
      "epoch": 1.0471512770137525,
      "eval_accuracy": 0.8526979448129043,
      "eval_loss": 0.23811034858226776,
      "eval_runtime": 17.4788,
      "eval_samples_per_second": 698.732,
      "eval_steps_per_second": 43.71,
      "step": 800
    },
    {
      "epoch": 1.1126391617550753,
      "grad_norm": 2.2316906452178955,
      "learning_rate": 4.3192488262910805e-05,
      "loss": 0.2351,
      "step": 850
    },
    {
      "epoch": 1.1781270464963982,
      "grad_norm": 6.879007339477539,
      "learning_rate": 4.221439749608764e-05,
      "loss": 0.2388,
      "step": 900
    },
    {
      "epoch": 1.1781270464963982,
      "eval_accuracy": 0.8549905838041432,
      "eval_loss": 0.2344694286584854,
      "eval_runtime": 17.4998,
      "eval_samples_per_second": 697.893,
      "eval_steps_per_second": 43.658,
      "step": 900
    },
    {
      "epoch": 1.243614931237721,
      "grad_norm": 0.19675926864147186,
      "learning_rate": 4.123630672926448e-05,
      "loss": 0.242,
      "step": 950
    },
    {
      "epoch": 1.3091028159790439,
      "grad_norm": 0.2648178040981293,
      "learning_rate": 4.0258215962441316e-05,
      "loss": 0.2267,
      "step": 1000
    },
    {
      "epoch": 1.3091028159790439,
      "eval_accuracy": 0.8549905838041432,
      "eval_loss": 0.23283295333385468,
      "eval_runtime": 17.5288,
      "eval_samples_per_second": 696.739,
      "eval_steps_per_second": 43.585,
      "step": 1000
    },
    {
      "epoch": 1.3745907007203666,
      "grad_norm": 0.2951018214225769,
      "learning_rate": 3.928012519561816e-05,
      "loss": 0.2347,
      "step": 1050
    },
    {
      "epoch": 1.4400785854616895,
      "grad_norm": 0.9093786478042603,
      "learning_rate": 3.830203442879499e-05,
      "loss": 0.2381,
      "step": 1100
    },
    {
      "epoch": 1.4400785854616895,
      "eval_accuracy": 0.8542536641283878,
      "eval_loss": 0.2354062795639038,
      "eval_runtime": 17.5883,
      "eval_samples_per_second": 694.384,
      "eval_steps_per_second": 43.438,
      "step": 1100
    },
    {
      "epoch": 1.5055664702030125,
      "grad_norm": 0.43629541993141174,
      "learning_rate": 3.7323943661971835e-05,
      "loss": 0.2343,
      "step": 1150
    },
    {
      "epoch": 1.5710543549443354,
      "grad_norm": 0.46896249055862427,
      "learning_rate": 3.634585289514868e-05,
      "loss": 0.2448,
      "step": 1200
    },
    {
      "epoch": 1.5710543549443354,
      "eval_accuracy": 0.8555637435519529,
      "eval_loss": 0.231441468000412,
      "eval_runtime": 17.7028,
      "eval_samples_per_second": 689.891,
      "eval_steps_per_second": 43.157,
      "step": 1200
    },
    {
      "epoch": 1.6365422396856582,
      "grad_norm": 3.0710296630859375,
      "learning_rate": 3.536776212832551e-05,
      "loss": 0.215,
      "step": 1250
    },
    {
      "epoch": 1.7020301244269809,
      "grad_norm": 0.28947174549102783,
      "learning_rate": 3.4389671361502353e-05,
      "loss": 0.2341,
      "step": 1300
    },
    {
      "epoch": 1.7020301244269809,
      "eval_accuracy": 0.8565463031196267,
      "eval_loss": 0.22934912145137787,
      "eval_runtime": 17.7508,
      "eval_samples_per_second": 688.025,
      "eval_steps_per_second": 43.04,
      "step": 1300
    },
    {
      "epoch": 1.7675180091683038,
      "grad_norm": 0.916119396686554,
      "learning_rate": 3.341158059467919e-05,
      "loss": 0.2291,
      "step": 1350
    },
    {
      "epoch": 1.8330058939096268,
      "grad_norm": 0.46822306513786316,
      "learning_rate": 3.243348982785603e-05,
      "loss": 0.2289,
      "step": 1400
    },
    {
      "epoch": 1.8330058939096268,
      "eval_accuracy": 0.8552362236960616,
      "eval_loss": 0.23168529570102692,
      "eval_runtime": 17.7733,
      "eval_samples_per_second": 687.153,
      "eval_steps_per_second": 42.986,
      "step": 1400
    },
    {
      "epoch": 1.8984937786509497,
      "grad_norm": 0.5012661218643188,
      "learning_rate": 3.1455399061032865e-05,
      "loss": 0.2325,
      "step": 1450
    },
    {
      "epoch": 1.9639816633922724,
      "grad_norm": 1.2745088338851929,
      "learning_rate": 3.0477308294209707e-05,
      "loss": 0.24,
      "step": 1500
    },
    {
      "epoch": 1.9639816633922724,
      "eval_accuracy": 0.8562187832637354,
      "eval_loss": 0.22923418879508972,
      "eval_runtime": 17.8388,
      "eval_samples_per_second": 684.631,
      "eval_steps_per_second": 42.828,
      "step": 1500
    },
    {
      "epoch": 2.028814669286182,
      "grad_norm": 0.3664182722568512,
      "learning_rate": 2.9499217527386542e-05,
      "loss": 0.2262,
      "step": 1550
    },
    {
      "epoch": 2.094302554027505,
      "grad_norm": 0.544244647026062,
      "learning_rate": 2.8521126760563384e-05,
      "loss": 0.229,
      "step": 1600
    },
    {
      "epoch": 2.094302554027505,
      "eval_accuracy": 0.8558093834438713,
      "eval_loss": 0.23112046718597412,
      "eval_runtime": 17.8003,
      "eval_samples_per_second": 686.112,
      "eval_steps_per_second": 42.921,
      "step": 1600
    },
    {
      "epoch": 2.1597904387688276,
      "grad_norm": 0.7998932600021362,
      "learning_rate": 2.754303599374022e-05,
      "loss": 0.2269,
      "step": 1650
    },
    {
      "epoch": 2.2252783235101505,
      "grad_norm": 1.73899245262146,
      "learning_rate": 2.656494522691706e-05,
      "loss": 0.2266,
      "step": 1700
    },
    {
      "epoch": 2.2252783235101505,
      "eval_accuracy": 0.8564644231556539,
      "eval_loss": 0.23248396813869476,
      "eval_runtime": 17.8458,
      "eval_samples_per_second": 684.363,
      "eval_steps_per_second": 42.811,
      "step": 1700
    },
    {
      "epoch": 2.2907662082514735,
      "grad_norm": 0.7401416301727295,
      "learning_rate": 2.5586854460093895e-05,
      "loss": 0.2304,
      "step": 1750
    },
    {
      "epoch": 2.3562540929927964,
      "grad_norm": 1.3307729959487915,
      "learning_rate": 2.4608763693270737e-05,
      "loss": 0.2229,
      "step": 1800
    },
    {
      "epoch": 2.3562540929927964,
      "eval_accuracy": 0.8578563825431916,
      "eval_loss": 0.22864140570163727,
      "eval_runtime": 17.9078,
      "eval_samples_per_second": 681.993,
      "eval_steps_per_second": 42.663,
      "step": 1800
    },
    {
      "epoch": 2.4217419777341194,
      "grad_norm": 0.16573481261730194,
      "learning_rate": 2.3630672926447576e-05,
      "loss": 0.2141,
      "step": 1850
    },
    {
      "epoch": 2.487229862475442,
      "grad_norm": 0.6072602272033691,
      "learning_rate": 2.2652582159624414e-05,
      "loss": 0.2171,
      "step": 1900
    },
    {
      "epoch": 2.487229862475442,
      "eval_accuracy": 0.856791943011545,
      "eval_loss": 0.2283138483762741,
      "eval_runtime": 17.9011,
      "eval_samples_per_second": 682.247,
      "eval_steps_per_second": 42.679,
      "step": 1900
    },
    {
      "epoch": 2.552717747216765,
      "grad_norm": 0.3190251588821411,
      "learning_rate": 2.1674491392801252e-05,
      "loss": 0.2173,
      "step": 1950
    },
    {
      "epoch": 2.6182056319580878,
      "grad_norm": 0.4229159355163574,
      "learning_rate": 2.069640062597809e-05,
      "loss": 0.2267,
      "step": 2000
    },
    {
      "epoch": 2.6182056319580878,
      "eval_accuracy": 0.8599033816425121,
      "eval_loss": 0.22846245765686035,
      "eval_runtime": 17.8748,
      "eval_samples_per_second": 683.254,
      "eval_steps_per_second": 42.742,
      "step": 2000
    },
    {
      "epoch": 2.6836935166994107,
      "grad_norm": 0.6909223198890686,
      "learning_rate": 1.971830985915493e-05,
      "loss": 0.2287,
      "step": 2050
    },
    {
      "epoch": 2.749181401440733,
      "grad_norm": 0.530602216720581,
      "learning_rate": 1.874021909233177e-05,
      "loss": 0.2332,
      "step": 2100
    },
    {
      "epoch": 2.749181401440733,
      "eval_accuracy": 0.8573651027593547,
      "eval_loss": 0.22631122171878815,
      "eval_runtime": 17.8614,
      "eval_samples_per_second": 683.766,
      "eval_steps_per_second": 42.774,
      "step": 2100
    },
    {
      "epoch": 2.814669286182056,
      "grad_norm": 0.4919154942035675,
      "learning_rate": 1.776212832550861e-05,
      "loss": 0.2116,
      "step": 2150
    },
    {
      "epoch": 2.880157170923379,
      "grad_norm": 1.3828797340393066,
      "learning_rate": 1.6784037558685448e-05,
      "loss": 0.2218,
      "step": 2200
    },
    {
      "epoch": 2.880157170923379,
      "eval_accuracy": 0.8577745025792188,
      "eval_loss": 0.2293933629989624,
      "eval_runtime": 17.8541,
      "eval_samples_per_second": 684.044,
      "eval_steps_per_second": 42.791,
      "step": 2200
    },
    {
      "epoch": 2.945645055664702,
      "grad_norm": 0.24504908919334412,
      "learning_rate": 1.5805946791862286e-05,
      "loss": 0.2287,
      "step": 2250
    },
    {
      "epoch": 3.0104780615586115,
      "grad_norm": 0.5326477885246277,
      "learning_rate": 1.4827856025039124e-05,
      "loss": 0.2272,
      "step": 2300
    },
    {
      "epoch": 3.0104780615586115,
      "eval_accuracy": 0.8573651027593547,
      "eval_loss": 0.2295144498348236,
      "eval_runtime": 17.8719,
      "eval_samples_per_second": 683.363,
      "eval_steps_per_second": 42.749,
      "step": 2300
    },
    {
      "epoch": 3.0759659462999345,
      "grad_norm": 0.6676012277603149,
      "learning_rate": 1.3849765258215963e-05,
      "loss": 0.2126,
      "step": 2350
    },
    {
      "epoch": 3.1414538310412574,
      "grad_norm": 0.3665286898612976,
      "learning_rate": 1.2871674491392801e-05,
      "loss": 0.2173,
      "step": 2400
    },
    {
      "epoch": 3.1414538310412574,
      "eval_accuracy": 0.8602309014984033,
      "eval_loss": 0.23410645127296448,
      "eval_runtime": 17.8958,
      "eval_samples_per_second": 682.449,
      "eval_steps_per_second": 42.691,
      "step": 2400
    },
    {
      "epoch": 3.2069417157825804,
      "grad_norm": 0.44082948565483093,
      "learning_rate": 1.189358372456964e-05,
      "loss": 0.2117,
      "step": 2450
    },
    {
      "epoch": 3.2724296005239033,
      "grad_norm": 1.7528928518295288,
      "learning_rate": 1.0915492957746478e-05,
      "loss": 0.2176,
      "step": 2500
    },
    {
      "epoch": 3.2724296005239033,
      "eval_accuracy": 0.8612134610660771,
      "eval_loss": 0.22918100655078888,
      "eval_runtime": 17.8879,
      "eval_samples_per_second": 682.753,
      "eval_steps_per_second": 42.71,
      "step": 2500
    },
    {
      "epoch": 3.337917485265226,
      "grad_norm": 1.001876711845398,
      "learning_rate": 9.937402190923318e-06,
      "loss": 0.2141,
      "step": 2550
    },
    {
      "epoch": 3.4034053700065487,
      "grad_norm": 0.3455939292907715,
      "learning_rate": 8.959311424100156e-06,
      "loss": 0.2147,
      "step": 2600
    },
    {
      "epoch": 3.4034053700065487,
      "eval_accuracy": 0.8619503807418325,
      "eval_loss": 0.2289513647556305,
      "eval_runtime": 17.8583,
      "eval_samples_per_second": 683.883,
      "eval_steps_per_second": 42.781,
      "step": 2600
    },
    {
      "epoch": 3.4688932547478717,
      "grad_norm": 0.39306333661079407,
      "learning_rate": 7.981220657276996e-06,
      "loss": 0.2212,
      "step": 2650
    },
    {
      "epoch": 3.5343811394891946,
      "grad_norm": 0.29782700538635254,
      "learning_rate": 7.003129890453834e-06,
      "loss": 0.2225,
      "step": 2700
    },
    {
      "epoch": 3.5343811394891946,
      "eval_accuracy": 0.8574469827233276,
      "eval_loss": 0.2323383092880249,
      "eval_runtime": 17.8359,
      "eval_samples_per_second": 684.744,
      "eval_steps_per_second": 42.835,
      "step": 2700
    },
    {
      "epoch": 3.599869024230517,
      "grad_norm": 0.5452626943588257,
      "learning_rate": 6.025039123630673e-06,
      "loss": 0.2121,
      "step": 2750
    },
    {
      "epoch": 3.66535690897184,
      "grad_norm": 3.8590140342712402,
      "learning_rate": 5.046948356807512e-06,
      "loss": 0.2208,
      "step": 2800
    },
    {
      "epoch": 3.66535690897184,
      "eval_accuracy": 0.8591664619667567,
      "eval_loss": 0.23002713918685913,
      "eval_runtime": 17.874,
      "eval_samples_per_second": 683.285,
      "eval_steps_per_second": 42.744,
      "step": 2800
    },
    {
      "epoch": 3.730844793713163,
      "grad_norm": 0.6683679223060608,
      "learning_rate": 4.068857589984351e-06,
      "loss": 0.2273,
      "step": 2850
    },
    {
      "epoch": 3.796332678454486,
      "grad_norm": 0.24069799482822418,
      "learning_rate": 3.0907668231611893e-06,
      "loss": 0.2084,
      "step": 2900
    },
    {
      "epoch": 3.796332678454486,
      "eval_accuracy": 0.8621960206337509,
      "eval_loss": 0.22760269045829773,
      "eval_runtime": 17.8864,
      "eval_samples_per_second": 682.809,
      "eval_steps_per_second": 42.714,
      "step": 2900
    },
    {
      "epoch": 3.861820563195809,
      "grad_norm": 1.0248568058013916,
      "learning_rate": 2.112676056338028e-06,
      "loss": 0.2082,
      "step": 2950
    },
    {
      "epoch": 3.927308447937132,
      "grad_norm": 1.0928360223770142,
      "learning_rate": 1.1541471048513302e-06,
      "loss": 0.2159,
      "step": 3000
    },
    {
      "epoch": 3.927308447937132,
      "eval_accuracy": 0.8622779005977237,
      "eval_loss": 0.22847115993499756,
      "eval_runtime": 17.9258,
      "eval_samples_per_second": 681.307,
      "eval_steps_per_second": 42.62,
      "step": 3000
    }
  ],
  "logging_steps": 50,
  "max_steps": 3056,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6354063711332352.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}