| { | |
| "best_global_step": null, | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 4.0, | |
| "eval_steps": 500, | |
| "global_step": 8072, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.0004956015363647627, | |
| "grad_norm": null, | |
| "learning_rate": 0.0, | |
| "loss": 4.2404, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.04956015363647627, | |
| "grad_norm": 3.30698561668396, | |
| "learning_rate": 4.9435973720094217e-05, | |
| "loss": 1.484, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.09912030727295254, | |
| "grad_norm": 2.357815980911255, | |
| "learning_rate": 4.8816164621296644e-05, | |
| "loss": 1.1277, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.14868046090942882, | |
| "grad_norm": 2.1948750019073486, | |
| "learning_rate": 4.819635552249907e-05, | |
| "loss": 1.0774, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.19824061454590508, | |
| "grad_norm": 2.270810842514038, | |
| "learning_rate": 4.75765464237015e-05, | |
| "loss": 1.0326, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.24780076818238136, | |
| "grad_norm": 2.18212628364563, | |
| "learning_rate": 4.6956737324903934e-05, | |
| "loss": 1.0031, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.24780076818238136, | |
| "eval_loss": 1.9913930892944336, | |
| "eval_runtime": 368.7305, | |
| "eval_samples_per_second": 6.571, | |
| "eval_steps_per_second": 3.287, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.29736092181885765, | |
| "grad_norm": 1.9237951040267944, | |
| "learning_rate": 4.633692822610636e-05, | |
| "loss": 0.9989, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.3469210754553339, | |
| "grad_norm": 2.1836133003234863, | |
| "learning_rate": 4.571711912730879e-05, | |
| "loss": 0.9794, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.39648122909181016, | |
| "grad_norm": 1.7673603296279907, | |
| "learning_rate": 4.509731002851122e-05, | |
| "loss": 0.959, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.4460413827282865, | |
| "grad_norm": 1.9361021518707275, | |
| "learning_rate": 4.447750092971365e-05, | |
| "loss": 0.9235, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.49560153636476273, | |
| "grad_norm": 1.8775012493133545, | |
| "learning_rate": 4.385769183091608e-05, | |
| "loss": 0.9235, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.49560153636476273, | |
| "eval_loss": 1.903882384300232, | |
| "eval_runtime": 361.7269, | |
| "eval_samples_per_second": 6.698, | |
| "eval_steps_per_second": 3.351, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.545161690001239, | |
| "grad_norm": 1.8856130838394165, | |
| "learning_rate": 4.3237882732118514e-05, | |
| "loss": 0.9073, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.5947218436377153, | |
| "grad_norm": 1.8579490184783936, | |
| "learning_rate": 4.2618073633320935e-05, | |
| "loss": 0.8897, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.6442819972741916, | |
| "grad_norm": 1.821420431137085, | |
| "learning_rate": 4.199826453452337e-05, | |
| "loss": 0.8924, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.6938421509106678, | |
| "grad_norm": 1.6502270698547363, | |
| "learning_rate": 4.13784554357258e-05, | |
| "loss": 0.8813, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.7434023045471441, | |
| "grad_norm": 2.301623821258545, | |
| "learning_rate": 4.075864633692823e-05, | |
| "loss": 0.8831, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.7434023045471441, | |
| "eval_loss": 1.8592430353164673, | |
| "eval_runtime": 356.2419, | |
| "eval_samples_per_second": 6.802, | |
| "eval_steps_per_second": 3.402, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.7929624581836203, | |
| "grad_norm": 1.7900115251541138, | |
| "learning_rate": 4.013883723813065e-05, | |
| "loss": 0.8828, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.8425226118200967, | |
| "grad_norm": 1.7043579816818237, | |
| "learning_rate": 3.951902813933309e-05, | |
| "loss": 0.8722, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.892082765456573, | |
| "grad_norm": 1.7436531782150269, | |
| "learning_rate": 3.8899219040535514e-05, | |
| "loss": 0.8537, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.9416429190930492, | |
| "grad_norm": 1.583708643913269, | |
| "learning_rate": 3.827940994173795e-05, | |
| "loss": 0.8487, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.9912030727295255, | |
| "grad_norm": 1.6228455305099487, | |
| "learning_rate": 3.7659600842940377e-05, | |
| "loss": 0.836, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.9912030727295255, | |
| "eval_loss": 1.8353278636932373, | |
| "eval_runtime": 357.9669, | |
| "eval_samples_per_second": 6.769, | |
| "eval_steps_per_second": 3.386, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.0406393259819104, | |
| "grad_norm": 1.728725552558899, | |
| "learning_rate": 3.7039791744142804e-05, | |
| "loss": 0.75, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 1.090199479618387, | |
| "grad_norm": 1.7833404541015625, | |
| "learning_rate": 3.641998264534524e-05, | |
| "loss": 0.7392, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 1.1397596332548632, | |
| "grad_norm": 1.4817216396331787, | |
| "learning_rate": 3.5800173546547666e-05, | |
| "loss": 0.7349, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 1.1893197868913394, | |
| "grad_norm": 1.362112045288086, | |
| "learning_rate": 3.5180364447750094e-05, | |
| "loss": 0.7279, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 1.2388799405278157, | |
| "grad_norm": 1.6990805864334106, | |
| "learning_rate": 3.456055534895252e-05, | |
| "loss": 0.7303, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 1.2388799405278157, | |
| "eval_loss": 1.9066349267959595, | |
| "eval_runtime": 361.0756, | |
| "eval_samples_per_second": 6.711, | |
| "eval_steps_per_second": 3.357, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 1.288440094164292, | |
| "grad_norm": 1.5519583225250244, | |
| "learning_rate": 3.3940746250154956e-05, | |
| "loss": 0.739, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 1.3380002478007682, | |
| "grad_norm": 1.9304168224334717, | |
| "learning_rate": 3.3320937151357384e-05, | |
| "loss": 0.7384, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 1.3875604014372445, | |
| "grad_norm": 2.0171093940734863, | |
| "learning_rate": 3.270112805255982e-05, | |
| "loss": 0.7314, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 1.4371205550737207, | |
| "grad_norm": 1.5190871953964233, | |
| "learning_rate": 3.208131895376224e-05, | |
| "loss": 0.723, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 1.486680708710197, | |
| "grad_norm": 1.839789867401123, | |
| "learning_rate": 3.1461509854964674e-05, | |
| "loss": 0.7402, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 1.486680708710197, | |
| "eval_loss": 1.939505696296692, | |
| "eval_runtime": 357.1918, | |
| "eval_samples_per_second": 6.783, | |
| "eval_steps_per_second": 3.393, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 1.5362408623466732, | |
| "grad_norm": 1.6048545837402344, | |
| "learning_rate": 3.08417007561671e-05, | |
| "loss": 0.722, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 1.5858010159831495, | |
| "grad_norm": 1.897549033164978, | |
| "learning_rate": 3.0221891657369533e-05, | |
| "loss": 0.7244, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 1.635361169619626, | |
| "grad_norm": 1.4337180852890015, | |
| "learning_rate": 2.960208255857196e-05, | |
| "loss": 0.719, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 1.684921323256102, | |
| "grad_norm": 1.6136891841888428, | |
| "learning_rate": 2.8982273459774388e-05, | |
| "loss": 0.7163, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 1.7344814768925785, | |
| "grad_norm": 1.6104755401611328, | |
| "learning_rate": 2.836246436097682e-05, | |
| "loss": 0.7196, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 1.7344814768925785, | |
| "eval_loss": 1.9235131740570068, | |
| "eval_runtime": 356.6603, | |
| "eval_samples_per_second": 6.794, | |
| "eval_steps_per_second": 3.398, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 1.7840416305290545, | |
| "grad_norm": 1.4487009048461914, | |
| "learning_rate": 2.774265526217925e-05, | |
| "loss": 0.7232, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 1.833601784165531, | |
| "grad_norm": 1.6850144863128662, | |
| "learning_rate": 2.712284616338168e-05, | |
| "loss": 0.7134, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 1.883161937802007, | |
| "grad_norm": 1.427085518836975, | |
| "learning_rate": 2.650303706458411e-05, | |
| "loss": 0.7205, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 1.9327220914384835, | |
| "grad_norm": 1.330480694770813, | |
| "learning_rate": 2.588322796578654e-05, | |
| "loss": 0.7072, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 1.9822822450749598, | |
| "grad_norm": 1.7128593921661377, | |
| "learning_rate": 2.526341886698897e-05, | |
| "loss": 0.709, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 1.9822822450749598, | |
| "eval_loss": 1.8779014348983765, | |
| "eval_runtime": 360.7502, | |
| "eval_samples_per_second": 6.717, | |
| "eval_steps_per_second": 3.36, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 2.031718498327345, | |
| "grad_norm": 1.6081246137619019, | |
| "learning_rate": 2.46436097681914e-05, | |
| "loss": 0.6167, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 2.081278651963821, | |
| "grad_norm": 2.1736507415771484, | |
| "learning_rate": 2.402380066939383e-05, | |
| "loss": 0.562, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 2.1308388056002974, | |
| "grad_norm": 1.9378325939178467, | |
| "learning_rate": 2.3403991570596257e-05, | |
| "loss": 0.5701, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 2.180398959236774, | |
| "grad_norm": 1.6395761966705322, | |
| "learning_rate": 2.278418247179869e-05, | |
| "loss": 0.5634, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 2.22995911287325, | |
| "grad_norm": 1.547757863998413, | |
| "learning_rate": 2.2164373373001116e-05, | |
| "loss": 0.5664, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 2.22995911287325, | |
| "eval_loss": 2.0903470516204834, | |
| "eval_runtime": 360.0829, | |
| "eval_samples_per_second": 6.729, | |
| "eval_steps_per_second": 3.366, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 2.2795192665097264, | |
| "grad_norm": 2.287740468978882, | |
| "learning_rate": 2.1544564274203547e-05, | |
| "loss": 0.5761, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 2.3290794201462024, | |
| "grad_norm": 1.8061494827270508, | |
| "learning_rate": 2.092475517540598e-05, | |
| "loss": 0.5847, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 2.378639573782679, | |
| "grad_norm": 1.8823641538619995, | |
| "learning_rate": 2.0304946076608406e-05, | |
| "loss": 0.5769, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 2.428199727419155, | |
| "grad_norm": 1.9263031482696533, | |
| "learning_rate": 1.9685136977810837e-05, | |
| "loss": 0.5767, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 2.4777598810556314, | |
| "grad_norm": 1.8592733144760132, | |
| "learning_rate": 1.9065327879013265e-05, | |
| "loss": 0.5676, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 2.4777598810556314, | |
| "eval_loss": 2.1367952823638916, | |
| "eval_runtime": 358.8557, | |
| "eval_samples_per_second": 6.752, | |
| "eval_steps_per_second": 3.377, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 2.5273200346921074, | |
| "grad_norm": 1.823480248451233, | |
| "learning_rate": 1.8445518780215696e-05, | |
| "loss": 0.5719, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 2.576880188328584, | |
| "grad_norm": 1.9600774049758911, | |
| "learning_rate": 1.7825709681418124e-05, | |
| "loss": 0.5679, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 2.62644034196506, | |
| "grad_norm": 1.8784770965576172, | |
| "learning_rate": 1.7205900582620555e-05, | |
| "loss": 0.5743, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 2.6760004956015364, | |
| "grad_norm": 1.8293484449386597, | |
| "learning_rate": 1.6586091483822982e-05, | |
| "loss": 0.5593, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 2.7255606492380124, | |
| "grad_norm": 2.11364483833313, | |
| "learning_rate": 1.5966282385025413e-05, | |
| "loss": 0.5734, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 2.7255606492380124, | |
| "eval_loss": 2.2089123725891113, | |
| "eval_runtime": 357.9895, | |
| "eval_samples_per_second": 6.768, | |
| "eval_steps_per_second": 3.386, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 2.775120802874489, | |
| "grad_norm": 1.7178990840911865, | |
| "learning_rate": 1.534647328622784e-05, | |
| "loss": 0.5628, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 2.8246809565109654, | |
| "grad_norm": 2.3309977054595947, | |
| "learning_rate": 1.4726664187430272e-05, | |
| "loss": 0.5703, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 2.8742411101474414, | |
| "grad_norm": 1.5812301635742188, | |
| "learning_rate": 1.41068550886327e-05, | |
| "loss": 0.5684, | |
| "step": 5800 | |
| }, | |
| { | |
| "epoch": 2.9238012637839175, | |
| "grad_norm": 1.6736018657684326, | |
| "learning_rate": 1.3487045989835131e-05, | |
| "loss": 0.5672, | |
| "step": 5900 | |
| }, | |
| { | |
| "epoch": 2.973361417420394, | |
| "grad_norm": 1.706903338432312, | |
| "learning_rate": 1.2867236891037562e-05, | |
| "loss": 0.5599, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 2.973361417420394, | |
| "eval_loss": 2.1762378215789795, | |
| "eval_runtime": 358.2691, | |
| "eval_samples_per_second": 6.763, | |
| "eval_steps_per_second": 3.383, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 3.0227976706727793, | |
| "grad_norm": 2.0913047790527344, | |
| "learning_rate": 1.224742779223999e-05, | |
| "loss": 0.4929, | |
| "step": 6100 | |
| }, | |
| { | |
| "epoch": 3.0723578243092553, | |
| "grad_norm": 1.9073973894119263, | |
| "learning_rate": 1.162761869344242e-05, | |
| "loss": 0.3932, | |
| "step": 6200 | |
| }, | |
| { | |
| "epoch": 3.121917977945732, | |
| "grad_norm": 2.258511781692505, | |
| "learning_rate": 1.100780959464485e-05, | |
| "loss": 0.3935, | |
| "step": 6300 | |
| }, | |
| { | |
| "epoch": 3.171478131582208, | |
| "grad_norm": 2.307436227798462, | |
| "learning_rate": 1.038800049584728e-05, | |
| "loss": 0.3901, | |
| "step": 6400 | |
| }, | |
| { | |
| "epoch": 3.2210382852186843, | |
| "grad_norm": 1.9356776475906372, | |
| "learning_rate": 9.768191397049709e-06, | |
| "loss": 0.397, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 3.2210382852186843, | |
| "eval_loss": 2.597865343093872, | |
| "eval_runtime": 361.0516, | |
| "eval_samples_per_second": 6.711, | |
| "eval_steps_per_second": 3.357, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 3.2705984388551603, | |
| "grad_norm": 2.4313840866088867, | |
| "learning_rate": 9.14838229825214e-06, | |
| "loss": 0.3951, | |
| "step": 6600 | |
| }, | |
| { | |
| "epoch": 3.320158592491637, | |
| "grad_norm": 2.332550048828125, | |
| "learning_rate": 8.52857319945457e-06, | |
| "loss": 0.3919, | |
| "step": 6700 | |
| }, | |
| { | |
| "epoch": 3.369718746128113, | |
| "grad_norm": 2.176802396774292, | |
| "learning_rate": 7.908764100656999e-06, | |
| "loss": 0.399, | |
| "step": 6800 | |
| }, | |
| { | |
| "epoch": 3.4192788997645893, | |
| "grad_norm": 2.2205862998962402, | |
| "learning_rate": 7.288955001859428e-06, | |
| "loss": 0.3926, | |
| "step": 6900 | |
| }, | |
| { | |
| "epoch": 3.4688390534010654, | |
| "grad_norm": 2.149296760559082, | |
| "learning_rate": 6.6691459030618575e-06, | |
| "loss": 0.3841, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 3.4688390534010654, | |
| "eval_loss": 2.627171754837036, | |
| "eval_runtime": 359.3677, | |
| "eval_samples_per_second": 6.742, | |
| "eval_steps_per_second": 3.373, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 3.518399207037542, | |
| "grad_norm": 2.374778985977173, | |
| "learning_rate": 6.049336804264287e-06, | |
| "loss": 0.3836, | |
| "step": 7100 | |
| }, | |
| { | |
| "epoch": 3.567959360674018, | |
| "grad_norm": 2.034752607345581, | |
| "learning_rate": 5.429527705466716e-06, | |
| "loss": 0.3945, | |
| "step": 7200 | |
| }, | |
| { | |
| "epoch": 3.6175195143104943, | |
| "grad_norm": 2.6508090496063232, | |
| "learning_rate": 4.809718606669146e-06, | |
| "loss": 0.3958, | |
| "step": 7300 | |
| }, | |
| { | |
| "epoch": 3.667079667946971, | |
| "grad_norm": 2.4304332733154297, | |
| "learning_rate": 4.189909507871576e-06, | |
| "loss": 0.3853, | |
| "step": 7400 | |
| }, | |
| { | |
| "epoch": 3.716639821583447, | |
| "grad_norm": 2.457920551300049, | |
| "learning_rate": 3.570100409074005e-06, | |
| "loss": 0.3815, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 3.716639821583447, | |
| "eval_loss": 2.6776695251464844, | |
| "eval_runtime": 358.5716, | |
| "eval_samples_per_second": 6.757, | |
| "eval_steps_per_second": 3.38, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 3.7661999752199233, | |
| "grad_norm": 2.602248191833496, | |
| "learning_rate": 2.950291310276435e-06, | |
| "loss": 0.3863, | |
| "step": 7600 | |
| }, | |
| { | |
| "epoch": 3.8157601288563994, | |
| "grad_norm": 2.3076493740081787, | |
| "learning_rate": 2.330482211478865e-06, | |
| "loss": 0.3882, | |
| "step": 7700 | |
| }, | |
| { | |
| "epoch": 3.865320282492876, | |
| "grad_norm": 2.247474193572998, | |
| "learning_rate": 1.7106731126812943e-06, | |
| "loss": 0.384, | |
| "step": 7800 | |
| }, | |
| { | |
| "epoch": 3.914880436129352, | |
| "grad_norm": 2.400883913040161, | |
| "learning_rate": 1.0908640138837239e-06, | |
| "loss": 0.377, | |
| "step": 7900 | |
| }, | |
| { | |
| "epoch": 3.9644405897658284, | |
| "grad_norm": 2.1551015377044678, | |
| "learning_rate": 4.710549150861535e-07, | |
| "loss": 0.3834, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 3.9644405897658284, | |
| "eval_loss": 2.66070818901062, | |
| "eval_runtime": 356.8712, | |
| "eval_samples_per_second": 6.79, | |
| "eval_steps_per_second": 3.396, | |
| "step": 8000 | |
| } | |
| ], | |
| "logging_steps": 100, | |
| "max_steps": 8072, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 4, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.6417343971328e+16, | |
| "train_batch_size": 2, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |