| { | |
| "best_metric": 0.6467730402946472, | |
| "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b0_2/checkpoint-280", | |
| "epoch": 93.33333333333333, | |
| "global_step": 280, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.33, | |
| "learning_rate": 5.555555555555555e-07, | |
| "loss": 1.1937, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.67, | |
| "learning_rate": 1.111111111111111e-06, | |
| "loss": 1.1971, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "learning_rate": 1.6666666666666667e-06, | |
| "loss": 1.202, | |
| "step": 3 | |
| }, | |
| { | |
| "epoch": 1.33, | |
| "learning_rate": 2.222222222222222e-06, | |
| "loss": 1.1957, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 1.67, | |
| "learning_rate": 2.7777777777777783e-06, | |
| "loss": 1.1958, | |
| "step": 5 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "learning_rate": 3.3333333333333333e-06, | |
| "loss": 1.2, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 2.33, | |
| "learning_rate": 3.88888888888889e-06, | |
| "loss": 1.1924, | |
| "step": 7 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 4.444444444444444e-06, | |
| "loss": 1.193, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 5e-06, | |
| "loss": 1.1783, | |
| "step": 9 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "learning_rate": 5.555555555555557e-06, | |
| "loss": 1.1857, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "eval_accuracy_dropoff": 0.31831966365704045, | |
| "eval_accuracy_undropoff": 0.0893867354682692, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.16634351607849257, | |
| "eval_iou_undropoff": 0.08927195204529292, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 1.1215094327926636, | |
| "eval_mean_accuracy": 0.20385319956265482, | |
| "eval_mean_iou": 0.08520515604126183, | |
| "eval_overall_accuracy": 0.09952341715494792, | |
| "eval_runtime": 1.7685, | |
| "eval_samples_per_second": 8.482, | |
| "eval_steps_per_second": 0.565, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 3.67, | |
| "learning_rate": 6.111111111111112e-06, | |
| "loss": 1.186, | |
| "step": 11 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "learning_rate": 6.666666666666667e-06, | |
| "loss": 1.1807, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "learning_rate": 7.222222222222223e-06, | |
| "loss": 1.1837, | |
| "step": 13 | |
| }, | |
| { | |
| "epoch": 4.67, | |
| "learning_rate": 7.77777777777778e-06, | |
| "loss": 1.1805, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "learning_rate": 8.333333333333334e-06, | |
| "loss": 1.1748, | |
| "step": 15 | |
| }, | |
| { | |
| "epoch": 5.33, | |
| "learning_rate": 8.888888888888888e-06, | |
| "loss": 1.1743, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 5.67, | |
| "learning_rate": 9.444444444444445e-06, | |
| "loss": 1.1706, | |
| "step": 17 | |
| }, | |
| { | |
| "epoch": 6.0, | |
| "learning_rate": 1e-05, | |
| "loss": 1.1773, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 6.33, | |
| "learning_rate": 9.97076023391813e-06, | |
| "loss": 1.1652, | |
| "step": 19 | |
| }, | |
| { | |
| "epoch": 6.67, | |
| "learning_rate": 9.941520467836257e-06, | |
| "loss": 1.1597, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 6.67, | |
| "eval_accuracy_dropoff": 0.4929928550095343, | |
| "eval_accuracy_undropoff": 0.22057730973387277, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.21076229880541675, | |
| "eval_iou_undropoff": 0.22033540146160016, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 1.1164543628692627, | |
| "eval_mean_accuracy": 0.35678508237170353, | |
| "eval_mean_iou": 0.14369923342233895, | |
| "eval_overall_accuracy": 0.23263931274414062, | |
| "eval_runtime": 1.6559, | |
| "eval_samples_per_second": 9.058, | |
| "eval_steps_per_second": 0.604, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 7.0, | |
| "learning_rate": 9.912280701754386e-06, | |
| "loss": 1.1697, | |
| "step": 21 | |
| }, | |
| { | |
| "epoch": 7.33, | |
| "learning_rate": 9.883040935672515e-06, | |
| "loss": 1.1548, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 7.67, | |
| "learning_rate": 9.853801169590644e-06, | |
| "loss": 1.153, | |
| "step": 23 | |
| }, | |
| { | |
| "epoch": 8.0, | |
| "learning_rate": 9.824561403508772e-06, | |
| "loss": 1.1425, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 8.33, | |
| "learning_rate": 9.795321637426901e-06, | |
| "loss": 1.1488, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 8.67, | |
| "learning_rate": 9.76608187134503e-06, | |
| "loss": 1.1368, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 9.0, | |
| "learning_rate": 9.736842105263159e-06, | |
| "loss": 1.1341, | |
| "step": 27 | |
| }, | |
| { | |
| "epoch": 9.33, | |
| "learning_rate": 9.707602339181286e-06, | |
| "loss": 1.1331, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 9.67, | |
| "learning_rate": 9.678362573099415e-06, | |
| "loss": 1.1291, | |
| "step": 29 | |
| }, | |
| { | |
| "epoch": 10.0, | |
| "learning_rate": 9.649122807017545e-06, | |
| "loss": 1.1528, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 10.0, | |
| "eval_accuracy_dropoff": 0.6977680520136926, | |
| "eval_accuracy_undropoff": 0.33014258450920847, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.25172809627609755, | |
| "eval_iou_undropoff": 0.32973099677549067, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 1.1040031909942627, | |
| "eval_mean_accuracy": 0.5139553182614506, | |
| "eval_mean_iou": 0.19381969768386273, | |
| "eval_overall_accuracy": 0.3464202880859375, | |
| "eval_runtime": 1.7994, | |
| "eval_samples_per_second": 8.336, | |
| "eval_steps_per_second": 0.556, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 10.33, | |
| "learning_rate": 9.619883040935674e-06, | |
| "loss": 1.1212, | |
| "step": 31 | |
| }, | |
| { | |
| "epoch": 10.67, | |
| "learning_rate": 9.590643274853801e-06, | |
| "loss": 1.1221, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 11.0, | |
| "learning_rate": 9.56140350877193e-06, | |
| "loss": 1.1172, | |
| "step": 33 | |
| }, | |
| { | |
| "epoch": 11.33, | |
| "learning_rate": 9.532163742690059e-06, | |
| "loss": 1.1268, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 11.67, | |
| "learning_rate": 9.502923976608188e-06, | |
| "loss": 1.1014, | |
| "step": 35 | |
| }, | |
| { | |
| "epoch": 12.0, | |
| "learning_rate": 9.473684210526315e-06, | |
| "loss": 1.0921, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 12.33, | |
| "learning_rate": 9.444444444444445e-06, | |
| "loss": 1.0968, | |
| "step": 37 | |
| }, | |
| { | |
| "epoch": 12.67, | |
| "learning_rate": 9.415204678362574e-06, | |
| "loss": 1.0976, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 13.0, | |
| "learning_rate": 9.385964912280703e-06, | |
| "loss": 1.147, | |
| "step": 39 | |
| }, | |
| { | |
| "epoch": 13.33, | |
| "learning_rate": 9.35672514619883e-06, | |
| "loss": 1.0852, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 13.33, | |
| "eval_accuracy_dropoff": 0.8560548624991384, | |
| "eval_accuracy_undropoff": 0.4017632007220762, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2716624988152609, | |
| "eval_iou_undropoff": 0.40109059881190773, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 1.0895884037017822, | |
| "eval_mean_accuracy": 0.6289090316106073, | |
| "eval_mean_iou": 0.22425103254238954, | |
| "eval_overall_accuracy": 0.42187830607096355, | |
| "eval_runtime": 1.8264, | |
| "eval_samples_per_second": 8.213, | |
| "eval_steps_per_second": 0.548, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 13.67, | |
| "learning_rate": 9.327485380116959e-06, | |
| "loss": 1.0899, | |
| "step": 41 | |
| }, | |
| { | |
| "epoch": 14.0, | |
| "learning_rate": 9.298245614035088e-06, | |
| "loss": 1.1058, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 14.33, | |
| "learning_rate": 9.269005847953217e-06, | |
| "loss": 1.0853, | |
| "step": 43 | |
| }, | |
| { | |
| "epoch": 14.67, | |
| "learning_rate": 9.239766081871345e-06, | |
| "loss": 1.0757, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 15.0, | |
| "learning_rate": 9.210526315789474e-06, | |
| "loss": 1.0139, | |
| "step": 45 | |
| }, | |
| { | |
| "epoch": 15.33, | |
| "learning_rate": 9.181286549707603e-06, | |
| "loss": 1.0763, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 15.67, | |
| "learning_rate": 9.152046783625732e-06, | |
| "loss": 1.0481, | |
| "step": 47 | |
| }, | |
| { | |
| "epoch": 16.0, | |
| "learning_rate": 9.12280701754386e-06, | |
| "loss": 1.0597, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 16.33, | |
| "learning_rate": 9.093567251461988e-06, | |
| "loss": 1.0762, | |
| "step": 49 | |
| }, | |
| { | |
| "epoch": 16.67, | |
| "learning_rate": 9.064327485380117e-06, | |
| "loss": 1.0388, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 16.67, | |
| "eval_accuracy_dropoff": 0.8120074896041537, | |
| "eval_accuracy_undropoff": 0.5376623314419279, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.27435117229495026, | |
| "eval_iou_undropoff": 0.535722324991562, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 1.0510833263397217, | |
| "eval_mean_accuracy": 0.6748349105230408, | |
| "eval_mean_iou": 0.2700244990955041, | |
| "eval_overall_accuracy": 0.5498097737630209, | |
| "eval_runtime": 1.8323, | |
| "eval_samples_per_second": 8.186, | |
| "eval_steps_per_second": 0.546, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 17.0, | |
| "learning_rate": 9.035087719298246e-06, | |
| "loss": 0.9925, | |
| "step": 51 | |
| }, | |
| { | |
| "epoch": 17.33, | |
| "learning_rate": 9.005847953216374e-06, | |
| "loss": 1.0352, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 17.67, | |
| "learning_rate": 8.976608187134503e-06, | |
| "loss": 1.04, | |
| "step": 53 | |
| }, | |
| { | |
| "epoch": 18.0, | |
| "learning_rate": 8.947368421052632e-06, | |
| "loss": 1.0001, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 18.33, | |
| "learning_rate": 8.918128654970761e-06, | |
| "loss": 1.0212, | |
| "step": 55 | |
| }, | |
| { | |
| "epoch": 18.67, | |
| "learning_rate": 8.888888888888888e-06, | |
| "loss": 1.0407, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 19.0, | |
| "learning_rate": 8.859649122807017e-06, | |
| "loss": 1.0845, | |
| "step": 57 | |
| }, | |
| { | |
| "epoch": 19.33, | |
| "learning_rate": 8.830409356725146e-06, | |
| "loss": 0.9858, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 19.67, | |
| "learning_rate": 8.801169590643275e-06, | |
| "loss": 1.024, | |
| "step": 59 | |
| }, | |
| { | |
| "epoch": 20.0, | |
| "learning_rate": 8.771929824561405e-06, | |
| "loss": 1.0426, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 20.0, | |
| "eval_accuracy_dropoff": 0.694913501964298, | |
| "eval_accuracy_undropoff": 0.6625187730238964, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.28570551480832534, | |
| "eval_iou_undropoff": 0.6582500159289429, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 1.008933186531067, | |
| "eval_mean_accuracy": 0.6787161374940972, | |
| "eval_mean_iou": 0.31465184357908943, | |
| "eval_overall_accuracy": 0.6639531453450521, | |
| "eval_runtime": 1.7982, | |
| "eval_samples_per_second": 8.342, | |
| "eval_steps_per_second": 0.556, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 20.33, | |
| "learning_rate": 8.742690058479532e-06, | |
| "loss": 0.9873, | |
| "step": 61 | |
| }, | |
| { | |
| "epoch": 20.67, | |
| "learning_rate": 8.713450292397661e-06, | |
| "loss": 0.9929, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 21.0, | |
| "learning_rate": 8.68421052631579e-06, | |
| "loss": 1.082, | |
| "step": 63 | |
| }, | |
| { | |
| "epoch": 21.33, | |
| "learning_rate": 8.654970760233919e-06, | |
| "loss": 0.9899, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 21.67, | |
| "learning_rate": 8.625730994152046e-06, | |
| "loss": 1.0285, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 22.0, | |
| "learning_rate": 8.596491228070176e-06, | |
| "loss": 1.0342, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 22.33, | |
| "learning_rate": 8.567251461988305e-06, | |
| "loss": 1.0056, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 22.67, | |
| "learning_rate": 8.538011695906434e-06, | |
| "loss": 0.9658, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 23.0, | |
| "learning_rate": 8.508771929824563e-06, | |
| "loss": 1.0663, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 23.33, | |
| "learning_rate": 8.47953216374269e-06, | |
| "loss": 0.9621, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 23.33, | |
| "eval_accuracy_dropoff": 0.6694637811013854, | |
| "eval_accuracy_undropoff": 0.7424394340472138, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2759700824650003, | |
| "eval_iou_undropoff": 0.7360936036618343, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.9921492338180542, | |
| "eval_mean_accuracy": 0.7059516075742995, | |
| "eval_mean_iou": 0.3373545620422782, | |
| "eval_overall_accuracy": 0.7392082214355469, | |
| "eval_runtime": 1.8437, | |
| "eval_samples_per_second": 8.136, | |
| "eval_steps_per_second": 0.542, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 23.67, | |
| "learning_rate": 8.45029239766082e-06, | |
| "loss": 0.9579, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 24.0, | |
| "learning_rate": 8.421052631578948e-06, | |
| "loss": 1.0001, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 24.33, | |
| "learning_rate": 8.391812865497077e-06, | |
| "loss": 0.9568, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 24.67, | |
| "learning_rate": 8.362573099415205e-06, | |
| "loss": 0.9753, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 25.0, | |
| "learning_rate": 8.333333333333334e-06, | |
| "loss": 1.0714, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 25.33, | |
| "learning_rate": 8.304093567251463e-06, | |
| "loss": 0.9381, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 25.67, | |
| "learning_rate": 8.274853801169592e-06, | |
| "loss": 0.9421, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 26.0, | |
| "learning_rate": 8.24561403508772e-06, | |
| "loss": 1.0552, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 26.33, | |
| "learning_rate": 8.216374269005848e-06, | |
| "loss": 0.9717, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 26.67, | |
| "learning_rate": 8.187134502923977e-06, | |
| "loss": 0.925, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 26.67, | |
| "eval_accuracy_dropoff": 0.6007420681416132, | |
| "eval_accuracy_undropoff": 0.8054300472691703, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.28071917615199427, | |
| "eval_iou_undropoff": 0.7965033143254118, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.9463671445846558, | |
| "eval_mean_accuracy": 0.7030860577053918, | |
| "eval_mean_iou": 0.35907416349246873, | |
| "eval_overall_accuracy": 0.7963668823242187, | |
| "eval_runtime": 1.7718, | |
| "eval_samples_per_second": 8.466, | |
| "eval_steps_per_second": 0.564, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 27.0, | |
| "learning_rate": 8.157894736842106e-06, | |
| "loss": 0.9959, | |
| "step": 81 | |
| }, | |
| { | |
| "epoch": 27.33, | |
| "learning_rate": 8.128654970760235e-06, | |
| "loss": 0.9623, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 27.67, | |
| "learning_rate": 8.099415204678363e-06, | |
| "loss": 0.9523, | |
| "step": 83 | |
| }, | |
| { | |
| "epoch": 28.0, | |
| "learning_rate": 8.070175438596492e-06, | |
| "loss": 0.8517, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 28.33, | |
| "learning_rate": 8.040935672514621e-06, | |
| "loss": 0.931, | |
| "step": 85 | |
| }, | |
| { | |
| "epoch": 28.67, | |
| "learning_rate": 8.01169590643275e-06, | |
| "loss": 0.8751, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 29.0, | |
| "learning_rate": 7.982456140350877e-06, | |
| "loss": 1.0695, | |
| "step": 87 | |
| }, | |
| { | |
| "epoch": 29.33, | |
| "learning_rate": 7.953216374269006e-06, | |
| "loss": 0.9203, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 29.67, | |
| "learning_rate": 7.923976608187136e-06, | |
| "loss": 0.8954, | |
| "step": 89 | |
| }, | |
| { | |
| "epoch": 30.0, | |
| "learning_rate": 7.894736842105265e-06, | |
| "loss": 0.8872, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 30.0, | |
| "eval_accuracy_dropoff": 0.5316240494405771, | |
| "eval_accuracy_undropoff": 0.8831264176227471, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.28878523626039343, | |
| "eval_iou_undropoff": 0.868586008291627, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.8992863297462463, | |
| "eval_mean_accuracy": 0.7073752335316621, | |
| "eval_mean_iou": 0.38579041485067345, | |
| "eval_overall_accuracy": 0.8675626118977865, | |
| "eval_runtime": 1.7927, | |
| "eval_samples_per_second": 8.367, | |
| "eval_steps_per_second": 0.558, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 30.33, | |
| "learning_rate": 7.865497076023394e-06, | |
| "loss": 0.8853, | |
| "step": 91 | |
| }, | |
| { | |
| "epoch": 30.67, | |
| "learning_rate": 7.836257309941521e-06, | |
| "loss": 0.9393, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 31.0, | |
| "learning_rate": 7.80701754385965e-06, | |
| "loss": 1.0412, | |
| "step": 93 | |
| }, | |
| { | |
| "epoch": 31.33, | |
| "learning_rate": 7.77777777777778e-06, | |
| "loss": 0.884, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 31.67, | |
| "learning_rate": 7.748538011695908e-06, | |
| "loss": 0.8976, | |
| "step": 95 | |
| }, | |
| { | |
| "epoch": 32.0, | |
| "learning_rate": 7.719298245614036e-06, | |
| "loss": 0.94, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 32.33, | |
| "learning_rate": 7.690058479532165e-06, | |
| "loss": 0.9039, | |
| "step": 97 | |
| }, | |
| { | |
| "epoch": 32.67, | |
| "learning_rate": 7.660818713450294e-06, | |
| "loss": 0.8703, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 33.0, | |
| "learning_rate": 7.631578947368423e-06, | |
| "loss": 1.0682, | |
| "step": 99 | |
| }, | |
| { | |
| "epoch": 33.33, | |
| "learning_rate": 7.60233918128655e-06, | |
| "loss": 0.8751, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 33.33, | |
| "eval_accuracy_dropoff": 0.5378500700714499, | |
| "eval_accuracy_undropoff": 0.897618234127681, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.28663781229147406, | |
| "eval_iou_undropoff": 0.8821925202496806, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.8973800539970398, | |
| "eval_mean_accuracy": 0.7177341520995655, | |
| "eval_mean_iou": 0.38961011084705155, | |
| "eval_overall_accuracy": 0.8816884358723959, | |
| "eval_runtime": 1.8938, | |
| "eval_samples_per_second": 7.921, | |
| "eval_steps_per_second": 0.528, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 33.67, | |
| "learning_rate": 7.573099415204679e-06, | |
| "loss": 0.8741, | |
| "step": 101 | |
| }, | |
| { | |
| "epoch": 34.0, | |
| "learning_rate": 7.5438596491228074e-06, | |
| "loss": 1.0176, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 34.33, | |
| "learning_rate": 7.5146198830409365e-06, | |
| "loss": 0.8424, | |
| "step": 103 | |
| }, | |
| { | |
| "epoch": 34.67, | |
| "learning_rate": 7.485380116959065e-06, | |
| "loss": 0.8853, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 35.0, | |
| "learning_rate": 7.456140350877194e-06, | |
| "loss": 0.7895, | |
| "step": 105 | |
| }, | |
| { | |
| "epoch": 35.33, | |
| "learning_rate": 7.426900584795322e-06, | |
| "loss": 0.8757, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 35.67, | |
| "learning_rate": 7.397660818713451e-06, | |
| "loss": 0.8167, | |
| "step": 107 | |
| }, | |
| { | |
| "epoch": 36.0, | |
| "learning_rate": 7.368421052631579e-06, | |
| "loss": 0.9893, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 36.33, | |
| "learning_rate": 7.339181286549708e-06, | |
| "loss": 0.8695, | |
| "step": 109 | |
| }, | |
| { | |
| "epoch": 36.67, | |
| "learning_rate": 7.309941520467837e-06, | |
| "loss": 0.8571, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 36.67, | |
| "eval_accuracy_dropoff": 0.5011085073632457, | |
| "eval_accuracy_undropoff": 0.9312282001419885, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2952840916782867, | |
| "eval_iou_undropoff": 0.9131370144920805, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.8501250743865967, | |
| "eval_mean_accuracy": 0.7161683537526171, | |
| "eval_mean_iou": 0.40280703539012236, | |
| "eval_overall_accuracy": 0.9121833801269531, | |
| "eval_runtime": 1.8553, | |
| "eval_samples_per_second": 8.085, | |
| "eval_steps_per_second": 0.539, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 37.0, | |
| "learning_rate": 7.280701754385966e-06, | |
| "loss": 0.9179, | |
| "step": 111 | |
| }, | |
| { | |
| "epoch": 37.33, | |
| "learning_rate": 7.251461988304094e-06, | |
| "loss": 0.8797, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 37.67, | |
| "learning_rate": 7.222222222222223e-06, | |
| "loss": 0.8253, | |
| "step": 113 | |
| }, | |
| { | |
| "epoch": 38.0, | |
| "learning_rate": 7.192982456140352e-06, | |
| "loss": 1.0202, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 38.33, | |
| "learning_rate": 7.16374269005848e-06, | |
| "loss": 0.8197, | |
| "step": 115 | |
| }, | |
| { | |
| "epoch": 38.67, | |
| "learning_rate": 7.134502923976608e-06, | |
| "loss": 0.9141, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 39.0, | |
| "learning_rate": 7.1052631578947375e-06, | |
| "loss": 0.8941, | |
| "step": 117 | |
| }, | |
| { | |
| "epoch": 39.33, | |
| "learning_rate": 7.0760233918128665e-06, | |
| "loss": 0.8099, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 39.67, | |
| "learning_rate": 7.046783625730995e-06, | |
| "loss": 0.814, | |
| "step": 119 | |
| }, | |
| { | |
| "epoch": 40.0, | |
| "learning_rate": 7.017543859649123e-06, | |
| "loss": 0.8866, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 40.0, | |
| "eval_accuracy_dropoff": 0.5032278815447883, | |
| "eval_accuracy_undropoff": 0.9447830950715956, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.29626824286854314, | |
| "eval_iou_undropoff": 0.9253514754358129, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.8434326648712158, | |
| "eval_mean_accuracy": 0.7240054883081919, | |
| "eval_mean_iou": 0.40720657276811867, | |
| "eval_overall_accuracy": 0.92523193359375, | |
| "eval_runtime": 1.7709, | |
| "eval_samples_per_second": 8.47, | |
| "eval_steps_per_second": 0.565, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 40.33, | |
| "learning_rate": 6.988304093567252e-06, | |
| "loss": 0.8118, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 40.67, | |
| "learning_rate": 6.959064327485381e-06, | |
| "loss": 0.8094, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 41.0, | |
| "learning_rate": 6.92982456140351e-06, | |
| "loss": 1.0206, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 41.33, | |
| "learning_rate": 6.9005847953216375e-06, | |
| "loss": 0.8131, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 41.67, | |
| "learning_rate": 6.871345029239767e-06, | |
| "loss": 0.8135, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 42.0, | |
| "learning_rate": 6.842105263157896e-06, | |
| "loss": 0.8002, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 42.33, | |
| "learning_rate": 6.812865497076025e-06, | |
| "loss": 0.7738, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 42.67, | |
| "learning_rate": 6.783625730994152e-06, | |
| "loss": 0.8152, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 43.0, | |
| "learning_rate": 6.754385964912281e-06, | |
| "loss": 0.9834, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 43.33, | |
| "learning_rate": 6.72514619883041e-06, | |
| "loss": 0.8127, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 43.33, | |
| "eval_accuracy_dropoff": 0.4548154019344315, | |
| "eval_accuracy_undropoff": 0.9628874214619701, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.30249561653150175, | |
| "eval_iou_undropoff": 0.9401739536203356, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.7922199964523315, | |
| "eval_mean_accuracy": 0.7088514116982008, | |
| "eval_mean_iou": 0.4142231900506124, | |
| "eval_overall_accuracy": 0.9403910319010417, | |
| "eval_runtime": 1.7189, | |
| "eval_samples_per_second": 8.727, | |
| "eval_steps_per_second": 0.582, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 43.67, | |
| "learning_rate": 6.695906432748539e-06, | |
| "loss": 0.7528, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 44.0, | |
| "learning_rate": 6.666666666666667e-06, | |
| "loss": 0.9918, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 44.33, | |
| "learning_rate": 6.637426900584796e-06, | |
| "loss": 0.7708, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 44.67, | |
| "learning_rate": 6.608187134502925e-06, | |
| "loss": 0.8658, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 45.0, | |
| "learning_rate": 6.578947368421054e-06, | |
| "loss": 0.8974, | |
| "step": 135 | |
| }, | |
| { | |
| "epoch": 45.33, | |
| "learning_rate": 6.549707602339181e-06, | |
| "loss": 0.7966, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 45.67, | |
| "learning_rate": 6.52046783625731e-06, | |
| "loss": 0.7828, | |
| "step": 137 | |
| }, | |
| { | |
| "epoch": 46.0, | |
| "learning_rate": 6.491228070175439e-06, | |
| "loss": 0.7661, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 46.33, | |
| "learning_rate": 6.461988304093568e-06, | |
| "loss": 0.7771, | |
| "step": 139 | |
| }, | |
| { | |
| "epoch": 46.67, | |
| "learning_rate": 6.432748538011696e-06, | |
| "loss": 0.8062, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 46.67, | |
| "eval_accuracy_dropoff": 0.4548154019344315, | |
| "eval_accuracy_undropoff": 0.9657854654485888, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.29428794410584214, | |
| "eval_iou_undropoff": 0.9424776486046446, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.7916517853736877, | |
| "eval_mean_accuracy": 0.7103004336915102, | |
| "eval_mean_iou": 0.4122551975701622, | |
| "eval_overall_accuracy": 0.9431607564290364, | |
| "eval_runtime": 1.8278, | |
| "eval_samples_per_second": 8.207, | |
| "eval_steps_per_second": 0.547, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 47.0, | |
| "learning_rate": 6.403508771929825e-06, | |
| "loss": 0.9571, | |
| "step": 141 | |
| }, | |
| { | |
| "epoch": 47.33, | |
| "learning_rate": 6.374269005847954e-06, | |
| "loss": 0.7728, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 47.67, | |
| "learning_rate": 6.345029239766083e-06, | |
| "loss": 0.7592, | |
| "step": 143 | |
| }, | |
| { | |
| "epoch": 48.0, | |
| "learning_rate": 6.31578947368421e-06, | |
| "loss": 0.9889, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 48.33, | |
| "learning_rate": 6.286549707602339e-06, | |
| "loss": 0.7768, | |
| "step": 145 | |
| }, | |
| { | |
| "epoch": 48.67, | |
| "learning_rate": 6.2573099415204685e-06, | |
| "loss": 0.7516, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 49.0, | |
| "learning_rate": 6.2280701754385975e-06, | |
| "loss": 0.9547, | |
| "step": 147 | |
| }, | |
| { | |
| "epoch": 49.33, | |
| "learning_rate": 6.198830409356725e-06, | |
| "loss": 0.7487, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 49.67, | |
| "learning_rate": 6.169590643274854e-06, | |
| "loss": 0.7782, | |
| "step": 149 | |
| }, | |
| { | |
| "epoch": 50.0, | |
| "learning_rate": 6.140350877192983e-06, | |
| "loss": 0.7512, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 50.0, | |
| "eval_accuracy_dropoff": 0.4404392675810416, | |
| "eval_accuracy_undropoff": 0.9713423337409913, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2954794161618964, | |
| "eval_iou_undropoff": 0.9470340351686451, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.7646103501319885, | |
| "eval_mean_accuracy": 0.7058908006610165, | |
| "eval_mean_iou": 0.4141711504435139, | |
| "eval_overall_accuracy": 0.9478350321451823, | |
| "eval_runtime": 1.7304, | |
| "eval_samples_per_second": 8.669, | |
| "eval_steps_per_second": 0.578, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 50.33, | |
| "learning_rate": 6.111111111111112e-06, | |
| "loss": 0.7721, | |
| "step": 151 | |
| }, | |
| { | |
| "epoch": 50.67, | |
| "learning_rate": 6.08187134502924e-06, | |
| "loss": 0.7395, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 51.0, | |
| "learning_rate": 6.0526315789473685e-06, | |
| "loss": 0.769, | |
| "step": 153 | |
| }, | |
| { | |
| "epoch": 51.33, | |
| "learning_rate": 6.023391812865498e-06, | |
| "loss": 0.7787, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 51.67, | |
| "learning_rate": 5.994152046783627e-06, | |
| "loss": 0.706, | |
| "step": 155 | |
| }, | |
| { | |
| "epoch": 52.0, | |
| "learning_rate": 5.964912280701755e-06, | |
| "loss": 0.9423, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 52.33, | |
| "learning_rate": 5.935672514619883e-06, | |
| "loss": 0.7311, | |
| "step": 157 | |
| }, | |
| { | |
| "epoch": 52.67, | |
| "learning_rate": 5.906432748538012e-06, | |
| "loss": 0.7922, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 53.0, | |
| "learning_rate": 5.877192982456141e-06, | |
| "loss": 0.7217, | |
| "step": 159 | |
| }, | |
| { | |
| "epoch": 53.33, | |
| "learning_rate": 5.847953216374269e-06, | |
| "loss": 0.7554, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 53.33, | |
| "eval_accuracy_dropoff": 0.42476508833597537, | |
| "eval_accuracy_undropoff": 0.9753952313592255, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2981335160848182, | |
| "eval_iou_undropoff": 0.9502212896939117, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.7497116923332214, | |
| "eval_mean_accuracy": 0.7000801598476004, | |
| "eval_mean_iou": 0.41611826859290996, | |
| "eval_overall_accuracy": 0.9510144551595052, | |
| "eval_runtime": 1.7881, | |
| "eval_samples_per_second": 8.389, | |
| "eval_steps_per_second": 0.559, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 53.67, | |
| "learning_rate": 5.8187134502923985e-06, | |
| "loss": 0.7207, | |
| "step": 161 | |
| }, | |
| { | |
| "epoch": 54.0, | |
| "learning_rate": 5.789473684210527e-06, | |
| "loss": 0.9743, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 54.33, | |
| "learning_rate": 5.760233918128656e-06, | |
| "loss": 0.7271, | |
| "step": 163 | |
| }, | |
| { | |
| "epoch": 54.67, | |
| "learning_rate": 5.730994152046784e-06, | |
| "loss": 0.7114, | |
| "step": 164 | |
| }, | |
| { | |
| "epoch": 55.0, | |
| "learning_rate": 5.701754385964913e-06, | |
| "loss": 0.7564, | |
| "step": 165 | |
| }, | |
| { | |
| "epoch": 55.33, | |
| "learning_rate": 5.672514619883041e-06, | |
| "loss": 0.7265, | |
| "step": 166 | |
| }, | |
| { | |
| "epoch": 55.67, | |
| "learning_rate": 5.64327485380117e-06, | |
| "loss": 0.7082, | |
| "step": 167 | |
| }, | |
| { | |
| "epoch": 56.0, | |
| "learning_rate": 5.6140350877192985e-06, | |
| "loss": 0.9715, | |
| "step": 168 | |
| }, | |
| { | |
| "epoch": 56.33, | |
| "learning_rate": 5.584795321637428e-06, | |
| "loss": 0.6921, | |
| "step": 169 | |
| }, | |
| { | |
| "epoch": 56.67, | |
| "learning_rate": 5.555555555555557e-06, | |
| "loss": 0.7468, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 56.67, | |
| "eval_accuracy_dropoff": 0.4195384474004641, | |
| "eval_accuracy_undropoff": 0.9781964166541602, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.3004878891594813, | |
| "eval_iou_undropoff": 0.9526777819357527, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.732612669467926, | |
| "eval_mean_accuracy": 0.6988674320273122, | |
| "eval_mean_iou": 0.41772189036507806, | |
| "eval_overall_accuracy": 0.9534601847330729, | |
| "eval_runtime": 1.8858, | |
| "eval_samples_per_second": 7.954, | |
| "eval_steps_per_second": 0.53, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 57.0, | |
| "learning_rate": 5.526315789473685e-06, | |
| "loss": 0.9868, | |
| "step": 171 | |
| }, | |
| { | |
| "epoch": 57.33, | |
| "learning_rate": 5.497076023391813e-06, | |
| "loss": 0.7203, | |
| "step": 172 | |
| }, | |
| { | |
| "epoch": 57.67, | |
| "learning_rate": 5.467836257309942e-06, | |
| "loss": 0.6981, | |
| "step": 173 | |
| }, | |
| { | |
| "epoch": 58.0, | |
| "learning_rate": 5.438596491228071e-06, | |
| "loss": 0.9637, | |
| "step": 174 | |
| }, | |
| { | |
| "epoch": 58.33, | |
| "learning_rate": 5.4093567251461994e-06, | |
| "loss": 0.6928, | |
| "step": 175 | |
| }, | |
| { | |
| "epoch": 58.67, | |
| "learning_rate": 5.380116959064328e-06, | |
| "loss": 0.7607, | |
| "step": 176 | |
| }, | |
| { | |
| "epoch": 59.0, | |
| "learning_rate": 5.350877192982457e-06, | |
| "loss": 0.6925, | |
| "step": 177 | |
| }, | |
| { | |
| "epoch": 59.33, | |
| "learning_rate": 5.321637426900586e-06, | |
| "loss": 0.7149, | |
| "step": 178 | |
| }, | |
| { | |
| "epoch": 59.67, | |
| "learning_rate": 5.292397660818714e-06, | |
| "loss": 0.7041, | |
| "step": 179 | |
| }, | |
| { | |
| "epoch": 60.0, | |
| "learning_rate": 5.263157894736842e-06, | |
| "loss": 0.6506, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 60.0, | |
| "eval_accuracy_dropoff": 0.4196188572610104, | |
| "eval_accuracy_undropoff": 0.978872032638186, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2986815478015576, | |
| "eval_iou_undropoff": 0.9532789523356896, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.718389630317688, | |
| "eval_mean_accuracy": 0.6992454449495982, | |
| "eval_mean_iou": 0.41732016671241573, | |
| "eval_overall_accuracy": 0.9541094462076823, | |
| "eval_runtime": 1.9089, | |
| "eval_samples_per_second": 7.858, | |
| "eval_steps_per_second": 0.524, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 60.33, | |
| "learning_rate": 5.233918128654971e-06, | |
| "loss": 0.723, | |
| "step": 181 | |
| }, | |
| { | |
| "epoch": 60.67, | |
| "learning_rate": 5.2046783625731e-06, | |
| "loss": 0.6845, | |
| "step": 182 | |
| }, | |
| { | |
| "epoch": 61.0, | |
| "learning_rate": 5.175438596491229e-06, | |
| "loss": 0.8873, | |
| "step": 183 | |
| }, | |
| { | |
| "epoch": 61.33, | |
| "learning_rate": 5.146198830409357e-06, | |
| "loss": 0.6904, | |
| "step": 184 | |
| }, | |
| { | |
| "epoch": 61.67, | |
| "learning_rate": 5.116959064327486e-06, | |
| "loss": 0.783, | |
| "step": 185 | |
| }, | |
| { | |
| "epoch": 62.0, | |
| "learning_rate": 5.087719298245615e-06, | |
| "loss": 0.6318, | |
| "step": 186 | |
| }, | |
| { | |
| "epoch": 62.33, | |
| "learning_rate": 5.058479532163744e-06, | |
| "loss": 0.6781, | |
| "step": 187 | |
| }, | |
| { | |
| "epoch": 62.67, | |
| "learning_rate": 5.029239766081871e-06, | |
| "loss": 0.7246, | |
| "step": 188 | |
| }, | |
| { | |
| "epoch": 63.0, | |
| "learning_rate": 5e-06, | |
| "loss": 0.9476, | |
| "step": 189 | |
| }, | |
| { | |
| "epoch": 63.33, | |
| "learning_rate": 4.970760233918129e-06, | |
| "loss": 0.6761, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 63.33, | |
| "eval_accuracy_dropoff": 0.3963574333172514, | |
| "eval_accuracy_undropoff": 0.9804997376300275, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.28863068873357844, | |
| "eval_iou_undropoff": 0.9538536290387296, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.7037181854248047, | |
| "eval_mean_accuracy": 0.6884285854736394, | |
| "eval_mean_iou": 0.41416143925743604, | |
| "eval_overall_accuracy": 0.9546351114908854, | |
| "eval_runtime": 1.7466, | |
| "eval_samples_per_second": 8.588, | |
| "eval_steps_per_second": 0.573, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 63.67, | |
| "learning_rate": 4.941520467836258e-06, | |
| "loss": 0.753, | |
| "step": 191 | |
| }, | |
| { | |
| "epoch": 64.0, | |
| "learning_rate": 4.912280701754386e-06, | |
| "loss": 0.9325, | |
| "step": 192 | |
| }, | |
| { | |
| "epoch": 64.33, | |
| "learning_rate": 4.883040935672515e-06, | |
| "loss": 0.7045, | |
| "step": 193 | |
| }, | |
| { | |
| "epoch": 64.67, | |
| "learning_rate": 4.853801169590643e-06, | |
| "loss": 0.6972, | |
| "step": 194 | |
| }, | |
| { | |
| "epoch": 65.0, | |
| "learning_rate": 4.824561403508772e-06, | |
| "loss": 0.6756, | |
| "step": 195 | |
| }, | |
| { | |
| "epoch": 65.33, | |
| "learning_rate": 4.7953216374269005e-06, | |
| "loss": 0.6869, | |
| "step": 196 | |
| }, | |
| { | |
| "epoch": 65.67, | |
| "learning_rate": 4.7660818713450295e-06, | |
| "loss": 0.6688, | |
| "step": 197 | |
| }, | |
| { | |
| "epoch": 66.0, | |
| "learning_rate": 4.736842105263158e-06, | |
| "loss": 0.6584, | |
| "step": 198 | |
| }, | |
| { | |
| "epoch": 66.33, | |
| "learning_rate": 4.707602339181287e-06, | |
| "loss": 0.6366, | |
| "step": 199 | |
| }, | |
| { | |
| "epoch": 66.67, | |
| "learning_rate": 4.678362573099415e-06, | |
| "loss": 0.7245, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 66.67, | |
| "eval_accuracy_dropoff": 0.38236037402072276, | |
| "eval_accuracy_undropoff": 0.9818006775850893, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.28203335832877907, | |
| "eval_iou_undropoff": 0.9544903881152086, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6959893107414246, | |
| "eval_mean_accuracy": 0.682080525802906, | |
| "eval_mean_iou": 0.41217458214799585, | |
| "eval_overall_accuracy": 0.9552586873372396, | |
| "eval_runtime": 1.7647, | |
| "eval_samples_per_second": 8.5, | |
| "eval_steps_per_second": 0.567, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 67.0, | |
| "learning_rate": 4.649122807017544e-06, | |
| "loss": 0.95, | |
| "step": 201 | |
| }, | |
| { | |
| "epoch": 67.33, | |
| "learning_rate": 4.619883040935672e-06, | |
| "loss": 0.6638, | |
| "step": 202 | |
| }, | |
| { | |
| "epoch": 67.67, | |
| "learning_rate": 4.590643274853801e-06, | |
| "loss": 0.665, | |
| "step": 203 | |
| }, | |
| { | |
| "epoch": 68.0, | |
| "learning_rate": 4.56140350877193e-06, | |
| "loss": 0.6479, | |
| "step": 204 | |
| }, | |
| { | |
| "epoch": 68.33, | |
| "learning_rate": 4.532163742690059e-06, | |
| "loss": 0.6732, | |
| "step": 205 | |
| }, | |
| { | |
| "epoch": 68.67, | |
| "learning_rate": 4.502923976608187e-06, | |
| "loss": 0.6869, | |
| "step": 206 | |
| }, | |
| { | |
| "epoch": 69.0, | |
| "learning_rate": 4.473684210526316e-06, | |
| "loss": 0.9015, | |
| "step": 207 | |
| }, | |
| { | |
| "epoch": 69.33, | |
| "learning_rate": 4.444444444444444e-06, | |
| "loss": 0.6845, | |
| "step": 208 | |
| }, | |
| { | |
| "epoch": 69.67, | |
| "learning_rate": 4.415204678362573e-06, | |
| "loss": 0.7129, | |
| "step": 209 | |
| }, | |
| { | |
| "epoch": 70.0, | |
| "learning_rate": 4.385964912280702e-06, | |
| "loss": 0.6514, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 70.0, | |
| "eval_accuracy_dropoff": 0.3559227605853838, | |
| "eval_accuracy_undropoff": 0.9851614613102746, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.27459156227705966, | |
| "eval_iou_undropoff": 0.956616600218594, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6755360960960388, | |
| "eval_mean_accuracy": 0.6705421109478292, | |
| "eval_mean_iou": 0.41040272083188456, | |
| "eval_overall_accuracy": 0.9573000590006511, | |
| "eval_runtime": 1.6967, | |
| "eval_samples_per_second": 8.84, | |
| "eval_steps_per_second": 0.589, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 70.33, | |
| "learning_rate": 4.3567251461988305e-06, | |
| "loss": 0.6626, | |
| "step": 211 | |
| }, | |
| { | |
| "epoch": 70.67, | |
| "learning_rate": 4.3274853801169596e-06, | |
| "loss": 0.703, | |
| "step": 212 | |
| }, | |
| { | |
| "epoch": 71.0, | |
| "learning_rate": 4.298245614035088e-06, | |
| "loss": 0.9492, | |
| "step": 213 | |
| }, | |
| { | |
| "epoch": 71.33, | |
| "learning_rate": 4.269005847953217e-06, | |
| "loss": 0.6613, | |
| "step": 214 | |
| }, | |
| { | |
| "epoch": 71.67, | |
| "learning_rate": 4.239766081871345e-06, | |
| "loss": 0.655, | |
| "step": 215 | |
| }, | |
| { | |
| "epoch": 72.0, | |
| "learning_rate": 4.210526315789474e-06, | |
| "loss": 0.8745, | |
| "step": 216 | |
| }, | |
| { | |
| "epoch": 72.33, | |
| "learning_rate": 4.181286549707602e-06, | |
| "loss": 0.6602, | |
| "step": 217 | |
| }, | |
| { | |
| "epoch": 72.67, | |
| "learning_rate": 4.152046783625731e-06, | |
| "loss": 0.6767, | |
| "step": 218 | |
| }, | |
| { | |
| "epoch": 73.0, | |
| "learning_rate": 4.12280701754386e-06, | |
| "loss": 0.9219, | |
| "step": 219 | |
| }, | |
| { | |
| "epoch": 73.33, | |
| "learning_rate": 4.093567251461989e-06, | |
| "loss": 0.6433, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 73.33, | |
| "eval_accuracy_dropoff": 0.4099639304339835, | |
| "eval_accuracy_undropoff": 0.9808879706826835, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.29912706760930513, | |
| "eval_iou_undropoff": 0.9548156149062209, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6803807616233826, | |
| "eval_mean_accuracy": 0.6954259505583336, | |
| "eval_mean_iou": 0.417980894171842, | |
| "eval_overall_accuracy": 0.955608622233073, | |
| "eval_runtime": 1.7817, | |
| "eval_samples_per_second": 8.419, | |
| "eval_steps_per_second": 0.561, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 73.67, | |
| "learning_rate": 4.064327485380118e-06, | |
| "loss": 0.6502, | |
| "step": 221 | |
| }, | |
| { | |
| "epoch": 74.0, | |
| "learning_rate": 4.035087719298246e-06, | |
| "loss": 0.6356, | |
| "step": 222 | |
| }, | |
| { | |
| "epoch": 74.33, | |
| "learning_rate": 4.005847953216375e-06, | |
| "loss": 0.6709, | |
| "step": 223 | |
| }, | |
| { | |
| "epoch": 74.67, | |
| "learning_rate": 3.976608187134503e-06, | |
| "loss": 0.641, | |
| "step": 224 | |
| }, | |
| { | |
| "epoch": 75.0, | |
| "learning_rate": 3.947368421052632e-06, | |
| "loss": 0.6381, | |
| "step": 225 | |
| }, | |
| { | |
| "epoch": 75.33, | |
| "learning_rate": 3.9181286549707605e-06, | |
| "loss": 0.6744, | |
| "step": 226 | |
| }, | |
| { | |
| "epoch": 75.67, | |
| "learning_rate": 3.88888888888889e-06, | |
| "loss": 0.6525, | |
| "step": 227 | |
| }, | |
| { | |
| "epoch": 76.0, | |
| "learning_rate": 3.859649122807018e-06, | |
| "loss": 0.6357, | |
| "step": 228 | |
| }, | |
| { | |
| "epoch": 76.33, | |
| "learning_rate": 3.830409356725147e-06, | |
| "loss": 0.6491, | |
| "step": 229 | |
| }, | |
| { | |
| "epoch": 76.67, | |
| "learning_rate": 3.801169590643275e-06, | |
| "loss": 0.6686, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 76.67, | |
| "eval_accuracy_dropoff": 0.35306821053598914, | |
| "eval_accuracy_undropoff": 0.9858205793852772, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2748764728240213, | |
| "eval_iou_undropoff": 0.9571334605114129, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6608495712280273, | |
| "eval_mean_accuracy": 0.6694443949606332, | |
| "eval_mean_iou": 0.4106699777784781, | |
| "eval_overall_accuracy": 0.9578035990397136, | |
| "eval_runtime": 1.7393, | |
| "eval_samples_per_second": 8.624, | |
| "eval_steps_per_second": 0.575, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 77.0, | |
| "learning_rate": 3.7719298245614037e-06, | |
| "loss": 0.6871, | |
| "step": 231 | |
| }, | |
| { | |
| "epoch": 77.33, | |
| "learning_rate": 3.7426900584795324e-06, | |
| "loss": 0.6416, | |
| "step": 232 | |
| }, | |
| { | |
| "epoch": 77.67, | |
| "learning_rate": 3.713450292397661e-06, | |
| "loss": 0.6469, | |
| "step": 233 | |
| }, | |
| { | |
| "epoch": 78.0, | |
| "learning_rate": 3.6842105263157896e-06, | |
| "loss": 0.8602, | |
| "step": 234 | |
| }, | |
| { | |
| "epoch": 78.33, | |
| "learning_rate": 3.6549707602339187e-06, | |
| "loss": 0.6188, | |
| "step": 235 | |
| }, | |
| { | |
| "epoch": 78.67, | |
| "learning_rate": 3.625730994152047e-06, | |
| "loss": 0.7108, | |
| "step": 236 | |
| }, | |
| { | |
| "epoch": 79.0, | |
| "learning_rate": 3.596491228070176e-06, | |
| "loss": 0.6065, | |
| "step": 237 | |
| }, | |
| { | |
| "epoch": 79.33, | |
| "learning_rate": 3.567251461988304e-06, | |
| "loss": 0.6272, | |
| "step": 238 | |
| }, | |
| { | |
| "epoch": 79.67, | |
| "learning_rate": 3.5380116959064333e-06, | |
| "loss": 0.6602, | |
| "step": 239 | |
| }, | |
| { | |
| "epoch": 80.0, | |
| "learning_rate": 3.5087719298245615e-06, | |
| "loss": 0.9091, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 80.0, | |
| "eval_accuracy_dropoff": 0.40314057941048087, | |
| "eval_accuracy_undropoff": 0.9813366073699885, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.29295880462456697, | |
| "eval_iou_undropoff": 0.9549312682031544, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.670059323310852, | |
| "eval_mean_accuracy": 0.6922385933902346, | |
| "eval_mean_iou": 0.41596335760924047, | |
| "eval_overall_accuracy": 0.9557352701822917, | |
| "eval_runtime": 1.9268, | |
| "eval_samples_per_second": 7.785, | |
| "eval_steps_per_second": 0.519, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 80.33, | |
| "learning_rate": 3.4795321637426905e-06, | |
| "loss": 0.6373, | |
| "step": 241 | |
| }, | |
| { | |
| "epoch": 80.67, | |
| "learning_rate": 3.4502923976608188e-06, | |
| "loss": 0.6484, | |
| "step": 242 | |
| }, | |
| { | |
| "epoch": 81.0, | |
| "learning_rate": 3.421052631578948e-06, | |
| "loss": 0.6679, | |
| "step": 243 | |
| }, | |
| { | |
| "epoch": 81.33, | |
| "learning_rate": 3.391812865497076e-06, | |
| "loss": 0.6349, | |
| "step": 244 | |
| }, | |
| { | |
| "epoch": 81.67, | |
| "learning_rate": 3.362573099415205e-06, | |
| "loss": 0.6319, | |
| "step": 245 | |
| }, | |
| { | |
| "epoch": 82.0, | |
| "learning_rate": 3.3333333333333333e-06, | |
| "loss": 0.9136, | |
| "step": 246 | |
| }, | |
| { | |
| "epoch": 82.33, | |
| "learning_rate": 3.3040935672514624e-06, | |
| "loss": 0.637, | |
| "step": 247 | |
| }, | |
| { | |
| "epoch": 82.67, | |
| "learning_rate": 3.2748538011695906e-06, | |
| "loss": 0.6478, | |
| "step": 248 | |
| }, | |
| { | |
| "epoch": 83.0, | |
| "learning_rate": 3.2456140350877197e-06, | |
| "loss": 0.7076, | |
| "step": 249 | |
| }, | |
| { | |
| "epoch": 83.33, | |
| "learning_rate": 3.216374269005848e-06, | |
| "loss": 0.6346, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 83.33, | |
| "eval_accuracy_dropoff": 0.39871803708043285, | |
| "eval_accuracy_undropoff": 0.9820861978493113, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.29438330894981235, | |
| "eval_iou_undropoff": 0.9554704345649212, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6724778413772583, | |
| "eval_mean_accuracy": 0.690402117464872, | |
| "eval_mean_iou": 0.4166179145049112, | |
| "eval_overall_accuracy": 0.956255849202474, | |
| "eval_runtime": 2.1195, | |
| "eval_samples_per_second": 7.077, | |
| "eval_steps_per_second": 0.472, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 83.67, | |
| "learning_rate": 3.187134502923977e-06, | |
| "loss": 0.6372, | |
| "step": 251 | |
| }, | |
| { | |
| "epoch": 84.0, | |
| "learning_rate": 3.157894736842105e-06, | |
| "loss": 0.5675, | |
| "step": 252 | |
| }, | |
| { | |
| "epoch": 84.33, | |
| "learning_rate": 3.1286549707602342e-06, | |
| "loss": 0.6351, | |
| "step": 253 | |
| }, | |
| { | |
| "epoch": 84.67, | |
| "learning_rate": 3.0994152046783624e-06, | |
| "loss": 0.6653, | |
| "step": 254 | |
| }, | |
| { | |
| "epoch": 85.0, | |
| "learning_rate": 3.0701754385964915e-06, | |
| "loss": 0.6255, | |
| "step": 255 | |
| }, | |
| { | |
| "epoch": 85.33, | |
| "learning_rate": 3.04093567251462e-06, | |
| "loss": 0.697, | |
| "step": 256 | |
| }, | |
| { | |
| "epoch": 85.67, | |
| "learning_rate": 3.011695906432749e-06, | |
| "loss": 0.606, | |
| "step": 257 | |
| }, | |
| { | |
| "epoch": 86.0, | |
| "learning_rate": 2.9824561403508774e-06, | |
| "loss": 0.5992, | |
| "step": 258 | |
| }, | |
| { | |
| "epoch": 86.33, | |
| "learning_rate": 2.953216374269006e-06, | |
| "loss": 0.6058, | |
| "step": 259 | |
| }, | |
| { | |
| "epoch": 86.67, | |
| "learning_rate": 2.9239766081871347e-06, | |
| "loss": 0.6303, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 86.67, | |
| "eval_accuracy_dropoff": 0.3481287476738576, | |
| "eval_accuracy_undropoff": 0.9858072746199361, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.27019966744381985, | |
| "eval_iou_undropoff": 0.9569079345006184, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6459957957267761, | |
| "eval_mean_accuracy": 0.6669680111468969, | |
| "eval_mean_iou": 0.40903586731481273, | |
| "eval_overall_accuracy": 0.9575721740722656, | |
| "eval_runtime": 1.8655, | |
| "eval_samples_per_second": 8.041, | |
| "eval_steps_per_second": 0.536, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 87.0, | |
| "learning_rate": 2.8947368421052634e-06, | |
| "loss": 0.8867, | |
| "step": 261 | |
| }, | |
| { | |
| "epoch": 87.33, | |
| "learning_rate": 2.865497076023392e-06, | |
| "loss": 0.6459, | |
| "step": 262 | |
| }, | |
| { | |
| "epoch": 87.67, | |
| "learning_rate": 2.8362573099415206e-06, | |
| "loss": 0.6791, | |
| "step": 263 | |
| }, | |
| { | |
| "epoch": 88.0, | |
| "learning_rate": 2.8070175438596493e-06, | |
| "loss": 0.5866, | |
| "step": 264 | |
| }, | |
| { | |
| "epoch": 88.33, | |
| "learning_rate": 2.7777777777777783e-06, | |
| "loss": 0.6311, | |
| "step": 265 | |
| }, | |
| { | |
| "epoch": 88.67, | |
| "learning_rate": 2.7485380116959066e-06, | |
| "loss": 0.6489, | |
| "step": 266 | |
| }, | |
| { | |
| "epoch": 89.0, | |
| "learning_rate": 2.7192982456140356e-06, | |
| "loss": 0.5736, | |
| "step": 267 | |
| }, | |
| { | |
| "epoch": 89.33, | |
| "learning_rate": 2.690058479532164e-06, | |
| "loss": 0.6387, | |
| "step": 268 | |
| }, | |
| { | |
| "epoch": 89.67, | |
| "learning_rate": 2.660818713450293e-06, | |
| "loss": 0.6558, | |
| "step": 269 | |
| }, | |
| { | |
| "epoch": 90.0, | |
| "learning_rate": 2.631578947368421e-06, | |
| "loss": 0.8923, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 90.0, | |
| "eval_accuracy_dropoff": 0.37603671284490087, | |
| "eval_accuracy_undropoff": 0.9837325295126305, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.2832072394745152, | |
| "eval_iou_undropoff": 0.95609396049178, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6549919843673706, | |
| "eval_mean_accuracy": 0.6798846211787657, | |
| "eval_mean_iou": 0.4131003999887651, | |
| "eval_overall_accuracy": 0.9568250020345052, | |
| "eval_runtime": 1.8036, | |
| "eval_samples_per_second": 8.317, | |
| "eval_steps_per_second": 0.554, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 90.33, | |
| "learning_rate": 2.60233918128655e-06, | |
| "loss": 0.6328, | |
| "step": 271 | |
| }, | |
| { | |
| "epoch": 90.67, | |
| "learning_rate": 2.5730994152046784e-06, | |
| "loss": 0.6585, | |
| "step": 272 | |
| }, | |
| { | |
| "epoch": 91.0, | |
| "learning_rate": 2.5438596491228075e-06, | |
| "loss": 0.9006, | |
| "step": 273 | |
| }, | |
| { | |
| "epoch": 91.33, | |
| "learning_rate": 2.5146198830409357e-06, | |
| "loss": 0.6464, | |
| "step": 274 | |
| }, | |
| { | |
| "epoch": 91.67, | |
| "learning_rate": 2.4853801169590643e-06, | |
| "loss": 0.6188, | |
| "step": 275 | |
| }, | |
| { | |
| "epoch": 92.0, | |
| "learning_rate": 2.456140350877193e-06, | |
| "loss": 0.6249, | |
| "step": 276 | |
| }, | |
| { | |
| "epoch": 92.33, | |
| "learning_rate": 2.4269005847953216e-06, | |
| "loss": 0.6195, | |
| "step": 277 | |
| }, | |
| { | |
| "epoch": 92.67, | |
| "learning_rate": 2.3976608187134502e-06, | |
| "loss": 0.6036, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 93.0, | |
| "learning_rate": 2.368421052631579e-06, | |
| "loss": 0.7175, | |
| "step": 279 | |
| }, | |
| { | |
| "epoch": 93.33, | |
| "learning_rate": 2.3391812865497075e-06, | |
| "loss": 0.6334, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 93.33, | |
| "eval_accuracy_dropoff": 0.3565660394697544, | |
| "eval_accuracy_undropoff": 0.9850611433796019, | |
| "eval_accuracy_unlabeled": null, | |
| "eval_iou_dropoff": 0.27340870154979013, | |
| "eval_iou_undropoff": 0.9565466234668595, | |
| "eval_iou_unlabeled": 0.0, | |
| "eval_loss": 0.6467730402946472, | |
| "eval_mean_accuracy": 0.6708135914246782, | |
| "eval_mean_iou": 0.4099851083388832, | |
| "eval_overall_accuracy": 0.957232666015625, | |
| "eval_runtime": 1.8074, | |
| "eval_samples_per_second": 8.299, | |
| "eval_steps_per_second": 0.553, | |
| "step": 280 | |
| } | |
| ], | |
| "max_steps": 360, | |
| "num_train_epochs": 120, | |
| "total_flos": 5.570769653858304e+16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |