{
  "best_metric": 0.14974823594093323,
  "best_model_checkpoint": "scenarioanalysisv1/checkpoint-155",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 155,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03225806451612903,
      "grad_norm": 1.6419936418533325,
      "learning_rate": 6.25e-07,
      "loss": 1.0981,
      "step": 1
    },
    {
      "epoch": 0.06451612903225806,
      "grad_norm": 1.4918025732040405,
      "learning_rate": 1.25e-06,
      "loss": 1.1177,
      "step": 2
    },
    {
      "epoch": 0.0967741935483871,
      "grad_norm": 4.263033390045166,
      "learning_rate": 1.8750000000000003e-06,
      "loss": 1.0258,
      "step": 3
    },
    {
      "epoch": 0.12903225806451613,
      "grad_norm": 3.408543825149536,
      "learning_rate": 2.5e-06,
      "loss": 1.0784,
      "step": 4
    },
    {
      "epoch": 0.16129032258064516,
      "grad_norm": 3.7692720890045166,
      "learning_rate": 3.125e-06,
      "loss": 1.0965,
      "step": 5
    },
    {
      "epoch": 0.1935483870967742,
      "grad_norm": 1.494915246963501,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 1.0732,
      "step": 6
    },
    {
      "epoch": 0.22580645161290322,
      "grad_norm": 2.0530953407287598,
      "learning_rate": 4.3750000000000005e-06,
      "loss": 1.0941,
      "step": 7
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 2.534554958343506,
      "learning_rate": 5e-06,
      "loss": 1.1186,
      "step": 8
    },
    {
      "epoch": 0.2903225806451613,
      "grad_norm": 2.1732537746429443,
      "learning_rate": 5.625e-06,
      "loss": 1.0666,
      "step": 9
    },
    {
      "epoch": 0.3225806451612903,
      "grad_norm": 3.5926756858825684,
      "learning_rate": 6.25e-06,
      "loss": 1.105,
      "step": 10
    },
    {
      "epoch": 0.3548387096774194,
      "grad_norm": 3.346235513687134,
      "learning_rate": 6.875e-06,
      "loss": 1.1246,
      "step": 11
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": 3.055208921432495,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.1027,
      "step": 12
    },
    {
      "epoch": 0.41935483870967744,
      "grad_norm": 5.725679397583008,
      "learning_rate": 8.125000000000001e-06,
      "loss": 1.0906,
      "step": 13
    },
    {
      "epoch": 0.45161290322580644,
      "grad_norm": 4.051356315612793,
      "learning_rate": 8.750000000000001e-06,
      "loss": 1.1059,
      "step": 14
    },
    {
      "epoch": 0.4838709677419355,
      "grad_norm": 1.5993609428405762,
      "learning_rate": 9.375000000000001e-06,
      "loss": 1.1071,
      "step": 15
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 2.622431516647339,
      "learning_rate": 1e-05,
      "loss": 1.0781,
      "step": 16
    },
    {
      "epoch": 0.5483870967741935,
      "grad_norm": 1.9900400638580322,
      "learning_rate": 9.928057553956835e-06,
      "loss": 1.0589,
      "step": 17
    },
    {
      "epoch": 0.5806451612903226,
      "grad_norm": 4.026608943939209,
      "learning_rate": 9.85611510791367e-06,
      "loss": 1.0339,
      "step": 18
    },
    {
      "epoch": 0.6129032258064516,
      "grad_norm": 2.8869481086730957,
      "learning_rate": 9.784172661870505e-06,
      "loss": 1.1168,
      "step": 19
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 2.108684778213501,
      "learning_rate": 9.712230215827338e-06,
      "loss": 1.0924,
      "step": 20
    },
    {
      "epoch": 0.6774193548387096,
      "grad_norm": 2.0955443382263184,
      "learning_rate": 9.640287769784174e-06,
      "loss": 1.0522,
      "step": 21
    },
    {
      "epoch": 0.7096774193548387,
      "grad_norm": 2.345808267593384,
      "learning_rate": 9.568345323741008e-06,
      "loss": 1.1096,
      "step": 22
    },
    {
      "epoch": 0.7419354838709677,
      "grad_norm": 1.8832900524139404,
      "learning_rate": 9.496402877697842e-06,
      "loss": 1.1013,
      "step": 23
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 3.978363037109375,
      "learning_rate": 9.424460431654678e-06,
      "loss": 1.0922,
      "step": 24
    },
    {
      "epoch": 0.8064516129032258,
      "grad_norm": 4.316551685333252,
      "learning_rate": 9.35251798561151e-06,
      "loss": 1.1014,
      "step": 25
    },
    {
      "epoch": 0.8387096774193549,
      "grad_norm": 3.7009897232055664,
      "learning_rate": 9.280575539568346e-06,
      "loss": 1.0436,
      "step": 26
    },
    {
      "epoch": 0.8709677419354839,
      "grad_norm": 2.790764570236206,
      "learning_rate": 9.20863309352518e-06,
      "loss": 1.0744,
      "step": 27
    },
    {
      "epoch": 0.9032258064516129,
      "grad_norm": 3.6903886795043945,
      "learning_rate": 9.136690647482015e-06,
      "loss": 1.0738,
      "step": 28
    },
    {
      "epoch": 0.9354838709677419,
      "grad_norm": 3.147907018661499,
      "learning_rate": 9.064748201438849e-06,
      "loss": 1.0317,
      "step": 29
    },
    {
      "epoch": 0.967741935483871,
      "grad_norm": 1.630570650100708,
      "learning_rate": 8.992805755395683e-06,
      "loss": 1.0416,
      "step": 30
    },
    {
      "epoch": 1.0,
      "grad_norm": 3.8663077354431152,
      "learning_rate": 8.92086330935252e-06,
      "loss": 1.0477,
      "step": 31
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6612903225806451,
      "eval_f1_macro": 0.6276198679299455,
      "eval_f1_micro": 0.6612903225806451,
      "eval_f1_weighted": 0.6257953377233197,
      "eval_loss": 1.045354962348938,
      "eval_precision_macro": 0.7566844919786097,
      "eval_precision_micro": 0.6612903225806451,
      "eval_precision_weighted": 0.7594014145247541,
      "eval_recall_macro": 0.6666666666666666,
      "eval_recall_micro": 0.6612903225806451,
      "eval_recall_weighted": 0.6612903225806451,
      "eval_runtime": 0.3659,
      "eval_samples_per_second": 169.466,
      "eval_steps_per_second": 10.933,
      "step": 31
    },
    {
      "epoch": 1.032258064516129,
      "grad_norm": 3.992098808288574,
      "learning_rate": 8.848920863309353e-06,
      "loss": 1.0215,
      "step": 32
    },
    {
      "epoch": 1.064516129032258,
      "grad_norm": 3.5149612426757812,
      "learning_rate": 8.776978417266188e-06,
      "loss": 0.9723,
      "step": 33
    },
    {
      "epoch": 1.096774193548387,
      "grad_norm": 5.044914722442627,
      "learning_rate": 8.705035971223022e-06,
      "loss": 1.0323,
      "step": 34
    },
    {
      "epoch": 1.129032258064516,
      "grad_norm": 4.974006652832031,
      "learning_rate": 8.633093525179856e-06,
      "loss": 0.9716,
      "step": 35
    },
    {
      "epoch": 1.1612903225806452,
      "grad_norm": 2.0443384647369385,
      "learning_rate": 8.561151079136692e-06,
      "loss": 1.0068,
      "step": 36
    },
    {
      "epoch": 1.1935483870967742,
      "grad_norm": 4.729581832885742,
      "learning_rate": 8.489208633093526e-06,
      "loss": 0.9642,
      "step": 37
    },
    {
      "epoch": 1.2258064516129032,
      "grad_norm": 7.420672416687012,
      "learning_rate": 8.41726618705036e-06,
      "loss": 0.9551,
      "step": 38
    },
    {
      "epoch": 1.2580645161290323,
      "grad_norm": 3.8499510288238525,
      "learning_rate": 8.345323741007195e-06,
      "loss": 1.0062,
      "step": 39
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 2.718811273574829,
      "learning_rate": 8.273381294964029e-06,
      "loss": 1.0076,
      "step": 40
    },
    {
      "epoch": 1.3225806451612903,
      "grad_norm": 3.33786940574646,
      "learning_rate": 8.201438848920865e-06,
      "loss": 0.9966,
      "step": 41
    },
    {
      "epoch": 1.3548387096774195,
      "grad_norm": 3.4368371963500977,
      "learning_rate": 8.129496402877699e-06,
      "loss": 0.9804,
      "step": 42
    },
    {
      "epoch": 1.3870967741935485,
      "grad_norm": 3.017920970916748,
      "learning_rate": 8.057553956834533e-06,
      "loss": 0.9786,
      "step": 43
    },
    {
      "epoch": 1.4193548387096775,
      "grad_norm": 6.471282482147217,
      "learning_rate": 7.985611510791367e-06,
      "loss": 0.8658,
      "step": 44
    },
    {
      "epoch": 1.4516129032258065,
      "grad_norm": 3.747462511062622,
      "learning_rate": 7.913669064748202e-06,
      "loss": 0.9323,
      "step": 45
    },
    {
      "epoch": 1.4838709677419355,
      "grad_norm": 4.153210639953613,
      "learning_rate": 7.841726618705036e-06,
      "loss": 0.8928,
      "step": 46
    },
    {
      "epoch": 1.5161290322580645,
      "grad_norm": 3.5706467628479004,
      "learning_rate": 7.769784172661872e-06,
      "loss": 1.0055,
      "step": 47
    },
    {
      "epoch": 1.5483870967741935,
      "grad_norm": 7.582662582397461,
      "learning_rate": 7.697841726618706e-06,
      "loss": 0.7635,
      "step": 48
    },
    {
      "epoch": 1.5806451612903225,
      "grad_norm": 5.1474609375,
      "learning_rate": 7.62589928057554e-06,
      "loss": 0.7824,
      "step": 49
    },
    {
      "epoch": 1.6129032258064515,
      "grad_norm": 7.146518707275391,
      "learning_rate": 7.5539568345323745e-06,
      "loss": 0.8768,
      "step": 50
    },
    {
      "epoch": 1.6451612903225805,
      "grad_norm": 5.432666301727295,
      "learning_rate": 7.48201438848921e-06,
      "loss": 0.9762,
      "step": 51
    },
    {
      "epoch": 1.6774193548387095,
      "grad_norm": 6.38230037689209,
      "learning_rate": 7.410071942446043e-06,
      "loss": 0.9135,
      "step": 52
    },
    {
      "epoch": 1.7096774193548387,
      "grad_norm": 4.441874027252197,
      "learning_rate": 7.338129496402878e-06,
      "loss": 0.8176,
      "step": 53
    },
    {
      "epoch": 1.7419354838709677,
      "grad_norm": 5.8212175369262695,
      "learning_rate": 7.266187050359713e-06,
      "loss": 0.841,
      "step": 54
    },
    {
      "epoch": 1.7741935483870968,
      "grad_norm": 5.359001636505127,
      "learning_rate": 7.194244604316547e-06,
      "loss": 0.8037,
      "step": 55
    },
    {
      "epoch": 1.8064516129032258,
      "grad_norm": 4.61914587020874,
      "learning_rate": 7.122302158273382e-06,
      "loss": 0.752,
      "step": 56
    },
    {
      "epoch": 1.838709677419355,
      "grad_norm": 4.204216003417969,
      "learning_rate": 7.050359712230216e-06,
      "loss": 0.6685,
      "step": 57
    },
    {
      "epoch": 1.870967741935484,
      "grad_norm": 3.6568996906280518,
      "learning_rate": 6.978417266187051e-06,
      "loss": 0.6714,
      "step": 58
    },
    {
      "epoch": 1.903225806451613,
      "grad_norm": 4.489734172821045,
      "learning_rate": 6.906474820143886e-06,
      "loss": 0.7707,
      "step": 59
    },
    {
      "epoch": 1.935483870967742,
      "grad_norm": 7.898622512817383,
      "learning_rate": 6.834532374100719e-06,
      "loss": 0.7188,
      "step": 60
    },
    {
      "epoch": 1.967741935483871,
      "grad_norm": 4.874721050262451,
      "learning_rate": 6.762589928057554e-06,
      "loss": 0.5568,
      "step": 61
    },
    {
      "epoch": 2.0,
      "grad_norm": 11.679137229919434,
      "learning_rate": 6.6906474820143886e-06,
      "loss": 0.9695,
      "step": 62
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8709677419354839,
      "eval_f1_macro": 0.8696476964769649,
      "eval_f1_micro": 0.8709677419354839,
      "eval_f1_weighted": 0.8679386309992132,
      "eval_loss": 0.636049211025238,
      "eval_precision_macro": 0.8977072310405644,
      "eval_precision_micro": 0.8709677419354839,
      "eval_precision_weighted": 0.8968253968253967,
      "eval_recall_macro": 0.873015873015873,
      "eval_recall_micro": 0.8709677419354839,
      "eval_recall_weighted": 0.8709677419354839,
      "eval_runtime": 0.3635,
      "eval_samples_per_second": 170.563,
      "eval_steps_per_second": 11.004,
      "step": 62
    },
    {
      "epoch": 2.032258064516129,
      "grad_norm": 5.676018714904785,
      "learning_rate": 6.618705035971224e-06,
      "loss": 0.6626,
      "step": 63
    },
    {
      "epoch": 2.064516129032258,
      "grad_norm": 5.493109226226807,
      "learning_rate": 6.546762589928059e-06,
      "loss": 0.7337,
      "step": 64
    },
    {
      "epoch": 2.096774193548387,
      "grad_norm": 3.7354605197906494,
      "learning_rate": 6.474820143884892e-06,
      "loss": 0.619,
      "step": 65
    },
    {
      "epoch": 2.129032258064516,
      "grad_norm": 4.089387893676758,
      "learning_rate": 6.402877697841727e-06,
      "loss": 0.5191,
      "step": 66
    },
    {
      "epoch": 2.161290322580645,
      "grad_norm": 5.231932163238525,
      "learning_rate": 6.330935251798561e-06,
      "loss": 0.481,
      "step": 67
    },
    {
      "epoch": 2.193548387096774,
      "grad_norm": 4.4416656494140625,
      "learning_rate": 6.2589928057553964e-06,
      "loss": 0.4988,
      "step": 68
    },
    {
      "epoch": 2.225806451612903,
      "grad_norm": 5.860562324523926,
      "learning_rate": 6.1870503597122315e-06,
      "loss": 0.5378,
      "step": 69
    },
    {
      "epoch": 2.258064516129032,
      "grad_norm": 4.37066125869751,
      "learning_rate": 6.115107913669065e-06,
      "loss": 0.5621,
      "step": 70
    },
    {
      "epoch": 2.2903225806451615,
      "grad_norm": 5.553720951080322,
      "learning_rate": 6.0431654676259e-06,
      "loss": 0.5557,
      "step": 71
    },
    {
      "epoch": 2.3225806451612905,
      "grad_norm": 4.640178203582764,
      "learning_rate": 5.971223021582734e-06,
      "loss": 0.3728,
      "step": 72
    },
    {
      "epoch": 2.3548387096774195,
      "grad_norm": 3.7993886470794678,
      "learning_rate": 5.899280575539568e-06,
      "loss": 0.5291,
      "step": 73
    },
    {
      "epoch": 2.3870967741935485,
      "grad_norm": 3.220982313156128,
      "learning_rate": 5.8273381294964035e-06,
      "loss": 0.463,
      "step": 74
    },
    {
      "epoch": 2.4193548387096775,
      "grad_norm": 3.250502347946167,
      "learning_rate": 5.755395683453238e-06,
      "loss": 0.4777,
      "step": 75
    },
    {
      "epoch": 2.4516129032258065,
      "grad_norm": 5.749074935913086,
      "learning_rate": 5.683453237410073e-06,
      "loss": 0.5531,
      "step": 76
    },
    {
      "epoch": 2.4838709677419355,
      "grad_norm": 3.6344985961914062,
      "learning_rate": 5.611510791366906e-06,
      "loss": 0.532,
      "step": 77
    },
    {
      "epoch": 2.5161290322580645,
      "grad_norm": 3.7246253490448,
      "learning_rate": 5.539568345323741e-06,
      "loss": 0.4914,
      "step": 78
    },
    {
      "epoch": 2.5483870967741935,
      "grad_norm": 4.004693031311035,
      "learning_rate": 5.467625899280576e-06,
      "loss": 0.4084,
      "step": 79
    },
    {
      "epoch": 2.5806451612903225,
      "grad_norm": 3.7413482666015625,
      "learning_rate": 5.3956834532374105e-06,
      "loss": 0.4734,
      "step": 80
    },
    {
      "epoch": 2.6129032258064515,
      "grad_norm": 4.712541103363037,
      "learning_rate": 5.3237410071942456e-06,
      "loss": 0.4475,
      "step": 81
    },
    {
      "epoch": 2.6451612903225805,
      "grad_norm": 3.9814302921295166,
      "learning_rate": 5.251798561151079e-06,
      "loss": 0.4309,
      "step": 82
    },
    {
      "epoch": 2.6774193548387095,
      "grad_norm": 3.632049083709717,
      "learning_rate": 5.179856115107914e-06,
      "loss": 0.4897,
      "step": 83
    },
    {
      "epoch": 2.709677419354839,
      "grad_norm": 4.6201090812683105,
      "learning_rate": 5.107913669064749e-06,
      "loss": 0.4236,
      "step": 84
    },
    {
      "epoch": 2.741935483870968,
      "grad_norm": 5.9254536628723145,
      "learning_rate": 5.035971223021583e-06,
      "loss": 0.5915,
      "step": 85
    },
    {
      "epoch": 2.774193548387097,
      "grad_norm": 4.538214206695557,
      "learning_rate": 4.9640287769784175e-06,
      "loss": 0.3737,
      "step": 86
    },
    {
      "epoch": 2.806451612903226,
      "grad_norm": 2.919090747833252,
      "learning_rate": 4.892086330935253e-06,
      "loss": 0.3687,
      "step": 87
    },
    {
      "epoch": 2.838709677419355,
      "grad_norm": 3.4993410110473633,
      "learning_rate": 4.820143884892087e-06,
      "loss": 0.3704,
      "step": 88
    },
    {
      "epoch": 2.870967741935484,
      "grad_norm": 3.9015276432037354,
      "learning_rate": 4.748201438848921e-06,
      "loss": 0.2553,
      "step": 89
    },
    {
      "epoch": 2.903225806451613,
      "grad_norm": 5.3451642990112305,
      "learning_rate": 4.676258992805755e-06,
      "loss": 0.3705,
      "step": 90
    },
    {
      "epoch": 2.935483870967742,
      "grad_norm": 5.762584686279297,
      "learning_rate": 4.60431654676259e-06,
      "loss": 0.4058,
      "step": 91
    },
    {
      "epoch": 2.967741935483871,
      "grad_norm": 7.210673809051514,
      "learning_rate": 4.5323741007194245e-06,
      "loss": 0.5305,
      "step": 92
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.536133766174316,
      "learning_rate": 4.46043165467626e-06,
      "loss": 0.3045,
      "step": 93
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9516129032258065,
      "eval_f1_macro": 0.9519474380790319,
      "eval_f1_micro": 0.9516129032258065,
      "eval_f1_weighted": 0.9515657877884105,
      "eval_loss": 0.31354543566703796,
      "eval_precision_macro": 0.9538239538239538,
      "eval_precision_micro": 0.9516129032258065,
      "eval_precision_weighted": 0.9538472280407764,
      "eval_recall_macro": 0.9523809523809524,
      "eval_recall_micro": 0.9516129032258065,
      "eval_recall_weighted": 0.9516129032258065,
      "eval_runtime": 0.3639,
      "eval_samples_per_second": 170.373,
      "eval_steps_per_second": 10.992,
      "step": 93
    },
    {
      "epoch": 3.032258064516129,
      "grad_norm": 4.34368896484375,
      "learning_rate": 4.388489208633094e-06,
      "loss": 0.3556,
      "step": 94
    },
    {
      "epoch": 3.064516129032258,
      "grad_norm": 3.32374906539917,
      "learning_rate": 4.316546762589928e-06,
      "loss": 0.3519,
      "step": 95
    },
    {
      "epoch": 3.096774193548387,
      "grad_norm": 3.226163148880005,
      "learning_rate": 4.244604316546763e-06,
      "loss": 0.3765,
      "step": 96
    },
    {
      "epoch": 3.129032258064516,
      "grad_norm": 4.01658296585083,
      "learning_rate": 4.172661870503597e-06,
      "loss": 0.2085,
      "step": 97
    },
    {
      "epoch": 3.161290322580645,
      "grad_norm": 2.88631272315979,
      "learning_rate": 4.100719424460432e-06,
      "loss": 0.2401,
      "step": 98
    },
    {
      "epoch": 3.193548387096774,
      "grad_norm": 6.475894451141357,
      "learning_rate": 4.028776978417267e-06,
      "loss": 0.4706,
      "step": 99
    },
    {
      "epoch": 3.225806451612903,
      "grad_norm": 2.6866559982299805,
      "learning_rate": 3.956834532374101e-06,
      "loss": 0.2144,
      "step": 100
    },
    {
      "epoch": 3.258064516129032,
      "grad_norm": 4.116889476776123,
      "learning_rate": 3.884892086330936e-06,
      "loss": 0.4079,
      "step": 101
    },
    {
      "epoch": 3.2903225806451615,
      "grad_norm": 2.9576964378356934,
      "learning_rate": 3.81294964028777e-06,
      "loss": 0.26,
      "step": 102
    },
    {
      "epoch": 3.3225806451612905,
      "grad_norm": 2.7903542518615723,
      "learning_rate": 3.741007194244605e-06,
      "loss": 0.1992,
      "step": 103
    },
    {
      "epoch": 3.3548387096774195,
      "grad_norm": 4.459733963012695,
      "learning_rate": 3.669064748201439e-06,
      "loss": 0.2324,
      "step": 104
    },
    {
      "epoch": 3.3870967741935485,
      "grad_norm": 2.987039566040039,
      "learning_rate": 3.5971223021582737e-06,
      "loss": 0.2646,
      "step": 105
    },
    {
      "epoch": 3.4193548387096775,
      "grad_norm": 4.127433776855469,
      "learning_rate": 3.525179856115108e-06,
      "loss": 0.2636,
      "step": 106
    },
    {
      "epoch": 3.4516129032258065,
      "grad_norm": 4.202077388763428,
      "learning_rate": 3.453237410071943e-06,
      "loss": 0.3163,
      "step": 107
    },
    {
      "epoch": 3.4838709677419355,
      "grad_norm": 6.350815773010254,
      "learning_rate": 3.381294964028777e-06,
      "loss": 0.3308,
      "step": 108
    },
    {
      "epoch": 3.5161290322580645,
      "grad_norm": 2.5210700035095215,
      "learning_rate": 3.309352517985612e-06,
      "loss": 0.1864,
      "step": 109
    },
    {
      "epoch": 3.5483870967741935,
      "grad_norm": 3.4547629356384277,
      "learning_rate": 3.237410071942446e-06,
      "loss": 0.2425,
      "step": 110
    },
    {
      "epoch": 3.5806451612903225,
      "grad_norm": 7.385854721069336,
      "learning_rate": 3.1654676258992807e-06,
      "loss": 0.375,
      "step": 111
    },
    {
      "epoch": 3.6129032258064515,
      "grad_norm": 2.776376247406006,
      "learning_rate": 3.0935251798561158e-06,
      "loss": 0.2256,
      "step": 112
    },
    {
      "epoch": 3.6451612903225805,
      "grad_norm": 4.016394138336182,
      "learning_rate": 3.02158273381295e-06,
      "loss": 0.2812,
      "step": 113
    },
    {
      "epoch": 3.6774193548387095,
      "grad_norm": 1.8014113903045654,
      "learning_rate": 2.949640287769784e-06,
      "loss": 0.1343,
      "step": 114
    },
    {
      "epoch": 3.709677419354839,
      "grad_norm": 3.2501912117004395,
      "learning_rate": 2.877697841726619e-06,
      "loss": 0.1907,
      "step": 115
    },
    {
      "epoch": 3.741935483870968,
      "grad_norm": 4.083921432495117,
      "learning_rate": 2.805755395683453e-06,
      "loss": 0.223,
      "step": 116
    },
    {
      "epoch": 3.774193548387097,
      "grad_norm": 3.769907236099243,
      "learning_rate": 2.733812949640288e-06,
      "loss": 0.2081,
      "step": 117
    },
    {
      "epoch": 3.806451612903226,
      "grad_norm": 2.7997310161590576,
      "learning_rate": 2.6618705035971228e-06,
      "loss": 0.2655,
      "step": 118
    },
    {
      "epoch": 3.838709677419355,
      "grad_norm": 2.2651586532592773,
      "learning_rate": 2.589928057553957e-06,
      "loss": 0.165,
      "step": 119
    },
    {
      "epoch": 3.870967741935484,
      "grad_norm": 4.948031902313232,
      "learning_rate": 2.5179856115107916e-06,
      "loss": 0.2495,
      "step": 120
    },
    {
      "epoch": 3.903225806451613,
      "grad_norm": 2.7708613872528076,
      "learning_rate": 2.4460431654676263e-06,
      "loss": 0.1332,
      "step": 121
    },
    {
      "epoch": 3.935483870967742,
      "grad_norm": 6.138238430023193,
      "learning_rate": 2.3741007194244605e-06,
      "loss": 0.3341,
      "step": 122
    },
    {
      "epoch": 3.967741935483871,
      "grad_norm": 4.07533073425293,
      "learning_rate": 2.302158273381295e-06,
      "loss": 0.22,
      "step": 123
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.9955432415008545,
      "learning_rate": 2.23021582733813e-06,
      "loss": 0.1138,
      "step": 124
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.967741935483871,
      "eval_f1_macro": 0.9678668215253582,
      "eval_f1_micro": 0.967741935483871,
      "eval_f1_weighted": 0.967741935483871,
      "eval_loss": 0.18146049976348877,
      "eval_precision_macro": 0.9682539682539683,
      "eval_precision_micro": 0.967741935483871,
      "eval_precision_weighted": 0.9685099846390169,
      "eval_recall_macro": 0.9682539682539683,
      "eval_recall_micro": 0.967741935483871,
      "eval_recall_weighted": 0.967741935483871,
      "eval_runtime": 0.3668,
      "eval_samples_per_second": 169.051,
      "eval_steps_per_second": 10.907,
      "step": 124
    },
    {
      "epoch": 4.032258064516129,
      "grad_norm": 2.669346332550049,
      "learning_rate": 2.158273381294964e-06,
      "loss": 0.1608,
      "step": 125
    },
    {
      "epoch": 4.064516129032258,
      "grad_norm": 4.302420139312744,
      "learning_rate": 2.0863309352517987e-06,
      "loss": 0.2336,
      "step": 126
    },
    {
      "epoch": 4.096774193548387,
      "grad_norm": 6.073089599609375,
      "learning_rate": 2.0143884892086333e-06,
      "loss": 0.2958,
      "step": 127
    },
    {
      "epoch": 4.129032258064516,
      "grad_norm": 3.470445156097412,
      "learning_rate": 1.942446043165468e-06,
      "loss": 0.1744,
      "step": 128
    },
    {
      "epoch": 4.161290322580645,
      "grad_norm": Infinity,
      "learning_rate": 1.942446043165468e-06,
      "loss": 0.2689,
      "step": 129
    },
    {
      "epoch": 4.193548387096774,
      "grad_norm": 3.8664259910583496,
      "learning_rate": 1.8705035971223024e-06,
      "loss": 0.2828,
      "step": 130
    },
    {
      "epoch": 4.225806451612903,
      "grad_norm": 2.115190029144287,
      "learning_rate": 1.7985611510791368e-06,
      "loss": 0.1365,
      "step": 131
    },
    {
      "epoch": 4.258064516129032,
      "grad_norm": 5.977684020996094,
      "learning_rate": 1.7266187050359715e-06,
      "loss": 0.2181,
      "step": 132
    },
    {
      "epoch": 4.290322580645161,
      "grad_norm": 2.0196449756622314,
      "learning_rate": 1.654676258992806e-06,
      "loss": 0.1417,
      "step": 133
    },
    {
      "epoch": 4.32258064516129,
      "grad_norm": 2.432018280029297,
      "learning_rate": 1.5827338129496403e-06,
      "loss": 0.1573,
      "step": 134
    },
    {
      "epoch": 4.354838709677419,
      "grad_norm": 1.9823634624481201,
      "learning_rate": 1.510791366906475e-06,
      "loss": 0.1538,
      "step": 135
    },
    {
      "epoch": 4.387096774193548,
      "grad_norm": 3.8992857933044434,
      "learning_rate": 1.4388489208633094e-06,
      "loss": 0.1599,
      "step": 136
    },
    {
      "epoch": 4.419354838709677,
      "grad_norm": 5.235621452331543,
      "learning_rate": 1.366906474820144e-06,
      "loss": 0.2014,
      "step": 137
    },
    {
      "epoch": 4.451612903225806,
      "grad_norm": 2.0223617553710938,
      "learning_rate": 1.2949640287769785e-06,
      "loss": 0.1222,
      "step": 138
    },
    {
      "epoch": 4.483870967741936,
      "grad_norm": 2.558138847351074,
      "learning_rate": 1.2230215827338131e-06,
      "loss": 0.1284,
      "step": 139
    },
    {
      "epoch": 4.516129032258064,
      "grad_norm": 1.374127745628357,
      "learning_rate": 1.1510791366906476e-06,
      "loss": 0.0995,
      "step": 140
    },
    {
      "epoch": 4.548387096774194,
      "grad_norm": 1.9377779960632324,
      "learning_rate": 1.079136690647482e-06,
      "loss": 0.1242,
      "step": 141
    },
    {
      "epoch": 4.580645161290323,
      "grad_norm": 4.1599297523498535,
      "learning_rate": 1.0071942446043167e-06,
      "loss": 0.1644,
      "step": 142
    },
    {
      "epoch": 4.612903225806452,
      "grad_norm": 5.7351861000061035,
      "learning_rate": 9.352517985611512e-07,
      "loss": 0.2411,
      "step": 143
    },
    {
      "epoch": 4.645161290322581,
      "grad_norm": 1.9384703636169434,
      "learning_rate": 8.633093525179857e-07,
      "loss": 0.1034,
      "step": 144
    },
    {
      "epoch": 4.67741935483871,
      "grad_norm": 2.739354133605957,
      "learning_rate": 7.913669064748202e-07,
      "loss": 0.2015,
      "step": 145
    },
    {
      "epoch": 4.709677419354839,
      "grad_norm": 4.692154407501221,
      "learning_rate": 7.194244604316547e-07,
      "loss": 0.1552,
      "step": 146
    },
    {
      "epoch": 4.741935483870968,
      "grad_norm": 8.152336120605469,
      "learning_rate": 6.474820143884893e-07,
      "loss": 0.4293,
      "step": 147
    },
    {
      "epoch": 4.774193548387097,
      "grad_norm": 1.1844388246536255,
      "learning_rate": 5.755395683453238e-07,
      "loss": 0.0752,
      "step": 148
    },
    {
      "epoch": 4.806451612903226,
      "grad_norm": 4.59033727645874,
      "learning_rate": 5.035971223021583e-07,
      "loss": 0.1651,
      "step": 149
    },
    {
      "epoch": 4.838709677419355,
      "grad_norm": 1.9293760061264038,
      "learning_rate": 4.3165467625899287e-07,
      "loss": 0.1155,
      "step": 150
    },
    {
      "epoch": 4.870967741935484,
      "grad_norm": 5.44809627532959,
      "learning_rate": 3.5971223021582736e-07,
      "loss": 0.176,
      "step": 151
    },
    {
      "epoch": 4.903225806451613,
      "grad_norm": 3.984349012374878,
      "learning_rate": 2.877697841726619e-07,
      "loss": 0.1681,
      "step": 152
    },
    {
      "epoch": 4.935483870967742,
      "grad_norm": 4.985234260559082,
      "learning_rate": 2.1582733812949643e-07,
      "loss": 0.1754,
      "step": 153
    },
    {
      "epoch": 4.967741935483871,
      "grad_norm": 2.230881929397583,
      "learning_rate": 1.4388489208633095e-07,
      "loss": 0.1141,
      "step": 154
    },
    {
      "epoch": 5.0,
      "grad_norm": 3.2103071212768555,
      "learning_rate": 7.194244604316547e-08,
      "loss": 0.1563,
      "step": 155
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9838709677419355,
      "eval_f1_macro": 0.983739837398374,
      "eval_f1_micro": 0.9838709677419355,
      "eval_f1_weighted": 0.9838709677419355,
      "eval_loss": 0.14974823594093323,
      "eval_precision_macro": 0.9841269841269842,
      "eval_precision_micro": 0.9838709677419355,
      "eval_precision_weighted": 0.9846390168970814,
      "eval_recall_macro": 0.9841269841269842,
      "eval_recall_micro": 0.9838709677419355,
      "eval_recall_weighted": 0.9838709677419355,
      "eval_runtime": 0.3655,
      "eval_samples_per_second": 169.61,
      "eval_steps_per_second": 10.943,
      "step": 155
    }
  ],
  "logging_steps": 1,
  "max_steps": 155,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 161613108449280.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}