{
  "best_metric": 0.1307469755411148,
  "best_model_checkpoint": "saves/psy-course-info-chain/Llama-3.1-8B-Instruct/train/fold5/checkpoint-80",
  "epoch": 4.938271604938271,
  "eval_steps": 10,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03950617283950617,
      "grad_norm": 1.80983304977417,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.552,
      "step": 1
    },
    {
      "epoch": 0.07901234567901234,
      "grad_norm": 1.6933711767196655,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.5028,
      "step": 2
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 1.7453396320343018,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.5433,
      "step": 3
    },
    {
      "epoch": 0.1580246913580247,
      "grad_norm": 1.8712102174758911,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.5509,
      "step": 4
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 1.6804018020629883,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.5003,
      "step": 5
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 1.663559079170227,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.4642,
      "step": 6
    },
    {
      "epoch": 0.2765432098765432,
      "grad_norm": 1.7948654890060425,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.4165,
      "step": 7
    },
    {
      "epoch": 0.3160493827160494,
      "grad_norm": 2.221102237701416,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.3761,
      "step": 8
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 1.2934051752090454,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.2801,
      "step": 9
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 0.7657822966575623,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.1837,
      "step": 10
    },
    {
      "epoch": 0.3950617283950617,
      "eval_loss": 0.2033713459968567,
      "eval_runtime": 23.9679,
      "eval_samples_per_second": 1.878,
      "eval_steps_per_second": 1.878,
      "step": 10
    },
    {
      "epoch": 0.4345679012345679,
      "grad_norm": 0.6020725965499878,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.1474,
      "step": 11
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 0.656200647354126,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.1354,
      "step": 12
    },
    {
      "epoch": 0.5135802469135803,
      "grad_norm": 0.7624897360801697,
      "learning_rate": 0.0001,
      "loss": 0.1797,
      "step": 13
    },
    {
      "epoch": 0.5530864197530864,
      "grad_norm": 0.7797865867614746,
      "learning_rate": 9.998033131915266e-05,
      "loss": 0.2137,
      "step": 14
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.7429288029670715,
      "learning_rate": 9.992134075089084e-05,
      "loss": 0.2024,
      "step": 15
    },
    {
      "epoch": 0.6320987654320988,
      "grad_norm": 0.48697131872177124,
      "learning_rate": 9.982307470588098e-05,
      "loss": 0.1274,
      "step": 16
    },
    {
      "epoch": 0.671604938271605,
      "grad_norm": 0.4295575022697449,
      "learning_rate": 9.968561049466214e-05,
      "loss": 0.1411,
      "step": 17
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.501865565776825,
      "learning_rate": 9.950905626682228e-05,
      "loss": 0.1287,
      "step": 18
    },
    {
      "epoch": 0.7506172839506173,
      "grad_norm": 0.3981885612010956,
      "learning_rate": 9.92935509259118e-05,
      "loss": 0.1184,
      "step": 19
    },
    {
      "epoch": 0.7901234567901234,
      "grad_norm": 0.4694223701953888,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.1173,
      "step": 20
    },
    {
      "epoch": 0.7901234567901234,
      "eval_loss": 0.16731415688991547,
      "eval_runtime": 23.795,
      "eval_samples_per_second": 1.891,
      "eval_steps_per_second": 1.891,
      "step": 20
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 0.4646396040916443,
      "learning_rate": 9.874639560909117e-05,
      "loss": 0.089,
      "step": 21
    },
    {
      "epoch": 0.8691358024691358,
      "grad_norm": 0.3763000965118408,
      "learning_rate": 9.841517610611309e-05,
      "loss": 0.0742,
      "step": 22
    },
    {
      "epoch": 0.908641975308642,
      "grad_norm": 0.5053507685661316,
      "learning_rate": 9.804586609725499e-05,
      "loss": 0.1193,
      "step": 23
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 0.44667503237724304,
      "learning_rate": 9.763875613614482e-05,
      "loss": 0.1416,
      "step": 24
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.40994566679000854,
      "learning_rate": 9.719416651541839e-05,
      "loss": 0.0992,
      "step": 25
    },
    {
      "epoch": 1.0271604938271606,
      "grad_norm": 0.6846351623535156,
      "learning_rate": 9.671244701472999e-05,
      "loss": 0.1712,
      "step": 26
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.4171813130378723,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.1169,
      "step": 27
    },
    {
      "epoch": 1.106172839506173,
      "grad_norm": 0.43199366331100464,
      "learning_rate": 9.563916325306594e-05,
      "loss": 0.0848,
      "step": 28
    },
    {
      "epoch": 1.145679012345679,
      "grad_norm": 0.3436686098575592,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.0762,
      "step": 29
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.39385366439819336,
      "learning_rate": 9.442228179894362e-05,
      "loss": 0.0777,
      "step": 30
    },
    {
      "epoch": 1.1851851851851851,
      "eval_loss": 0.14833307266235352,
      "eval_runtime": 23.7935,
      "eval_samples_per_second": 1.891,
      "eval_steps_per_second": 1.891,
      "step": 30
    },
    {
      "epoch": 1.2246913580246914,
      "grad_norm": 0.3341183066368103,
      "learning_rate": 9.376117109543769e-05,
      "loss": 0.0644,
      "step": 31
    },
    {
      "epoch": 1.2641975308641975,
      "grad_norm": 0.3897767961025238,
      "learning_rate": 9.306563141162046e-05,
      "loss": 0.0733,
      "step": 32
    },
    {
      "epoch": 1.3037037037037038,
      "grad_norm": 0.38037943840026855,
      "learning_rate": 9.233620996141421e-05,
      "loss": 0.0849,
      "step": 33
    },
    {
      "epoch": 1.34320987654321,
      "grad_norm": 0.5712338089942932,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.1381,
      "step": 34
    },
    {
      "epoch": 1.382716049382716,
      "grad_norm": 0.5225220322608948,
      "learning_rate": 9.077804344796302e-05,
      "loss": 0.1083,
      "step": 35
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 0.36653491854667664,
      "learning_rate": 8.995052426791247e-05,
      "loss": 0.1029,
      "step": 36
    },
    {
      "epoch": 1.4617283950617284,
      "grad_norm": 0.4130406379699707,
      "learning_rate": 8.90915741234015e-05,
      "loss": 0.0706,
      "step": 37
    },
    {
      "epoch": 1.5012345679012347,
      "grad_norm": 0.47387003898620605,
      "learning_rate": 8.820186879108038e-05,
      "loss": 0.0885,
      "step": 38
    },
    {
      "epoch": 1.5407407407407407,
      "grad_norm": 0.49038437008857727,
      "learning_rate": 8.728210824415827e-05,
      "loss": 0.0868,
      "step": 39
    },
    {
      "epoch": 1.5802469135802468,
      "grad_norm": 0.6217578649520874,
      "learning_rate": 8.633301610170135e-05,
      "loss": 0.1135,
      "step": 40
    },
    {
      "epoch": 1.5802469135802468,
      "eval_loss": 0.14447812736034393,
      "eval_runtime": 23.7733,
      "eval_samples_per_second": 1.893,
      "eval_steps_per_second": 1.893,
      "step": 40
    },
    {
      "epoch": 1.6197530864197531,
      "grad_norm": 0.38582590222358704,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0531,
      "step": 41
    },
    {
      "epoch": 1.6592592592592592,
      "grad_norm": 0.4498811960220337,
      "learning_rate": 8.434984630174509e-05,
      "loss": 0.0876,
      "step": 42
    },
    {
      "epoch": 1.6987654320987655,
      "grad_norm": 0.29603007435798645,
      "learning_rate": 8.33173288976002e-05,
      "loss": 0.0774,
      "step": 43
    },
    {
      "epoch": 1.7382716049382716,
      "grad_norm": 0.4084310531616211,
      "learning_rate": 8.225859917710439e-05,
      "loss": 0.1151,
      "step": 44
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.4662802517414093,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.1025,
      "step": 45
    },
    {
      "epoch": 1.817283950617284,
      "grad_norm": 0.3423199951648712,
      "learning_rate": 8.006585456492029e-05,
      "loss": 0.0866,
      "step": 46
    },
    {
      "epoch": 1.8567901234567903,
      "grad_norm": 0.3950996696949005,
      "learning_rate": 7.89335648089903e-05,
      "loss": 0.0746,
      "step": 47
    },
    {
      "epoch": 1.8962962962962964,
      "grad_norm": 0.43036192655563354,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.1036,
      "step": 48
    },
    {
      "epoch": 1.9358024691358025,
      "grad_norm": 0.30657947063446045,
      "learning_rate": 7.660160382576683e-05,
      "loss": 0.1087,
      "step": 49
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.4479210674762726,
      "learning_rate": 7.540376726232648e-05,
      "loss": 0.0978,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.13088345527648926,
      "eval_runtime": 23.7838,
      "eval_samples_per_second": 1.892,
      "eval_steps_per_second": 1.892,
      "step": 50
    },
    {
      "epoch": 2.0148148148148146,
      "grad_norm": 0.9994279146194458,
      "learning_rate": 7.4185944355262e-05,
      "loss": 0.1531,
      "step": 51
    },
    {
      "epoch": 2.054320987654321,
      "grad_norm": 0.3115217387676239,
      "learning_rate": 7.294909322337689e-05,
      "loss": 0.0644,
      "step": 52
    },
    {
      "epoch": 2.093827160493827,
      "grad_norm": 0.27081796526908875,
      "learning_rate": 7.169418695587791e-05,
      "loss": 0.0416,
      "step": 53
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.38470354676246643,
      "learning_rate": 7.042221284679982e-05,
      "loss": 0.0784,
      "step": 54
    },
    {
      "epoch": 2.1728395061728394,
      "grad_norm": 0.3997117877006531,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.06,
      "step": 55
    },
    {
      "epoch": 2.212345679012346,
      "grad_norm": 0.4341140687465668,
      "learning_rate": 6.783107663311565e-05,
      "loss": 0.0762,
      "step": 56
    },
    {
      "epoch": 2.251851851851852,
      "grad_norm": 0.4046367108821869,
      "learning_rate": 6.651395309775837e-05,
      "loss": 0.0744,
      "step": 57
    },
    {
      "epoch": 2.291358024691358,
      "grad_norm": 0.38747474551200867,
      "learning_rate": 6.518383725548074e-05,
      "loss": 0.0747,
      "step": 58
    },
    {
      "epoch": 2.330864197530864,
      "grad_norm": 0.40078696608543396,
      "learning_rate": 6.384177557124247e-05,
      "loss": 0.102,
      "step": 59
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 0.4537595510482788,
      "learning_rate": 6.248882390836135e-05,
      "loss": 0.1132,
      "step": 60
    },
    {
      "epoch": 2.3703703703703702,
      "eval_loss": 0.13448503613471985,
      "eval_runtime": 23.7988,
      "eval_samples_per_second": 1.891,
      "eval_steps_per_second": 1.891,
      "step": 60
    },
    {
      "epoch": 2.4098765432098768,
      "grad_norm": 0.24842111766338348,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.0495,
      "step": 61
    },
    {
      "epoch": 2.449382716049383,
      "grad_norm": 0.4049798548221588,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.0555,
      "step": 62
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 0.43116194009780884,
      "learning_rate": 5.837531116523682e-05,
      "loss": 0.0655,
      "step": 63
    },
    {
      "epoch": 2.528395061728395,
      "grad_norm": 0.42819809913635254,
      "learning_rate": 5.698951697677498e-05,
      "loss": 0.06,
      "step": 64
    },
    {
      "epoch": 2.567901234567901,
      "grad_norm": 0.3851662874221802,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.0451,
      "step": 65
    },
    {
      "epoch": 2.6074074074074076,
      "grad_norm": 0.3556596338748932,
      "learning_rate": 5.420252624646238e-05,
      "loss": 0.0602,
      "step": 66
    },
    {
      "epoch": 2.6469135802469137,
      "grad_norm": 0.49053123593330383,
      "learning_rate": 5.2803522361859594e-05,
      "loss": 0.0606,
      "step": 67
    },
    {
      "epoch": 2.68641975308642,
      "grad_norm": 0.4977152645587921,
      "learning_rate": 5.140231281379345e-05,
      "loss": 0.055,
      "step": 68
    },
    {
      "epoch": 2.725925925925926,
      "grad_norm": 0.3776383697986603,
      "learning_rate": 5e-05,
      "loss": 0.0633,
      "step": 69
    },
    {
      "epoch": 2.765432098765432,
      "grad_norm": 0.39874809980392456,
      "learning_rate": 4.859768718620656e-05,
      "loss": 0.0546,
      "step": 70
    },
    {
      "epoch": 2.765432098765432,
      "eval_loss": 0.13161805272102356,
      "eval_runtime": 23.802,
      "eval_samples_per_second": 1.891,
      "eval_steps_per_second": 1.891,
      "step": 70
    },
    {
      "epoch": 2.8049382716049385,
      "grad_norm": 0.3818924129009247,
      "learning_rate": 4.7196477638140404e-05,
      "loss": 0.0379,
      "step": 71
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 0.6765838861465454,
      "learning_rate": 4.579747375353763e-05,
      "loss": 0.0942,
      "step": 72
    },
    {
      "epoch": 2.8839506172839506,
      "grad_norm": 0.28867602348327637,
      "learning_rate": 4.4401776194834613e-05,
      "loss": 0.0347,
      "step": 73
    },
    {
      "epoch": 2.9234567901234567,
      "grad_norm": 0.5019446015357971,
      "learning_rate": 4.3010483023225045e-05,
      "loss": 0.0724,
      "step": 74
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.5975403785705566,
      "learning_rate": 4.162468883476319e-05,
      "loss": 0.0772,
      "step": 75
    },
    {
      "epoch": 3.0024691358024693,
      "grad_norm": 1.112894058227539,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.1196,
      "step": 76
    },
    {
      "epoch": 3.0419753086419754,
      "grad_norm": 0.4566960036754608,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.0626,
      "step": 77
    },
    {
      "epoch": 3.0814814814814815,
      "grad_norm": 0.4427141845226288,
      "learning_rate": 3.7511176091638653e-05,
      "loss": 0.0628,
      "step": 78
    },
    {
      "epoch": 3.1209876543209876,
      "grad_norm": 0.3613295257091522,
      "learning_rate": 3.6158224428757535e-05,
      "loss": 0.0573,
      "step": 79
    },
    {
      "epoch": 3.1604938271604937,
      "grad_norm": 0.3130965530872345,
      "learning_rate": 3.4816162744519263e-05,
      "loss": 0.0399,
      "step": 80
    },
    {
      "epoch": 3.1604938271604937,
      "eval_loss": 0.1307469755411148,
      "eval_runtime": 23.8161,
      "eval_samples_per_second": 1.889,
      "eval_steps_per_second": 1.889,
      "step": 80
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.4269215166568756,
      "learning_rate": 3.3486046902241664e-05,
      "loss": 0.0565,
      "step": 81
    },
    {
      "epoch": 3.2395061728395063,
      "grad_norm": 0.4015073776245117,
      "learning_rate": 3.216892336688435e-05,
      "loss": 0.0637,
      "step": 82
    },
    {
      "epoch": 3.2790123456790123,
      "grad_norm": 0.5721244812011719,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0474,
      "step": 83
    },
    {
      "epoch": 3.3185185185185184,
      "grad_norm": 0.36225229501724243,
      "learning_rate": 2.9577787153200197e-05,
      "loss": 0.0558,
      "step": 84
    },
    {
      "epoch": 3.3580246913580245,
      "grad_norm": 0.43945959210395813,
      "learning_rate": 2.8305813044122097e-05,
      "loss": 0.0664,
      "step": 85
    },
    {
      "epoch": 3.397530864197531,
      "grad_norm": 0.36850857734680176,
      "learning_rate": 2.705090677662311e-05,
      "loss": 0.06,
      "step": 86
    },
    {
      "epoch": 3.437037037037037,
      "grad_norm": 0.3282819092273712,
      "learning_rate": 2.581405564473801e-05,
      "loss": 0.0592,
      "step": 87
    },
    {
      "epoch": 3.476543209876543,
      "grad_norm": 0.35570910573005676,
      "learning_rate": 2.459623273767354e-05,
      "loss": 0.0332,
      "step": 88
    },
    {
      "epoch": 3.5160493827160493,
      "grad_norm": 0.39235472679138184,
      "learning_rate": 2.3398396174233178e-05,
      "loss": 0.0676,
      "step": 89
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.3432936370372772,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.0471,
      "step": 90
    },
    {
      "epoch": 3.5555555555555554,
      "eval_loss": 0.1328047811985016,
      "eval_runtime": 23.825,
      "eval_samples_per_second": 1.889,
      "eval_steps_per_second": 1.889,
      "step": 90
    },
    {
      "epoch": 3.595061728395062,
      "grad_norm": 0.3519253134727478,
      "learning_rate": 2.1066435191009715e-05,
      "loss": 0.0446,
      "step": 91
    },
    {
      "epoch": 3.634567901234568,
      "grad_norm": 0.31493380665779114,
      "learning_rate": 1.9934145435079702e-05,
      "loss": 0.0403,
      "step": 92
    },
    {
      "epoch": 3.674074074074074,
      "grad_norm": 0.3447498083114624,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.0363,
      "step": 93
    },
    {
      "epoch": 3.71358024691358,
      "grad_norm": 0.34639570116996765,
      "learning_rate": 1.774140082289563e-05,
      "loss": 0.0327,
      "step": 94
    },
    {
      "epoch": 3.753086419753086,
      "grad_norm": 0.42725279927253723,
      "learning_rate": 1.6682671102399805e-05,
      "loss": 0.0461,
      "step": 95
    },
    {
      "epoch": 3.7925925925925927,
      "grad_norm": 0.3287409842014313,
      "learning_rate": 1.5650153698254916e-05,
      "loss": 0.0382,
      "step": 96
    },
    {
      "epoch": 3.832098765432099,
      "grad_norm": 0.40669727325439453,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0429,
      "step": 97
    },
    {
      "epoch": 3.871604938271605,
      "grad_norm": 0.35624250769615173,
      "learning_rate": 1.3666983898298657e-05,
      "loss": 0.0601,
      "step": 98
    },
    {
      "epoch": 3.911111111111111,
      "grad_norm": 0.3901943862438202,
      "learning_rate": 1.2717891755841722e-05,
      "loss": 0.0443,
      "step": 99
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.3899290859699249,
      "learning_rate": 1.1798131208919627e-05,
      "loss": 0.0338,
      "step": 100
    },
    {
      "epoch": 3.950617283950617,
      "eval_loss": 0.13736537098884583,
      "eval_runtime": 23.8304,
      "eval_samples_per_second": 1.888,
      "eval_steps_per_second": 1.888,
      "step": 100
    },
    {
      "epoch": 3.9901234567901236,
      "grad_norm": 0.8404723405838013,
      "learning_rate": 1.090842587659851e-05,
      "loss": 0.0661,
      "step": 101
    },
    {
      "epoch": 4.029629629629629,
      "grad_norm": 1.0385932922363281,
      "learning_rate": 1.004947573208756e-05,
      "loss": 0.105,
      "step": 102
    },
    {
      "epoch": 4.069135802469136,
      "grad_norm": 0.34659644961357117,
      "learning_rate": 9.221956552036992e-06,
      "loss": 0.0396,
      "step": 103
    },
    {
      "epoch": 4.108641975308642,
      "grad_norm": 0.3384858965873718,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0307,
      "step": 104
    },
    {
      "epoch": 4.148148148148148,
      "grad_norm": 0.47433656454086304,
      "learning_rate": 7.663790038585793e-06,
      "loss": 0.0547,
      "step": 105
    },
    {
      "epoch": 4.187654320987654,
      "grad_norm": 0.3601801097393036,
      "learning_rate": 6.934368588379553e-06,
      "loss": 0.0404,
      "step": 106
    },
    {
      "epoch": 4.22716049382716,
      "grad_norm": 0.36949536204338074,
      "learning_rate": 6.238828904562316e-06,
      "loss": 0.042,
      "step": 107
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.3295345902442932,
      "learning_rate": 5.577718201056392e-06,
      "loss": 0.042,
      "step": 108
    },
    {
      "epoch": 4.306172839506173,
      "grad_norm": 0.36201441287994385,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.0297,
      "step": 109
    },
    {
      "epoch": 4.345679012345679,
      "grad_norm": 0.29246869683265686,
      "learning_rate": 4.360836746934055e-06,
      "loss": 0.0373,
      "step": 110
    },
    {
      "epoch": 4.345679012345679,
      "eval_loss": 0.13885988295078278,
      "eval_runtime": 23.8538,
      "eval_samples_per_second": 1.886,
      "eval_steps_per_second": 1.886,
      "step": 110
    },
    {
      "epoch": 4.385185185185185,
      "grad_norm": 0.7540881633758545,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0317,
      "step": 111
    },
    {
      "epoch": 4.424691358024692,
      "grad_norm": 0.4602210521697998,
      "learning_rate": 3.2875529852700147e-06,
      "loss": 0.0553,
      "step": 112
    },
    {
      "epoch": 4.4641975308641975,
      "grad_norm": 0.35102057456970215,
      "learning_rate": 2.8058334845816213e-06,
      "loss": 0.0425,
      "step": 113
    },
    {
      "epoch": 4.503703703703704,
      "grad_norm": 0.3979504108428955,
      "learning_rate": 2.361243863855184e-06,
      "loss": 0.0431,
      "step": 114
    },
    {
      "epoch": 4.54320987654321,
      "grad_norm": 0.3529750406742096,
      "learning_rate": 1.9541339027450256e-06,
      "loss": 0.0388,
      "step": 115
    },
    {
      "epoch": 4.582716049382716,
      "grad_norm": 0.3799625635147095,
      "learning_rate": 1.584823893886933e-06,
      "loss": 0.0312,
      "step": 116
    },
    {
      "epoch": 4.622222222222222,
      "grad_norm": 0.46877995133399963,
      "learning_rate": 1.2536043909088191e-06,
      "loss": 0.0346,
      "step": 117
    },
    {
      "epoch": 4.661728395061728,
      "grad_norm": 0.47655749320983887,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.0667,
      "step": 118
    },
    {
      "epoch": 4.701234567901235,
      "grad_norm": 0.3486880362033844,
      "learning_rate": 7.064490740882057e-07,
      "loss": 0.0338,
      "step": 119
    },
    {
      "epoch": 4.7407407407407405,
      "grad_norm": 0.4768744111061096,
      "learning_rate": 4.909437331777179e-07,
      "loss": 0.0495,
      "step": 120
    },
    {
      "epoch": 4.7407407407407405,
      "eval_loss": 0.13912023603916168,
      "eval_runtime": 23.8602,
      "eval_samples_per_second": 1.886,
      "eval_steps_per_second": 1.886,
      "step": 120
    },
    {
      "epoch": 4.780246913580247,
      "grad_norm": 0.4780932068824768,
      "learning_rate": 3.143895053378698e-07,
      "loss": 0.0368,
      "step": 121
    },
    {
      "epoch": 4.8197530864197535,
      "grad_norm": 0.2929289937019348,
      "learning_rate": 1.7692529411904578e-07,
      "loss": 0.0311,
      "step": 122
    },
    {
      "epoch": 4.859259259259259,
      "grad_norm": 0.46973612904548645,
      "learning_rate": 7.865924910916977e-08,
      "loss": 0.0418,
      "step": 123
    },
    {
      "epoch": 4.898765432098766,
      "grad_norm": 0.5098392367362976,
      "learning_rate": 1.9668680847356735e-08,
      "loss": 0.0557,
      "step": 124
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.3944704830646515,
      "learning_rate": 0.0,
      "loss": 0.0427,
      "step": 125
    },
    {
      "epoch": 4.938271604938271,
      "step": 125,
      "total_flos": 1.3189127055763046e+17,
      "train_loss": 0.10347887057065963,
      "train_runtime": 3705.2649,
      "train_samples_per_second": 0.547,
      "train_steps_per_second": 0.034
    }
  ],
  "logging_steps": 1,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3189127055763046e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}