{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.666666666666667,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 2.8320955506227734,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.7503,
      "step": 1
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 3.070678834597269,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.7881,
      "step": 2
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 2.8406043275851043,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.7837,
      "step": 3
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 1.5322822135832683,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 0.7099,
      "step": 4
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 1.4552346222662111,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 0.6701,
      "step": 5
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.252269636021821,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.6887,
      "step": 6
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 1.6182707319888876,
      "learning_rate": 2e-05,
      "loss": 0.5969,
      "step": 7
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 1.288445793207834,
      "learning_rate": 1.9987569212189224e-05,
      "loss": 0.6037,
      "step": 8
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.3885958633175666,
      "learning_rate": 1.9950307753654016e-05,
      "loss": 0.5739,
      "step": 9
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.3145050695727345,
      "learning_rate": 1.9888308262251286e-05,
      "loss": 0.5667,
      "step": 10
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 0.8226641945304691,
      "learning_rate": 1.9801724878485438e-05,
      "loss": 0.52,
      "step": 11
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.7029412793318481,
      "learning_rate": 1.969077286229078e-05,
      "loss": 0.4985,
      "step": 12
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 1.0085941606777864,
      "learning_rate": 1.955572805786141e-05,
      "loss": 0.4998,
      "step": 13
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.7798543200166844,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.4844,
      "step": 14
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.47938718917071815,
      "learning_rate": 1.921476211870408e-05,
      "loss": 0.4763,
      "step": 15
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 0.7368916353173011,
      "learning_rate": 1.900968867902419e-05,
      "loss": 0.4661,
      "step": 16
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.6028471352447777,
      "learning_rate": 1.8782215733702286e-05,
      "loss": 0.4481,
      "step": 17
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.5036570978313853,
      "learning_rate": 1.8532908816321557e-05,
      "loss": 0.4689,
      "step": 18
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.43540278611578953,
      "learning_rate": 1.826238774315995e-05,
      "loss": 0.4755,
      "step": 19
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.473121854105158,
      "learning_rate": 1.7971325072229227e-05,
      "loss": 0.4536,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.393615379852409,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.4324,
      "step": 21
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 0.4382316892369182,
      "learning_rate": 1.7330518718298263e-05,
      "loss": 0.377,
      "step": 22
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.3706811505731689,
      "learning_rate": 1.698236818086073e-05,
      "loss": 0.3974,
      "step": 23
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.4270626496673723,
      "learning_rate": 1.6616858375968596e-05,
      "loss": 0.3696,
      "step": 24
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.37439815103910373,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.3689,
      "step": 25
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.44016734715705863,
      "learning_rate": 1.5837436722347902e-05,
      "loss": 0.3566,
      "step": 26
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.38945751281489904,
      "learning_rate": 1.5425462638657597e-05,
      "loss": 0.3488,
      "step": 27
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.38729545807061805,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.3604,
      "step": 28
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.337571788427641,
      "learning_rate": 1.4562106573531632e-05,
      "loss": 0.3724,
      "step": 29
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.366079466990378,
      "learning_rate": 1.4112871031306118e-05,
      "loss": 0.356,
      "step": 30
    },
    {
      "epoch": 2.9523809523809526,
      "grad_norm": 0.34109571329058475,
      "learning_rate": 1.3653410243663953e-05,
      "loss": 0.3729,
      "step": 31
    },
    {
      "epoch": 3.0476190476190474,
      "grad_norm": 0.37256982898521124,
      "learning_rate": 1.3184866502516846e-05,
      "loss": 0.3332,
      "step": 32
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 0.32503079376324434,
      "learning_rate": 1.2708404681430054e-05,
      "loss": 0.3045,
      "step": 33
    },
    {
      "epoch": 3.238095238095238,
      "grad_norm": 0.44291894638005047,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 0.2815,
      "step": 34
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.34893424381655663,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.2993,
      "step": 35
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 0.36007093418871194,
      "learning_rate": 1.1243437046474854e-05,
      "loss": 0.2955,
      "step": 36
    },
    {
      "epoch": 3.5238095238095237,
      "grad_norm": 0.3579958497206131,
      "learning_rate": 1.0747300935864245e-05,
      "loss": 0.2987,
      "step": 37
    },
    {
      "epoch": 3.619047619047619,
      "grad_norm": 0.2897671994039566,
      "learning_rate": 1.0249306917380731e-05,
      "loss": 0.2944,
      "step": 38
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 0.3028567004058315,
      "learning_rate": 9.750693082619274e-06,
      "loss": 0.2769,
      "step": 39
    },
    {
      "epoch": 3.8095238095238093,
      "grad_norm": 0.32978392442323545,
      "learning_rate": 9.252699064135759e-06,
      "loss": 0.3137,
      "step": 40
    },
    {
      "epoch": 3.9047619047619047,
      "grad_norm": 0.26648670250781353,
      "learning_rate": 8.756562953525151e-06,
      "loss": 0.2652,
      "step": 41
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.2841438927117525,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.2901,
      "step": 42
    },
    {
      "epoch": 4.095238095238095,
      "grad_norm": 0.4527943251840279,
      "learning_rate": 7.774790660436857e-06,
      "loss": 0.2513,
      "step": 43
    },
    {
      "epoch": 4.190476190476191,
      "grad_norm": 0.34557068013858505,
      "learning_rate": 7.291595318569951e-06,
      "loss": 0.2517,
      "step": 44
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 0.2682787764385143,
      "learning_rate": 6.815133497483157e-06,
      "loss": 0.2496,
      "step": 45
    },
    {
      "epoch": 4.380952380952381,
      "grad_norm": 0.3771576753399809,
      "learning_rate": 6.34658975633605e-06,
      "loss": 0.2777,
      "step": 46
    },
    {
      "epoch": 4.476190476190476,
      "grad_norm": 0.3491352343564436,
      "learning_rate": 5.887128968693887e-06,
      "loss": 0.2519,
      "step": 47
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 0.23283825299003122,
      "learning_rate": 5.43789342646837e-06,
      "loss": 0.228,
      "step": 48
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 0.24105637170971358,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.2454,
      "step": 49
    },
    {
      "epoch": 4.761904761904762,
      "grad_norm": 0.2506297946545498,
      "learning_rate": 4.5745373613424075e-06,
      "loss": 0.2234,
      "step": 50
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 0.27558261186067,
      "learning_rate": 4.162563277652104e-06,
      "loss": 0.2155,
      "step": 51
    },
    {
      "epoch": 4.9523809523809526,
      "grad_norm": 0.2584942834824004,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 0.2527,
      "step": 52
    },
    {
      "epoch": 5.0476190476190474,
      "grad_norm": 0.23004602827606485,
      "learning_rate": 3.3831416240314085e-06,
      "loss": 0.2293,
      "step": 53
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 0.22066736092139588,
      "learning_rate": 3.017631819139273e-06,
      "loss": 0.2281,
      "step": 54
    },
    {
      "epoch": 5.238095238095238,
      "grad_norm": 0.1940298105510071,
      "learning_rate": 2.669481281701739e-06,
      "loss": 0.1992,
      "step": 55
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 0.20886369554142747,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.178,
      "step": 56
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 0.23594054857576238,
      "learning_rate": 2.0286749277707783e-06,
      "loss": 0.2168,
      "step": 57
    },
    {
      "epoch": 5.523809523809524,
      "grad_norm": 0.2596136041823099,
      "learning_rate": 1.7376122568400533e-06,
      "loss": 0.2271,
      "step": 58
    },
    {
      "epoch": 5.619047619047619,
      "grad_norm": 0.24708363861802166,
      "learning_rate": 1.467091183678444e-06,
      "loss": 0.2314,
      "step": 59
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 0.1924002571265768,
      "learning_rate": 1.2177842662977136e-06,
      "loss": 0.1739,
      "step": 60
    },
    {
      "epoch": 5.809523809523809,
      "grad_norm": 0.18344348453306975,
      "learning_rate": 9.903113209758098e-07,
      "loss": 0.2262,
      "step": 61
    },
    {
      "epoch": 5.904761904761905,
      "grad_norm": 0.16573331766938174,
      "learning_rate": 7.852378812959227e-07,
      "loss": 0.225,
      "step": 62
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.170300581792204,
      "learning_rate": 6.030737921409169e-07,
      "loss": 0.2453,
      "step": 63
    },
    {
      "epoch": 6.095238095238095,
      "grad_norm": 0.22362018692425412,
      "learning_rate": 4.4427194213859216e-07,
      "loss": 0.2161,
      "step": 64
    },
    {
      "epoch": 6.190476190476191,
      "grad_norm": 0.192560271462529,
      "learning_rate": 3.0922713770922155e-07,
      "loss": 0.1761,
      "step": 65
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 0.20601479851731874,
      "learning_rate": 1.9827512151456175e-07,
      "loss": 0.1877,
      "step": 66
    },
    {
      "epoch": 6.380952380952381,
      "grad_norm": 0.17386515490190058,
      "learning_rate": 1.1169173774871478e-07,
      "loss": 0.223,
      "step": 67
    },
    {
      "epoch": 6.476190476190476,
      "grad_norm": 0.19256718631019112,
      "learning_rate": 4.9692246345985905e-08,
      "loss": 0.2255,
      "step": 68
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 0.17625780507703753,
      "learning_rate": 1.2430787810776556e-08,
      "loss": 0.2008,
      "step": 69
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 0.1714615772867102,
      "learning_rate": 0.0,
      "loss": 0.2137,
      "step": 70
    },
    {
      "epoch": 6.666666666666667,
      "step": 70,
      "total_flos": 1.0594077221715968e+17,
      "train_loss": 0.3594118290713855,
      "train_runtime": 4436.4382,
      "train_samples_per_second": 1.578,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0594077221715968e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}