{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.934541792547835,
  "eval_steps": 500,
  "global_step": 310,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16112789526686808,
      "grad_norm": 0.901447594165802,
      "learning_rate": 5.806451612903226e-06,
      "loss": 0.6022,
      "step": 10
    },
    {
      "epoch": 0.32225579053373615,
      "grad_norm": 0.5838184356689453,
      "learning_rate": 1.2258064516129034e-05,
      "loss": 0.5086,
      "step": 20
    },
    {
      "epoch": 0.48338368580060426,
      "grad_norm": 0.73076331615448,
      "learning_rate": 1.870967741935484e-05,
      "loss": 0.4313,
      "step": 30
    },
    {
      "epoch": 0.6445115810674723,
      "grad_norm": 0.7731372714042664,
      "learning_rate": 1.9959454037227215e-05,
      "loss": 0.3224,
      "step": 40
    },
    {
      "epoch": 0.8056394763343404,
      "grad_norm": 0.5845904350280762,
      "learning_rate": 1.9795299412524948e-05,
      "loss": 0.2318,
      "step": 50
    },
    {
      "epoch": 0.9667673716012085,
      "grad_norm": 0.5347937345504761,
      "learning_rate": 1.9507079544701583e-05,
      "loss": 0.1739,
      "step": 60
    },
    {
      "epoch": 1.1127895266868078,
      "grad_norm": 0.5780048370361328,
      "learning_rate": 1.9098444967188308e-05,
      "loss": 0.1281,
      "step": 70
    },
    {
      "epoch": 1.2739174219536757,
      "grad_norm": 0.9586585164070129,
      "learning_rate": 1.857457136130651e-05,
      "loss": 0.0624,
      "step": 80
    },
    {
      "epoch": 1.4350453172205437,
      "grad_norm": 0.6118476390838623,
      "learning_rate": 1.7942094002155122e-05,
      "loss": 0.0469,
      "step": 90
    },
    {
      "epoch": 1.596173212487412,
      "grad_norm": 0.7363295555114746,
      "learning_rate": 1.7209023717584013e-05,
      "loss": 0.0449,
      "step": 100
    },
    {
      "epoch": 1.75730110775428,
      "grad_norm": 0.34214308857917786,
      "learning_rate": 1.6384645424699835e-05,
      "loss": 0.0285,
      "step": 110
    },
    {
      "epoch": 1.918429003021148,
      "grad_norm": 0.42204025387763977,
      "learning_rate": 1.5479400529019987e-05,
      "loss": 0.0243,
      "step": 120
    },
    {
      "epoch": 2.0644511581067473,
      "grad_norm": 0.49233436584472656,
      "learning_rate": 1.4504754675782731e-05,
      "loss": 0.0154,
      "step": 130
    },
    {
      "epoch": 2.2255790533736155,
      "grad_norm": 0.31754371523857117,
      "learning_rate": 1.3473052528448203e-05,
      "loss": 0.0064,
      "step": 140
    },
    {
      "epoch": 2.3867069486404833,
      "grad_norm": 0.13772433996200562,
      "learning_rate": 1.2397361413735785e-05,
      "loss": 0.0073,
      "step": 150
    },
    {
      "epoch": 2.5478348439073515,
      "grad_norm": 0.1763952523469925,
      "learning_rate": 1.1291305813557616e-05,
      "loss": 0.0064,
      "step": 160
    },
    {
      "epoch": 2.7089627391742197,
      "grad_norm": 0.12793567776679993,
      "learning_rate": 1.0168894800139311e-05,
      "loss": 0.0044,
      "step": 170
    },
    {
      "epoch": 2.8700906344410875,
      "grad_norm": 0.1250123381614685,
      "learning_rate": 9.04434459999902e-06,
      "loss": 0.0036,
      "step": 180
    },
    {
      "epoch": 3.016112789526687,
      "grad_norm": 0.10916411876678467,
      "learning_rate": 7.93189853415293e-06,
      "loss": 0.0034,
      "step": 190
    },
    {
      "epoch": 3.177240684793555,
      "grad_norm": 0.09267673641443253,
      "learning_rate": 6.845646615147445e-06,
      "loss": 0.0013,
      "step": 200
    },
    {
      "epoch": 3.338368580060423,
      "grad_norm": 0.049450311809778214,
      "learning_rate": 5.799347085864851e-06,
      "loss": 0.0011,
      "step": 210
    },
    {
      "epoch": 3.499496475327291,
      "grad_norm": 0.05649149790406227,
      "learning_rate": 4.8062521604551245e-06,
      "loss": 0.0012,
      "step": 220
    },
    {
      "epoch": 3.6606243705941592,
      "grad_norm": 0.032816365361213684,
      "learning_rate": 3.878940174523371e-06,
      "loss": 0.001,
      "step": 230
    },
    {
      "epoch": 3.8217522658610275,
      "grad_norm": 0.031284552067518234,
      "learning_rate": 3.0291562705240107e-06,
      "loss": 0.001,
      "step": 240
    },
    {
      "epoch": 3.9828801611278952,
      "grad_norm": 0.513493001461029,
      "learning_rate": 2.2676636362076075e-06,
      "loss": 0.0019,
      "step": 250
    },
    {
      "epoch": 4.128902316213495,
      "grad_norm": 0.011032285168766975,
      "learning_rate": 1.60410718030361e-06,
      "loss": 0.0006,
      "step": 260
    },
    {
      "epoch": 4.290030211480363,
      "grad_norm": 0.030012542381882668,
      "learning_rate": 1.0468913720946084e-06,
      "loss": 0.0006,
      "step": 270
    },
    {
      "epoch": 4.451158106747231,
      "grad_norm": 0.013430260121822357,
      "learning_rate": 6.030737921409169e-07,
      "loss": 0.0005,
      "step": 280
    },
    {
      "epoch": 4.612286002014098,
      "grad_norm": 0.012694916687905788,
      "learning_rate": 2.7827574242009434e-07,
      "loss": 0.0009,
      "step": 290
    },
    {
      "epoch": 4.7734138972809665,
      "grad_norm": 0.020768938586115837,
      "learning_rate": 7.661104807487607e-08,
      "loss": 0.0006,
      "step": 300
    },
    {
      "epoch": 4.934541792547835,
      "grad_norm": 0.037272222340106964,
      "learning_rate": 6.339525519594159e-10,
      "loss": 0.0007,
      "step": 310
    },
    {
      "epoch": 4.934541792547835,
      "step": 310,
      "total_flos": 7.473315916210504e+17,
      "train_loss": 0.08592292484677126,
      "train_runtime": 5128.9726,
      "train_samples_per_second": 3.87,
      "train_steps_per_second": 0.06
    }
  ],
  "logging_steps": 10,
  "max_steps": 310,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.473315916210504e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}