{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.8027057497181511,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04509582863585118,
      "grad_norm": 3.0241758823394775,
      "learning_rate": 1.323529411764706e-05,
      "loss": 2.4093,
      "step": 10
    },
    {
      "epoch": 0.09019165727170236,
      "grad_norm": 0.5808393359184265,
      "learning_rate": 2.7941176470588236e-05,
      "loss": 1.9852,
      "step": 20
    },
    {
      "epoch": 0.13528748590755355,
      "grad_norm": 0.5174122452735901,
      "learning_rate": 4.2647058823529415e-05,
      "loss": 1.5172,
      "step": 30
    },
    {
      "epoch": 0.18038331454340473,
      "grad_norm": 0.4034510850906372,
      "learning_rate": 4.999227864973929e-05,
      "loss": 1.2282,
      "step": 40
    },
    {
      "epoch": 0.2254791431792559,
      "grad_norm": 0.3215184211730957,
      "learning_rate": 4.993053646194732e-05,
      "loss": 0.8247,
      "step": 50
    },
    {
      "epoch": 0.2254791431792559,
      "eval_loss": 0.8878735303878784,
      "eval_runtime": 20.7994,
      "eval_samples_per_second": 8.991,
      "eval_steps_per_second": 4.519,
      "step": 50
    },
    {
      "epoch": 0.2705749718151071,
      "grad_norm": 0.3615158498287201,
      "learning_rate": 4.980720461738333e-05,
      "loss": 0.7585,
      "step": 60
    },
    {
      "epoch": 0.3156708004509583,
      "grad_norm": 0.26657992601394653,
      "learning_rate": 4.962258780126689e-05,
      "loss": 0.775,
      "step": 70
    },
    {
      "epoch": 0.36076662908680945,
      "grad_norm": 0.3843010663986206,
      "learning_rate": 4.937714210030742e-05,
      "loss": 0.7218,
      "step": 80
    },
    {
      "epoch": 0.40586245772266066,
      "grad_norm": 0.3022189140319824,
      "learning_rate": 4.907147387596452e-05,
      "loss": 0.5914,
      "step": 90
    },
    {
      "epoch": 0.4509582863585118,
      "grad_norm": 0.3209262788295746,
      "learning_rate": 4.8706338266462066e-05,
      "loss": 0.6643,
      "step": 100
    },
    {
      "epoch": 0.4509582863585118,
      "eval_loss": 0.6771596670150757,
      "eval_runtime": 20.7653,
      "eval_samples_per_second": 9.005,
      "eval_steps_per_second": 4.527,
      "step": 100
    },
    {
      "epoch": 0.496054114994363,
      "grad_norm": 0.28819140791893005,
      "learning_rate": 4.828263732125649e-05,
      "loss": 0.6084,
      "step": 110
    },
    {
      "epoch": 0.5411499436302142,
      "grad_norm": 0.27068546414375305,
      "learning_rate": 4.78014177725683e-05,
      "loss": 0.6108,
      "step": 120
    },
    {
      "epoch": 0.5862457722660653,
      "grad_norm": 0.2257184237241745,
      "learning_rate": 4.7263868449481895e-05,
      "loss": 0.5649,
      "step": 130
    },
    {
      "epoch": 0.6313416009019166,
      "grad_norm": 0.3302956819534302,
      "learning_rate": 4.667131734100215e-05,
      "loss": 0.6019,
      "step": 140
    },
    {
      "epoch": 0.6764374295377678,
      "grad_norm": 0.3101862668991089,
      "learning_rate": 4.602522831532328e-05,
      "loss": 0.6504,
      "step": 150
    },
    {
      "epoch": 0.6764374295377678,
      "eval_loss": 0.6201105713844299,
      "eval_runtime": 20.759,
      "eval_samples_per_second": 9.008,
      "eval_steps_per_second": 4.528,
      "step": 150
    },
    {
      "epoch": 0.7215332581736189,
      "grad_norm": 0.28751006722450256,
      "learning_rate": 4.5327197503414856e-05,
      "loss": 0.5339,
      "step": 160
    },
    {
      "epoch": 0.7666290868094702,
      "grad_norm": 0.25935712456703186,
      "learning_rate": 4.4578949355859326e-05,
      "loss": 0.5744,
      "step": 170
    },
    {
      "epoch": 0.8117249154453213,
      "grad_norm": 0.21612298488616943,
      "learning_rate": 4.378233238268206e-05,
      "loss": 0.5989,
      "step": 180
    },
    {
      "epoch": 0.8568207440811725,
      "grad_norm": 0.2203262448310852,
      "learning_rate": 4.293931458669888e-05,
      "loss": 0.6229,
      "step": 190
    },
    {
      "epoch": 0.9019165727170236,
      "grad_norm": 0.23329846560955048,
      "learning_rate": 4.205197860166242e-05,
      "loss": 0.5673,
      "step": 200
    },
    {
      "epoch": 0.9019165727170236,
      "eval_loss": 0.5971513986587524,
      "eval_runtime": 20.7549,
      "eval_samples_per_second": 9.01,
      "eval_steps_per_second": 4.529,
      "step": 200
    },
    {
      "epoch": 0.9470124013528749,
      "grad_norm": 0.19123254716396332,
      "learning_rate": 4.112251654721855e-05,
      "loss": 0.504,
      "step": 210
    },
    {
      "epoch": 0.992108229988726,
      "grad_norm": 0.3501538038253784,
      "learning_rate": 4.015322461338319e-05,
      "loss": 0.5844,
      "step": 220
    },
    {
      "epoch": 1.0360766629086808,
      "grad_norm": 0.24078238010406494,
      "learning_rate": 3.9146497387918546e-05,
      "loss": 0.5213,
      "step": 230
    },
    {
      "epoch": 1.0811724915445322,
      "grad_norm": 0.20395271480083466,
      "learning_rate": 3.8104821940622484e-05,
      "loss": 0.5404,
      "step": 240
    },
    {
      "epoch": 1.1262683201803834,
      "grad_norm": 0.18885444104671478,
      "learning_rate": 3.703077167914571e-05,
      "loss": 0.4906,
      "step": 250
    },
    {
      "epoch": 1.1262683201803834,
      "eval_loss": 0.5830020308494568,
      "eval_runtime": 20.7437,
      "eval_samples_per_second": 9.015,
      "eval_steps_per_second": 4.532,
      "step": 250
    },
    {
      "epoch": 1.1713641488162345,
      "grad_norm": 0.26414477825164795,
      "learning_rate": 3.592699999151542e-05,
      "loss": 0.5721,
      "step": 260
    },
    {
      "epoch": 1.2164599774520857,
      "grad_norm": 0.277395099401474,
      "learning_rate": 3.479623369107154e-05,
      "loss": 0.5371,
      "step": 270
    },
    {
      "epoch": 1.2615558060879368,
      "grad_norm": 0.2577001452445984,
      "learning_rate": 3.3641266280009265e-05,
      "loss": 0.5174,
      "step": 280
    },
    {
      "epoch": 1.306651634723788,
      "grad_norm": 0.30697473883628845,
      "learning_rate": 3.2464951048170114e-05,
      "loss": 0.4956,
      "step": 290
    },
    {
      "epoch": 1.3517474633596391,
      "grad_norm": 0.25417396426200867,
      "learning_rate": 3.1270194024130506e-05,
      "loss": 0.56,
      "step": 300
    },
    {
      "epoch": 1.3517474633596391,
      "eval_loss": 0.5748847723007202,
      "eval_runtime": 20.7551,
      "eval_samples_per_second": 9.01,
      "eval_steps_per_second": 4.529,
      "step": 300
    },
    {
      "epoch": 1.3968432919954905,
      "grad_norm": 0.28659453988075256,
      "learning_rate": 3.0059946796002047e-05,
      "loss": 0.5368,
      "step": 310
    },
    {
      "epoch": 1.4419391206313417,
      "grad_norm": 0.25941741466522217,
      "learning_rate": 2.8837199219679072e-05,
      "loss": 0.6031,
      "step": 320
    },
    {
      "epoch": 1.4870349492671928,
      "grad_norm": 0.23582515120506287,
      "learning_rate": 2.7604972032547628e-05,
      "loss": 0.4953,
      "step": 330
    },
    {
      "epoch": 1.532130777903044,
      "grad_norm": 0.26195406913757324,
      "learning_rate": 2.6366309390903178e-05,
      "loss": 0.5443,
      "step": 340
    },
    {
      "epoch": 1.5772266065388951,
      "grad_norm": 0.23843364417552948,
      "learning_rate": 2.5124271349513184e-05,
      "loss": 0.5114,
      "step": 350
    },
    {
      "epoch": 1.5772266065388951,
      "eval_loss": 0.5691995024681091,
      "eval_runtime": 20.7581,
      "eval_samples_per_second": 9.009,
      "eval_steps_per_second": 4.528,
      "step": 350
    },
    {
      "epoch": 1.6223224351747465,
      "grad_norm": 0.3796253502368927,
      "learning_rate": 2.388192630190314e-05,
      "loss": 0.5246,
      "step": 360
    },
    {
      "epoch": 1.6674182638105974,
      "grad_norm": 0.2228076159954071,
      "learning_rate": 2.26423434000422e-05,
      "loss": 0.4801,
      "step": 370
    },
    {
      "epoch": 1.7125140924464488,
      "grad_norm": 0.4152977764606476,
      "learning_rate": 2.140858497215516e-05,
      "loss": 0.5636,
      "step": 380
    },
    {
      "epoch": 1.7576099210822997,
      "grad_norm": 0.3037204146385193,
      "learning_rate": 2.0183698957392015e-05,
      "loss": 0.5122,
      "step": 390
    },
    {
      "epoch": 1.8027057497181511,
      "grad_norm": 0.42664799094200134,
      "learning_rate": 1.897071137604517e-05,
      "loss": 0.5104,
      "step": 400
    },
    {
      "epoch": 1.8027057497181511,
      "eval_loss": 0.5620473623275757,
      "eval_runtime": 20.7694,
      "eval_samples_per_second": 9.004,
      "eval_steps_per_second": 4.526,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 666,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.44623567199232e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}