{
  "best_metric": 14.6166,
  "best_model_checkpoint": "./nllb_finetuned_base_II_improved/checkpoint-18612",
  "epoch": 49.87428932406822,
  "eval_steps": 500,
  "global_step": 19750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002526847757422615,
      "grad_norm": 0.03834182024002075,
      "learning_rate": 5.063291139240507e-08,
      "loss": 0.0009,
      "step": 1
    },
    {
      "epoch": 0.2526847757422615,
      "grad_norm": 0.055799346417188644,
      "learning_rate": 5.063291139240506e-06,
      "loss": 0.002,
      "step": 100
    },
    {
      "epoch": 0.505369551484523,
      "grad_norm": 0.5792537927627563,
      "learning_rate": 1.0126582278481012e-05,
      "loss": 0.0021,
      "step": 200
    },
    {
      "epoch": 0.7580543272267846,
      "grad_norm": 0.008475471287965775,
      "learning_rate": 1.5189873417721521e-05,
      "loss": 0.0023,
      "step": 300
    },
    {
      "epoch": 1.0,
      "eval_bleu": 14.2158,
      "eval_gen_len": 14.4698,
      "eval_loss": 5.007449150085449,
      "eval_runtime": 537.7389,
      "eval_samples_per_second": 10.937,
      "eval_steps_per_second": 1.369,
      "step": 396
    },
    {
      "epoch": 1.0101073910296905,
      "grad_norm": 0.264018177986145,
      "learning_rate": 2.0253164556962025e-05,
      "loss": 0.0032,
      "step": 400
    },
    {
      "epoch": 1.262792166771952,
      "grad_norm": 0.011777933686971664,
      "learning_rate": 2.5316455696202533e-05,
      "loss": 0.0025,
      "step": 500
    },
    {
      "epoch": 1.5154769425142134,
      "grad_norm": 0.027416467666625977,
      "learning_rate": 3.0379746835443042e-05,
      "loss": 0.0027,
      "step": 600
    },
    {
      "epoch": 1.768161718256475,
      "grad_norm": 0.14406521618366241,
      "learning_rate": 3.5443037974683544e-05,
      "loss": 0.0024,
      "step": 700
    },
    {
      "epoch": 2.0,
      "eval_bleu": 14.2734,
      "eval_gen_len": 14.3467,
      "eval_loss": 5.034389972686768,
      "eval_runtime": 533.7562,
      "eval_samples_per_second": 11.018,
      "eval_steps_per_second": 1.379,
      "step": 792
    },
    {
      "epoch": 2.020214782059381,
      "grad_norm": 0.0178065225481987,
      "learning_rate": 4.050632911392405e-05,
      "loss": 0.0024,
      "step": 800
    },
    {
      "epoch": 2.2728995578016424,
      "grad_norm": 0.018611092120409012,
      "learning_rate": 4.556962025316456e-05,
      "loss": 0.002,
      "step": 900
    },
    {
      "epoch": 2.525584333543904,
      "grad_norm": 0.01996198110282421,
      "learning_rate": 5.0632911392405066e-05,
      "loss": 0.004,
      "step": 1000
    },
    {
      "epoch": 2.7782691092861658,
      "grad_norm": 0.01915626786649227,
      "learning_rate": 5.569620253164557e-05,
      "loss": 0.0027,
      "step": 1100
    },
    {
      "epoch": 3.0,
      "eval_bleu": 14.2059,
      "eval_gen_len": 14.3137,
      "eval_loss": 5.052088737487793,
      "eval_runtime": 529.7444,
      "eval_samples_per_second": 11.102,
      "eval_steps_per_second": 1.389,
      "step": 1188
    },
    {
      "epoch": 3.0303221730890715,
      "grad_norm": 0.06470153480768204,
      "learning_rate": 6.0759493670886084e-05,
      "loss": 0.0032,
      "step": 1200
    },
    {
      "epoch": 3.283006948831333,
      "grad_norm": 0.019087791442871094,
      "learning_rate": 6.582278481012658e-05,
      "loss": 0.0029,
      "step": 1300
    },
    {
      "epoch": 3.5356917245735944,
      "grad_norm": 0.7321619391441345,
      "learning_rate": 7.088607594936709e-05,
      "loss": 0.004,
      "step": 1400
    },
    {
      "epoch": 3.788376500315856,
      "grad_norm": 0.08558339625597,
      "learning_rate": 7.59493670886076e-05,
      "loss": 0.0039,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_bleu": 13.7306,
      "eval_gen_len": 14.7393,
      "eval_loss": 5.003371715545654,
      "eval_runtime": 542.3943,
      "eval_samples_per_second": 10.843,
      "eval_steps_per_second": 1.357,
      "step": 1584
    },
    {
      "epoch": 4.040429564118762,
      "grad_norm": 0.026173993945121765,
      "learning_rate": 8.10126582278481e-05,
      "loss": 0.0044,
      "step": 1600
    },
    {
      "epoch": 4.293114339861023,
      "grad_norm": 0.01453397236764431,
      "learning_rate": 8.607594936708861e-05,
      "loss": 0.0051,
      "step": 1700
    },
    {
      "epoch": 4.545799115603285,
      "grad_norm": 0.01603219285607338,
      "learning_rate": 9.113924050632912e-05,
      "loss": 0.0061,
      "step": 1800
    },
    {
      "epoch": 4.798483891345547,
      "grad_norm": 0.0410076268017292,
      "learning_rate": 9.620253164556962e-05,
      "loss": 0.0069,
      "step": 1900
    },
    {
      "epoch": 5.0,
      "eval_bleu": 13.8802,
      "eval_gen_len": 14.4926,
      "eval_loss": 5.007750988006592,
      "eval_runtime": 536.2346,
      "eval_samples_per_second": 10.967,
      "eval_steps_per_second": 1.373,
      "step": 1980
    },
    {
      "epoch": 5.050536955148452,
      "grad_norm": 0.03955426439642906,
      "learning_rate": 0.00010126582278481013,
      "loss": 0.0062,
      "step": 2000
    },
    {
      "epoch": 5.303221730890714,
      "grad_norm": 0.07262468338012695,
      "learning_rate": 0.00010632911392405063,
      "loss": 0.0082,
      "step": 2100
    },
    {
      "epoch": 5.555906506632976,
      "grad_norm": 0.8466947078704834,
      "learning_rate": 0.00011139240506329114,
      "loss": 0.011,
      "step": 2200
    },
    {
      "epoch": 5.808591282375237,
      "grad_norm": 0.4297804832458496,
      "learning_rate": 0.00011645569620253166,
      "loss": 0.013,
      "step": 2300
    },
    {
      "epoch": 6.0,
      "eval_bleu": 13.5899,
      "eval_gen_len": 14.494,
      "eval_loss": 4.995738983154297,
      "eval_runtime": 536.5639,
      "eval_samples_per_second": 10.96,
      "eval_steps_per_second": 1.372,
      "step": 2376
    },
    {
      "epoch": 6.060644346178143,
      "grad_norm": 0.14445021748542786,
      "learning_rate": 0.00012151898734177217,
      "loss": 0.0133,
      "step": 2400
    },
    {
      "epoch": 6.313329121920404,
      "grad_norm": 0.1337200254201889,
      "learning_rate": 0.00012658227848101267,
      "loss": 0.0115,
      "step": 2500
    },
    {
      "epoch": 6.566013897662666,
      "grad_norm": 0.5902842879295349,
      "learning_rate": 0.00013164556962025315,
      "loss": 0.0124,
      "step": 2600
    },
    {
      "epoch": 6.818698673404928,
      "grad_norm": 0.709742546081543,
      "learning_rate": 0.00013670886075949366,
      "loss": 0.0165,
      "step": 2700
    },
    {
      "epoch": 7.0,
      "eval_bleu": 13.324,
      "eval_gen_len": 14.9148,
      "eval_loss": 4.997140407562256,
      "eval_runtime": 541.5428,
      "eval_samples_per_second": 10.86,
      "eval_steps_per_second": 1.359,
      "step": 2772
    },
    {
      "epoch": 7.070751737207833,
      "grad_norm": 0.3325135111808777,
      "learning_rate": 0.00014177215189873418,
      "loss": 0.0205,
      "step": 2800
    },
    {
      "epoch": 7.323436512950095,
      "grad_norm": 0.8612397909164429,
      "learning_rate": 0.0001468354430379747,
      "loss": 0.0141,
      "step": 2900
    },
    {
      "epoch": 7.576121288692356,
      "grad_norm": 0.5200220346450806,
      "learning_rate": 0.0001518987341772152,
      "loss": 0.0131,
      "step": 3000
    },
    {
      "epoch": 7.828806064434618,
      "grad_norm": 0.18251368403434753,
      "learning_rate": 0.00015696202531645568,
      "loss": 0.0195,
      "step": 3100
    },
    {
      "epoch": 8.0,
      "eval_bleu": 13.5516,
      "eval_gen_len": 14.4363,
      "eval_loss": 4.994857311248779,
      "eval_runtime": 535.7057,
      "eval_samples_per_second": 10.978,
      "eval_steps_per_second": 1.374,
      "step": 3168
    },
    {
      "epoch": 8.080859128237524,
      "grad_norm": 0.8557725548744202,
      "learning_rate": 0.0001620253164556962,
      "loss": 0.0207,
      "step": 3200
    },
    {
      "epoch": 8.333543903979786,
      "grad_norm": 0.6467623114585876,
      "learning_rate": 0.0001670886075949367,
      "loss": 0.0223,
      "step": 3300
    },
    {
      "epoch": 8.586228679722046,
      "grad_norm": 0.9199110269546509,
      "learning_rate": 0.00017215189873417722,
      "loss": 0.0197,
      "step": 3400
    },
    {
      "epoch": 8.838913455464308,
      "grad_norm": 0.3197132647037506,
      "learning_rate": 0.00017721518987341773,
      "loss": 0.0218,
      "step": 3500
    },
    {
      "epoch": 9.0,
      "eval_bleu": 13.6364,
      "eval_gen_len": 14.1306,
      "eval_loss": 4.960774898529053,
      "eval_runtime": 526.2912,
      "eval_samples_per_second": 11.174,
      "eval_steps_per_second": 1.398,
      "step": 3564
    },
    {
      "epoch": 9.090966519267214,
      "grad_norm": 0.1220143586397171,
      "learning_rate": 0.00018227848101265824,
      "loss": 0.0297,
      "step": 3600
    },
    {
      "epoch": 9.343651295009476,
      "grad_norm": 0.31142914295196533,
      "learning_rate": 0.00018734177215189873,
      "loss": 0.0212,
      "step": 3700
    },
    {
      "epoch": 9.596336070751738,
      "grad_norm": 0.7474793791770935,
      "learning_rate": 0.00019240506329113924,
      "loss": 0.0218,
      "step": 3800
    },
    {
      "epoch": 9.849020846494,
      "grad_norm": 0.4890414774417877,
      "learning_rate": 0.00019746835443037975,
      "loss": 0.0249,
      "step": 3900
    },
    {
      "epoch": 10.0,
      "eval_bleu": 13.1309,
      "eval_gen_len": 14.3164,
      "eval_loss": 4.990716934204102,
      "eval_runtime": 536.4931,
      "eval_samples_per_second": 10.962,
      "eval_steps_per_second": 1.372,
      "step": 3960
    },
    {
      "epoch": 10.101073910296904,
      "grad_norm": 0.650865375995636,
      "learning_rate": 0.00019936740506329114,
      "loss": 0.0282,
      "step": 4000
    },
    {
      "epoch": 10.353758686039166,
      "grad_norm": 0.616794764995575,
      "learning_rate": 0.00019810221518987343,
      "loss": 0.0232,
      "step": 4100
    },
    {
      "epoch": 10.606443461781428,
      "grad_norm": 0.1520662158727646,
      "learning_rate": 0.00019683702531645572,
      "loss": 0.0271,
      "step": 4200
    },
    {
      "epoch": 10.85912823752369,
      "grad_norm": 0.6819707751274109,
      "learning_rate": 0.00019557183544303798,
      "loss": 0.0237,
      "step": 4300
    },
    {
      "epoch": 11.0,
      "eval_bleu": 13.389,
      "eval_gen_len": 14.4307,
      "eval_loss": 4.9949188232421875,
      "eval_runtime": 532.7791,
      "eval_samples_per_second": 11.038,
      "eval_steps_per_second": 1.381,
      "step": 4356
    },
    {
      "epoch": 11.111181301326596,
      "grad_norm": 0.14406563341617584,
      "learning_rate": 0.00019430664556962024,
      "loss": 0.0236,
      "step": 4400
    },
    {
      "epoch": 11.363866077068856,
      "grad_norm": 0.30580630898475647,
      "learning_rate": 0.00019304145569620256,
      "loss": 0.0199,
      "step": 4500
    },
    {
      "epoch": 11.616550852811118,
      "grad_norm": 0.2938636541366577,
      "learning_rate": 0.00019177626582278482,
      "loss": 0.0212,
      "step": 4600
    },
    {
      "epoch": 11.86923562855338,
      "grad_norm": 0.30495572090148926,
      "learning_rate": 0.00019051107594936708,
      "loss": 0.0183,
      "step": 4700
    },
    {
      "epoch": 12.0,
      "eval_bleu": 13.4564,
      "eval_gen_len": 14.6526,
      "eval_loss": 5.026724338531494,
      "eval_runtime": 539.3787,
      "eval_samples_per_second": 10.903,
      "eval_steps_per_second": 1.365,
      "step": 4752
    },
    {
      "epoch": 12.121288692356286,
      "grad_norm": 0.4794355630874634,
      "learning_rate": 0.0001892458860759494,
      "loss": 0.0209,
      "step": 4800
    },
    {
      "epoch": 12.373973468098548,
      "grad_norm": 0.08356456458568573,
      "learning_rate": 0.00018798069620253166,
      "loss": 0.0181,
      "step": 4900
    },
    {
      "epoch": 12.626658243840808,
      "grad_norm": 0.12282651662826538,
      "learning_rate": 0.00018671550632911392,
      "loss": 0.0164,
      "step": 5000
    },
    {
      "epoch": 12.87934301958307,
      "grad_norm": 0.44435593485832214,
      "learning_rate": 0.0001854503164556962,
      "loss": 0.0212,
      "step": 5100
    },
    {
      "epoch": 13.0,
      "eval_bleu": 13.59,
      "eval_gen_len": 14.2952,
      "eval_loss": 5.072442054748535,
      "eval_runtime": 529.5691,
      "eval_samples_per_second": 11.105,
      "eval_steps_per_second": 1.39,
      "step": 5148
    },
    {
      "epoch": 13.131396083385976,
      "grad_norm": 0.24175015091896057,
      "learning_rate": 0.0001841851265822785,
      "loss": 0.0175,
      "step": 5200
    },
    {
      "epoch": 13.384080859128238,
      "grad_norm": 0.18917737901210785,
      "learning_rate": 0.00018291993670886076,
      "loss": 0.0154,
      "step": 5300
    },
    {
      "epoch": 13.6367656348705,
      "grad_norm": 0.10555186867713928,
      "learning_rate": 0.00018165474683544305,
      "loss": 0.0171,
      "step": 5400
    },
    {
      "epoch": 13.88945041061276,
      "grad_norm": 0.5594084858894348,
      "learning_rate": 0.00018038955696202534,
      "loss": 0.0158,
      "step": 5500
    },
    {
      "epoch": 14.0,
      "eval_bleu": 13.3564,
      "eval_gen_len": 14.5018,
      "eval_loss": 5.083232879638672,
      "eval_runtime": 540.6508,
      "eval_samples_per_second": 10.878,
      "eval_steps_per_second": 1.361,
      "step": 5544
    },
    {
      "epoch": 14.141503474415666,
      "grad_norm": 0.5325390100479126,
      "learning_rate": 0.0001791243670886076,
      "loss": 0.0148,
      "step": 5600
    },
    {
      "epoch": 14.394188250157928,
      "grad_norm": 0.16868151724338531,
      "learning_rate": 0.00017785917721518986,
      "loss": 0.0136,
      "step": 5700
    },
    {
      "epoch": 14.64687302590019,
      "grad_norm": 0.22546489536762238,
      "learning_rate": 0.00017659398734177218,
      "loss": 0.0129,
      "step": 5800
    },
    {
      "epoch": 14.899557801642452,
      "grad_norm": 0.4606294631958008,
      "learning_rate": 0.00017532879746835444,
      "loss": 0.0149,
      "step": 5900
    },
    {
      "epoch": 15.0,
      "eval_bleu": 13.71,
      "eval_gen_len": 14.4261,
      "eval_loss": 5.047977447509766,
      "eval_runtime": 536.9368,
      "eval_samples_per_second": 10.953,
      "eval_steps_per_second": 1.371,
      "step": 5940
    },
    {
      "epoch": 15.151610865445356,
      "grad_norm": 0.2645217776298523,
      "learning_rate": 0.0001740636075949367,
      "loss": 0.0149,
      "step": 6000
    },
    {
      "epoch": 15.404295641187618,
      "grad_norm": 0.18864086270332336,
      "learning_rate": 0.000172798417721519,
      "loss": 0.0133,
      "step": 6100
    },
    {
      "epoch": 15.65698041692988,
      "grad_norm": 0.3654481768608093,
      "learning_rate": 0.00017153322784810128,
      "loss": 0.0127,
      "step": 6200
    },
    {
      "epoch": 15.909665192672142,
      "grad_norm": 0.47671639919281006,
      "learning_rate": 0.00017026803797468354,
      "loss": 0.0152,
      "step": 6300
    },
    {
      "epoch": 16.0,
      "eval_bleu": 13.3368,
      "eval_gen_len": 14.4033,
      "eval_loss": 5.045422554016113,
      "eval_runtime": 533.8922,
      "eval_samples_per_second": 11.015,
      "eval_steps_per_second": 1.379,
      "step": 6336
    },
    {
      "epoch": 16.161718256475048,
      "grad_norm": 0.23764218389987946,
      "learning_rate": 0.00016900284810126583,
      "loss": 0.0174,
      "step": 6400
    },
    {
      "epoch": 16.414403032217308,
      "grad_norm": 0.2533399164676666,
      "learning_rate": 0.00016775031012658228,
      "loss": 0.016,
      "step": 6500
    },
    {
      "epoch": 16.66708780795957,
      "grad_norm": 0.1537361741065979,
      "learning_rate": 0.00016648512025316457,
      "loss": 0.015,
      "step": 6600
    },
    {
      "epoch": 16.919772583701832,
      "grad_norm": 0.2747589349746704,
      "learning_rate": 0.00016521993037974683,
      "loss": 0.0179,
      "step": 6700
    },
    {
      "epoch": 17.0,
      "eval_bleu": 13.2518,
      "eval_gen_len": 14.4889,
      "eval_loss": 5.028156280517578,
      "eval_runtime": 538.1372,
      "eval_samples_per_second": 10.928,
      "eval_steps_per_second": 1.368,
      "step": 6732
    },
    {
      "epoch": 17.171825647504736,
      "grad_norm": 0.216201052069664,
      "learning_rate": 0.00016395474050632914,
      "loss": 0.0151,
      "step": 6800
    },
    {
      "epoch": 17.424510423247,
      "grad_norm": 0.08341749012470245,
      "learning_rate": 0.00016270220253164556,
      "loss": 0.0119,
      "step": 6900
    },
    {
      "epoch": 17.67719519898926,
      "grad_norm": 0.6413178443908691,
      "learning_rate": 0.00016143701265822785,
      "loss": 0.0145,
      "step": 7000
    },
    {
      "epoch": 17.929879974731524,
      "grad_norm": 0.05727590620517731,
      "learning_rate": 0.00016018447468354433,
      "loss": 0.0139,
      "step": 7100
    },
    {
      "epoch": 18.0,
      "eval_bleu": 13.4478,
      "eval_gen_len": 14.5729,
      "eval_loss": 5.039746284484863,
      "eval_runtime": 538.0034,
      "eval_samples_per_second": 10.931,
      "eval_steps_per_second": 1.368,
      "step": 7128
    },
    {
      "epoch": 18.181933038534428,
      "grad_norm": 0.09456099569797516,
      "learning_rate": 0.0001589192848101266,
      "loss": 0.0107,
      "step": 7200
    },
    {
      "epoch": 18.434617814276688,
      "grad_norm": 0.5516796112060547,
      "learning_rate": 0.00015765409493670885,
      "loss": 0.0082,
      "step": 7300
    },
    {
      "epoch": 18.68730259001895,
      "grad_norm": 0.73766028881073,
      "learning_rate": 0.00015638890506329114,
      "loss": 0.0118,
      "step": 7400
    },
    {
      "epoch": 18.939987365761212,
      "grad_norm": 0.25607988238334656,
      "learning_rate": 0.00015512371518987343,
      "loss": 0.0124,
      "step": 7500
    },
    {
      "epoch": 19.0,
      "eval_bleu": 13.418,
      "eval_gen_len": 14.4207,
      "eval_loss": 5.1243767738342285,
      "eval_runtime": 534.6138,
      "eval_samples_per_second": 11.0,
      "eval_steps_per_second": 1.377,
      "step": 7524
    },
    {
      "epoch": 19.19204042956412,
      "grad_norm": 0.19441089034080505,
      "learning_rate": 0.00015385852531645572,
      "loss": 0.0113,
      "step": 7600
    },
    {
      "epoch": 19.44472520530638,
      "grad_norm": 0.41388899087905884,
      "learning_rate": 0.00015259333544303798,
      "loss": 0.0118,
      "step": 7700
    },
    {
      "epoch": 19.697409981048644,
      "grad_norm": 0.20537462830543518,
      "learning_rate": 0.00015132814556962027,
      "loss": 0.0081,
      "step": 7800
    },
    {
      "epoch": 19.950094756790904,
      "grad_norm": 0.07861676812171936,
      "learning_rate": 0.00015006295569620253,
      "loss": 0.0107,
      "step": 7900
    },
    {
      "epoch": 20.0,
      "eval_bleu": 13.4141,
      "eval_gen_len": 14.5943,
      "eval_loss": 5.130436897277832,
      "eval_runtime": 536.0796,
      "eval_samples_per_second": 10.97,
      "eval_steps_per_second": 1.373,
      "step": 7920
    },
    {
      "epoch": 20.202147820593808,
      "grad_norm": 0.4762495756149292,
      "learning_rate": 0.0001487977658227848,
      "loss": 0.009,
      "step": 8000
    },
    {
      "epoch": 20.45483259633607,
      "grad_norm": 1.3390281200408936,
      "learning_rate": 0.0001475325759493671,
      "loss": 0.0095,
      "step": 8100
    },
    {
      "epoch": 20.707517372078332,
      "grad_norm": 0.06131729856133461,
      "learning_rate": 0.0001462673860759494,
      "loss": 0.0088,
      "step": 8200
    },
    {
      "epoch": 20.960202147820596,
      "grad_norm": 0.10782083868980408,
      "learning_rate": 0.00014500219620253166,
      "loss": 0.0104,
      "step": 8300
    },
    {
      "epoch": 21.0,
      "eval_bleu": 13.6054,
      "eval_gen_len": 14.0954,
      "eval_loss": 5.0841474533081055,
      "eval_runtime": 523.9577,
      "eval_samples_per_second": 11.224,
      "eval_steps_per_second": 1.405,
      "step": 8316
    },
    {
      "epoch": 21.2122552116235,
      "grad_norm": 0.5227333903312683,
      "learning_rate": 0.00014373700632911395,
      "loss": 0.011,
      "step": 8400
    },
    {
      "epoch": 21.46493998736576,
      "grad_norm": 0.3831775486469269,
      "learning_rate": 0.0001424718164556962,
      "loss": 0.0087,
      "step": 8500
    },
    {
      "epoch": 21.717624763108024,
      "grad_norm": 0.18052732944488525,
      "learning_rate": 0.00014120662658227847,
      "loss": 0.0091,
      "step": 8600
    },
    {
      "epoch": 21.970309538850284,
      "grad_norm": 0.17256955802440643,
      "learning_rate": 0.00013994143670886076,
      "loss": 0.0121,
      "step": 8700
    },
    {
      "epoch": 22.0,
      "eval_bleu": 13.4688,
      "eval_gen_len": 14.6354,
      "eval_loss": 5.096054553985596,
      "eval_runtime": 540.1153,
      "eval_samples_per_second": 10.888,
      "eval_steps_per_second": 1.363,
      "step": 8712
    },
    {
      "epoch": 22.22236260265319,
      "grad_norm": 0.847822904586792,
      "learning_rate": 0.00013867624683544305,
      "loss": 0.008,
      "step": 8800
    },
    {
      "epoch": 22.475047378395452,
      "grad_norm": 0.04867393895983696,
      "learning_rate": 0.00013741105696202534,
      "loss": 0.0115,
      "step": 8900
    },
    {
      "epoch": 22.727732154137712,
      "grad_norm": 0.24624444544315338,
      "learning_rate": 0.0001361458670886076,
      "loss": 0.0098,
      "step": 9000
    },
    {
      "epoch": 22.980416929879976,
      "grad_norm": 0.6290240287780762,
      "learning_rate": 0.0001348806772151899,
      "loss": 0.0086,
      "step": 9100
    },
    {
      "epoch": 23.0,
      "eval_bleu": 13.5374,
      "eval_gen_len": 14.4979,
      "eval_loss": 5.132958889007568,
      "eval_runtime": 536.865,
      "eval_samples_per_second": 10.954,
      "eval_steps_per_second": 1.371,
      "step": 9108
    },
    {
      "epoch": 23.23246999368288,
      "grad_norm": 0.19094829261302948,
      "learning_rate": 0.00013361548734177215,
      "loss": 0.0069,
      "step": 9200
    },
    {
      "epoch": 23.485154769425144,
      "grad_norm": 0.031919267028570175,
      "learning_rate": 0.0001323502974683544,
      "loss": 0.0105,
      "step": 9300
    },
    {
      "epoch": 23.737839545167404,
      "grad_norm": 0.07248106598854065,
      "learning_rate": 0.00013108510759493673,
      "loss": 0.0089,
      "step": 9400
    },
    {
      "epoch": 23.990524320909664,
      "grad_norm": 0.059244658797979355,
      "learning_rate": 0.00012981991772151902,
      "loss": 0.0097,
      "step": 9500
    },
    {
      "epoch": 24.0,
      "eval_bleu": 13.4956,
      "eval_gen_len": 14.4816,
      "eval_loss": 5.115517616271973,
      "eval_runtime": 538.8426,
      "eval_samples_per_second": 10.914,
      "eval_steps_per_second": 1.366,
      "step": 9504
    },
    {
      "epoch": 24.242577384712572,
      "grad_norm": 0.6686831116676331,
      "learning_rate": 0.00012855472784810128,
      "loss": 0.0075,
      "step": 9600
    },
    {
      "epoch": 24.495262160454832,
      "grad_norm": 0.15613651275634766,
      "learning_rate": 0.00012728953797468357,
      "loss": 0.0071,
      "step": 9700
    },
    {
      "epoch": 24.747946936197096,
      "grad_norm": 0.03811512514948845,
      "learning_rate": 0.00012602434810126583,
      "loss": 0.0062,
      "step": 9800
    },
    {
      "epoch": 25.0,
      "grad_norm": 0.0894945040345192,
      "learning_rate": 0.0001247591582278481,
      "loss": 0.0074,
      "step": 9900
    },
    {
      "epoch": 25.0,
      "eval_bleu": 13.8177,
      "eval_gen_len": 14.3275,
      "eval_loss": 5.174163341522217,
      "eval_runtime": 531.7973,
      "eval_samples_per_second": 11.059,
      "eval_steps_per_second": 1.384,
      "step": 9900
    },
    {
      "epoch": 25.25268477574226,
      "grad_norm": 0.20836782455444336,
      "learning_rate": 0.00012349396835443038,
      "loss": 0.0057,
      "step": 10000
    },
    {
      "epoch": 25.505369551484524,
      "grad_norm": 0.5370838642120361,
      "learning_rate": 0.00012222877848101267,
      "loss": 0.0065,
      "step": 10100
    },
    {
      "epoch": 25.758054327226784,
      "grad_norm": 0.21669617295265198,
      "learning_rate": 0.00012096358860759496,
      "loss": 0.0058,
      "step": 10200
    },
    {
      "epoch": 26.0,
      "eval_bleu": 13.6641,
      "eval_gen_len": 14.219,
      "eval_loss": 5.147903919219971,
      "eval_runtime": 528.0869,
      "eval_samples_per_second": 11.136,
      "eval_steps_per_second": 1.394,
      "step": 10296
    },
    {
      "epoch": 26.010107391029692,
      "grad_norm": 0.2630160450935364,
      "learning_rate": 0.00011969839873417723,
      "loss": 0.006,
      "step": 10300
    },
    {
      "epoch": 26.262792166771952,
      "grad_norm": 0.048449043184518814,
      "learning_rate": 0.00011843320886075951,
      "loss": 0.0086,
      "step": 10400
    },
    {
      "epoch": 26.515476942514212,
      "grad_norm": 0.34334516525268555,
      "learning_rate": 0.00011716801898734178,
      "loss": 0.0058,
      "step": 10500
    },
    {
      "epoch": 26.768161718256476,
      "grad_norm": 0.12147314101457596,
      "learning_rate": 0.00011590282911392404,
      "loss": 0.0058,
      "step": 10600
    },
    {
      "epoch": 27.0,
      "eval_bleu": 13.7447,
      "eval_gen_len": 14.1751,
      "eval_loss": 5.193230628967285,
      "eval_runtime": 528.8414,
      "eval_samples_per_second": 11.121,
      "eval_steps_per_second": 1.392,
      "step": 10692
    },
    {
      "epoch": 27.02021478205938,
      "grad_norm": 0.29292619228363037,
      "learning_rate": 0.00011463763924050632,
      "loss": 0.0078,
      "step": 10700
    },
    {
      "epoch": 27.272899557801644,
      "grad_norm": 0.5977900624275208,
      "learning_rate": 0.00011337244936708862,
      "loss": 0.0066,
      "step": 10800
    },
    {
      "epoch": 27.525584333543904,
      "grad_norm": 0.8689119815826416,
      "learning_rate": 0.00011210725949367088,
      "loss": 0.0063,
      "step": 10900
    },
    {
      "epoch": 27.778269109286164,
      "grad_norm": 0.28833064436912537,
      "learning_rate": 0.00011084206962025316,
      "loss": 0.0044,
      "step": 11000
    },
    {
      "epoch": 28.0,
      "eval_bleu": 13.488,
      "eval_gen_len": 14.7169,
      "eval_loss": 5.161106109619141,
      "eval_runtime": 537.3924,
      "eval_samples_per_second": 10.944,
      "eval_steps_per_second": 1.37,
      "step": 11088
    },
    {
      "epoch": 28.030322173089072,
      "grad_norm": 0.7058536410331726,
      "learning_rate": 0.00010957687974683546,
      "loss": 0.0063,
      "step": 11100
    },
    {
      "epoch": 28.283006948831332,
      "grad_norm": 0.12089253216981888,
      "learning_rate": 0.00010831168987341774,
      "loss": 0.0082,
      "step": 11200
    },
    {
      "epoch": 28.535691724573596,
      "grad_norm": 0.6483332514762878,
      "learning_rate": 0.00010704649999999999,
      "loss": 0.0058,
      "step": 11300
    },
    {
      "epoch": 28.788376500315856,
      "grad_norm": 0.10533745586872101,
      "learning_rate": 0.00010578131012658227,
      "loss": 0.0083,
      "step": 11400
    },
    {
      "epoch": 29.0,
      "eval_bleu": 13.8153,
      "eval_gen_len": 14.3556,
      "eval_loss": 5.157731533050537,
      "eval_runtime": 531.5203,
      "eval_samples_per_second": 11.064,
      "eval_steps_per_second": 1.385,
      "step": 11484
    },
    {
      "epoch": 29.04042956411876,
      "grad_norm": 0.058924734592437744,
      "learning_rate": 0.00010451612025316458,
      "loss": 0.0067,
      "step": 11500
    },
    {
      "epoch": 29.293114339861024,
      "grad_norm": 0.2670024633407593,
      "learning_rate": 0.00010325093037974685,
      "loss": 0.0052,
      "step": 11600
    },
    {
      "epoch": 29.545799115603284,
      "grad_norm": 0.3689946234226227,
      "learning_rate": 0.00010198574050632911,
      "loss": 0.0067,
      "step": 11700
    },
    {
      "epoch": 29.798483891345548,
      "grad_norm": 0.1571495085954666,
      "learning_rate": 0.00010072055063291139,
      "loss": 0.0053,
      "step": 11800
    },
    {
      "epoch": 30.0,
      "eval_bleu": 14.1224,
      "eval_gen_len": 14.1012,
      "eval_loss": 5.206099033355713,
      "eval_runtime": 526.5861,
      "eval_samples_per_second": 11.168,
      "eval_steps_per_second": 1.398,
      "step": 11880
    },
    {
      "epoch": 30.050536955148452,
      "grad_norm": 0.02085699699819088,
      "learning_rate": 9.945536075949369e-05,
      "loss": 0.0044,
      "step": 11900
    },
    {
      "epoch": 30.303221730890712,
      "grad_norm": 0.041063107550144196,
      "learning_rate": 9.819017088607595e-05,
      "loss": 0.005,
      "step": 12000
    },
    {
      "epoch": 30.555906506632976,
      "grad_norm": 0.03547859191894531,
      "learning_rate": 9.692498101265823e-05,
      "loss": 0.0065,
      "step": 12100
    },
    {
      "epoch": 30.808591282375236,
      "grad_norm": 0.03922571241855621,
      "learning_rate": 9.56597911392405e-05,
      "loss": 0.0046,
      "step": 12200
    },
    {
      "epoch": 31.0,
      "eval_bleu": 13.9126,
      "eval_gen_len": 14.5045,
      "eval_loss": 5.24795389175415,
      "eval_runtime": 530.0839,
      "eval_samples_per_second": 11.094,
      "eval_steps_per_second": 1.388,
      "step": 12276
    },
    {
      "epoch": 31.060644346178144,
      "grad_norm": 0.1546187549829483,
      "learning_rate": 9.439460126582279e-05,
      "loss": 0.0036,
      "step": 12300
    },
    {
      "epoch": 31.313329121920404,
      "grad_norm": 0.18324150145053864,
      "learning_rate": 9.312941139240507e-05,
      "loss": 0.0037,
      "step": 12400
    },
    {
      "epoch": 31.566013897662664,
      "grad_norm": 0.019374554976820946,
      "learning_rate": 9.186422151898734e-05,
      "loss": 0.0054,
      "step": 12500
    },
    {
      "epoch": 31.818698673404928,
      "grad_norm": 0.04015549644827843,
      "learning_rate": 9.059903164556963e-05,
      "loss": 0.0054,
      "step": 12600
    },
    {
      "epoch": 32.0,
      "eval_bleu": 14.019,
      "eval_gen_len": 14.16,
      "eval_loss": 5.196505069732666,
      "eval_runtime": 526.0182,
      "eval_samples_per_second": 11.18,
      "eval_steps_per_second": 1.399,
      "step": 12672
    },
    {
      "epoch": 32.070751737207836,
      "grad_norm": 0.7235105633735657,
      "learning_rate": 8.933384177215191e-05,
      "loss": 0.0046,
      "step": 12700
    },
    {
      "epoch": 32.323436512950096,
      "grad_norm": 0.01608334481716156,
      "learning_rate": 8.806865189873417e-05,
      "loss": 0.0039,
      "step": 12800
    },
    {
      "epoch": 32.576121288692356,
      "grad_norm": 0.027920836582779884,
      "learning_rate": 8.680346202531647e-05,
      "loss": 0.0051,
      "step": 12900
    },
    {
      "epoch": 32.828806064434616,
      "grad_norm": 0.9806521534919739,
      "learning_rate": 8.553827215189875e-05,
      "loss": 0.0035,
      "step": 13000
    },
    {
      "epoch": 33.0,
      "eval_bleu": 14.004,
      "eval_gen_len": 14.4037,
      "eval_loss": 5.184691905975342,
      "eval_runtime": 529.8109,
      "eval_samples_per_second": 11.1,
      "eval_steps_per_second": 1.389,
      "step": 13068
    },
    {
      "epoch": 33.08085912823752,
      "grad_norm": 0.03555789962410927,
      "learning_rate": 8.427308227848101e-05,
      "loss": 0.004,
      "step": 13100
    },
    {
      "epoch": 33.33354390397979,
      "grad_norm": 0.0131059680134058,
      "learning_rate": 8.300789240506328e-05,
      "loss": 0.0027,
      "step": 13200
    },
    {
      "epoch": 33.58622867972205,
      "grad_norm": 0.08038444817066193,
      "learning_rate": 8.175535443037976e-05,
      "loss": 0.0036,
      "step": 13300
    },
    {
      "epoch": 33.83891345546431,
      "grad_norm": 0.102862149477005,
      "learning_rate": 8.049016455696203e-05,
      "loss": 0.0032,
      "step": 13400
    },
    {
      "epoch": 34.0,
      "eval_bleu": 14.228,
      "eval_gen_len": 14.2273,
      "eval_loss": 5.212389945983887,
      "eval_runtime": 524.326,
      "eval_samples_per_second": 11.216,
      "eval_steps_per_second": 1.404,
      "step": 13464
    },
    {
      "epoch": 34.09096651926721,
      "grad_norm": 0.011331531219184399,
      "learning_rate": 7.922497468354431e-05,
      "loss": 0.0041,
      "step": 13500
    },
    {
      "epoch": 34.34365129500947,
      "grad_norm": 0.2522341012954712,
      "learning_rate": 7.79597848101266e-05,
      "loss": 0.0038,
      "step": 13600
    },
    {
      "epoch": 34.59633607075174,
      "grad_norm": 0.0073499069549143314,
      "learning_rate": 7.669459493670887e-05,
      "loss": 0.0028,
      "step": 13700
    },
    {
      "epoch": 34.849020846494,
| "grad_norm": 0.006374124903231859, | |
| "learning_rate": 7.542940506329114e-05, | |
| "loss": 0.0024, | |
| "step": 13800 | |
| }, | |
| { | |
| "epoch": 35.0, | |
| "eval_bleu": 14.2703, | |
| "eval_gen_len": 14.0995, | |
| "eval_loss": 5.208999156951904, | |
| "eval_runtime": 526.3437, | |
| "eval_samples_per_second": 11.173, | |
| "eval_steps_per_second": 1.398, | |
| "step": 13860 | |
| }, | |
| { | |
| "epoch": 35.101073910296904, | |
| "grad_norm": 0.10820985585451126, | |
| "learning_rate": 7.416421518987341e-05, | |
| "loss": 0.0031, | |
| "step": 13900 | |
| }, | |
| { | |
| "epoch": 35.353758686039164, | |
| "grad_norm": 0.011806854978203773, | |
| "learning_rate": 7.289902531645571e-05, | |
| "loss": 0.0035, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 35.606443461781424, | |
| "grad_norm": 0.039563409984111786, | |
| "learning_rate": 7.163383544303798e-05, | |
| "loss": 0.0025, | |
| "step": 14100 | |
| }, | |
| { | |
| "epoch": 35.85912823752369, | |
| "grad_norm": 0.03173103556036949, | |
| "learning_rate": 7.036864556962025e-05, | |
| "loss": 0.0029, | |
| "step": 14200 | |
| }, | |
| { | |
| "epoch": 36.0, | |
| "eval_bleu": 13.7593, | |
| "eval_gen_len": 14.604, | |
| "eval_loss": 5.232673168182373, | |
| "eval_runtime": 537.4765, | |
| "eval_samples_per_second": 10.942, | |
| "eval_steps_per_second": 1.369, | |
| "step": 14256 | |
| }, | |
| { | |
| "epoch": 36.111181301326596, | |
| "grad_norm": 0.08217954635620117, | |
| "learning_rate": 6.910345569620254e-05, | |
| "loss": 0.003, | |
| "step": 14300 | |
| }, | |
| { | |
| "epoch": 36.363866077068856, | |
| "grad_norm": 0.010925635695457458, | |
| "learning_rate": 6.783826582278481e-05, | |
| "loss": 0.0036, | |
| "step": 14400 | |
| }, | |
| { | |
| "epoch": 36.616550852811116, | |
| "grad_norm": 0.6118758916854858, | |
| "learning_rate": 6.657307594936709e-05, | |
| "loss": 0.0039, | |
| "step": 14500 | |
| }, | |
| { | |
| "epoch": 36.869235628553376, | |
| "grad_norm": 0.01771758496761322, | |
| "learning_rate": 6.530788607594938e-05, | |
| "loss": 0.0043, | |
| "step": 14600 | |
| }, | |
| { | |
| "epoch": 37.0, | |
| "eval_bleu": 14.3019, | |
| "eval_gen_len": 14.0886, | |
| "eval_loss": 5.2005295753479, | |
| "eval_runtime": 524.3574, | |
| "eval_samples_per_second": 11.216, | |
| "eval_steps_per_second": 1.404, | |
| "step": 14652 | |
| }, | |
| { | |
| "epoch": 37.12128869235629, | |
| "grad_norm": 0.015916310250759125, | |
| "learning_rate": 6.404269620253165e-05, | |
| "loss": 0.0029, | |
| "step": 14700 | |
| }, | |
| { | |
| "epoch": 37.37397346809855, | |
| "grad_norm": 0.022140830755233765, | |
| "learning_rate": 6.277750632911393e-05, | |
| "loss": 0.0034, | |
| "step": 14800 | |
| }, | |
| { | |
| "epoch": 37.62665824384081, | |
| "grad_norm": 0.01691320165991783, | |
| "learning_rate": 6.151231645569619e-05, | |
| "loss": 0.003, | |
| "step": 14900 | |
| }, | |
| { | |
| "epoch": 37.87934301958307, | |
| "grad_norm": 0.02250687964260578, | |
| "learning_rate": 6.024712658227849e-05, | |
| "loss": 0.0022, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 38.0, | |
| "eval_bleu": 14.2565, | |
| "eval_gen_len": 14.1928, | |
| "eval_loss": 5.221782684326172, | |
| "eval_runtime": 528.2072, | |
| "eval_samples_per_second": 11.134, | |
| "eval_steps_per_second": 1.393, | |
| "step": 15048 | |
| }, | |
| { | |
| "epoch": 38.13139608338598, | |
| "grad_norm": 0.008933404460549355, | |
| "learning_rate": 5.898193670886076e-05, | |
| "loss": 0.0025, | |
| "step": 15100 | |
| }, | |
| { | |
| "epoch": 38.38408085912824, | |
| "grad_norm": 0.008681390434503555, | |
| "learning_rate": 5.771674683544304e-05, | |
| "loss": 0.0023, | |
| "step": 15200 | |
| }, | |
| { | |
| "epoch": 38.6367656348705, | |
| "grad_norm": 0.032701168209314346, | |
| "learning_rate": 5.6451556962025333e-05, | |
| "loss": 0.0021, | |
| "step": 15300 | |
| }, | |
| { | |
| "epoch": 38.88945041061276, | |
| "grad_norm": 0.01919535920023918, | |
| "learning_rate": 5.51863670886076e-05, | |
| "loss": 0.0031, | |
| "step": 15400 | |
| }, | |
| { | |
| "epoch": 39.0, | |
| "eval_bleu": 14.1208, | |
| "eval_gen_len": 14.438, | |
| "eval_loss": 5.240268707275391, | |
| "eval_runtime": 534.3637, | |
| "eval_samples_per_second": 11.006, | |
| "eval_steps_per_second": 1.377, | |
| "step": 15444 | |
| }, | |
| { | |
| "epoch": 39.141503474415664, | |
| "grad_norm": 0.07539849728345871, | |
| "learning_rate": 5.3921177215189864e-05, | |
| "loss": 0.0022, | |
| "step": 15500 | |
| }, | |
| { | |
| "epoch": 39.39418825015793, | |
| "grad_norm": 0.00894533097743988, | |
| "learning_rate": 5.265598734177216e-05, | |
| "loss": 0.0021, | |
| "step": 15600 | |
| }, | |
| { | |
| "epoch": 39.64687302590019, | |
| "grad_norm": 0.03846971318125725, | |
| "learning_rate": 5.1390797468354435e-05, | |
| "loss": 0.0019, | |
| "step": 15700 | |
| }, | |
| { | |
| "epoch": 39.89955780164245, | |
| "grad_norm": 0.009916193783283234, | |
| "learning_rate": 5.012560759493671e-05, | |
| "loss": 0.0022, | |
| "step": 15800 | |
| }, | |
| { | |
| "epoch": 40.0, | |
| "eval_bleu": 14.2927, | |
| "eval_gen_len": 14.3079, | |
| "eval_loss": 5.250655651092529, | |
| "eval_runtime": 527.1014, | |
| "eval_samples_per_second": 11.157, | |
| "eval_steps_per_second": 1.396, | |
| "step": 15840 | |
| }, | |
| { | |
| "epoch": 40.151610865445356, | |
| "grad_norm": 0.004169174004346132, | |
| "learning_rate": 4.886041772151898e-05, | |
| "loss": 0.0016, | |
| "step": 15900 | |
| }, | |
| { | |
| "epoch": 40.404295641187616, | |
| "grad_norm": 0.019835354760289192, | |
| "learning_rate": 4.7595227848101274e-05, | |
| "loss": 0.0024, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 40.65698041692988, | |
| "grad_norm": 0.004063699394464493, | |
| "learning_rate": 4.633003797468354e-05, | |
| "loss": 0.0027, | |
| "step": 16100 | |
| }, | |
| { | |
| "epoch": 40.90966519267214, | |
| "grad_norm": 0.023647163063287735, | |
| "learning_rate": 4.506484810126582e-05, | |
| "loss": 0.0014, | |
| "step": 16200 | |
| }, | |
| { | |
| "epoch": 41.0, | |
| "eval_bleu": 14.2727, | |
| "eval_gen_len": 14.2874, | |
| "eval_loss": 5.255828380584717, | |
| "eval_runtime": 526.7142, | |
| "eval_samples_per_second": 11.165, | |
| "eval_steps_per_second": 1.397, | |
| "step": 16236 | |
| }, | |
| { | |
| "epoch": 41.16171825647505, | |
| "grad_norm": 0.022712113335728645, | |
| "learning_rate": 4.3799658227848114e-05, | |
| "loss": 0.0016, | |
| "step": 16300 | |
| }, | |
| { | |
| "epoch": 41.41440303221731, | |
| "grad_norm": 0.006393743213266134, | |
| "learning_rate": 4.253446835443038e-05, | |
| "loss": 0.0015, | |
| "step": 16400 | |
| }, | |
| { | |
| "epoch": 41.66708780795957, | |
| "grad_norm": 0.020621536299586296, | |
| "learning_rate": 4.126927848101266e-05, | |
| "loss": 0.0019, | |
| "step": 16500 | |
| }, | |
| { | |
| "epoch": 41.919772583701835, | |
| "grad_norm": 0.06859384477138519, | |
| "learning_rate": 4.000408860759495e-05, | |
| "loss": 0.0021, | |
| "step": 16600 | |
| }, | |
| { | |
| "epoch": 42.0, | |
| "eval_bleu": 14.1117, | |
| "eval_gen_len": 14.1115, | |
| "eval_loss": 5.27353572845459, | |
| "eval_runtime": 523.6053, | |
| "eval_samples_per_second": 11.232, | |
| "eval_steps_per_second": 1.406, | |
| "step": 16632 | |
| }, | |
| { | |
| "epoch": 42.17182564750474, | |
| "grad_norm": 0.025820279493927956, | |
| "learning_rate": 3.873889873417722e-05, | |
| "loss": 0.0019, | |
| "step": 16700 | |
| }, | |
| { | |
| "epoch": 42.424510423247, | |
| "grad_norm": 0.01880701631307602, | |
| "learning_rate": 3.747370886075949e-05, | |
| "loss": 0.0018, | |
| "step": 16800 | |
| }, | |
| { | |
| "epoch": 42.67719519898926, | |
| "grad_norm": 0.08723778277635574, | |
| "learning_rate": 3.6208518987341786e-05, | |
| "loss": 0.0016, | |
| "step": 16900 | |
| }, | |
| { | |
| "epoch": 42.92987997473152, | |
| "grad_norm": 0.05921424925327301, | |
| "learning_rate": 3.4943329113924055e-05, | |
| "loss": 0.0013, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 43.0, | |
| "eval_bleu": 14.4166, | |
| "eval_gen_len": 14.1923, | |
| "eval_loss": 5.2706618309021, | |
| "eval_runtime": 523.861, | |
| "eval_samples_per_second": 11.226, | |
| "eval_steps_per_second": 1.405, | |
| "step": 17028 | |
| }, | |
| { | |
| "epoch": 43.18193303853443, | |
| "grad_norm": 0.0028839095029979944, | |
| "learning_rate": 3.367813924050633e-05, | |
| "loss": 0.0021, | |
| "step": 17100 | |
| }, | |
| { | |
| "epoch": 43.43461781427669, | |
| "grad_norm": 0.008104875683784485, | |
| "learning_rate": 3.2425601265822784e-05, | |
| "loss": 0.0014, | |
| "step": 17200 | |
| }, | |
| { | |
| "epoch": 43.68730259001895, | |
| "grad_norm": 0.043954506516456604, | |
| "learning_rate": 3.116041139240508e-05, | |
| "loss": 0.0013, | |
| "step": 17300 | |
| }, | |
| { | |
| "epoch": 43.93998736576121, | |
| "grad_norm": 0.003915693145245314, | |
| "learning_rate": 2.989522151898734e-05, | |
| "loss": 0.0021, | |
| "step": 17400 | |
| }, | |
| { | |
| "epoch": 44.0, | |
| "eval_bleu": 14.4223, | |
| "eval_gen_len": 14.2129, | |
| "eval_loss": 5.279012203216553, | |
| "eval_runtime": 523.2065, | |
| "eval_samples_per_second": 11.24, | |
| "eval_steps_per_second": 1.407, | |
| "step": 17424 | |
| }, | |
| { | |
| "epoch": 44.192040429564116, | |
| "grad_norm": 0.017741164192557335, | |
| "learning_rate": 2.8630031645569617e-05, | |
| "loss": 0.0015, | |
| "step": 17500 | |
| }, | |
| { | |
| "epoch": 44.44472520530638, | |
| "grad_norm": 0.018855927512049675, | |
| "learning_rate": 2.736484177215189e-05, | |
| "loss": 0.0011, | |
| "step": 17600 | |
| }, | |
| { | |
| "epoch": 44.697409981048644, | |
| "grad_norm": 0.005235583987087011, | |
| "learning_rate": 2.6099651898734178e-05, | |
| "loss": 0.0015, | |
| "step": 17700 | |
| }, | |
| { | |
| "epoch": 44.950094756790904, | |
| "grad_norm": 0.009746776893734932, | |
| "learning_rate": 2.4834462025316453e-05, | |
| "loss": 0.0016, | |
| "step": 17800 | |
| }, | |
| { | |
| "epoch": 45.0, | |
| "eval_bleu": 14.486, | |
| "eval_gen_len": 14.2625, | |
| "eval_loss": 5.275847911834717, | |
| "eval_runtime": 521.3464, | |
| "eval_samples_per_second": 11.28, | |
| "eval_steps_per_second": 1.412, | |
| "step": 17820 | |
| }, | |
| { | |
| "epoch": 45.20214782059381, | |
| "grad_norm": 0.016078555956482887, | |
| "learning_rate": 2.3569272151898725e-05, | |
| "loss": 0.0026, | |
| "step": 17900 | |
| }, | |
| { | |
| "epoch": 45.45483259633607, | |
| "grad_norm": 0.043376993387937546, | |
| "learning_rate": 2.230408227848102e-05, | |
| "loss": 0.0012, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 45.707517372078335, | |
| "grad_norm": 0.0051286788657307625, | |
| "learning_rate": 2.1038892405063293e-05, | |
| "loss": 0.001, | |
| "step": 18100 | |
| }, | |
| { | |
| "epoch": 45.960202147820596, | |
| "grad_norm": 0.003953118342906237, | |
| "learning_rate": 1.9773702531645565e-05, | |
| "loss": 0.0019, | |
| "step": 18200 | |
| }, | |
| { | |
| "epoch": 46.0, | |
| "eval_bleu": 14.5501, | |
| "eval_gen_len": 14.2695, | |
| "eval_loss": 5.25464391708374, | |
| "eval_runtime": 524.8843, | |
| "eval_samples_per_second": 11.204, | |
| "eval_steps_per_second": 1.402, | |
| "step": 18216 | |
| }, | |
| { | |
| "epoch": 46.2122552116235, | |
| "grad_norm": 0.013465896248817444, | |
| "learning_rate": 1.8508512658227857e-05, | |
| "loss": 0.0012, | |
| "step": 18300 | |
| }, | |
| { | |
| "epoch": 46.46493998736576, | |
| "grad_norm": 0.3650831878185272, | |
| "learning_rate": 1.724332278481013e-05, | |
| "loss": 0.0012, | |
| "step": 18400 | |
| }, | |
| { | |
| "epoch": 46.71762476310802, | |
| "grad_norm": 0.0039703696966171265, | |
| "learning_rate": 1.59781329113924e-05, | |
| "loss": 0.0014, | |
| "step": 18500 | |
| }, | |
| { | |
| "epoch": 46.97030953885029, | |
| "grad_norm": 0.00649018120020628, | |
| "learning_rate": 1.4712943037974674e-05, | |
| "loss": 0.0011, | |
| "step": 18600 | |
| }, | |
| { | |
| "epoch": 47.0, | |
| "eval_bleu": 14.6166, | |
| "eval_gen_len": 14.1882, | |
| "eval_loss": 5.265357494354248, | |
| "eval_runtime": 516.9862, | |
| "eval_samples_per_second": 11.376, | |
| "eval_steps_per_second": 1.424, | |
| "step": 18612 | |
| }, | |
| { | |
| "epoch": 47.22236260265319, | |
| "grad_norm": 0.013269159942865372, | |
| "learning_rate": 1.3447753164556967e-05, | |
| "loss": 0.0012, | |
| "step": 18700 | |
| }, | |
| { | |
| "epoch": 47.47504737839545, | |
| "grad_norm": 0.005190219730138779, | |
| "learning_rate": 1.2182563291139239e-05, | |
| "loss": 0.0008, | |
| "step": 18800 | |
| }, | |
| { | |
| "epoch": 47.72773215413771, | |
| "grad_norm": 0.03904829919338226, | |
| "learning_rate": 1.091737341772151e-05, | |
| "loss": 0.0012, | |
| "step": 18900 | |
| }, | |
| { | |
| "epoch": 47.98041692987997, | |
| "grad_norm": 0.011222691275179386, | |
| "learning_rate": 9.652183544303805e-06, | |
| "loss": 0.0016, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 48.0, | |
| "eval_bleu": 14.5838, | |
| "eval_gen_len": 14.2617, | |
| "eval_loss": 5.261044502258301, | |
| "eval_runtime": 523.6855, | |
| "eval_samples_per_second": 11.23, | |
| "eval_steps_per_second": 1.405, | |
| "step": 19008 | |
| }, | |
| { | |
| "epoch": 48.232469993682884, | |
| "grad_norm": 0.006541873328387737, | |
| "learning_rate": 8.386993670886077e-06, | |
| "loss": 0.0011, | |
| "step": 19100 | |
| }, | |
| { | |
| "epoch": 48.485154769425144, | |
| "grad_norm": 0.026967084035277367, | |
| "learning_rate": 7.121803797468348e-06, | |
| "loss": 0.0006, | |
| "step": 19200 | |
| }, | |
| { | |
| "epoch": 48.737839545167404, | |
| "grad_norm": 0.013220852240920067, | |
| "learning_rate": 5.856613924050642e-06, | |
| "loss": 0.0012, | |
| "step": 19300 | |
| }, | |
| { | |
| "epoch": 48.990524320909664, | |
| "grad_norm": 0.01667727902531624, | |
| "learning_rate": 4.591424050632914e-06, | |
| "loss": 0.0011, | |
| "step": 19400 | |
| }, | |
| { | |
| "epoch": 49.0, | |
| "eval_bleu": 14.5987, | |
| "eval_gen_len": 14.2119, | |
| "eval_loss": 5.264169216156006, | |
| "eval_runtime": 516.2093, | |
| "eval_samples_per_second": 11.393, | |
| "eval_steps_per_second": 1.426, | |
| "step": 19404 | |
| }, | |
| { | |
| "epoch": 49.24257738471257, | |
| "grad_norm": 0.009594439528882504, | |
| "learning_rate": 3.3262341772151843e-06, | |
| "loss": 0.0007, | |
| "step": 19500 | |
| }, | |
| { | |
| "epoch": 49.495262160454836, | |
| "grad_norm": 0.015515263192355633, | |
| "learning_rate": 2.0610443037974787e-06, | |
| "loss": 0.0009, | |
| "step": 19600 | |
| }, | |
| { | |
| "epoch": 49.747946936197096, | |
| "grad_norm": 0.34153661131858826, | |
| "learning_rate": 7.958544303797504e-07, | |
| "loss": 0.001, | |
| "step": 19700 | |
| }, | |
| { | |
| "epoch": 49.87428932406822, | |
| "eval_bleu": 14.576, | |
| "eval_gen_len": 14.2289, | |
| "eval_loss": 5.264520168304443, | |
| "eval_runtime": 519.5989, | |
| "eval_samples_per_second": 11.318, | |
| "eval_steps_per_second": 1.416, | |
| "step": 19750 | |
| }, | |
| { | |
| "epoch": 49.87428932406822, | |
| "step": 19750, | |
| "total_flos": 6.840351903574917e+17, | |
| "train_loss": 0.00785975846080581, | |
| "train_runtime": 68802.4337, | |
| "train_samples_per_second": 18.397, | |
| "train_steps_per_second": 0.287 | |
| } | |
| ], | |
| "logging_steps": 100, | |
| "max_steps": 19750, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 50, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 6.840351903574917e+17, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |