{
  "best_global_step": 4689,
  "best_metric": 0.32149304295216685,
  "best_model_checkpoint": "/kaggle/working/codebert_b_local/checkpoint-4689",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 4689,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06397952655150352,
      "grad_norm": 4.3835320472717285,
      "learning_rate": 3.96e-06,
      "loss": 1.6251,
      "step": 100
    },
    {
      "epoch": 0.12795905310300704,
      "grad_norm": 1.2909411191940308,
      "learning_rate": 7.960000000000002e-06,
      "loss": 0.6209,
      "step": 200
    },
    {
      "epoch": 0.19193857965451055,
      "grad_norm": 7.392430305480957,
      "learning_rate": 1.196e-05,
      "loss": 0.5322,
      "step": 300
    },
    {
      "epoch": 0.2559181062060141,
      "grad_norm": 2.060398578643799,
      "learning_rate": 1.5960000000000003e-05,
      "loss": 0.4804,
      "step": 400
    },
    {
      "epoch": 0.3198976327575176,
      "grad_norm": 19.443620681762695,
      "learning_rate": 1.9960000000000002e-05,
      "loss": 0.4048,
      "step": 500
    },
    {
      "epoch": 0.3838771593090211,
      "grad_norm": 12.475482940673828,
      "learning_rate": 1.9527333492480306e-05,
      "loss": 0.4219,
      "step": 600
    },
    {
      "epoch": 0.44785668586052463,
      "grad_norm": 7.762031555175781,
      "learning_rate": 1.9049892575793746e-05,
      "loss": 0.3789,
      "step": 700
    },
    {
      "epoch": 0.5118362124120281,
      "grad_norm": 2.098180055618286,
      "learning_rate": 1.8572451659107187e-05,
      "loss": 0.4134,
      "step": 800
    },
    {
      "epoch": 0.5758157389635317,
      "grad_norm": 2.177507162094116,
      "learning_rate": 1.8095010742420627e-05,
      "loss": 0.3903,
      "step": 900
    },
    {
      "epoch": 0.6397952655150352,
      "grad_norm": 5.113386631011963,
      "learning_rate": 1.7617569825734067e-05,
      "loss": 0.3559,
      "step": 1000
    },
    {
      "epoch": 0.7037747920665387,
      "grad_norm": 4.909190654754639,
      "learning_rate": 1.7140128909047508e-05,
      "loss": 0.3219,
      "step": 1100
    },
    {
      "epoch": 0.7677543186180422,
      "grad_norm": 5.605203628540039,
      "learning_rate": 1.6662687992360948e-05,
      "loss": 0.3821,
      "step": 1200
    },
    {
      "epoch": 0.8317338451695457,
      "grad_norm": 6.7283453941345215,
      "learning_rate": 1.6185247075674385e-05,
      "loss": 0.3674,
      "step": 1300
    },
    {
      "epoch": 0.8957133717210493,
      "grad_norm": 4.659298419952393,
      "learning_rate": 1.5707806158987825e-05,
      "loss": 0.3485,
      "step": 1400
    },
    {
      "epoch": 0.9596928982725528,
      "grad_norm": 6.4099931716918945,
      "learning_rate": 1.5230365242301266e-05,
      "loss": 0.3565,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9024,
      "eval_f1_macro": 0.23154395234377326,
      "eval_f1_weighted": 0.8916810274029613,
      "eval_loss": 0.3302375078201294,
      "eval_precision_macro": 0.2442325389892706,
      "eval_recall_macro": 0.2618191076765516,
      "eval_runtime": 91.0613,
      "eval_samples_per_second": 109.816,
      "eval_steps_per_second": 3.437,
      "step": 1563
    },
    {
      "epoch": 1.0236724248240563,
      "grad_norm": 5.717738151550293,
      "learning_rate": 1.4752924325614706e-05,
      "loss": 0.318,
      "step": 1600
    },
    {
      "epoch": 1.0876519513755598,
      "grad_norm": 4.969061851501465,
      "learning_rate": 1.4275483408928146e-05,
      "loss": 0.3001,
      "step": 1700
    },
    {
      "epoch": 1.1516314779270633,
      "grad_norm": 3.3130006790161133,
      "learning_rate": 1.3798042492241585e-05,
      "loss": 0.3332,
      "step": 1800
    },
    {
      "epoch": 1.2156110044785668,
      "grad_norm": 4.077260971069336,
      "learning_rate": 1.3320601575555025e-05,
      "loss": 0.32,
      "step": 1900
    },
    {
      "epoch": 1.2795905310300704,
      "grad_norm": 3.1868574619293213,
      "learning_rate": 1.2843160658868466e-05,
      "loss": 0.3095,
      "step": 2000
    },
    {
      "epoch": 1.3435700575815739,
      "grad_norm": 6.247142314910889,
      "learning_rate": 1.2365719742181906e-05,
      "loss": 0.3105,
      "step": 2100
    },
    {
      "epoch": 1.4075495841330774,
      "grad_norm": 11.029816627502441,
      "learning_rate": 1.1888278825495345e-05,
      "loss": 0.2975,
      "step": 2200
    },
    {
      "epoch": 1.471529110684581,
      "grad_norm": 1.82353937625885,
      "learning_rate": 1.1410837908808785e-05,
      "loss": 0.3047,
      "step": 2300
    },
    {
      "epoch": 1.5355086372360844,
      "grad_norm": 8.549129486083984,
      "learning_rate": 1.0933396992122225e-05,
      "loss": 0.2983,
      "step": 2400
    },
    {
      "epoch": 1.599488163787588,
      "grad_norm": 5.218935966491699,
      "learning_rate": 1.0455956075435664e-05,
      "loss": 0.2965,
      "step": 2500
    },
    {
      "epoch": 1.6634676903390915,
      "grad_norm": 1.3732364177703857,
      "learning_rate": 9.978515158749106e-06,
      "loss": 0.274,
      "step": 2600
    },
    {
      "epoch": 1.727447216890595,
      "grad_norm": 5.042181968688965,
      "learning_rate": 9.501074242062547e-06,
      "loss": 0.2804,
      "step": 2700
    },
    {
      "epoch": 1.7914267434420985,
      "grad_norm": 2.0916764736175537,
      "learning_rate": 9.023633325375985e-06,
      "loss": 0.2689,
      "step": 2800
    },
    {
      "epoch": 1.855406269993602,
      "grad_norm": 5.730693817138672,
      "learning_rate": 8.546192408689426e-06,
      "loss": 0.2798,
      "step": 2900
    },
    {
      "epoch": 1.9193857965451055,
      "grad_norm": 1.510636806488037,
      "learning_rate": 8.068751492002866e-06,
      "loss": 0.2811,
      "step": 3000
    },
    {
      "epoch": 1.983365323096609,
      "grad_norm": 5.20271110534668,
      "learning_rate": 7.591310575316305e-06,
      "loss": 0.2862,
      "step": 3100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9119,
      "eval_f1_macro": 0.284542217831853,
      "eval_f1_weighted": 0.900679083795208,
      "eval_loss": 0.30098485946655273,
      "eval_precision_macro": 0.36539760346541894,
      "eval_recall_macro": 0.2947437498965655,
      "eval_runtime": 91.3907,
      "eval_samples_per_second": 109.42,
      "eval_steps_per_second": 3.425,
      "step": 3126
    },
    {
      "epoch": 2.0473448496481126,
      "grad_norm": 4.349801063537598,
      "learning_rate": 7.113869658629745e-06,
      "loss": 0.2495,
      "step": 3200
    },
    {
      "epoch": 2.111324376199616,
      "grad_norm": 4.5755085945129395,
      "learning_rate": 6.636428741943185e-06,
      "loss": 0.2693,
      "step": 3300
    },
    {
      "epoch": 2.1753039027511196,
      "grad_norm": 5.139939308166504,
      "learning_rate": 6.158987825256625e-06,
      "loss": 0.2124,
      "step": 3400
    },
    {
      "epoch": 2.239283429302623,
      "grad_norm": 2.8210389614105225,
      "learning_rate": 5.681546908570065e-06,
      "loss": 0.2504,
      "step": 3500
    },
    {
      "epoch": 2.3032629558541267,
      "grad_norm": 3.741081476211548,
      "learning_rate": 5.204105991883505e-06,
      "loss": 0.2478,
      "step": 3600
    },
    {
      "epoch": 2.36724248240563,
      "grad_norm": 5.447690010070801,
      "learning_rate": 4.726665075196944e-06,
      "loss": 0.2405,
      "step": 3700
    },
    {
      "epoch": 2.4312220089571337,
      "grad_norm": 4.314721584320068,
      "learning_rate": 4.249224158510385e-06,
      "loss": 0.2561,
      "step": 3800
    },
    {
      "epoch": 2.495201535508637,
      "grad_norm": 4.551843643188477,
      "learning_rate": 3.771783241823825e-06,
      "loss": 0.2435,
      "step": 3900
    },
    {
      "epoch": 2.5591810620601407,
      "grad_norm": 3.1414506435394287,
      "learning_rate": 3.2943423251372647e-06,
      "loss": 0.2721,
      "step": 4000
    },
    {
      "epoch": 2.6231605886116443,
      "grad_norm": 8.281805992126465,
      "learning_rate": 2.8169014084507046e-06,
      "loss": 0.2602,
      "step": 4100
    },
    {
      "epoch": 2.6871401151631478,
      "grad_norm": 4.985534191131592,
      "learning_rate": 2.3394604917641446e-06,
      "loss": 0.2394,
      "step": 4200
    },
    {
      "epoch": 2.7511196417146513,
      "grad_norm": 7.348026275634766,
      "learning_rate": 1.8620195750775843e-06,
      "loss": 0.2416,
      "step": 4300
    },
    {
      "epoch": 2.815099168266155,
      "grad_norm": 2.279198408126831,
      "learning_rate": 1.3845786583910242e-06,
      "loss": 0.2323,
      "step": 4400
    },
    {
      "epoch": 2.8790786948176583,
      "grad_norm": 5.688992023468018,
      "learning_rate": 9.071377417044641e-07,
      "loss": 0.2455,
      "step": 4500
    },
    {
      "epoch": 2.943058221369162,
      "grad_norm": 3.175658941268921,
      "learning_rate": 4.2969682501790407e-07,
      "loss": 0.2379,
      "step": 4600
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9091,
      "eval_f1_macro": 0.32149304295216685,
      "eval_f1_weighted": 0.903091739400006,
      "eval_loss": 0.305535227060318,
      "eval_precision_macro": 0.38093123210850466,
      "eval_recall_macro": 0.3358332333714398,
      "eval_runtime": 91.427,
      "eval_samples_per_second": 109.377,
      "eval_steps_per_second": 3.423,
      "step": 4689
    }
  ],
  "logging_steps": 100,
  "max_steps": 4689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.97349237504e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}