{
  "best_global_step": 3126,
  "best_metric": 0.284542217831853,
  "best_model_checkpoint": "/kaggle/working/codebert_b_local/checkpoint-3126",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 3126,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06397952655150352,
      "grad_norm": 4.3835320472717285,
      "learning_rate": 3.96e-06,
      "loss": 1.6251,
      "step": 100
    },
    {
      "epoch": 0.12795905310300704,
      "grad_norm": 1.2909411191940308,
      "learning_rate": 7.960000000000002e-06,
      "loss": 0.6209,
      "step": 200
    },
    {
      "epoch": 0.19193857965451055,
      "grad_norm": 7.392430305480957,
      "learning_rate": 1.196e-05,
      "loss": 0.5322,
      "step": 300
    },
    {
      "epoch": 0.2559181062060141,
      "grad_norm": 2.060398578643799,
      "learning_rate": 1.5960000000000003e-05,
      "loss": 0.4804,
      "step": 400
    },
    {
      "epoch": 0.3198976327575176,
      "grad_norm": 19.443620681762695,
      "learning_rate": 1.9960000000000002e-05,
      "loss": 0.4048,
      "step": 500
    },
    {
      "epoch": 0.3838771593090211,
      "grad_norm": 12.475482940673828,
      "learning_rate": 1.9527333492480306e-05,
      "loss": 0.4219,
      "step": 600
    },
    {
      "epoch": 0.44785668586052463,
      "grad_norm": 7.762031555175781,
      "learning_rate": 1.9049892575793746e-05,
      "loss": 0.3789,
      "step": 700
    },
    {
      "epoch": 0.5118362124120281,
      "grad_norm": 2.098180055618286,
      "learning_rate": 1.8572451659107187e-05,
      "loss": 0.4134,
      "step": 800
    },
    {
      "epoch": 0.5758157389635317,
      "grad_norm": 2.177507162094116,
      "learning_rate": 1.8095010742420627e-05,
      "loss": 0.3903,
      "step": 900
    },
    {
      "epoch": 0.6397952655150352,
      "grad_norm": 5.113386631011963,
      "learning_rate": 1.7617569825734067e-05,
      "loss": 0.3559,
      "step": 1000
    },
    {
      "epoch": 0.7037747920665387,
      "grad_norm": 4.909190654754639,
      "learning_rate": 1.7140128909047508e-05,
      "loss": 0.3219,
      "step": 1100
    },
    {
      "epoch": 0.7677543186180422,
      "grad_norm": 5.605203628540039,
      "learning_rate": 1.6662687992360948e-05,
      "loss": 0.3821,
      "step": 1200
    },
    {
      "epoch": 0.8317338451695457,
      "grad_norm": 6.7283453941345215,
      "learning_rate": 1.6185247075674385e-05,
      "loss": 0.3674,
      "step": 1300
    },
    {
      "epoch": 0.8957133717210493,
      "grad_norm": 4.659298419952393,
      "learning_rate": 1.5707806158987825e-05,
      "loss": 0.3485,
      "step": 1400
    },
    {
      "epoch": 0.9596928982725528,
      "grad_norm": 6.4099931716918945,
      "learning_rate": 1.5230365242301266e-05,
      "loss": 0.3565,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9024,
      "eval_f1_macro": 0.23154395234377326,
      "eval_f1_weighted": 0.8916810274029613,
      "eval_loss": 0.3302375078201294,
      "eval_precision_macro": 0.2442325389892706,
      "eval_recall_macro": 0.2618191076765516,
      "eval_runtime": 91.0613,
      "eval_samples_per_second": 109.816,
      "eval_steps_per_second": 3.437,
      "step": 1563
    },
    {
      "epoch": 1.0236724248240563,
      "grad_norm": 5.717738151550293,
      "learning_rate": 1.4752924325614706e-05,
      "loss": 0.318,
      "step": 1600
    },
    {
      "epoch": 1.0876519513755598,
      "grad_norm": 4.969061851501465,
      "learning_rate": 1.4275483408928146e-05,
      "loss": 0.3001,
      "step": 1700
    },
    {
      "epoch": 1.1516314779270633,
      "grad_norm": 3.3130006790161133,
      "learning_rate": 1.3798042492241585e-05,
      "loss": 0.3332,
      "step": 1800
    },
    {
      "epoch": 1.2156110044785668,
      "grad_norm": 4.077260971069336,
      "learning_rate": 1.3320601575555025e-05,
      "loss": 0.32,
      "step": 1900
    },
    {
      "epoch": 1.2795905310300704,
      "grad_norm": 3.1868574619293213,
      "learning_rate": 1.2843160658868466e-05,
      "loss": 0.3095,
      "step": 2000
    },
    {
      "epoch": 1.3435700575815739,
      "grad_norm": 6.247142314910889,
      "learning_rate": 1.2365719742181906e-05,
      "loss": 0.3105,
      "step": 2100
    },
    {
      "epoch": 1.4075495841330774,
      "grad_norm": 11.029816627502441,
      "learning_rate": 1.1888278825495345e-05,
      "loss": 0.2975,
      "step": 2200
    },
    {
      "epoch": 1.471529110684581,
      "grad_norm": 1.82353937625885,
      "learning_rate": 1.1410837908808785e-05,
      "loss": 0.3047,
      "step": 2300
    },
    {
      "epoch": 1.5355086372360844,
      "grad_norm": 8.549129486083984,
      "learning_rate": 1.0933396992122225e-05,
      "loss": 0.2983,
      "step": 2400
    },
    {
      "epoch": 1.599488163787588,
      "grad_norm": 5.218935966491699,
      "learning_rate": 1.0455956075435664e-05,
      "loss": 0.2965,
      "step": 2500
    },
    {
      "epoch": 1.6634676903390915,
      "grad_norm": 1.3732364177703857,
      "learning_rate": 9.978515158749106e-06,
      "loss": 0.274,
      "step": 2600
    },
    {
      "epoch": 1.727447216890595,
      "grad_norm": 5.042181968688965,
      "learning_rate": 9.501074242062547e-06,
      "loss": 0.2804,
      "step": 2700
    },
    {
      "epoch": 1.7914267434420985,
      "grad_norm": 2.0916764736175537,
      "learning_rate": 9.023633325375985e-06,
      "loss": 0.2689,
      "step": 2800
    },
    {
      "epoch": 1.855406269993602,
      "grad_norm": 5.730693817138672,
      "learning_rate": 8.546192408689426e-06,
      "loss": 0.2798,
      "step": 2900
    },
    {
      "epoch": 1.9193857965451055,
      "grad_norm": 1.510636806488037,
      "learning_rate": 8.068751492002866e-06,
      "loss": 0.2811,
      "step": 3000
    },
    {
      "epoch": 1.983365323096609,
      "grad_norm": 5.20271110534668,
      "learning_rate": 7.591310575316305e-06,
      "loss": 0.2862,
      "step": 3100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9119,
      "eval_f1_macro": 0.284542217831853,
      "eval_f1_weighted": 0.900679083795208,
      "eval_loss": 0.30098485946655273,
      "eval_precision_macro": 0.36539760346541894,
      "eval_recall_macro": 0.2947437498965655,
      "eval_runtime": 91.3907,
      "eval_samples_per_second": 109.42,
      "eval_steps_per_second": 3.425,
      "step": 3126
    }
  ],
  "logging_steps": 100,
  "max_steps": 4689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.31566158336e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}