{
  "best_global_step": 3000,
  "best_metric": 0.7091743119266055,
  "best_model_checkpoint": "/content/drive/MyDrive/NLP/Multi-Task/data/massive_tr/xlmr-multihead/checkpoint-3000",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 31.571680068969727,
      "learning_rate": 8.166666666666666e-06,
      "loss": 9.5398,
      "step": 50
    },
    {
      "epoch": 0.2,
      "grad_norm": 20.519420623779297,
      "learning_rate": 1.65e-05,
      "loss": 6.6539,
      "step": 100
    },
    {
      "epoch": 0.3,
      "grad_norm": 14.259832382202148,
      "learning_rate": 2.483333333333333e-05,
      "loss": 6.1233,
      "step": 150
    },
    {
      "epoch": 0.4,
      "grad_norm": 28.014291763305664,
      "learning_rate": 2.979787234042553e-05,
      "loss": 5.6725,
      "step": 200
    },
    {
      "epoch": 0.5,
      "grad_norm": 28.722820281982422,
      "learning_rate": 2.926595744680851e-05,
      "loss": 5.3297,
      "step": 250
    },
    {
      "epoch": 0.6,
      "grad_norm": 38.051170349121094,
      "learning_rate": 2.873404255319149e-05,
      "loss": 4.5911,
      "step": 300
    },
    {
      "epoch": 0.7,
      "grad_norm": 26.39080810546875,
      "learning_rate": 2.820212765957447e-05,
      "loss": 4.017,
      "step": 350
    },
    {
      "epoch": 0.8,
      "grad_norm": 18.74272918701172,
      "learning_rate": 2.7670212765957448e-05,
      "loss": 3.6629,
      "step": 400
    },
    {
      "epoch": 0.9,
      "grad_norm": 18.32183074951172,
      "learning_rate": 2.7138297872340427e-05,
      "loss": 3.3781,
      "step": 450
    },
    {
      "epoch": 1.0,
      "grad_norm": 29.03256607055664,
      "learning_rate": 2.6606382978723407e-05,
      "loss": 2.9562,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_intent_accuracy": 0.704,
      "eval_intent_macro_f1": 0.48933652636220204,
      "eval_joint_score": 0.5881757105943153,
      "eval_loss": 2.396956443786621,
      "eval_ner_f1": 0.4723514211886305,
      "eval_ner_precision": 0.5163841807909605,
      "eval_ner_recall": 0.43523809523809526,
      "eval_runtime": 0.5889,
      "eval_samples_per_second": 1698.154,
      "eval_steps_per_second": 54.341,
      "step": 500
    },
    {
      "epoch": 1.1,
      "grad_norm": 40.97825241088867,
      "learning_rate": 2.6074468085106382e-05,
      "loss": 2.701,
      "step": 550
    },
    {
      "epoch": 1.2,
      "grad_norm": 23.553442001342773,
      "learning_rate": 2.554255319148936e-05,
      "loss": 2.4746,
      "step": 600
    },
    {
      "epoch": 1.3,
      "grad_norm": 26.10988426208496,
      "learning_rate": 2.501063829787234e-05,
      "loss": 2.3423,
      "step": 650
    },
    {
      "epoch": 1.4,
      "grad_norm": 19.3520565032959,
      "learning_rate": 2.447872340425532e-05,
      "loss": 2.1157,
      "step": 700
    },
    {
      "epoch": 1.5,
      "grad_norm": 34.30548858642578,
      "learning_rate": 2.3946808510638296e-05,
      "loss": 1.9679,
      "step": 750
    },
    {
      "epoch": 1.6,
      "grad_norm": 15.152769088745117,
      "learning_rate": 2.341489361702128e-05,
      "loss": 1.9404,
      "step": 800
    },
    {
      "epoch": 1.7,
      "grad_norm": 25.86590003967285,
      "learning_rate": 2.2882978723404258e-05,
      "loss": 1.97,
      "step": 850
    },
    {
      "epoch": 1.8,
      "grad_norm": 18.924379348754883,
      "learning_rate": 2.2351063829787237e-05,
      "loss": 1.8073,
      "step": 900
    },
    {
      "epoch": 1.9,
      "grad_norm": 20.5709171295166,
      "learning_rate": 2.1819148936170213e-05,
      "loss": 1.7987,
      "step": 950
    },
    {
      "epoch": 2.0,
      "grad_norm": 31.26434898376465,
      "learning_rate": 2.1287234042553192e-05,
      "loss": 1.7652,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_intent_accuracy": 0.833,
      "eval_intent_macro_f1": 0.7032350726349212,
      "eval_joint_score": 0.7102992125984251,
      "eval_loss": 1.4518628120422363,
      "eval_ner_f1": 0.5875984251968503,
      "eval_ner_precision": 0.6079429735234216,
      "eval_ner_recall": 0.5685714285714286,
      "eval_runtime": 0.595,
      "eval_samples_per_second": 1680.634,
      "eval_steps_per_second": 53.78,
      "step": 1000
    },
    {
      "epoch": 2.1,
      "grad_norm": 35.977622985839844,
      "learning_rate": 2.075531914893617e-05,
      "loss": 1.4852,
      "step": 1050
    },
    {
      "epoch": 2.2,
      "grad_norm": 30.942049026489258,
      "learning_rate": 2.0223404255319147e-05,
      "loss": 1.3909,
      "step": 1100
    },
    {
      "epoch": 2.3,
      "grad_norm": 29.61802101135254,
      "learning_rate": 1.9691489361702126e-05,
      "loss": 1.4372,
      "step": 1150
    },
    {
      "epoch": 2.4,
      "grad_norm": 17.332490921020508,
      "learning_rate": 1.915957446808511e-05,
      "loss": 1.4184,
      "step": 1200
    },
    {
      "epoch": 2.5,
      "grad_norm": 9.733820915222168,
      "learning_rate": 1.8627659574468088e-05,
      "loss": 1.3071,
      "step": 1250
    },
    {
      "epoch": 2.6,
      "grad_norm": 22.356639862060547,
      "learning_rate": 1.8095744680851064e-05,
      "loss": 1.3381,
      "step": 1300
    },
    {
      "epoch": 2.7,
      "grad_norm": 26.953872680664062,
      "learning_rate": 1.7563829787234043e-05,
      "loss": 1.347,
      "step": 1350
    },
    {
      "epoch": 2.8,
      "grad_norm": 39.545013427734375,
      "learning_rate": 1.7031914893617022e-05,
      "loss": 1.2399,
      "step": 1400
    },
    {
      "epoch": 2.9,
      "grad_norm": 29.881067276000977,
      "learning_rate": 1.65e-05,
      "loss": 1.2018,
      "step": 1450
    },
    {
      "epoch": 3.0,
      "grad_norm": 34.384517669677734,
      "learning_rate": 1.5968085106382977e-05,
      "loss": 1.2898,
      "step": 1500
    },
    {
      "epoch": 3.0,
      "eval_intent_accuracy": 0.843,
      "eval_intent_macro_f1": 0.7342243232665543,
      "eval_joint_score": 0.7419320432043204,
      "eval_loss": 1.2403136491775513,
      "eval_ner_f1": 0.6408640864086408,
      "eval_ner_precision": 0.6075085324232082,
      "eval_ner_recall": 0.6780952380952381,
      "eval_runtime": 0.5931,
      "eval_samples_per_second": 1686.094,
      "eval_steps_per_second": 53.955,
      "step": 1500
    },
    {
      "epoch": 3.1,
      "grad_norm": 40.15689468383789,
      "learning_rate": 1.5436170212765956e-05,
      "loss": 1.0928,
      "step": 1550
    },
    {
      "epoch": 3.2,
      "grad_norm": 14.882131576538086,
      "learning_rate": 1.4904255319148937e-05,
      "loss": 0.971,
      "step": 1600
    },
    {
      "epoch": 3.3,
      "grad_norm": 16.319412231445312,
      "learning_rate": 1.4372340425531915e-05,
      "loss": 1.1631,
      "step": 1650
    },
    {
      "epoch": 3.4,
      "grad_norm": 26.943748474121094,
      "learning_rate": 1.3840425531914896e-05,
      "loss": 1.0528,
      "step": 1700
    },
    {
      "epoch": 3.5,
      "grad_norm": 19.07010841369629,
      "learning_rate": 1.3308510638297873e-05,
      "loss": 1.0475,
      "step": 1750
    },
    {
      "epoch": 3.6,
      "grad_norm": 16.704652786254883,
      "learning_rate": 1.277659574468085e-05,
      "loss": 1.0072,
      "step": 1800
    },
    {
      "epoch": 3.7,
      "grad_norm": 20.118215560913086,
      "learning_rate": 1.224468085106383e-05,
      "loss": 1.0892,
      "step": 1850
    },
    {
      "epoch": 3.8,
      "grad_norm": 25.932292938232422,
      "learning_rate": 1.1712765957446809e-05,
      "loss": 0.8766,
      "step": 1900
    },
    {
      "epoch": 3.9,
      "grad_norm": 20.448410034179688,
      "learning_rate": 1.1180851063829788e-05,
      "loss": 0.8948,
      "step": 1950
    },
    {
      "epoch": 4.0,
      "grad_norm": 13.030745506286621,
      "learning_rate": 1.0648936170212766e-05,
      "loss": 0.9611,
      "step": 2000
    },
    {
      "epoch": 4.0,
      "eval_intent_accuracy": 0.865,
      "eval_intent_macro_f1": 0.7845036463088115,
      "eval_joint_score": 0.7702941855099215,
      "eval_loss": 1.1503586769104004,
      "eval_ner_f1": 0.675588371019843,
      "eval_ner_precision": 0.6553267681289168,
      "eval_ner_recall": 0.6971428571428572,
      "eval_runtime": 0.5885,
      "eval_samples_per_second": 1699.13,
      "eval_steps_per_second": 54.372,
      "step": 2000
    },
    {
      "epoch": 4.1,
      "grad_norm": 7.756314754486084,
      "learning_rate": 1.0117021276595745e-05,
      "loss": 0.7831,
      "step": 2050
    },
    {
      "epoch": 4.2,
      "grad_norm": 15.828548431396484,
      "learning_rate": 9.585106382978724e-06,
      "loss": 0.7917,
      "step": 2100
    },
    {
      "epoch": 4.3,
      "grad_norm": 17.722307205200195,
      "learning_rate": 9.053191489361703e-06,
      "loss": 0.7958,
      "step": 2150
    },
    {
      "epoch": 4.4,
      "grad_norm": 31.272441864013672,
      "learning_rate": 8.52127659574468e-06,
      "loss": 0.7537,
      "step": 2200
    },
    {
      "epoch": 4.5,
      "grad_norm": 37.739131927490234,
      "learning_rate": 7.98936170212766e-06,
      "loss": 0.8002,
      "step": 2250
    },
    {
      "epoch": 4.6,
      "grad_norm": 29.077699661254883,
      "learning_rate": 7.457446808510638e-06,
      "loss": 0.8878,
      "step": 2300
    },
    {
      "epoch": 4.7,
      "grad_norm": 43.24278259277344,
      "learning_rate": 6.925531914893618e-06,
      "loss": 0.8453,
      "step": 2350
    },
    {
      "epoch": 4.8,
      "grad_norm": 94.07080841064453,
      "learning_rate": 6.393617021276596e-06,
      "loss": 0.7931,
      "step": 2400
    },
    {
      "epoch": 4.9,
      "grad_norm": 20.592226028442383,
      "learning_rate": 5.861702127659575e-06,
      "loss": 0.7832,
      "step": 2450
    },
    {
      "epoch": 5.0,
      "grad_norm": 36.34307098388672,
      "learning_rate": 5.3297872340425535e-06,
      "loss": 0.8036,
      "step": 2500
    },
    {
      "epoch": 5.0,
      "eval_intent_accuracy": 0.868,
      "eval_intent_macro_f1": 0.8051212440624268,
      "eval_joint_score": 0.7852150389729482,
      "eval_loss": 1.0913232564926147,
      "eval_ner_f1": 0.7024300779458964,
      "eval_ner_precision": 0.6772767462422635,
      "eval_ner_recall": 0.7295238095238096,
      "eval_runtime": 0.5824,
      "eval_samples_per_second": 1717.074,
      "eval_steps_per_second": 54.946,
      "step": 2500
    },
    {
      "epoch": 5.1,
      "grad_norm": 13.311659812927246,
      "learning_rate": 4.797872340425533e-06,
      "loss": 0.6409,
      "step": 2550
    },
    {
      "epoch": 5.2,
      "grad_norm": 17.807374954223633,
      "learning_rate": 4.265957446808511e-06,
      "loss": 0.7401,
      "step": 2600
    },
    {
      "epoch": 5.3,
      "grad_norm": 8.320006370544434,
      "learning_rate": 3.7340425531914894e-06,
      "loss": 0.668,
      "step": 2650
    },
    {
      "epoch": 5.4,
      "grad_norm": 20.279203414916992,
      "learning_rate": 3.202127659574468e-06,
      "loss": 0.6477,
      "step": 2700
    },
    {
      "epoch": 5.5,
      "grad_norm": 23.9965763092041,
      "learning_rate": 2.670212765957447e-06,
      "loss": 0.7239,
      "step": 2750
    },
    {
      "epoch": 5.6,
      "grad_norm": 38.03826904296875,
      "learning_rate": 2.1382978723404258e-06,
      "loss": 0.6816,
      "step": 2800
    },
    {
      "epoch": 5.7,
      "grad_norm": 17.692941665649414,
      "learning_rate": 1.6063829787234043e-06,
      "loss": 0.6778,
      "step": 2850
    },
    {
      "epoch": 5.8,
      "grad_norm": 33.07294464111328,
      "learning_rate": 1.074468085106383e-06,
      "loss": 0.694,
      "step": 2900
    },
    {
      "epoch": 5.9,
      "grad_norm": 17.965429306030273,
      "learning_rate": 5.425531914893617e-07,
      "loss": 0.6421,
      "step": 2950
    },
    {
      "epoch": 6.0,
      "grad_norm": 15.919111251831055,
      "learning_rate": 1.0638297872340427e-08,
      "loss": 0.6727,
      "step": 3000
    },
    {
      "epoch": 6.0,
      "eval_intent_accuracy": 0.87,
      "eval_intent_macro_f1": 0.8193369098618751,
      "eval_joint_score": 0.7895871559633028,
      "eval_loss": 1.0895804166793823,
      "eval_ner_f1": 0.7091743119266055,
      "eval_ner_precision": 0.684070796460177,
      "eval_ner_recall": 0.7361904761904762,
      "eval_runtime": 0.6107,
      "eval_samples_per_second": 1637.488,
      "eval_steps_per_second": 52.4,
      "step": 3000
    }
  ],
  "logging_steps": 50,
  "max_steps": 3000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0005
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}