{
  "best_global_step": 2049,
  "best_metric": 0.6610541727672035,
  "best_model_checkpoint": "./saved_models/graphcodebert/checkpoint-2049",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3415,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07320644216691069,
      "grad_norm": 1.5853087902069092,
      "learning_rate": 1.971303074670571e-05,
      "loss": 0.6901,
      "step": 50
    },
    {
      "epoch": 0.14641288433382138,
      "grad_norm": 2.419438123703003,
      "learning_rate": 1.942020497803807e-05,
      "loss": 0.6902,
      "step": 100
    },
    {
      "epoch": 0.21961932650073207,
      "grad_norm": 1.6440658569335938,
      "learning_rate": 1.9127379209370426e-05,
      "loss": 0.685,
      "step": 150
    },
    {
      "epoch": 0.29282576866764276,
      "grad_norm": 1.883244514465332,
      "learning_rate": 1.8834553440702785e-05,
      "loss": 0.6857,
      "step": 200
    },
    {
      "epoch": 0.36603221083455345,
      "grad_norm": 3.6438748836517334,
      "learning_rate": 1.854172767203514e-05,
      "loss": 0.682,
      "step": 250
    },
    {
      "epoch": 0.43923865300146414,
      "grad_norm": 3.641892194747925,
      "learning_rate": 1.8248901903367496e-05,
      "loss": 0.6656,
      "step": 300
    },
    {
      "epoch": 0.5124450951683748,
      "grad_norm": 2.7403852939605713,
      "learning_rate": 1.7956076134699855e-05,
      "loss": 0.6656,
      "step": 350
    },
    {
      "epoch": 0.5856515373352855,
      "grad_norm": 1.8151828050613403,
      "learning_rate": 1.766325036603221e-05,
      "loss": 0.6762,
      "step": 400
    },
    {
      "epoch": 0.6588579795021962,
      "grad_norm": 2.336029291152954,
      "learning_rate": 1.737042459736457e-05,
      "loss": 0.6701,
      "step": 450
    },
    {
      "epoch": 0.7320644216691069,
      "grad_norm": 2.9843106269836426,
      "learning_rate": 1.7077598828696925e-05,
      "loss": 0.6661,
      "step": 500
    },
    {
      "epoch": 0.8052708638360175,
      "grad_norm": 2.4253265857696533,
      "learning_rate": 1.6784773060029284e-05,
      "loss": 0.6776,
      "step": 550
    },
    {
      "epoch": 0.8784773060029283,
      "grad_norm": 3.049626350402832,
      "learning_rate": 1.649194729136164e-05,
      "loss": 0.6626,
      "step": 600
    },
    {
      "epoch": 0.9516837481698389,
      "grad_norm": 2.690331220626831,
      "learning_rate": 1.6199121522694e-05,
      "loss": 0.658,
      "step": 650
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6332357247437774,
      "eval_loss": 0.6321774125099182,
      "eval_runtime": 11.6506,
      "eval_samples_per_second": 234.494,
      "eval_steps_per_second": 3.691,
      "step": 683
    },
    {
      "epoch": 1.0248901903367496,
      "grad_norm": 3.389944553375244,
      "learning_rate": 1.5906295754026355e-05,
      "loss": 0.645,
      "step": 700
    },
    {
      "epoch": 1.0980966325036603,
      "grad_norm": 2.976720094680786,
      "learning_rate": 1.5613469985358714e-05,
      "loss": 0.633,
      "step": 750
    },
    {
      "epoch": 1.171303074670571,
      "grad_norm": 3.046492576599121,
      "learning_rate": 1.532064421669107e-05,
      "loss": 0.63,
      "step": 800
    },
    {
      "epoch": 1.2445095168374818,
      "grad_norm": 2.867762804031372,
      "learning_rate": 1.5027818448023428e-05,
      "loss": 0.6293,
      "step": 850
    },
    {
      "epoch": 1.3177159590043923,
      "grad_norm": 3.334984064102173,
      "learning_rate": 1.4734992679355784e-05,
      "loss": 0.6251,
      "step": 900
    },
    {
      "epoch": 1.390922401171303,
      "grad_norm": 4.373927116394043,
      "learning_rate": 1.4442166910688143e-05,
      "loss": 0.6224,
      "step": 950
    },
    {
      "epoch": 1.4641288433382138,
      "grad_norm": 3.5130934715270996,
      "learning_rate": 1.4149341142020499e-05,
      "loss": 0.6182,
      "step": 1000
    },
    {
      "epoch": 1.5373352855051245,
      "grad_norm": 3.466965675354004,
      "learning_rate": 1.3856515373352856e-05,
      "loss": 0.6139,
      "step": 1050
    },
    {
      "epoch": 1.610541727672035,
      "grad_norm": 4.486567974090576,
      "learning_rate": 1.3563689604685213e-05,
      "loss": 0.6221,
      "step": 1100
    },
    {
      "epoch": 1.6837481698389458,
      "grad_norm": 3.9357879161834717,
      "learning_rate": 1.327086383601757e-05,
      "loss": 0.6185,
      "step": 1150
    },
    {
      "epoch": 1.7569546120058566,
      "grad_norm": 3.294029474258423,
      "learning_rate": 1.2978038067349928e-05,
      "loss": 0.6048,
      "step": 1200
    },
    {
      "epoch": 1.830161054172767,
      "grad_norm": 4.328296661376953,
      "learning_rate": 1.2685212298682286e-05,
      "loss": 0.6191,
      "step": 1250
    },
    {
      "epoch": 1.903367496339678,
      "grad_norm": 3.7641713619232178,
      "learning_rate": 1.2392386530014641e-05,
      "loss": 0.6028,
      "step": 1300
    },
    {
      "epoch": 1.9765739385065886,
      "grad_norm": 4.414918422698975,
      "learning_rate": 1.2099560761347e-05,
      "loss": 0.5892,
      "step": 1350
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.636896046852123,
      "eval_loss": 0.6083204746246338,
      "eval_runtime": 11.6592,
      "eval_samples_per_second": 234.32,
      "eval_steps_per_second": 3.688,
      "step": 1366
    },
    {
      "epoch": 2.049780380673499,
      "grad_norm": 5.577649116516113,
      "learning_rate": 1.1806734992679356e-05,
      "loss": 0.5804,
      "step": 1400
    },
    {
      "epoch": 2.12298682284041,
      "grad_norm": 5.713629722595215,
      "learning_rate": 1.1513909224011715e-05,
      "loss": 0.5494,
      "step": 1450
    },
    {
      "epoch": 2.1961932650073206,
      "grad_norm": 14.552216529846191,
      "learning_rate": 1.122108345534407e-05,
      "loss": 0.5611,
      "step": 1500
    },
    {
      "epoch": 2.269399707174231,
      "grad_norm": 4.35590124130249,
      "learning_rate": 1.092825768667643e-05,
      "loss": 0.5653,
      "step": 1550
    },
    {
      "epoch": 2.342606149341142,
      "grad_norm": 4.740884304046631,
      "learning_rate": 1.0635431918008785e-05,
      "loss": 0.5658,
      "step": 1600
    },
    {
      "epoch": 2.4158125915080526,
      "grad_norm": 6.103859901428223,
      "learning_rate": 1.0342606149341143e-05,
      "loss": 0.5605,
      "step": 1650
    },
    {
      "epoch": 2.4890190336749636,
      "grad_norm": 5.405401229858398,
      "learning_rate": 1.00497803806735e-05,
      "loss": 0.5657,
      "step": 1700
    },
    {
      "epoch": 2.562225475841874,
      "grad_norm": 5.416690349578857,
      "learning_rate": 9.756954612005857e-06,
      "loss": 0.545,
      "step": 1750
    },
    {
      "epoch": 2.6354319180087846,
      "grad_norm": 3.8133316040039062,
      "learning_rate": 9.464128843338215e-06,
      "loss": 0.5518,
      "step": 1800
    },
    {
      "epoch": 2.7086383601756956,
      "grad_norm": 11.942543029785156,
      "learning_rate": 9.171303074670572e-06,
      "loss": 0.5516,
      "step": 1850
    },
    {
      "epoch": 2.781844802342606,
      "grad_norm": 7.067141532897949,
      "learning_rate": 8.87847730600293e-06,
      "loss": 0.5597,
      "step": 1900
    },
    {
      "epoch": 2.855051244509517,
      "grad_norm": 5.501221656799316,
      "learning_rate": 8.585651537335287e-06,
      "loss": 0.536,
      "step": 1950
    },
    {
      "epoch": 2.9282576866764276,
      "grad_norm": 9.329773902893066,
      "learning_rate": 8.292825768667644e-06,
      "loss": 0.5298,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6610541727672035,
      "eval_loss": 0.6209500432014465,
      "eval_runtime": 11.6196,
      "eval_samples_per_second": 235.121,
      "eval_steps_per_second": 3.701,
      "step": 2049
    },
    {
      "epoch": 3.001464128843338,
      "grad_norm": 4.468164443969727,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.5594,
      "step": 2050
    },
    {
      "epoch": 3.074670571010249,
      "grad_norm": 6.163565158843994,
      "learning_rate": 7.707174231332359e-06,
      "loss": 0.488,
      "step": 2100
    },
    {
      "epoch": 3.1478770131771596,
      "grad_norm": 6.804717063903809,
      "learning_rate": 7.414348462664715e-06,
      "loss": 0.491,
      "step": 2150
    },
    {
      "epoch": 3.22108345534407,
      "grad_norm": 7.049874782562256,
      "learning_rate": 7.1215226939970725e-06,
      "loss": 0.4866,
      "step": 2200
    },
    {
      "epoch": 3.294289897510981,
      "grad_norm": 10.319636344909668,
      "learning_rate": 6.82869692532943e-06,
      "loss": 0.4837,
      "step": 2250
    },
    {
      "epoch": 3.3674963396778916,
      "grad_norm": 7.2100677490234375,
      "learning_rate": 6.535871156661787e-06,
      "loss": 0.4773,
      "step": 2300
    },
    {
      "epoch": 3.440702781844802,
      "grad_norm": 10.271724700927734,
      "learning_rate": 6.2430453879941446e-06,
      "loss": 0.5051,
      "step": 2350
    },
    {
      "epoch": 3.513909224011713,
      "grad_norm": 6.702558517456055,
      "learning_rate": 5.950219619326502e-06,
      "loss": 0.4979,
      "step": 2400
    },
    {
      "epoch": 3.5871156661786237,
      "grad_norm": 7.939746856689453,
      "learning_rate": 5.657393850658858e-06,
      "loss": 0.5013,
      "step": 2450
    },
    {
      "epoch": 3.660322108345534,
      "grad_norm": 7.762535095214844,
      "learning_rate": 5.364568081991216e-06,
      "loss": 0.4999,
      "step": 2500
    },
    {
      "epoch": 3.733528550512445,
      "grad_norm": 7.71613073348999,
      "learning_rate": 5.071742313323573e-06,
      "loss": 0.4935,
      "step": 2550
    },
    {
      "epoch": 3.8067349926793557,
      "grad_norm": 6.267000675201416,
      "learning_rate": 4.77891654465593e-06,
      "loss": 0.4855,
      "step": 2600
    },
    {
      "epoch": 3.8799414348462666,
      "grad_norm": 10.031179428100586,
      "learning_rate": 4.486090775988287e-06,
      "loss": 0.4932,
      "step": 2650
    },
    {
      "epoch": 3.953147877013177,
      "grad_norm": 5.606414794921875,
      "learning_rate": 4.193265007320644e-06,
      "loss": 0.4849,
      "step": 2700
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6442166910688141,
      "eval_loss": 0.6546101570129395,
      "eval_runtime": 11.6102,
      "eval_samples_per_second": 235.31,
      "eval_steps_per_second": 3.704,
      "step": 2732
    },
    {
      "epoch": 4.026354319180088,
      "grad_norm": 7.047442436218262,
      "learning_rate": 3.900439238653002e-06,
      "loss": 0.4756,
      "step": 2750
    },
    {
      "epoch": 4.099560761346998,
      "grad_norm": 8.444013595581055,
      "learning_rate": 3.607613469985359e-06,
      "loss": 0.4465,
      "step": 2800
    },
    {
      "epoch": 4.172767203513909,
      "grad_norm": 8.391129493713379,
      "learning_rate": 3.314787701317716e-06,
      "loss": 0.4532,
      "step": 2850
    },
    {
      "epoch": 4.24597364568082,
      "grad_norm": 8.165177345275879,
      "learning_rate": 3.0219619326500732e-06,
      "loss": 0.4591,
      "step": 2900
    },
    {
      "epoch": 4.31918008784773,
      "grad_norm": 10.9652099609375,
      "learning_rate": 2.7291361639824306e-06,
      "loss": 0.4126,
      "step": 2950
    },
    {
      "epoch": 4.392386530014641,
      "grad_norm": 7.355696201324463,
      "learning_rate": 2.436310395314788e-06,
      "loss": 0.4195,
      "step": 3000
    },
    {
      "epoch": 4.465592972181552,
      "grad_norm": 9.898347854614258,
      "learning_rate": 2.1434846266471453e-06,
      "loss": 0.4476,
      "step": 3050
    },
    {
      "epoch": 4.538799414348462,
      "grad_norm": 12.907100677490234,
      "learning_rate": 1.8506588579795024e-06,
      "loss": 0.441,
      "step": 3100
    },
    {
      "epoch": 4.612005856515373,
      "grad_norm": 6.766877174377441,
      "learning_rate": 1.5578330893118595e-06,
      "loss": 0.4316,
      "step": 3150
    },
    {
      "epoch": 4.685212298682284,
      "grad_norm": 6.144525527954102,
      "learning_rate": 1.2650073206442169e-06,
      "loss": 0.4415,
      "step": 3200
    },
    {
      "epoch": 4.758418740849194,
      "grad_norm": 12.745665550231934,
      "learning_rate": 9.72181551976574e-07,
      "loss": 0.4525,
      "step": 3250
    },
    {
      "epoch": 4.831625183016105,
      "grad_norm": 8.909607887268066,
      "learning_rate": 6.793557833089313e-07,
      "loss": 0.4426,
      "step": 3300
    },
    {
      "epoch": 4.904831625183016,
      "grad_norm": 8.444921493530273,
      "learning_rate": 3.865300146412885e-07,
      "loss": 0.4379,
      "step": 3350
    },
    {
      "epoch": 4.978038067349927,
      "grad_norm": 9.61002254486084,
      "learning_rate": 9.370424597364569e-08,
      "loss": 0.4603,
      "step": 3400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6548316251830161,
      "eval_loss": 0.687939465045929,
      "eval_runtime": 11.6277,
      "eval_samples_per_second": 234.957,
      "eval_steps_per_second": 3.698,
      "step": 3415
    }
  ],
  "logging_steps": 50,
  "max_steps": 3415,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.43750725095936e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}