| { | |
| "best_metric": 0.5455028328611898, | |
| "best_model_checkpoint": "/mimer/NOBACKUP/groups/naiss2023-6-290/stefano/models//PROTAC-Splitter-EncoderDecoder-lr_reduce-opt25-rand-smiles/checkpoint-10000", | |
| "epoch": 3.4523574669560073, | |
| "eval_steps": 2500, | |
| "global_step": 35000, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.049319392385085814, | |
| "grad_norm": 0.7027302384376526, | |
| "learning_rate": 5e-05, | |
| "loss": 0.9638, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.09863878477017163, | |
| "grad_norm": 0.5134932994842529, | |
| "learning_rate": 5e-05, | |
| "loss": 0.1891, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.14795817715525744, | |
| "grad_norm": 0.27161791920661926, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0815, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.19727756954034326, | |
| "grad_norm": 0.27095702290534973, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0463, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.24659696192542907, | |
| "grad_norm": 0.22146300971508026, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0303, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.2959163543105149, | |
| "grad_norm": 0.20155303180217743, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0225, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.3452357466956007, | |
| "grad_norm": 0.15531788766384125, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0172, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.3945551390806865, | |
| "grad_norm": 0.18832802772521973, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0142, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.4438745314657723, | |
| "grad_norm": 0.141372412443161, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0117, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.49319392385085814, | |
| "grad_norm": 0.1557079702615738, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0107, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.49319392385085814, | |
| "eval_all_ligands_equal": 0.5010623229461756, | |
| "eval_e3_equal": 0.7893944759206799, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9859242209631728, | |
| "eval_e3_heavy_atoms_difference": 0.44741501416430596, | |
| "eval_e3_heavy_atoms_difference_norm": 0.010527970436043508, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9859242209631728, | |
| "eval_has_all_attachment_points": 0.9880488668555241, | |
| "eval_has_three_substructures": 0.9992917847025495, | |
| "eval_heavy_atoms_difference": 4.677850566572238, | |
| "eval_heavy_atoms_difference_norm": 0.06169203765049902, | |
| "eval_linker_equal": 0.7242386685552408, | |
| "eval_linker_graph_edit_distance": null, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9977868271954674, | |
| "eval_linker_heavy_atoms_difference": 0.5833923512747875, | |
| "eval_linker_heavy_atoms_difference_norm": 0.017295105295040032, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9977868271954674, | |
| "eval_loss": 0.30077189207077026, | |
| "eval_num_fragments": 3.0001770538243626, | |
| "eval_poi_equal": 0.7466359773371105, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9623760623229461, | |
| "eval_poi_heavy_atoms_difference": 1.126150849858357, | |
| "eval_poi_heavy_atoms_difference_norm": 0.03167513129582722, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9623760623229461, | |
| "eval_reassembly": 0.5097379603399433, | |
| "eval_reassembly_nostereo": 0.5343484419263456, | |
| "eval_runtime": 2394.3813, | |
| "eval_samples_per_second": 4.718, | |
| "eval_steps_per_second": 0.074, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9481232294617564, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.542513316235944, | |
| "grad_norm": 0.16844329237937927, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0086, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 0.5918327086210298, | |
| "grad_norm": 0.13382764160633087, | |
| "learning_rate": 5e-05, | |
| "loss": 0.008, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.6411521010061156, | |
| "grad_norm": 0.10914590954780579, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0082, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 0.6904714933912014, | |
| "grad_norm": 0.1176648959517479, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0064, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 0.7397908857762873, | |
| "grad_norm": 0.11995041370391846, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0058, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 0.7397908857762873, | |
| "eval_all_ligands_equal": 0.5324893767705382, | |
| "eval_e3_equal": 0.8015226628895185, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9835339943342776, | |
| "eval_e3_heavy_atoms_difference": 0.6172096317280453, | |
| "eval_e3_heavy_atoms_difference_norm": 0.01718153606779645, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9835339943342776, | |
| "eval_has_all_attachment_points": 0.9898194050991501, | |
| "eval_has_three_substructures": 0.9992032577903682, | |
| "eval_heavy_atoms_difference": 4.7888633144475925, | |
| "eval_heavy_atoms_difference_norm": 0.06261895375354189, | |
| "eval_linker_equal": 0.7643413597733711, | |
| "eval_linker_graph_edit_distance": 5.931303116147309e+61, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9940686968838527, | |
| "eval_linker_heavy_atoms_difference": 0.2994865439093484, | |
| "eval_linker_heavy_atoms_difference_norm": 0.003906866403538865, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9940686968838527, | |
| "eval_loss": 0.3187354505062103, | |
| "eval_num_fragments": 2.9999114730878187, | |
| "eval_poi_equal": 0.7585871104815864, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9615793201133145, | |
| "eval_poi_heavy_atoms_difference": 0.9885800283286119, | |
| "eval_poi_heavy_atoms_difference_norm": 0.026428449227846533, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9615793201133145, | |
| "eval_reassembly": 0.5401912181303116, | |
| "eval_reassembly_nostereo": 0.564270538243626, | |
| "eval_runtime": 2423.1193, | |
| "eval_samples_per_second": 4.662, | |
| "eval_steps_per_second": 0.073, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9417492917847026, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 0.789110278161373, | |
| "grad_norm": 0.10560354590415955, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0054, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 0.8384296705464589, | |
| "grad_norm": 0.10797900706529617, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0054, | |
| "step": 8500 | |
| }, | |
| { | |
| "epoch": 0.8877490629315447, | |
| "grad_norm": 0.08978835493326187, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0047, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 0.9370684553166305, | |
| "grad_norm": 0.10517807304859161, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0043, | |
| "step": 9500 | |
| }, | |
| { | |
| "epoch": 0.9863878477017163, | |
| "grad_norm": 0.08841440826654434, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0049, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 0.9863878477017163, | |
| "eval_all_ligands_equal": 0.5455028328611898, | |
| "eval_e3_equal": 0.8076310198300283, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9866324362606232, | |
| "eval_e3_heavy_atoms_difference": 0.4690155807365439, | |
| "eval_e3_heavy_atoms_difference_norm": 0.012301846377999403, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9866324362606232, | |
| "eval_has_all_attachment_points": 0.9866324362606232, | |
| "eval_has_three_substructures": 0.9990262039660056, | |
| "eval_heavy_atoms_difference": 5.74459985835694, | |
| "eval_heavy_atoms_difference_norm": 0.07440553604997951, | |
| "eval_linker_equal": 0.7855878186968839, | |
| "eval_linker_graph_edit_distance": null, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9952195467422096, | |
| "eval_linker_heavy_atoms_difference": 0.20015934844192634, | |
| "eval_linker_heavy_atoms_difference_norm": -0.0013312150850062455, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9952195467422096, | |
| "eval_loss": 0.32511967420578003, | |
| "eval_num_fragments": 2.999734419263456, | |
| "eval_poi_equal": 0.7631905099150141, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9458215297450425, | |
| "eval_poi_heavy_atoms_difference": 1.7547804532577904, | |
| "eval_poi_heavy_atoms_difference_norm": 0.05102178524901975, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9458215297450425, | |
| "eval_reassembly": 0.5547981586402266, | |
| "eval_reassembly_nostereo": 0.5788774787535411, | |
| "eval_runtime": 2357.728, | |
| "eval_samples_per_second": 4.791, | |
| "eval_steps_per_second": 0.075, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9307719546742209, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 1.0357072400868022, | |
| "grad_norm": 0.06631127744913101, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0048, | |
| "step": 10500 | |
| }, | |
| { | |
| "epoch": 1.085026632471888, | |
| "grad_norm": 0.050583675503730774, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0034, | |
| "step": 11000 | |
| }, | |
| { | |
| "epoch": 1.1343460248569737, | |
| "grad_norm": 0.07966043800115585, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0032, | |
| "step": 11500 | |
| }, | |
| { | |
| "epoch": 1.1836654172420595, | |
| "grad_norm": 0.06630385667085648, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0031, | |
| "step": 12000 | |
| }, | |
| { | |
| "epoch": 1.2329848096271454, | |
| "grad_norm": 0.07357371598482132, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0032, | |
| "step": 12500 | |
| }, | |
| { | |
| "epoch": 1.2823042020122313, | |
| "grad_norm": 0.04489985853433609, | |
| "learning_rate": 5e-05, | |
| "loss": 0.003, | |
| "step": 13000 | |
| }, | |
| { | |
| "epoch": 1.3316235943973171, | |
| "grad_norm": 0.07262099534273148, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0029, | |
| "step": 13500 | |
| }, | |
| { | |
| "epoch": 1.3809429867824028, | |
| "grad_norm": 0.04908349737524986, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0027, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 1.4302623791674887, | |
| "grad_norm": 0.07208685576915741, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0031, | |
| "step": 14500 | |
| }, | |
| { | |
| "epoch": 1.4795817715525745, | |
| "grad_norm": 0.06099744513630867, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0035, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 1.5289011639376602, | |
| "grad_norm": 0.044646210968494415, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0022, | |
| "step": 15500 | |
| }, | |
| { | |
| "epoch": 1.578220556322746, | |
| "grad_norm": 0.04811064526438713, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 1.627539948707832, | |
| "grad_norm": 0.054762303829193115, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0024, | |
| "step": 16500 | |
| }, | |
| { | |
| "epoch": 1.6768593410929178, | |
| "grad_norm": 0.04429342970252037, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0022, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 1.7261787334780037, | |
| "grad_norm": 0.06518573313951492, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0022, | |
| "step": 17500 | |
| }, | |
| { | |
| "epoch": 1.7754981258630895, | |
| "grad_norm": 0.043648600578308105, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 1.8248175182481752, | |
| "grad_norm": 0.044574983417987823, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 18500 | |
| }, | |
| { | |
| "epoch": 1.874136910633261, | |
| "grad_norm": 0.05319111794233322, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 1.9234563030183467, | |
| "grad_norm": 0.05567469820380211, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 19500 | |
| }, | |
| { | |
| "epoch": 1.9727756954034326, | |
| "grad_norm": 0.045425355434417725, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0019, | |
| "step": 20000 | |
| }, | |
| { | |
| "epoch": 2.0220950877885184, | |
| "grad_norm": 0.05095202848315239, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0024, | |
| "step": 20500 | |
| }, | |
| { | |
| "epoch": 2.0714144801736043, | |
| "grad_norm": 0.027557415887713432, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 21000 | |
| }, | |
| { | |
| "epoch": 2.12073387255869, | |
| "grad_norm": 0.04820004105567932, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 21500 | |
| }, | |
| { | |
| "epoch": 2.170053264943776, | |
| "grad_norm": 0.041186604648828506, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 22000 | |
| }, | |
| { | |
| "epoch": 2.219372657328862, | |
| "grad_norm": 0.028476761654019356, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 22500 | |
| }, | |
| { | |
| "epoch": 2.2686920497139473, | |
| "grad_norm": 0.045205824077129364, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 23000 | |
| }, | |
| { | |
| "epoch": 2.318011442099033, | |
| "grad_norm": 0.04366511106491089, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 23500 | |
| }, | |
| { | |
| "epoch": 2.367330834484119, | |
| "grad_norm": 0.022254638373851776, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 24000 | |
| }, | |
| { | |
| "epoch": 2.416650226869205, | |
| "grad_norm": 0.039716288447380066, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 24500 | |
| }, | |
| { | |
| "epoch": 2.465969619254291, | |
| "grad_norm": 0.055284593254327774, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 25000 | |
| }, | |
| { | |
| "epoch": 2.5152890116393767, | |
| "grad_norm": 0.045403581112623215, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 25500 | |
| }, | |
| { | |
| "epoch": 2.5646084040244626, | |
| "grad_norm": 0.059790823608636856, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0018, | |
| "step": 26000 | |
| }, | |
| { | |
| "epoch": 2.6139277964095484, | |
| "grad_norm": 0.05007553473114967, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 26500 | |
| }, | |
| { | |
| "epoch": 2.6632471887946343, | |
| "grad_norm": 0.03945387527346611, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 27000 | |
| }, | |
| { | |
| "epoch": 2.7125665811797197, | |
| "grad_norm": 0.033401645720005035, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 27500 | |
| }, | |
| { | |
| "epoch": 2.7618859735648056, | |
| "grad_norm": 0.05006084218621254, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 28000 | |
| }, | |
| { | |
| "epoch": 2.8112053659498915, | |
| "grad_norm": 0.04564574733376503, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 28500 | |
| }, | |
| { | |
| "epoch": 2.8605247583349773, | |
| "grad_norm": 0.0373481847345829, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 29000 | |
| }, | |
| { | |
| "epoch": 2.909844150720063, | |
| "grad_norm": 0.030000342056155205, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0019, | |
| "step": 29500 | |
| }, | |
| { | |
| "epoch": 2.959163543105149, | |
| "grad_norm": 0.012932154349982738, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 30000 | |
| }, | |
| { | |
| "epoch": 3.008482935490235, | |
| "grad_norm": 0.029712537303566933, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 30500 | |
| }, | |
| { | |
| "epoch": 3.0578023278753204, | |
| "grad_norm": 0.022233586758375168, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 31000 | |
| }, | |
| { | |
| "epoch": 3.1071217202604062, | |
| "grad_norm": 0.04128130525350571, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 31500 | |
| }, | |
| { | |
| "epoch": 3.156441112645492, | |
| "grad_norm": 0.031225621700286865, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 32000 | |
| }, | |
| { | |
| "epoch": 3.205760505030578, | |
| "grad_norm": 0.04397075995802879, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 32500 | |
| }, | |
| { | |
| "epoch": 3.255079897415664, | |
| "grad_norm": 0.03178331255912781, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 33000 | |
| }, | |
| { | |
| "epoch": 3.3043992898007497, | |
| "grad_norm": 0.04911533370614052, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 33500 | |
| }, | |
| { | |
| "epoch": 3.3537186821858356, | |
| "grad_norm": 0.036495912820100784, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 34000 | |
| }, | |
| { | |
| "epoch": 3.4030380745709214, | |
| "grad_norm": 0.0601472444832325, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 34500 | |
| }, | |
| { | |
| "epoch": 3.4523574669560073, | |
| "grad_norm": 0.035126738250255585, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 35000 | |
| } | |
| ], | |
| "logging_steps": 500, | |
| "max_steps": 100000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 10, | |
| "save_steps": 5000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 3.055328543636475e+17, | |
| "train_batch_size": 128, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |