| { | |
| "best_metric": 0.6034879603399433, | |
| "best_model_checkpoint": "/mimer/NOBACKUP/groups/naiss2023-6-290/stefano/models//PROTAC-Splitter-EncoderDecoder-lr_reduce-rand-smiles/checkpoint-100000", | |
| "epoch": 9.863878477017163, | |
| "eval_steps": 2500, | |
| "global_step": 100000, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.049319392385085814, | |
| "grad_norm": 0.7027302384376526, | |
| "learning_rate": 5e-05, | |
| "loss": 0.9638, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.09863878477017163, | |
| "grad_norm": 0.5134932994842529, | |
| "learning_rate": 5e-05, | |
| "loss": 0.1891, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.14795817715525744, | |
| "grad_norm": 0.27161791920661926, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0815, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.19727756954034326, | |
| "grad_norm": 0.27095702290534973, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0463, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.24659696192542907, | |
| "grad_norm": 0.22146300971508026, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0303, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.2959163543105149, | |
| "grad_norm": 0.20155303180217743, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0225, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.3452357466956007, | |
| "grad_norm": 0.15531788766384125, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0172, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.3945551390806865, | |
| "grad_norm": 0.18832802772521973, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0142, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.4438745314657723, | |
| "grad_norm": 0.141372412443161, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0117, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.49319392385085814, | |
| "grad_norm": 0.1557079702615738, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0107, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.49319392385085814, | |
| "eval_all_ligands_equal": 0.5010623229461756, | |
| "eval_e3_equal": 0.7893944759206799, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9859242209631728, | |
| "eval_e3_heavy_atoms_difference": 0.44741501416430596, | |
| "eval_e3_heavy_atoms_difference_norm": 0.010527970436043508, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9859242209631728, | |
| "eval_has_all_attachment_points": 0.9880488668555241, | |
| "eval_has_three_substructures": 0.9992917847025495, | |
| "eval_heavy_atoms_difference": 4.677850566572238, | |
| "eval_heavy_atoms_difference_norm": 0.06169203765049902, | |
| "eval_linker_equal": 0.7242386685552408, | |
| "eval_linker_graph_edit_distance": null, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9977868271954674, | |
| "eval_linker_heavy_atoms_difference": 0.5833923512747875, | |
| "eval_linker_heavy_atoms_difference_norm": 0.017295105295040032, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9977868271954674, | |
| "eval_loss": 0.30077189207077026, | |
| "eval_num_fragments": 3.0001770538243626, | |
| "eval_poi_equal": 0.7466359773371105, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9623760623229461, | |
| "eval_poi_heavy_atoms_difference": 1.126150849858357, | |
| "eval_poi_heavy_atoms_difference_norm": 0.03167513129582722, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9623760623229461, | |
| "eval_reassembly": 0.5097379603399433, | |
| "eval_reassembly_nostereo": 0.5343484419263456, | |
| "eval_runtime": 2394.3813, | |
| "eval_samples_per_second": 4.718, | |
| "eval_steps_per_second": 0.074, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9481232294617564, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.542513316235944, | |
| "grad_norm": 0.16844329237937927, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0086, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 0.5918327086210298, | |
| "grad_norm": 0.13382764160633087, | |
| "learning_rate": 5e-05, | |
| "loss": 0.008, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.6411521010061156, | |
| "grad_norm": 0.10914590954780579, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0082, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 0.6904714933912014, | |
| "grad_norm": 0.1176648959517479, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0064, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 0.7397908857762873, | |
| "grad_norm": 0.11995041370391846, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0058, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 0.7397908857762873, | |
| "eval_all_ligands_equal": 0.5324893767705382, | |
| "eval_e3_equal": 0.8015226628895185, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9835339943342776, | |
| "eval_e3_heavy_atoms_difference": 0.6172096317280453, | |
| "eval_e3_heavy_atoms_difference_norm": 0.01718153606779645, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9835339943342776, | |
| "eval_has_all_attachment_points": 0.9898194050991501, | |
| "eval_has_three_substructures": 0.9992032577903682, | |
| "eval_heavy_atoms_difference": 4.7888633144475925, | |
| "eval_heavy_atoms_difference_norm": 0.06261895375354189, | |
| "eval_linker_equal": 0.7643413597733711, | |
| "eval_linker_graph_edit_distance": 5.931303116147309e+61, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9940686968838527, | |
| "eval_linker_heavy_atoms_difference": 0.2994865439093484, | |
| "eval_linker_heavy_atoms_difference_norm": 0.003906866403538865, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9940686968838527, | |
| "eval_loss": 0.3187354505062103, | |
| "eval_num_fragments": 2.9999114730878187, | |
| "eval_poi_equal": 0.7585871104815864, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9615793201133145, | |
| "eval_poi_heavy_atoms_difference": 0.9885800283286119, | |
| "eval_poi_heavy_atoms_difference_norm": 0.026428449227846533, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9615793201133145, | |
| "eval_reassembly": 0.5401912181303116, | |
| "eval_reassembly_nostereo": 0.564270538243626, | |
| "eval_runtime": 2423.1193, | |
| "eval_samples_per_second": 4.662, | |
| "eval_steps_per_second": 0.073, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9417492917847026, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 0.789110278161373, | |
| "grad_norm": 0.10560354590415955, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0054, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 0.8384296705464589, | |
| "grad_norm": 0.10797900706529617, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0054, | |
| "step": 8500 | |
| }, | |
| { | |
| "epoch": 0.8877490629315447, | |
| "grad_norm": 0.08978835493326187, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0047, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 0.9370684553166305, | |
| "grad_norm": 0.10517807304859161, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0043, | |
| "step": 9500 | |
| }, | |
| { | |
| "epoch": 0.9863878477017163, | |
| "grad_norm": 0.08841440826654434, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0049, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 0.9863878477017163, | |
| "eval_all_ligands_equal": 0.5455028328611898, | |
| "eval_e3_equal": 0.8076310198300283, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9866324362606232, | |
| "eval_e3_heavy_atoms_difference": 0.4690155807365439, | |
| "eval_e3_heavy_atoms_difference_norm": 0.012301846377999403, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9866324362606232, | |
| "eval_has_all_attachment_points": 0.9866324362606232, | |
| "eval_has_three_substructures": 0.9990262039660056, | |
| "eval_heavy_atoms_difference": 5.74459985835694, | |
| "eval_heavy_atoms_difference_norm": 0.07440553604997951, | |
| "eval_linker_equal": 0.7855878186968839, | |
| "eval_linker_graph_edit_distance": null, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9952195467422096, | |
| "eval_linker_heavy_atoms_difference": 0.20015934844192634, | |
| "eval_linker_heavy_atoms_difference_norm": -0.0013312150850062455, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9952195467422096, | |
| "eval_loss": 0.32511967420578003, | |
| "eval_num_fragments": 2.999734419263456, | |
| "eval_poi_equal": 0.7631905099150141, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9458215297450425, | |
| "eval_poi_heavy_atoms_difference": 1.7547804532577904, | |
| "eval_poi_heavy_atoms_difference_norm": 0.05102178524901975, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9458215297450425, | |
| "eval_reassembly": 0.5547981586402266, | |
| "eval_reassembly_nostereo": 0.5788774787535411, | |
| "eval_runtime": 2357.728, | |
| "eval_samples_per_second": 4.791, | |
| "eval_steps_per_second": 0.075, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9307719546742209, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 1.0357072400868022, | |
| "grad_norm": 0.06631127744913101, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0048, | |
| "step": 10500 | |
| }, | |
| { | |
| "epoch": 1.085026632471888, | |
| "grad_norm": 0.050583675503730774, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0034, | |
| "step": 11000 | |
| }, | |
| { | |
| "epoch": 1.1343460248569737, | |
| "grad_norm": 0.07966043800115585, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0032, | |
| "step": 11500 | |
| }, | |
| { | |
| "epoch": 1.1836654172420595, | |
| "grad_norm": 0.06630385667085648, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0031, | |
| "step": 12000 | |
| }, | |
| { | |
| "epoch": 1.2329848096271454, | |
| "grad_norm": 0.07357371598482132, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0032, | |
| "step": 12500 | |
| }, | |
| { | |
| "epoch": 1.2823042020122313, | |
| "grad_norm": 0.04489985853433609, | |
| "learning_rate": 5e-05, | |
| "loss": 0.003, | |
| "step": 13000 | |
| }, | |
| { | |
| "epoch": 1.3316235943973171, | |
| "grad_norm": 0.07262099534273148, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0029, | |
| "step": 13500 | |
| }, | |
| { | |
| "epoch": 1.3809429867824028, | |
| "grad_norm": 0.04908349737524986, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0027, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 1.4302623791674887, | |
| "grad_norm": 0.07208685576915741, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0031, | |
| "step": 14500 | |
| }, | |
| { | |
| "epoch": 1.4795817715525745, | |
| "grad_norm": 0.06099744513630867, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0035, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 1.5289011639376602, | |
| "grad_norm": 0.044646210968494415, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0022, | |
| "step": 15500 | |
| }, | |
| { | |
| "epoch": 1.578220556322746, | |
| "grad_norm": 0.04811064526438713, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 1.627539948707832, | |
| "grad_norm": 0.054762303829193115, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0024, | |
| "step": 16500 | |
| }, | |
| { | |
| "epoch": 1.6768593410929178, | |
| "grad_norm": 0.04429342970252037, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0022, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 1.7261787334780037, | |
| "grad_norm": 0.06518573313951492, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0022, | |
| "step": 17500 | |
| }, | |
| { | |
| "epoch": 1.7754981258630895, | |
| "grad_norm": 0.043648600578308105, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 1.8248175182481752, | |
| "grad_norm": 0.044574983417987823, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 18500 | |
| }, | |
| { | |
| "epoch": 1.874136910633261, | |
| "grad_norm": 0.05319111794233322, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 1.9234563030183467, | |
| "grad_norm": 0.05567469820380211, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 19500 | |
| }, | |
| { | |
| "epoch": 1.9727756954034326, | |
| "grad_norm": 0.045425355434417725, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0019, | |
| "step": 20000 | |
| }, | |
| { | |
| "epoch": 2.0220950877885184, | |
| "grad_norm": 0.05095202848315239, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0024, | |
| "step": 20500 | |
| }, | |
| { | |
| "epoch": 2.0714144801736043, | |
| "grad_norm": 0.027557415887713432, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 21000 | |
| }, | |
| { | |
| "epoch": 2.12073387255869, | |
| "grad_norm": 0.04820004105567932, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 21500 | |
| }, | |
| { | |
| "epoch": 2.170053264943776, | |
| "grad_norm": 0.041186604648828506, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 22000 | |
| }, | |
| { | |
| "epoch": 2.219372657328862, | |
| "grad_norm": 0.028476761654019356, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 22500 | |
| }, | |
| { | |
| "epoch": 2.2686920497139473, | |
| "grad_norm": 0.045205824077129364, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 23000 | |
| }, | |
| { | |
| "epoch": 2.318011442099033, | |
| "grad_norm": 0.04366511106491089, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 23500 | |
| }, | |
| { | |
| "epoch": 2.367330834484119, | |
| "grad_norm": 0.022254638373851776, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0016, | |
| "step": 24000 | |
| }, | |
| { | |
| "epoch": 2.416650226869205, | |
| "grad_norm": 0.039716288447380066, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 24500 | |
| }, | |
| { | |
| "epoch": 2.465969619254291, | |
| "grad_norm": 0.055284593254327774, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 25000 | |
| }, | |
| { | |
| "epoch": 2.5152890116393767, | |
| "grad_norm": 0.045403581112623215, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 25500 | |
| }, | |
| { | |
| "epoch": 2.5646084040244626, | |
| "grad_norm": 0.059790823608636856, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0018, | |
| "step": 26000 | |
| }, | |
| { | |
| "epoch": 2.6139277964095484, | |
| "grad_norm": 0.05007553473114967, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 26500 | |
| }, | |
| { | |
| "epoch": 2.6632471887946343, | |
| "grad_norm": 0.03945387527346611, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 27000 | |
| }, | |
| { | |
| "epoch": 2.7125665811797197, | |
| "grad_norm": 0.033401645720005035, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 27500 | |
| }, | |
| { | |
| "epoch": 2.7618859735648056, | |
| "grad_norm": 0.05006084218621254, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 28000 | |
| }, | |
| { | |
| "epoch": 2.8112053659498915, | |
| "grad_norm": 0.04564574733376503, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 28500 | |
| }, | |
| { | |
| "epoch": 2.8605247583349773, | |
| "grad_norm": 0.0373481847345829, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 29000 | |
| }, | |
| { | |
| "epoch": 2.909844150720063, | |
| "grad_norm": 0.030000342056155205, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0019, | |
| "step": 29500 | |
| }, | |
| { | |
| "epoch": 2.959163543105149, | |
| "grad_norm": 0.012932154349982738, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0021, | |
| "step": 30000 | |
| }, | |
| { | |
| "epoch": 3.008482935490235, | |
| "grad_norm": 0.029712537303566933, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 30500 | |
| }, | |
| { | |
| "epoch": 3.0578023278753204, | |
| "grad_norm": 0.022233586758375168, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 31000 | |
| }, | |
| { | |
| "epoch": 3.1071217202604062, | |
| "grad_norm": 0.04128130525350571, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 31500 | |
| }, | |
| { | |
| "epoch": 3.156441112645492, | |
| "grad_norm": 0.031225621700286865, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 32000 | |
| }, | |
| { | |
| "epoch": 3.205760505030578, | |
| "grad_norm": 0.04397075995802879, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 32500 | |
| }, | |
| { | |
| "epoch": 3.255079897415664, | |
| "grad_norm": 0.03178331255912781, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 33000 | |
| }, | |
| { | |
| "epoch": 3.3043992898007497, | |
| "grad_norm": 0.04911533370614052, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 33500 | |
| }, | |
| { | |
| "epoch": 3.3537186821858356, | |
| "grad_norm": 0.036495912820100784, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 34000 | |
| }, | |
| { | |
| "epoch": 3.4030380745709214, | |
| "grad_norm": 0.0601472444832325, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 34500 | |
| }, | |
| { | |
| "epoch": 3.4523574669560073, | |
| "grad_norm": 0.035126738250255585, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 35000 | |
| }, | |
| { | |
| "epoch": 3.501676859341093, | |
| "grad_norm": 0.021409129723906517, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0014, | |
| "step": 35500 | |
| }, | |
| { | |
| "epoch": 3.5509962517261786, | |
| "grad_norm": 0.01704758033156395, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 36000 | |
| }, | |
| { | |
| "epoch": 3.6003156441112645, | |
| "grad_norm": 0.03872734308242798, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 36500 | |
| }, | |
| { | |
| "epoch": 3.6496350364963503, | |
| "grad_norm": 0.030823158100247383, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 37000 | |
| }, | |
| { | |
| "epoch": 3.698954428881436, | |
| "grad_norm": 0.03794977068901062, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 37500 | |
| }, | |
| { | |
| "epoch": 3.748273821266522, | |
| "grad_norm": 0.042292773723602295, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0013, | |
| "step": 38000 | |
| }, | |
| { | |
| "epoch": 3.797593213651608, | |
| "grad_norm": 0.040720898658037186, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0015, | |
| "step": 38500 | |
| }, | |
| { | |
| "epoch": 3.8469126060366934, | |
| "grad_norm": 0.017985910177230835, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 39000 | |
| }, | |
| { | |
| "epoch": 3.8962319984217793, | |
| "grad_norm": 0.04241044074296951, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 39500 | |
| }, | |
| { | |
| "epoch": 3.945551390806865, | |
| "grad_norm": 0.02607624977827072, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 40000 | |
| }, | |
| { | |
| "epoch": 3.994870783191951, | |
| "grad_norm": 0.02784532681107521, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 40500 | |
| }, | |
| { | |
| "epoch": 4.044190175577037, | |
| "grad_norm": 0.03348594531416893, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 41000 | |
| }, | |
| { | |
| "epoch": 4.093509567962123, | |
| "grad_norm": 0.06848876923322678, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 41500 | |
| }, | |
| { | |
| "epoch": 4.142828960347209, | |
| "grad_norm": 0.023778628557920456, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 42000 | |
| }, | |
| { | |
| "epoch": 4.1921483527322945, | |
| "grad_norm": 0.014797426760196686, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 42500 | |
| }, | |
| { | |
| "epoch": 4.24146774511738, | |
| "grad_norm": 0.019765177741646767, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 43000 | |
| }, | |
| { | |
| "epoch": 4.290787137502466, | |
| "grad_norm": 0.03716424107551575, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 43500 | |
| }, | |
| { | |
| "epoch": 4.340106529887552, | |
| "grad_norm": 0.04304041340947151, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 44000 | |
| }, | |
| { | |
| "epoch": 4.389425922272638, | |
| "grad_norm": 0.07148012518882751, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 44500 | |
| }, | |
| { | |
| "epoch": 4.438745314657724, | |
| "grad_norm": 0.025658031925559044, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 45000 | |
| }, | |
| { | |
| "epoch": 4.48806470704281, | |
| "grad_norm": 0.03474525734782219, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 45500 | |
| }, | |
| { | |
| "epoch": 4.537384099427895, | |
| "grad_norm": 0.036050453782081604, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 46000 | |
| }, | |
| { | |
| "epoch": 4.5867034918129805, | |
| "grad_norm": 0.027342744171619415, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 46500 | |
| }, | |
| { | |
| "epoch": 4.636022884198066, | |
| "grad_norm": 0.03911557421088219, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 47000 | |
| }, | |
| { | |
| "epoch": 4.685342276583152, | |
| "grad_norm": 0.04079333692789078, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 47500 | |
| }, | |
| { | |
| "epoch": 4.734661668968238, | |
| "grad_norm": 0.03646528348326683, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 48000 | |
| }, | |
| { | |
| "epoch": 4.783981061353324, | |
| "grad_norm": 0.0461641363799572, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 48500 | |
| }, | |
| { | |
| "epoch": 4.83330045373841, | |
| "grad_norm": 0.050128430128097534, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 49000 | |
| }, | |
| { | |
| "epoch": 4.882619846123496, | |
| "grad_norm": 0.02025548741221428, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 49500 | |
| }, | |
| { | |
| "epoch": 4.931939238508582, | |
| "grad_norm": 0.03685050085186958, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 50000 | |
| }, | |
| { | |
| "epoch": 4.9812586308936675, | |
| "grad_norm": 0.024761617183685303, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 50500 | |
| }, | |
| { | |
| "epoch": 5.030578023278753, | |
| "grad_norm": 0.02904408425092697, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0012, | |
| "step": 51000 | |
| }, | |
| { | |
| "epoch": 5.079897415663839, | |
| "grad_norm": 0.026762697845697403, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 51500 | |
| }, | |
| { | |
| "epoch": 5.129216808048925, | |
| "grad_norm": 0.02316400595009327, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 52000 | |
| }, | |
| { | |
| "epoch": 5.178536200434011, | |
| "grad_norm": 0.01717398874461651, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 52500 | |
| }, | |
| { | |
| "epoch": 5.227855592819097, | |
| "grad_norm": 0.056246623396873474, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 53000 | |
| }, | |
| { | |
| "epoch": 5.277174985204182, | |
| "grad_norm": 0.030238812789320946, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 53500 | |
| }, | |
| { | |
| "epoch": 5.326494377589268, | |
| "grad_norm": 0.021439244970679283, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 54000 | |
| }, | |
| { | |
| "epoch": 5.375813769974354, | |
| "grad_norm": 0.01937209442257881, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 54500 | |
| }, | |
| { | |
| "epoch": 5.425133162359439, | |
| "grad_norm": 0.02833039127290249, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 55000 | |
| }, | |
| { | |
| "epoch": 5.474452554744525, | |
| "grad_norm": 0.013502424582839012, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 55500 | |
| }, | |
| { | |
| "epoch": 5.523771947129611, | |
| "grad_norm": 0.05730258300900459, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 56000 | |
| }, | |
| { | |
| "epoch": 5.573091339514697, | |
| "grad_norm": 0.031574029475450516, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 56500 | |
| }, | |
| { | |
| "epoch": 5.622410731899783, | |
| "grad_norm": 0.03838383033871651, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 57000 | |
| }, | |
| { | |
| "epoch": 5.671730124284869, | |
| "grad_norm": 0.027948297560214996, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 57500 | |
| }, | |
| { | |
| "epoch": 5.721049516669955, | |
| "grad_norm": 0.03251497074961662, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 58000 | |
| }, | |
| { | |
| "epoch": 5.7703689090550405, | |
| "grad_norm": 0.04168747738003731, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 58500 | |
| }, | |
| { | |
| "epoch": 5.819688301440126, | |
| "grad_norm": 0.02773544192314148, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 59000 | |
| }, | |
| { | |
| "epoch": 5.869007693825212, | |
| "grad_norm": 0.019883353263139725, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 59500 | |
| }, | |
| { | |
| "epoch": 5.918327086210298, | |
| "grad_norm": 0.03476933389902115, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 60000 | |
| }, | |
| { | |
| "epoch": 5.967646478595384, | |
| "grad_norm": 0.0135957645252347, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 60500 | |
| }, | |
| { | |
| "epoch": 6.01696587098047, | |
| "grad_norm": 0.016737064346671104, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 61000 | |
| }, | |
| { | |
| "epoch": 6.066285263365556, | |
| "grad_norm": 0.0470641553401947, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 61500 | |
| }, | |
| { | |
| "epoch": 6.115604655750641, | |
| "grad_norm": 0.037444982677698135, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 62000 | |
| }, | |
| { | |
| "epoch": 6.164924048135727, | |
| "grad_norm": 0.03800535574555397, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 62500 | |
| }, | |
| { | |
| "epoch": 6.2142434405208125, | |
| "grad_norm": 0.02700197882950306, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 63000 | |
| }, | |
| { | |
| "epoch": 6.263562832905898, | |
| "grad_norm": 0.03783607482910156, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 63500 | |
| }, | |
| { | |
| "epoch": 6.312882225290984, | |
| "grad_norm": 0.03808404505252838, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 64000 | |
| }, | |
| { | |
| "epoch": 6.36220161767607, | |
| "grad_norm": 0.016872331500053406, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 64500 | |
| }, | |
| { | |
| "epoch": 6.411521010061156, | |
| "grad_norm": 0.032465968281030655, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0005, | |
| "step": 65000 | |
| }, | |
| { | |
| "epoch": 6.460840402446242, | |
| "grad_norm": 0.007632725406438112, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 65500 | |
| }, | |
| { | |
| "epoch": 6.510159794831328, | |
| "grad_norm": 0.021445443853735924, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 66000 | |
| }, | |
| { | |
| "epoch": 6.5594791872164135, | |
| "grad_norm": 0.006442953832447529, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 66500 | |
| }, | |
| { | |
| "epoch": 6.608798579601499, | |
| "grad_norm": 0.013008514419198036, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 67000 | |
| }, | |
| { | |
| "epoch": 6.658117971986585, | |
| "grad_norm": 0.010181767866015434, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 67500 | |
| }, | |
| { | |
| "epoch": 6.707437364371671, | |
| "grad_norm": 0.03756224736571312, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 68000 | |
| }, | |
| { | |
| "epoch": 6.756756756756757, | |
| "grad_norm": 0.04266593977808952, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 68500 | |
| }, | |
| { | |
| "epoch": 6.806076149141843, | |
| "grad_norm": 0.020077640190720558, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0008, | |
| "step": 69000 | |
| }, | |
| { | |
| "epoch": 6.855395541526929, | |
| "grad_norm": 0.017746519297361374, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 69500 | |
| }, | |
| { | |
| "epoch": 6.904714933912015, | |
| "grad_norm": 0.030025839805603027, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 70000 | |
| }, | |
| { | |
| "epoch": 6.904714933912015, | |
| "eval_all_ligands_equal": 0.5933073654390935, | |
| "eval_e3_equal": 0.8271954674220963, | |
| "eval_e3_graph_edit_distance": null, | |
| "eval_e3_graph_edit_distance_norm": null, | |
| "eval_e3_has_attachment_point(s)": 0.9917669971671388, | |
| "eval_e3_heavy_atoms_difference": 0.4047450424929179, | |
| "eval_e3_heavy_atoms_difference_norm": 0.00843519659565475, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9917669971671388, | |
| "eval_has_all_attachment_points": 0.990350566572238, | |
| "eval_has_three_substructures": 0.9999114730878187, | |
| "eval_heavy_atoms_difference": 5.034082861189802, | |
| "eval_heavy_atoms_difference_norm": 0.06779925037570428, | |
| "eval_linker_equal": 0.84782223796034, | |
| "eval_linker_graph_edit_distance": 2.5672804532577904e+61, | |
| "eval_linker_graph_edit_distance_norm": null, | |
| "eval_linker_has_attachment_point(s)": 0.9974327195467422, | |
| "eval_linker_heavy_atoms_difference": 0.17236189801699717, | |
| "eval_linker_heavy_atoms_difference_norm": 0.0012798111547145205, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9974327195467422, | |
| "eval_loss": 0.3657114505767822, | |
| "eval_num_fragments": 2.9999114730878187, | |
| "eval_poi_equal": 0.7852337110481586, | |
| "eval_poi_graph_edit_distance": null, | |
| "eval_poi_graph_edit_distance_norm": null, | |
| "eval_poi_has_attachment_point(s)": 0.9508675637393768, | |
| "eval_poi_heavy_atoms_difference": 1.4603399433427762, | |
| "eval_poi_heavy_atoms_difference_norm": 0.046673695753555136, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9508675637393768, | |
| "eval_reassembly": 0.6011862606232294, | |
| "eval_reassembly_nostereo": 0.6374822946175638, | |
| "eval_runtime": 2302.2232, | |
| "eval_samples_per_second": 4.907, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9407754957507082, | |
| "step": 70000 | |
| }, | |
| { | |
| "epoch": 6.9540343262971, | |
| "grad_norm": 0.03940337523818016, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 70500 | |
| }, | |
| { | |
| "epoch": 7.0033537186821855, | |
| "grad_norm": 0.013938716612756252, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 71000 | |
| }, | |
| { | |
| "epoch": 7.052673111067271, | |
| "grad_norm": 0.011412853375077248, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 71500 | |
| }, | |
| { | |
| "epoch": 7.101992503452357, | |
| "grad_norm": 0.01979513093829155, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 72000 | |
| }, | |
| { | |
| "epoch": 7.151311895837443, | |
| "grad_norm": 0.01855366677045822, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 72500 | |
| }, | |
| { | |
| "epoch": 7.151311895837443, | |
| "eval_all_ligands_equal": 0.5905630311614731, | |
| "eval_e3_equal": 0.8244511331444759, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9896423512747875, | |
| "eval_e3_heavy_atoms_difference": 0.41324362606232296, | |
| "eval_e3_heavy_atoms_difference_norm": 0.009797145206793536, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9896423512747875, | |
| "eval_has_all_attachment_points": 0.9855701133144475, | |
| "eval_has_three_substructures": 0.9999114730878187, | |
| "eval_heavy_atoms_difference": 5.1041961756373935, | |
| "eval_heavy_atoms_difference_norm": 0.06840612220299803, | |
| "eval_linker_equal": 0.8442811614730878, | |
| "eval_linker_graph_edit_distance": Infinity, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9977868271954674, | |
| "eval_linker_heavy_atoms_difference": 0.13225920679886685, | |
| "eval_linker_heavy_atoms_difference_norm": -0.003350444216765269, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9977868271954674, | |
| "eval_loss": 0.36827388405799866, | |
| "eval_num_fragments": 3.0000885269121813, | |
| "eval_poi_equal": 0.7870042492917847, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9498937677053825, | |
| "eval_poi_heavy_atoms_difference": 1.4326310198300283, | |
| "eval_poi_heavy_atoms_difference_norm": 0.047336977702972384, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9498937677053825, | |
| "eval_reassembly": 0.5973796033994334, | |
| "eval_reassembly_nostereo": 0.6340297450424929, | |
| "eval_runtime": 2309.4187, | |
| "eval_samples_per_second": 4.891, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9390049575070821, | |
| "step": 72500 | |
| }, | |
| { | |
| "epoch": 7.200631288222529, | |
| "grad_norm": 0.009863517247140408, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 73000 | |
| }, | |
| { | |
| "epoch": 7.249950680607615, | |
| "grad_norm": 0.03714962303638458, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 73500 | |
| }, | |
| { | |
| "epoch": 7.299270072992701, | |
| "grad_norm": 0.016814785078167915, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 74000 | |
| }, | |
| { | |
| "epoch": 7.348589465377787, | |
| "grad_norm": 0.012567018158733845, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 74500 | |
| }, | |
| { | |
| "epoch": 7.397908857762872, | |
| "grad_norm": 0.012865165248513222, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 75000 | |
| }, | |
| { | |
| "epoch": 7.397908857762872, | |
| "eval_all_ligands_equal": 0.5893236543909348, | |
| "eval_e3_equal": 0.8291430594900849, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9945113314447592, | |
| "eval_e3_heavy_atoms_difference": 0.391643059490085, | |
| "eval_e3_heavy_atoms_difference_norm": 0.011925543938631603, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9945113314447592, | |
| "eval_has_all_attachment_points": 0.990350566572238, | |
| "eval_has_three_substructures": 0.9992917847025495, | |
| "eval_heavy_atoms_difference": 4.8410056657223794, | |
| "eval_heavy_atoms_difference_norm": 0.06490009063872229, | |
| "eval_linker_equal": 0.8384383852691218, | |
| "eval_linker_graph_edit_distance": Infinity, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9958392351274787, | |
| "eval_linker_heavy_atoms_difference": 0.2242386685552408, | |
| "eval_linker_heavy_atoms_difference_norm": 0.002240399364420046, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9958392351274787, | |
| "eval_loss": 0.3680249750614166, | |
| "eval_num_fragments": 2.9992917847025495, | |
| "eval_poi_equal": 0.7831090651558074, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9505134560906515, | |
| "eval_poi_heavy_atoms_difference": 1.3942103399433428, | |
| "eval_poi_heavy_atoms_difference_norm": 0.044281004444645264, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9505134560906515, | |
| "eval_reassembly": 0.5965828611898017, | |
| "eval_reassembly_nostereo": 0.6320821529745042, | |
| "eval_runtime": 2316.3543, | |
| "eval_samples_per_second": 4.877, | |
| "eval_steps_per_second": 0.076, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9429001416430595, | |
| "step": 75000 | |
| }, | |
| { | |
| "epoch": 7.447228250147958, | |
| "grad_norm": 0.044476717710494995, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 75500 | |
| }, | |
| { | |
| "epoch": 7.496547642533044, | |
| "grad_norm": 0.026762189343571663, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 76000 | |
| }, | |
| { | |
| "epoch": 7.54586703491813, | |
| "grad_norm": 0.01543725747615099, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 76500 | |
| }, | |
| { | |
| "epoch": 7.595186427303216, | |
| "grad_norm": 0.03706732764840126, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0009, | |
| "step": 77000 | |
| }, | |
| { | |
| "epoch": 7.644505819688302, | |
| "grad_norm": 0.018150992691516876, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 77500 | |
| }, | |
| { | |
| "epoch": 7.644505819688302, | |
| "eval_all_ligands_equal": 0.5914483002832861, | |
| "eval_e3_equal": 0.8292315864022662, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9844192634560907, | |
| "eval_e3_heavy_atoms_difference": 0.6906869688385269, | |
| "eval_e3_heavy_atoms_difference_norm": 0.023107174439915137, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9844192634560907, | |
| "eval_has_all_attachment_points": 0.9919440509915014, | |
| "eval_has_three_substructures": 1.0, | |
| "eval_heavy_atoms_difference": 5.788597733711049, | |
| "eval_heavy_atoms_difference_norm": 0.07756553947808909, | |
| "eval_linker_equal": 0.8436614730878187, | |
| "eval_linker_graph_edit_distance": 1.2393767705382436e+61, | |
| "eval_linker_graph_edit_distance_norm": 0.02975442150108382, | |
| "eval_linker_has_attachment_point(s)": 0.9987606232294618, | |
| "eval_linker_heavy_atoms_difference": 0.2759383852691218, | |
| "eval_linker_heavy_atoms_difference_norm": 0.005473719712206602, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9987606232294618, | |
| "eval_loss": 0.3599880337715149, | |
| "eval_num_fragments": 3.0, | |
| "eval_poi_equal": 0.7856763456090652, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9498937677053825, | |
| "eval_poi_heavy_atoms_difference": 1.5098264872521245, | |
| "eval_poi_heavy_atoms_difference_norm": 0.04864452944775116, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9498937677053825, | |
| "eval_reassembly": 0.5994157223796034, | |
| "eval_reassembly_nostereo": 0.6316395184135978, | |
| "eval_runtime": 2294.7006, | |
| "eval_samples_per_second": 4.923, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9340474504249292, | |
| "step": 77500 | |
| }, | |
| { | |
| "epoch": 7.693825212073387, | |
| "grad_norm": 0.01304660364985466, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0005, | |
| "step": 78000 | |
| }, | |
| { | |
| "epoch": 7.7431446044584735, | |
| "grad_norm": 0.0160418301820755, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 78500 | |
| }, | |
| { | |
| "epoch": 7.7924639968435585, | |
| "grad_norm": 0.014610692858695984, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 79000 | |
| }, | |
| { | |
| "epoch": 7.841783389228644, | |
| "grad_norm": 0.01852056384086609, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 79500 | |
| }, | |
| { | |
| "epoch": 7.89110278161373, | |
| "grad_norm": 0.027739811688661575, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 80000 | |
| }, | |
| { | |
| "epoch": 7.89110278161373, | |
| "eval_all_ligands_equal": 0.5888810198300283, | |
| "eval_e3_equal": 0.8214412181303116, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9830028328611898, | |
| "eval_e3_heavy_atoms_difference": 0.5424043909348442, | |
| "eval_e3_heavy_atoms_difference_norm": 0.01575299549507441, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9830028328611898, | |
| "eval_has_all_attachment_points": 0.9889341359773371, | |
| "eval_has_three_substructures": 0.9995573654390935, | |
| "eval_heavy_atoms_difference": 5.354284702549575, | |
| "eval_heavy_atoms_difference_norm": 0.07234431726909056, | |
| "eval_linker_equal": 0.8450779036827195, | |
| "eval_linker_graph_edit_distance": 3.629603399433428e+61, | |
| "eval_linker_graph_edit_distance_norm": 0.029242047752320765, | |
| "eval_linker_has_attachment_point(s)": 0.9963703966005666, | |
| "eval_linker_heavy_atoms_difference": 0.20573654390934845, | |
| "eval_linker_heavy_atoms_difference_norm": 0.0011033908121370318, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9963703966005666, | |
| "eval_loss": 0.34423062205314636, | |
| "eval_num_fragments": 2.999734419263456, | |
| "eval_poi_equal": 0.7865616147308782, | |
| "eval_poi_graph_edit_distance": 4.727337110481586e+62, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9527266288951841, | |
| "eval_poi_heavy_atoms_difference": 1.464058073654391, | |
| "eval_poi_heavy_atoms_difference_norm": 0.045003402378614825, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9527266288951841, | |
| "eval_reassembly": 0.5959631728045326, | |
| "eval_reassembly_nostereo": 0.6296919263456091, | |
| "eval_runtime": 2330.481, | |
| "eval_samples_per_second": 4.847, | |
| "eval_steps_per_second": 0.076, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9351983002832861, | |
| "step": 80000 | |
| }, | |
| { | |
| "epoch": 7.940422173998816, | |
| "grad_norm": 0.032377783209085464, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 80500 | |
| }, | |
| { | |
| "epoch": 7.989741566383902, | |
| "grad_norm": 0.05784532427787781, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 81000 | |
| }, | |
| { | |
| "epoch": 8.039060958768989, | |
| "grad_norm": 0.015815122053027153, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 81500 | |
| }, | |
| { | |
| "epoch": 8.088380351154074, | |
| "grad_norm": 0.04601709917187691, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 82000 | |
| }, | |
| { | |
| "epoch": 8.137699743539159, | |
| "grad_norm": 0.03440980985760689, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 82500 | |
| }, | |
| { | |
| "epoch": 8.137699743539159, | |
| "eval_all_ligands_equal": 0.5929532577903682, | |
| "eval_e3_equal": 0.8276381019830028, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9927407932011332, | |
| "eval_e3_heavy_atoms_difference": 0.4409525495750708, | |
| "eval_e3_heavy_atoms_difference_norm": 0.012459579597145226, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9927407932011332, | |
| "eval_has_all_attachment_points": 0.9905276203966006, | |
| "eval_has_three_substructures": 1.0, | |
| "eval_heavy_atoms_difference": 4.797273371104816, | |
| "eval_heavy_atoms_difference_norm": 0.0636371430002183, | |
| "eval_linker_equal": 0.8479107648725213, | |
| "eval_linker_graph_edit_distance": Infinity, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9975212464589235, | |
| "eval_linker_heavy_atoms_difference": 0.2184844192634561, | |
| "eval_linker_heavy_atoms_difference_norm": 0.0032624460089282054, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9975212464589235, | |
| "eval_loss": 0.3474595844745636, | |
| "eval_num_fragments": 3.0, | |
| "eval_poi_equal": 0.7876239376770539, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9552053824362606, | |
| "eval_poi_heavy_atoms_difference": 1.4236012747875355, | |
| "eval_poi_heavy_atoms_difference_norm": 0.04291573465848191, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9552053824362606, | |
| "eval_reassembly": 0.6003895184135978, | |
| "eval_reassembly_nostereo": 0.6358002832861189, | |
| "eval_runtime": 2308.8139, | |
| "eval_samples_per_second": 4.893, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9459100566572238, | |
| "step": 82500 | |
| }, | |
| { | |
| "epoch": 8.187019135924245, | |
| "grad_norm": 0.019819585606455803, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 83000 | |
| }, | |
| { | |
| "epoch": 8.23633852830933, | |
| "grad_norm": 0.019882431253790855, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 83500 | |
| }, | |
| { | |
| "epoch": 8.285657920694417, | |
| "grad_norm": 0.019609618932008743, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0005, | |
| "step": 84000 | |
| }, | |
| { | |
| "epoch": 8.334977313079502, | |
| "grad_norm": 0.019066104665398598, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 84500 | |
| }, | |
| { | |
| "epoch": 8.384296705464589, | |
| "grad_norm": 0.04364117607474327, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 85000 | |
| }, | |
| { | |
| "epoch": 8.384296705464589, | |
| "eval_all_ligands_equal": 0.5923335694050992, | |
| "eval_e3_equal": 0.8279036827195467, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9934490084985835, | |
| "eval_e3_heavy_atoms_difference": 0.4311260623229462, | |
| "eval_e3_heavy_atoms_difference_norm": 0.010632867247124637, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9934490084985835, | |
| "eval_has_all_attachment_points": 0.9875177053824362, | |
| "eval_has_three_substructures": 0.9999114730878187, | |
| "eval_heavy_atoms_difference": 5.205116855524079, | |
| "eval_heavy_atoms_difference_norm": 0.06920894495644549, | |
| "eval_linker_equal": 0.8379957507082153, | |
| "eval_linker_graph_edit_distance": 2.301699716713881e+61, | |
| "eval_linker_graph_edit_distance_norm": 0.029033307034527205, | |
| "eval_linker_has_attachment_point(s)": 0.9976983002832861, | |
| "eval_linker_heavy_atoms_difference": 0.24530807365439095, | |
| "eval_linker_heavy_atoms_difference_norm": 0.0043104668859274544, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9976983002832861, | |
| "eval_loss": 0.3690963089466095, | |
| "eval_num_fragments": 3.0000885269121813, | |
| "eval_poi_equal": 0.7858533994334278, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9467067988668555, | |
| "eval_poi_heavy_atoms_difference": 1.4565332861189801, | |
| "eval_poi_heavy_atoms_difference_norm": 0.04493219380856177, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9467067988668555, | |
| "eval_reassembly": 0.5997698300283286, | |
| "eval_reassembly_nostereo": 0.6350920679886686, | |
| "eval_runtime": 2356.0254, | |
| "eval_samples_per_second": 4.795, | |
| "eval_steps_per_second": 0.075, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9382967422096318, | |
| "step": 85000 | |
| }, | |
| { | |
| "epoch": 8.433616097849674, | |
| "grad_norm": 0.039685726165771484, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0005, | |
| "step": 85500 | |
| }, | |
| { | |
| "epoch": 8.48293549023476, | |
| "grad_norm": 0.03857019916176796, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 86000 | |
| }, | |
| { | |
| "epoch": 8.532254882619846, | |
| "grad_norm": 0.009983392432332039, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 86500 | |
| }, | |
| { | |
| "epoch": 8.581574275004932, | |
| "grad_norm": 0.016462204977869987, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 87000 | |
| }, | |
| { | |
| "epoch": 8.630893667390017, | |
| "grad_norm": 0.04031758010387421, | |
| "learning_rate": 5e-05, | |
| "loss": 0.001, | |
| "step": 87500 | |
| }, | |
| { | |
| "epoch": 8.630893667390017, | |
| "eval_all_ligands_equal": 0.5933958923512748, | |
| "eval_e3_equal": 0.8293201133144475, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9881373937677054, | |
| "eval_e3_heavy_atoms_difference": 0.6075601983002833, | |
| "eval_e3_heavy_atoms_difference_norm": 0.017386111936693323, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9881373937677054, | |
| "eval_has_all_attachment_points": 0.9930063739376771, | |
| "eval_has_three_substructures": 0.9997344192634561, | |
| "eval_heavy_atoms_difference": 5.84985835694051, | |
| "eval_heavy_atoms_difference_norm": 0.07722163878655638, | |
| "eval_linker_equal": 0.8428647308781869, | |
| "eval_linker_graph_edit_distance": 2.832861189801699e+61, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9971671388101983, | |
| "eval_linker_heavy_atoms_difference": 0.2046742209631728, | |
| "eval_linker_heavy_atoms_difference_norm": -0.0008778341533898135, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9971671388101983, | |
| "eval_loss": 0.35258108377456665, | |
| "eval_num_fragments": 2.999734419263456, | |
| "eval_poi_equal": 0.78328611898017, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9449362606232294, | |
| "eval_poi_heavy_atoms_difference": 1.7052939093484418, | |
| "eval_poi_heavy_atoms_difference_norm": 0.05204069250730441, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9449362606232294, | |
| "eval_reassembly": 0.6005665722379604, | |
| "eval_reassembly_nostereo": 0.6351805949008499, | |
| "eval_runtime": 2282.6691, | |
| "eval_samples_per_second": 4.949, | |
| "eval_steps_per_second": 0.078, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9323654390934845, | |
| "step": 87500 | |
| }, | |
| { | |
| "epoch": 8.680213059775104, | |
| "grad_norm": 0.00966018158942461, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0007, | |
| "step": 88000 | |
| }, | |
| { | |
| "epoch": 8.72953245216019, | |
| "grad_norm": 0.033011872321367264, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0005, | |
| "step": 88500 | |
| }, | |
| { | |
| "epoch": 8.778851844545276, | |
| "grad_norm": 0.030984072014689445, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 89000 | |
| }, | |
| { | |
| "epoch": 8.82817123693036, | |
| "grad_norm": 0.026483656838536263, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0006, | |
| "step": 89500 | |
| }, | |
| { | |
| "epoch": 8.877490629315448, | |
| "grad_norm": 0.02804381400346756, | |
| "learning_rate": 5e-05, | |
| "loss": 0.0011, | |
| "step": 90000 | |
| }, | |
| { | |
| "epoch": 8.877490629315448, | |
| "eval_all_ligands_equal": 0.595520538243626, | |
| "eval_e3_equal": 0.8275495750708215, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9827372521246459, | |
| "eval_e3_heavy_atoms_difference": 0.7811614730878187, | |
| "eval_e3_heavy_atoms_difference_norm": 0.02834301211747189, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9827372521246459, | |
| "eval_has_all_attachment_points": 0.9923866855524079, | |
| "eval_has_three_substructures": 0.9995573654390935, | |
| "eval_heavy_atoms_difference": 5.942457507082153, | |
| "eval_heavy_atoms_difference_norm": 0.08051500387133764, | |
| "eval_linker_equal": 0.8484419263456091, | |
| "eval_linker_graph_edit_distance": 3.0984419263456094e+61, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9969015580736544, | |
| "eval_linker_heavy_atoms_difference": 0.292050283286119, | |
| "eval_linker_heavy_atoms_difference_norm": 0.006916113507416433, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9969015580736544, | |
| "eval_loss": 0.3571625053882599, | |
| "eval_num_fragments": 2.9995573654390935, | |
| "eval_poi_equal": 0.7940864022662889, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9503364022662889, | |
| "eval_poi_heavy_atoms_difference": 1.5164660056657224, | |
| "eval_poi_heavy_atoms_difference_norm": 0.04747549331915556, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9503364022662889, | |
| "eval_reassembly": 0.6022485835694051, | |
| "eval_reassembly_nostereo": 0.6323477337110481, | |
| "eval_runtime": 2303.5105, | |
| "eval_samples_per_second": 4.904, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9310375354107648, | |
| "step": 90000 | |
| }, | |
| { | |
| "epoch": 8.926810021700533, | |
| "grad_norm": 0.007490255404263735, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0004, | |
| "step": 90500 | |
| }, | |
| { | |
| "epoch": 8.97612941408562, | |
| "grad_norm": 0.019985361024737358, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0004, | |
| "step": 91000 | |
| }, | |
| { | |
| "epoch": 9.025448806470704, | |
| "grad_norm": 0.011463545262813568, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 91500 | |
| }, | |
| { | |
| "epoch": 9.07476819885579, | |
| "grad_norm": 0.002497961511835456, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 92000 | |
| }, | |
| { | |
| "epoch": 9.124087591240876, | |
| "grad_norm": 0.0028062395285815, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 92500 | |
| }, | |
| { | |
| "epoch": 9.124087591240876, | |
| "eval_all_ligands_equal": 0.6010977337110481, | |
| "eval_e3_equal": 0.8302053824362606, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9813208215297451, | |
| "eval_e3_heavy_atoms_difference": 0.8631373937677054, | |
| "eval_e3_heavy_atoms_difference_norm": 0.03305171358393188, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9813208215297451, | |
| "eval_has_all_attachment_points": 0.9928293201133145, | |
| "eval_has_three_substructures": 0.9996458923512748, | |
| "eval_heavy_atoms_difference": 5.261242917847025, | |
| "eval_heavy_atoms_difference_norm": 0.07232162750734558, | |
| "eval_linker_equal": 0.859242209631728, | |
| "eval_linker_graph_edit_distance": Infinity, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9978753541076487, | |
| "eval_linker_heavy_atoms_difference": 0.1762570821529745, | |
| "eval_linker_heavy_atoms_difference_norm": 0.00016629646986268825, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9978753541076487, | |
| "eval_loss": 0.357794851064682, | |
| "eval_num_fragments": 2.9998229461756374, | |
| "eval_poi_equal": 0.7982471671388102, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9594546742209632, | |
| "eval_poi_heavy_atoms_difference": 1.2296388101983002, | |
| "eval_poi_heavy_atoms_difference_norm": 0.03919380504292805, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9594546742209632, | |
| "eval_reassembly": 0.6080028328611898, | |
| "eval_reassembly_nostereo": 0.6372167138810199, | |
| "eval_runtime": 2303.9503, | |
| "eval_samples_per_second": 4.903, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9396246458923513, | |
| "step": 92500 | |
| }, | |
| { | |
| "epoch": 9.173406983625961, | |
| "grad_norm": 0.031869933009147644, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 93000 | |
| }, | |
| { | |
| "epoch": 9.222726376011048, | |
| "grad_norm": 0.0030138411093503237, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 93500 | |
| }, | |
| { | |
| "epoch": 9.272045768396133, | |
| "grad_norm": 0.0038213497027754784, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 94000 | |
| }, | |
| { | |
| "epoch": 9.32136516078122, | |
| "grad_norm": 0.005241777747869492, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 94500 | |
| }, | |
| { | |
| "epoch": 9.370684553166305, | |
| "grad_norm": 0.0022596395574510098, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 95000 | |
| }, | |
| { | |
| "epoch": 9.370684553166305, | |
| "eval_all_ligands_equal": 0.6000354107648725, | |
| "eval_e3_equal": 0.8310021246458924, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9879603399433428, | |
| "eval_e3_heavy_atoms_difference": 0.6214589235127479, | |
| "eval_e3_heavy_atoms_difference_norm": 0.020972715432390276, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9879603399433428, | |
| "eval_has_all_attachment_points": 0.9925637393767706, | |
| "eval_has_three_substructures": 0.9997344192634561, | |
| "eval_heavy_atoms_difference": 4.6483711048158645, | |
| "eval_heavy_atoms_difference_norm": 0.0627018245353709, | |
| "eval_linker_equal": 0.8579143059490085, | |
| "eval_linker_graph_edit_distance": 2.1246458923512748e+61, | |
| "eval_linker_graph_edit_distance_norm": 0.02610823235680867, | |
| "eval_linker_has_attachment_point(s)": 0.9978753541076487, | |
| "eval_linker_heavy_atoms_difference": 0.1959100566572238, | |
| "eval_linker_heavy_atoms_difference_norm": 0.0018271796307546642, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9978753541076487, | |
| "eval_loss": 0.3646354079246521, | |
| "eval_num_fragments": 2.999734419263456, | |
| "eval_poi_equal": 0.7988668555240793, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9614022662889519, | |
| "eval_poi_heavy_atoms_difference": 1.1566926345609065, | |
| "eval_poi_heavy_atoms_difference_norm": 0.03523055603665878, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9614022662889519, | |
| "eval_reassembly": 0.6070290368271954, | |
| "eval_reassembly_nostereo": 0.6392528328611898, | |
| "eval_runtime": 2308.4142, | |
| "eval_samples_per_second": 4.893, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9479461756373938, | |
| "step": 95000 | |
| }, | |
| { | |
| "epoch": 9.420003945551391, | |
| "grad_norm": 0.0013870379189029336, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 95500 | |
| }, | |
| { | |
| "epoch": 9.469323337936476, | |
| "grad_norm": 0.0017357923788949847, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 96000 | |
| }, | |
| { | |
| "epoch": 9.518642730321563, | |
| "grad_norm": 0.0026179002597928047, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 96500 | |
| }, | |
| { | |
| "epoch": 9.567962122706648, | |
| "grad_norm": 0.007731061894446611, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 97000 | |
| }, | |
| { | |
| "epoch": 9.617281515091735, | |
| "grad_norm": 0.004102593753486872, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 97500 | |
| }, | |
| { | |
| "epoch": 9.617281515091735, | |
| "eval_all_ligands_equal": 0.6024256373937678, | |
| "eval_e3_equal": 0.8306480169971672, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9885800283286119, | |
| "eval_e3_heavy_atoms_difference": 0.6768767705382436, | |
| "eval_e3_heavy_atoms_difference_norm": 0.023710215006293524, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9885800283286119, | |
| "eval_has_all_attachment_points": 0.9922981586402266, | |
| "eval_has_three_substructures": 0.9996458923512748, | |
| "eval_heavy_atoms_difference": 4.722556657223796, | |
| "eval_heavy_atoms_difference_norm": 0.06399858226310057, | |
| "eval_linker_equal": 0.8600389518413598, | |
| "eval_linker_graph_edit_distance": 1.7705382436260623e+61, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.998229461756374, | |
| "eval_linker_heavy_atoms_difference": 0.1565155807365439, | |
| "eval_linker_heavy_atoms_difference_norm": -0.001739679321310486, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.998229461756374, | |
| "eval_loss": 0.37079304456710815, | |
| "eval_num_fragments": 2.9996458923512748, | |
| "eval_poi_equal": 0.7978045325779037, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.9598087818696884, | |
| "eval_poi_heavy_atoms_difference": 1.1905984419263456, | |
| "eval_poi_heavy_atoms_difference_norm": 0.0370225567595165, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.9598087818696884, | |
| "eval_reassembly": 0.6089766288951841, | |
| "eval_reassembly_nostereo": 0.6402266288951841, | |
| "eval_runtime": 2313.2584, | |
| "eval_samples_per_second": 4.883, | |
| "eval_steps_per_second": 0.077, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.9477691218130312, | |
| "step": 97500 | |
| }, | |
| { | |
| "epoch": 9.66660090747682, | |
| "grad_norm": 0.0008252999396063387, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 98000 | |
| }, | |
| { | |
| "epoch": 9.715920299861907, | |
| "grad_norm": 0.007616125047206879, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 98500 | |
| }, | |
| { | |
| "epoch": 9.765239692246992, | |
| "grad_norm": 0.007242043036967516, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 99000 | |
| }, | |
| { | |
| "epoch": 9.814559084632076, | |
| "grad_norm": 0.0036465292796492577, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 99500 | |
| }, | |
| { | |
| "epoch": 9.863878477017163, | |
| "grad_norm": 0.0049673086032271385, | |
| "learning_rate": 5e-06, | |
| "loss": 0.0003, | |
| "step": 100000 | |
| }, | |
| { | |
| "epoch": 9.863878477017163, | |
| "eval_all_ligands_equal": 0.6034879603399433, | |
| "eval_e3_equal": 0.8302939093484419, | |
| "eval_e3_graph_edit_distance": Infinity, | |
| "eval_e3_graph_edit_distance_norm": Infinity, | |
| "eval_e3_has_attachment_point(s)": 0.9926522662889519, | |
| "eval_e3_heavy_atoms_difference": 0.6158817280453258, | |
| "eval_e3_heavy_atoms_difference_norm": 0.02077531345945583, | |
| "eval_e3_tanimoto_similarity": 0.0, | |
| "eval_e3_valid": 0.9926522662889519, | |
| "eval_has_all_attachment_points": 0.9933604815864022, | |
| "eval_has_three_substructures": 0.9997344192634561, | |
| "eval_heavy_atoms_difference": 4.250354107648725, | |
| "eval_heavy_atoms_difference_norm": 0.057146879563550336, | |
| "eval_linker_equal": 0.8640226628895185, | |
| "eval_linker_graph_edit_distance": 1.593484419263456e+61, | |
| "eval_linker_graph_edit_distance_norm": Infinity, | |
| "eval_linker_has_attachment_point(s)": 0.9984065155807366, | |
| "eval_linker_heavy_atoms_difference": 0.13544617563739378, | |
| "eval_linker_heavy_atoms_difference_norm": -0.0024512414387051758, | |
| "eval_linker_tanimoto_similarity": 0.0, | |
| "eval_linker_valid": 0.9984065155807366, | |
| "eval_loss": 0.36921027302742004, | |
| "eval_num_fragments": 2.999734419263456, | |
| "eval_poi_equal": 0.7986898016997167, | |
| "eval_poi_graph_edit_distance": Infinity, | |
| "eval_poi_graph_edit_distance_norm": Infinity, | |
| "eval_poi_has_attachment_point(s)": 0.962907223796034, | |
| "eval_poi_heavy_atoms_difference": 1.0906515580736544, | |
| "eval_poi_heavy_atoms_difference_norm": 0.0332226888864283, | |
| "eval_poi_tanimoto_similarity": 0.0, | |
| "eval_poi_valid": 0.962907223796034, | |
| "eval_reassembly": 0.6101274787535411, | |
| "eval_reassembly_nostereo": 0.6440332861189801, | |
| "eval_runtime": 2315.354, | |
| "eval_samples_per_second": 4.879, | |
| "eval_steps_per_second": 0.076, | |
| "eval_tanimoto_similarity": 0.0, | |
| "eval_valid": 0.955028328611898, | |
| "step": 100000 | |
| } | |
| ], | |
| "logging_steps": 500, | |
| "max_steps": 100000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 10, | |
| "save_steps": 5000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.728946664336123e+17, | |
| "train_batch_size": 128, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |