code stringlengths 17 6.64M |
|---|
def maxpool_1_4():
    """Return a 2x2 max-pooling layer that downsamples by a factor of 4."""
    return nn.MaxPool2d(kernel_size=2, stride=4, padding=0)
|
def maxpool_1_8():
    """Return a 2x2 max-pooling layer that downsamples by a factor of 8."""
    return nn.MaxPool2d(kernel_size=2, stride=8, padding=0)
|
def maxpool_1_16():
    """Return a 2x2 max-pooling layer that downsamples by a factor of 16."""
    return nn.MaxPool2d(kernel_size=2, stride=16, padding=0)
|
def maxpool_1_32():
    """Return a 2x2 max-pooling layer that downsamples by a factor of 32.

    Bug fix: the original built the layer but never returned it, so callers
    received None (unlike the sibling maxpool_1_4/8/16 helpers).
    """
    pool = nn.MaxPool2d(kernel_size=2, stride=32, padding=0)
    return pool
|
def conv_block_3(in_dim, out_dim, act_fn):
    """Stack of three conv stages: two `conv_block` units followed by a bare
    3x3 convolution and batch norm (no activation on the final stage)."""
    layers = [
        conv_block(in_dim, out_dim, act_fn),
        conv_block(out_dim, out_dim, act_fn),
        nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_dim),
    ]
    return nn.Sequential(*layers)
|
def classificationNet(D_in):
    """Build a 3-layer MLP classifier head: D_in -> 400 -> 100 -> 1, with ReLUs
    between the linear layers."""
    hidden = 400
    out_features = 1
    layers = [
        torch.nn.Linear(D_in, hidden),
        torch.nn.ReLU(),
        torch.nn.Linear(hidden, hidden // 4),
        torch.nn.ReLU(),
        torch.nn.Linear(hidden // 4, out_features),
    ]
    return torch.nn.Sequential(*layers)
|
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='=', empty=' ', tip='>', begin='[', end=']', done='[DONE]', clear=True):
    """
    Print an in-place terminal progress bar; call in a loop.

    :param iteration: current iteration (int, required)
    :param total: total iterations (int, required)
    :param prefix: string printed before the bar
    :param suffix: string printed after the percentage
    :param decimals: number of decimals shown in the percentage
    :param length: character length of the bar
    :param fill: bar fill character (e.g. '=', '#')
    :param empty: unfilled bar character (e.g. ' ', '-')
    :param tip: character at the end of the filled part (e.g. '>', '')
    :param begin: opening bar character (e.g. '[', '|')
    :param end: closing bar character (e.g. ']', '|')
    :param done: message shown when 100% is reached
    :param clear: if True, replace the bar with `done` on completion
    """
    percent_fmt = '{0:.' + str(decimals) + 'f}'
    percent = percent_fmt.format(100 * (iteration / float(total)))
    filledLength = (length * iteration) // total
    bar = fill * filledLength
    if iteration != total:
        bar += tip
    bar += empty * (length - filledLength - len(tip))
    # '\r' returns the cursor to the line start so the bar redraws in place.
    display = '\r{prefix}{begin}{bar}{end} {percent}%{suffix}'.format(prefix=prefix, begin=begin, bar=bar, end=end, percent=percent, suffix=suffix)
    print(display, end='')
    if iteration == total:
        if clear:
            finish = '\r{prefix}{done}'.format(prefix=prefix, done=done)
            # Pad with spaces so the completion line fully overwrites the bar.
            # (The original reused the `clear` parameter for this local, shadowing it;
            # it also carried a dead Python-2 `str.decode` branch, removed here.)
            padding = ' ' * max(len(display) - len(finish), 0)
            print(finish + padding)
        else:
            print('')
|
def verbose(verboseLevel, requiredLevel, printFunc=print, *printArgs, **kwPrintArgs):
    """
    Call `printFunc(*printArgs, **kwPrintArgs)` only when `verboseLevel` reaches
    `requiredLevel`.

    For convenience, `printFunc` may itself be a message rather than a callable:
    `verbose(1, 0, "message")` prints "message", and
    `verbose(1, 0, "m1", "m2")` prints "m1 m2" — in both cases the built-in
    `print` is used and the non-callable is prepended to the positional args.
    """
    if verboseLevel < requiredLevel:
        return
    args = printArgs if printArgs is not None else ('',)
    if not callable(printFunc):
        # printFunc is actually the first thing to print.
        args = (printFunc,) + args
        printFunc = print
    printFunc(*args, **kwPrintArgs)
|
def print_flush(txt=''):
    """Print `txt` and flush stdout immediately (useful under buffered output)."""
    print(txt, flush=True)
|
def hide_cursor():
    """Hide the terminal cursor on Windows consoles and ANSI-capable terminals."""
    if os.name == 'posix':
        # ANSI escape sequence: hide cursor.
        sys.stdout.write('\x1b[?25l')
        sys.stdout.flush()
        return
    if os.name != 'nt':
        return
    info = _CursorInfo()
    kernel = ctypes.windll.kernel32
    std_handle = kernel.GetStdHandle((- 11))  # STD_OUTPUT_HANDLE
    kernel.GetConsoleCursorInfo(std_handle, ctypes.byref(info))
    info.visible = False
    kernel.SetConsoleCursorInfo(std_handle, ctypes.byref(info))
|
def show_cursor():
    """Show the terminal cursor on Windows consoles and ANSI-capable terminals."""
    if os.name == 'posix':
        # ANSI escape sequence: show cursor.
        sys.stdout.write('\x1b[?25h')
        sys.stdout.flush()
        return
    if os.name != 'nt':
        return
    info = _CursorInfo()
    kernel = ctypes.windll.kernel32
    std_handle = kernel.GetStdHandle((- 11))  # STD_OUTPUT_HANDLE
    kernel.GetConsoleCursorInfo(std_handle, ctypes.byref(info))
    info.visible = True
    kernel.SetConsoleCursorInfo(std_handle, ctypes.byref(info))
|
def main():
    """Smoke-test the OpenNMT server predictor by running the same two reactant
    multisets through it three times and printing each result."""
    predictor = reaction_predictors.OpenNMTServerPredictor()
    reactant_sets = [
        multiset.FrozenMultiset(['C[S-]', '[Mg+]c1ccc(Cl)cc1']),
        multiset.FrozenMultiset(['CCOC(=O)C1CCN(C(=O)OC(C)(C)C)CC1', 'CC(C)(C)OC(=O)N1CCNCC1']),
    ]
    for _ in range(3):
        print(predictor(reactant_sets))
|
@dataclass
class _Dataset():
    """Configuration for building one dataset: where its reactions come from,
    how to split them, and where the reactant-id mapping lives."""
    # Human-readable dataset name; used as a filename prefix for all outputs.
    name: str
    # Zero-arg callables, each returning a dataset of reactions to merge in.
    sources: typing.List[typing.Callable]
    # Maps subset name (e.g. train/valid/test) to its fraction of the data.
    split_proportions: typing.Dict[(str, float)]
    # Path to a JSON file mapping reactant SMILES to integer ids.
    reactant_to_reactant_id_json_path: str
|
def _split_on_proportions_and_save(name, proportion_dict, logger, depth_and_tree_tuples):
    """
    Partition `depth_and_tree_tuples` into contiguous subsets according to
    `proportion_dict` ({subset_name: proportion}), pickling each subset to PATH
    and logging a frequency table of tree depths per subset.

    :return: (dict subset_name -> list of indices used,
              dict subset_name -> list of (depth, tree) tuples)
    """
    length_all_data = len(depth_and_tree_tuples)
    # Bug fix: proportions are floats, so compare with a tolerance rather than
    # exact equality (e.g. 0.7 + 0.2 + 0.1 != 1.0 exactly in binary floats).
    assert abs(sum(proportion_dict.values()) - 1.0) < 1e-8, 'proportions should sum to one'
    used_so_far = 0
    out_dict = {}
    out_trees_dict = {}
    for (subset_name, proportion) in proportion_dict.items():
        # Ceil so each subset gets at least its share; clamp to the data length.
        number_to_use = int(np.ceil((proportion * length_all_data)))
        end_indx = min((used_so_far + number_to_use), length_all_data)
        indices = list(range(used_so_far, end_indx))
        depth_and_trees_for_subset = [depth_and_tree_tuples[i] for i in indices]
        out_trees_dict[subset_name] = depth_and_trees_for_subset
        misc.to_pickle(depth_and_trees_for_subset, path.join(PATH, f'{name}-{subset_name}-depth_and_tree_tuples.pick'))
        # Log how many trees of each depth ended up in this subset.
        depths = collections.Counter([el[0] for el in depth_and_trees_for_subset])
        out_table = tabulate.tabulate((([('Number of levels', 'Freq')] + sorted(list(depths.items()))) + [('Total', len(depth_and_trees_for_subset))]))
        logger.info(f'''For name: {name}, subset: {subset_name}, the tree levels are:
{out_table}''')
        out_dict[subset_name] = indices
        used_so_far = end_indx
    return (out_dict, out_trees_dict)
|
def _create_equiv_train_val_sets_and_save(name, out_trees_dict, reactants_list, name_of_train):
    """Write one SMILES-per-line file per subset; the training subset also gets
    every building-block reactant appended. Returns the per-subset SMILES lists."""
    def _root_smiles(depth_tree_pairs):
        # Each element is (depth, tuple_tree); the tree's root SMILES is tree[0].
        return [pair[1][0] for pair in depth_tree_pairs]
    out_smiles_lists = {name_of_train: _root_smiles(out_trees_dict[name_of_train])}
    out_smiles_lists[name_of_train].extend(list(reactants_list))
    for subset in (set(out_trees_dict.keys()) - {name_of_train}):
        out_smiles_lists[subset] = _root_smiles(out_trees_dict[subset])
    for (subset_name, subset_smiles) in out_smiles_lists.items():
        with open(path.join(PATH, f'{name}-{subset_name}-equiv_smiles.txt'), 'w') as fo:
            fo.writelines('\n'.join(subset_smiles))
    return out_smiles_lists
|
def main(params):
    """Build each configured dataset: merge reactions from all sources, build the
    mega-graph, extract tuple trees, split/save all artifacts, then checksum them.

    `params` is a dict with key 'datasets' -> iterable of `_Dataset` configs.
    """
    # Fixed seed so shuffles (and hence splits) are reproducible across runs.
    rng = np.random.RandomState(89424798)
    # Log everything both to the console and to a timestamped file in PATH.
    log_hndlr_stream = logging.StreamHandler()
    log_hndlr_stream.setLevel(logging.DEBUG)
    log_handlr_file = logging.FileHandler(path.join(PATH, f'create_datasets_{datetime.datetime.now().isoformat()}.log'))
    log_handlr_file.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_hndlr_stream.setFormatter(formatter)
    log_handlr_file.setFormatter(formatter)
    dag_extraction_utils.logger.addHandler(log_hndlr_stream)
    dag_extraction_utils.logger.addHandler(log_handlr_file)
    dag_extraction_utils.logger.setLevel(logging.DEBUG)
    for ds in params['datasets']:
        dag_extraction_utils.logger.info(f'''
# Working on {ds.name}''')
        reactant_to_reactant_id_map = dict(misc.load_json(ds.reactant_to_reactant_id_json_path))
        # Merge reactions from every source, deduplicating via the set.
        all_reactions = set()
        for ds_creator in ds.sources:
            dataset = ds_creator()
            (reactions, *_) = dag_extraction_utils.extract_reactions(dataset)
            all_reactions.update(set(reactions))
        all_reactions = list(all_reactions)
        rng.shuffle(all_reactions)
        dag_extraction_utils.logger.info(f'Finished merging all reaction sets, left with {len(all_reactions)} total reactions.')
        # Build the reaction mega-graph and pull out individual synthesis trees.
        mega_graph = dag_extraction_utils.create_mega_graph(all_reactions, reactant_to_reactant_id_map)
        (depth_and_tree_tuples, _) = dag_extraction_utils.extract_tuple_trees_from_mega_dag(mega_graph, reactant_to_reactant_id_map)
        rng.shuffle(depth_and_tree_tuples)
        # Persist the graph and raw reactions before splitting.
        nx.write_gpickle(mega_graph, path.join(PATH, f'{ds.name}-mgraph.gpickle'))
        misc.to_pickle(all_reactions, path.join(PATH, f'{ds.name}-reactions.pick'))
        (indics, out_trees_dict) = _split_on_proportions_and_save(ds.name, ds.split_proportions, dag_extraction_utils.logger, depth_and_tree_tuples)
        misc.to_pickle({'all_depth_and_tree_tuples': depth_and_tree_tuples, 'subset_indices': indics}, path.join(PATH, f'{ds.name}-all_depth_and_tree_tuples.pick'))
        _create_equiv_train_val_sets_and_save(ds.name, out_trees_dict, list(reactant_to_reactant_id_map.keys()), NAME_FOR_TRAIN)
    # Checksum every produced file for provenance. NOTE(review): shell=True with
    # an f-string built from PATH — fine for a trusted internal path, but would
    # be injectable if PATH ever came from user input.
    subprocess.run(f'cd {PATH}; shasum -a 256 * > {datetime.datetime.now().isoformat()}_data_checklist.sha256', shell=True)
|
@TaskGenerator
def run_ft(task):
    """Run hill-climbing fine-tuning for `task`, using the module-level
    `weight_path` checkpoint, and return its result."""
    hc_params = run_hillclimbing.Params(task, weight_path)
    return run_hillclimbing.main(hc_params)
|
class Params():
    """Run configuration for sampling from a trained DoG-Gen checkpoint."""
    def __init__(self, weight_path: str):
        self.device = settings.torch_device()
        self.weight_path = weight_path
        # Timestamped run name derived from the checkpoint file stem.
        stamp = strftime('%y-%m-%d_%H:%M:%S', gmtime())
        weights_stem = path.splitext(path.basename(self.weight_path))[0]
        self.run_name = f'doggen_sampling_on_weights_{weights_stem}_run_at_{stamp}'
        print(f'''Run name is {self.run_name}
''')
        self.batch_size = 200
        self.num_batches = 100
        self.log_for_reaction_predictor_path = path.join('logs', f'reactions-{self.run_name}.log')
|
def main(params: Params):
    """Sample synthesis trees from a trained DoG-Gen model, pickling the trees
    plus their log-probabilities and writing the root SMILES to a text file."""
    (model, collate_func, other_parts) = doggen_utils.load_doggen_model(params.device, params.log_for_reaction_predictor_path, weight_path=params.weight_path)
    sampled_trees = []
    log_prob_chunks = []
    for _ in tqdm(range(params.num_batches)):
        (trees, log_probs) = model.sample(params.batch_size)
        sampled_trees.extend(trees)
        log_prob_chunks.append(log_probs.detach().cpu().numpy().T)
    log_probs_all = np.concatenate(log_prob_chunks)
    with open(path.join(OUT_DIR, f'{params.run_name}.pick'), 'wb') as fo:
        pickle.dump(dict(all_log_probs=log_probs_all, all_syn_trees=sampled_trees), fo)
    root_smiles = [tree.root_smi for tree in sampled_trees]
    with open(path.join(OUT_DIR, f'{params.run_name}_smiles.txt'), 'w') as fo:
        fo.writelines('\n'.join(root_smiles))
|
def filter_valid_and_map_to_ms(smiles_in):
    """Canonicalise each SMILES line into a frozen multiset, silently dropping
    lines that fail canonicalisation entirely."""
    valid = []
    for line in tqdm.tqdm(smiles_in, desc='ValidFilter'):
        try:
            valid.append(_put_line_into_canonical_multiset(line))
        except CanonicaliseError:
            continue
    return valid
|
class UniquenessCheck():
    """
    Fraction of generated molecule multisets that are unique: a multiset counts
    as unique if it contains at least one molecule not generated so far.
    """
    def __call__(self, valid_molecule_bags: typing.List[multiset.BaseMultiset]):
        seen = set()
        unique_bags = []
        for bag in tqdm.tqdm(valid_molecule_bags, desc='uniqueness check'):
            # Decide before registering this bag's molecules as seen.
            has_new_molecule = any(mol not in seen for mol in bag)
            seen.update(bag.distinct_elements())
            if has_new_molecule:
                unique_bags.append(bag)
        return len(unique_bags) / len(valid_molecule_bags)
|
class NoveltyCheck():
    """
    Fraction of generated molecule multisets that are novel: a multiset counts
    as novel if at least one of its molecules is absent from the training data.
    """
    def __init__(self, training_canonical_smiles: typing.List[str]):
        self.training_smiles = set(training_canonical_smiles)
    def __call__(self, valid_molecule_bags: typing.List[multiset.BaseMultiset]):
        novel_count = 0
        for bag in tqdm.tqdm(valid_molecule_bags, desc='novelty check'):
            if any(mol not in self.training_smiles for mol in bag):
                novel_count += 1
        return novel_count / len(valid_molecule_bags)
|
class FCDCheck():
    '\n    Computes the Fréchet ChemNet Distance between molecules in the training set and those generated.\n\n    Note 1. this is estimated from samples. For models that do not produce many valid molecules the variance is likely to\n    be quite high.\n    Note 2. We do not follow GuacaMol by returning "FCD Score" as exp(-0.2*FCD) (see eqn 2 of their paper). We return\n    the FCD as it is.\n    '
    def __init__(self, training_smi: typing.List[str], sample_size=10000):
        self.fcd_scorer = frechet_benchmark.FrechetBenchmark(training_smi, sample_size=sample_size)
    @lazy
    def chemnet(self):
        # ChemNet model used to embed molecules; loaded lazily on first access.
        return self.fcd_scorer._load_chemnet()
    @lazy
    def cached_ref_stats(self):
        # Mean/covariance of ChemNet activations on the reference (training)
        # molecules — computed once and cached across __call__ invocations.
        (mu, cov) = self.fcd_scorer._calculate_distribution_statistics(self.chemnet, self.fcd_scorer.reference_molecules)
        return (mu, cov)
    def __call__(self, valid_molecule_bags: typing.List[multiset.BaseMultiset]):
        # Use fewer samples than configured if not enough bags are available.
        if (not (len(valid_molecule_bags) >= self.fcd_scorer.sample_size)):
            print(f'less samples than ideal... @{len(valid_molecule_bags)}')
            sample_size = len(valid_molecule_bags)
        else:
            sample_size = self.fcd_scorer.sample_size
        samples_bags = random.sample(valid_molecule_bags, sample_size)
        # One molecule per bag: pick a random distinct element from each sampled bag.
        samples = []
        for s in samples_bags:
            samples.append(random.choice(list(s.distinct_elements())))
        chemnet = self.chemnet
        print('FCD: calculating dist stats on training data...')
        (mu_ref, cov_ref) = self.cached_ref_stats
        print('FCD: calculating dist stats on new generated molecules...')
        (mu, cov) = self.fcd_scorer._calculate_distribution_statistics(chemnet, samples)
        print('FCD: ... computed stats!')
        # Fréchet distance between the two Gaussian activation distributions.
        FCD = fcd.calculate_frechet_distance(mu1=mu_ref, mu2=mu, sigma1=cov_ref, sigma2=cov)
        return FCD
|
class QualityFiltersCheck():
    '\n    These are the Quality Filters proposed in the GuacaMol paper, which try to rule out " compounds which are\n    potentially unstable, reactive, laborious to synthesize, or simply unpleasant to the eye of medicinal chemists."\n\n    The filter rules are from the GuacaMol supplementary material: https://pubs.acs.org/doi/10.1021/acs.jcim.8b00839\n    The filter code is from: https://github.com/PatWalters/rd_filters\n    Parts of the code below have been taken from the script in this module. This code put in this\n    class came with this MIT Licence:\n\n    MIT License\n\n    Copyright (c) 2018 Patrick Walters\n\n    Permission is hereby granted, free of charge, to any person obtaining a copy\n    of this software and associated documentation files (the "Software"), to deal\n    in the Software without restriction, including without limitation the rights\n    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n    copies of the Software, and to permit persons to whom the Software is\n    furnished to do so, subject to the following conditions:\n\n    The above copyright notice and this permission notice shall be included in all\n    copies or substantial portions of the Software.\n\n    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n    SOFTWARE.\n\n    '
    def __init__(self, training_data_smi: typing.List[str]):
        # Load the alert (structural-filter) definitions and the rule config.
        alert_file_name = path.join(THIS_FILE_DIR, 'quality_filters', 'alert_collection.csv')
        self.rf = rd_filters.RDFilters(alert_file_name)
        rules_file_path = path.join(THIS_FILE_DIR, 'quality_filters', 'rules.json')
        rule_dict = rd_filters.read_rules(rules_file_path)
        rule_list = [x.replace('Rule_', '') for x in rule_dict.keys() if (x.startswith('Rule') and rule_dict[x])]
        rule_str = ' and '.join(rule_list)
        print(f'Using alerts from {rule_str}', file=sys.stderr)
        self.rf.build_rule_list(rule_list)
        self.rule_dict = rule_dict
        self.training_data_smi = training_data_smi
    @lazy
    def _training_data_prop(self):
        # Pass rate on the training data itself; used to normalize scores.
        training_data_qulaity_filters = self.call_on_smiles_no_normalization(self.training_data_smi)
        print(f'Training data filters returned {training_data_qulaity_filters}. Rest normalized on this.')
        return training_data_qulaity_filters
    def __call__(self, valid_molecule_bags: typing.List[multiset.BaseMultiset]):
        """Return the filter pass rate of the generated bags, normalized by the
        training data's own pass rate."""
        smiles = ['.'.join(elem) for elem in valid_molecule_bags]
        return (self.call_on_smiles_no_normalization(smiles) / self._training_data_prop)
    def call_on_smiles_no_normalization(self, smiles: typing.List[str]):
        """Evaluate the RD filters plus property-range rules on `smiles`,
        returning the unnormalized fraction that pass."""
        # Bug fix: the original printed "using 4 cores" but created the pool with
        # mp.cpu_count() workers — report the count actually used.
        num_cores = mp.cpu_count()
        print(f'using {num_cores} cores', file=sys.stderr)
        start_time = time.time()
        p = Pool(num_cores)
        try:
            num_smiles_in = len(smiles)
            input_data = [(smi, f'MOL_{i}') for (i, smi) in enumerate(smiles)]
            res = list(p.map(self.rf.evaluate, input_data))
        finally:
            # Always release the worker processes, even if evaluation raises.
            p.close()
        df = pd.DataFrame(res, columns=['SMILES', 'NAME', 'FILTER', 'MW', 'LogP', 'HBD', 'HBA', 'TPSA'])
        # Keep rows that pass the structural filter AND fall in every property range.
        df_ok = df[((((((df.FILTER == 'OK') & df.MW.between(*self.rule_dict['MW'])) & df.LogP.between(*self.rule_dict['LogP'])) & df.HBD.between(*self.rule_dict['HBD'])) & df.HBA.between(*self.rule_dict['HBA'])) & df.TPSA.between(*self.rule_dict['TPSA']))]
        num_input_rows = df.shape[0]
        num_output_rows = df_ok.shape[0]
        fraction_passed = '{:.1f}'.format(((num_output_rows / num_input_rows) * 100.0))
        print(f'{num_output_rows} of {num_input_rows} passed filters {fraction_passed}%', file=sys.stderr)
        elapsed_time = '{:.2f}'.format((time.time() - start_time))
        print(f'Elapsed time {elapsed_time} seconds', file=sys.stderr)
        return (num_output_rows / num_smiles_in)
|
class CanonicaliseError(Exception):
    """Raised when a SMILES string is empty or none of its molecules can be canonicalised."""
    pass
|
def _put_line_into_canonical_multiset(reaction_smi_str: str) -> multiset.FrozenMultiset:
    """
    Split a dot-separated SMILES string into individual molecules and
    canonicalise each one.

    Molecules that fail canonicalisation are skipped.
    :raises CanonicaliseError: if the string is empty or no molecule could be
        canonicalised.
    """
    if (len(reaction_smi_str) == 0):
        raise CanonicaliseError
    all_smi = reaction_smi_str.split('.')
    canoncial_smi = []
    for mol_smi in all_smi:
        try:
            canoncial_smi.append(rdkit_general_ops.canconicalize(mol_smi))
        except Exception:
            # Bug fix: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Failed molecules are best-effort skipped.
            pass
    if len(canoncial_smi):
        assert len(canoncial_smi[0]), f'Empty string passed through canonical function{canoncial_smi}'
        return multiset.FrozenMultiset(canoncial_smi)
    else:
        raise CanonicaliseError
|
def _read_in_text_of_smiles(filename):
with open(filename, 'r') as fo:
smiles = fo.readlines()
smiles = [elem.strip() for elem in smiles]
return smiles
|
class Params():
    """Evaluation configuration: parses CLI args and loads the training SMILES."""
    def __init__(self):
        cli_args = docopt(__doc__)
        self.experiments_config = cli_args['--config']
        self.training_trees = train_utils.load_tuple_trees('../../dataset_creation/data/uspto-train-depth_and_tree_tuples.pick', np.random.RandomState(10))
        self.training_data_smi_list = self._get_training_data()
    def _get_training_data(self):
        """Flatten the tuple trees into a sorted, deduplicated list of canonical SMILES."""
        molecules = set()
        def _collect(node):
            # Tuple trees are nested (smiles, children) structures; strings are the molecules.
            if isinstance(node, str):
                molecules.add(node)
            elif isinstance(node, (tuple, list)):
                for child in node:
                    _collect(child)
            else:
                raise RuntimeError
        _collect(self.training_trees)
        canonical = [rdkit_general_ops.canconicalize(smi) for smi in tqdm.tqdm(molecules, desc='ensuring molecules canonical')]
        return sorted(set(canonical))
|
def main(params: Params):
    """Build and print metric tables (validity, novelty, uniqueness, FCD, quality
    filters) for each generated-SMILES file listed in the JSON experiments config."""
    print('setting up metrics')
    # Fixed seeds so sampling inside the metrics (e.g. FCD) is reproducible.
    rng = np.random.RandomState(484)
    random.seed(rng.choice(54156))
    # Metrics below are computed only on the valid (canonicalisable) subset.
    metrics_conditioned_on_valid = {'novelty': NoveltyCheck(params.training_data_smi_list), 'uniqueness': UniquenessCheck(), 'FCD': FCDCheck(params.training_data_smi_list), 'quality_filters': QualityFiltersCheck(params.training_data_smi_list)}
    def _create_metrics_part_of_row(smiles: typing.List[str], metrics, num_generated):
        # Compute one table row: a value per requested metric name.
        row = []
        if (len(smiles) != num_generated):
            warnings.warn(f'Number generated @{num_generated} different to num passed in @{len(smiles)}')
        valid_ms = filter_valid_and_map_to_ms(smiles_in=smiles)
        for met in metrics:
            if (met == 'validity'):
                # Validity is relative to the number the model attempted to generate.
                row.append((len(valid_ms) / num_generated))
            elif (met == 'num_generated'):
                row.append(len(smiles))
            else:
                row.append(metrics_conditioned_on_valid[met](valid_ms))
        return row
    print('Reading config')
    with open(params.experiments_config, 'r') as fo:
        running_config = json.load(fo)
    print(running_config)
    print('\n\n\n')
    data_dir = running_config['data_dir']
    table_format = running_config['table_format']
    for (i, tables) in enumerate(running_config['tables_to_create']):
        print(f'''
==== Creating table {i} ====''')
        rows_all = []
        header = (['Method'] + tables['metrics'])
        # First row: the training data itself, as a reference point.
        rows_all.append((['training data'] + _create_metrics_part_of_row(params.training_data_smi_list, tables['metrics'], len(params.training_data_smi_list))))
        # One row per method: (file location, number of molecules generated).
        for (dataset_name, (location, num_generated)) in tables['rows'].items():
            row = [dataset_name]
            print(f'Reading in data for {dataset_name}')
            smiles = _read_in_text_of_smiles(path.join(data_dir, location))
            row += _create_metrics_part_of_row(smiles, tables['metrics'], num_generated)
            rows_all.append(row)
        print('\n\n\n')
        print(tabulate.tabulate(rows_all, headers=header, tablefmt=table_format))
|
class Params():
    """Run configuration for drawing prior samples from a trained checkpoint."""
    def __init__(self, weight_path):
        self.device = settings.torch_device()
        self.weight_path = weight_path
        self.batch_size = 200
        self.num_batches = 100
        # Timestamped run name derived from the checkpoint file stem.
        stamp = strftime('%y-%m-%d_%H:%M:%S', gmtime())
        weights_stem = path.splitext(path.basename(self.weight_path))[0]
        self.run_name = f'prior_samples_for_{weights_stem}_done_{stamp}_'
        print(f'Run name is {self.run_name}')
        print(f'Checkpoint name is {self.weight_path}')
|
def main(params: Params):
    """Sample tuple trees from the DoG-AE prior, saving the trees, their latent
    codes and the root SMILES strings."""
    rng = np.random.RandomState(564165)
    torch.manual_seed(15616)
    log_path = path.join('logs', f'reactions-{params.run_name}.log')
    (model, __collate_func, *_) = dogae_utils.load_dogae_model(params.device, log_path, weight_path=params.weight_path)
    sampled_trees = []
    latent_chunks = []
    for _ in tqdm(range(params.num_batches), desc='sampling a batch'):
        (batch_trees, z, _) = dogae_utils.sample_n_from_prior(model, params.batch_size, rng, return_extras=True)
        latent_chunks.append(z.detach().cpu().numpy())
        sampled_trees.extend(batch_trees)
    latents = np.concatenate(latent_chunks, axis=0)
    pickle_name = path.join(SAMPLE_DIR, f'out_trees_{params.run_name}.pick')
    root_smi_txt_name = path.join(SAMPLE_DIR, f'out_final_smi_{params.run_name}.txt')
    misc.to_pickle({'tuple_trees': sampled_trees, 'all_z': latents}, pickle_name)
    print(f'Saving trees to {pickle_name}')
    # Root SMILES is the first element of each tuple tree.
    root_smiles = [tree[0] for tree in sampled_trees]
    with open(root_smi_txt_name, 'w') as fo:
        fo.writelines('\n'.join(root_smiles))
    print(f'writing the SMILES out into {root_smi_txt_name}')
|
def tuple_tree_to_nx(tuple_tree):
    """Convert a nested (node, children) tuple tree into a networkx DiGraph
    with edges pointing from parent to child."""
    graph = nx.DiGraph()
    def _walk(subtree, parent):
        node = subtree[0]
        graph.add_node(node)
        if parent is not None:
            graph.add_edge(parent, node)
        for child in subtree[1]:
            _walk(child, node)
    _walk(tuple_tree, None)
    return graph
|
def _get_leaf_nodes(tuple_tree):
    """Return the set of leaf nodes (nodes with no outgoing edges) of the tree."""
    graph = tuple_tree_to_nx(tuple_tree)
    return {node for node in graph if graph.out_degree(node) == 0}
|
def convert_tuple_tree_to_js(tuple_tree):
    """
    Convert a (smiles, children) tuple tree into the JSON node structure used by
    the JS tree-plotting template; image paths are keyed by InChIKey.

    :return: (json_string, set of all SMILES encountered)
    """
    def format_level(node, smi_list):
        # Renamed from `tuple`, which shadowed the builtin type.
        (smi, children) = node
        smi_list.append(smi)
        mol = Chem.MolFromSmiles(smi)
        inchi = Chem.MolToInchiKey(mol)
        out_dict = {'image': f'imgs/{inchi}.svg'}
        if len(children):
            out_dict['children'] = [format_level(child, smi_list) for child in children]
            out_dict['collapsed'] = False
        return out_dict
    all_smi = []
    out_dict = format_level(tuple_tree, all_smi)
    # The root is always shown expanded; the JS template does not expect the key there.
    out_dict.pop('collapsed')
    all_smi = set(all_smi)
    return (json.dumps(out_dict), all_smi)
|
def main(tuple_tree):
    """Render `tuple_tree` as an interactive HTML page plus one SVG per molecule.

    Writes a timestamped plot_*.html into OP_path and draws each molecule's
    structure into OP_path/imgs/<InChIKey>.svg (the filenames the JS template
    references).
    """
    smiles_to_draw = set()
    (node_structure1, smiles) = convert_tuple_tree_to_js(tuple_tree)
    smiles_to_draw.update(smiles)
    # Fill the HTML template with the JSON tree structure.
    with open(path.join(OP_path, f"plot_{time.strftime('%y-%m-%d_%H:%M:%S', time.gmtime())}.html"), 'w') as fo:
        fo.write(template_.substitute(node_structure=node_structure1))
    os.makedirs(path.join(OP_path, 'imgs'), exist_ok=True)
    # Draw every distinct molecule once, named by its InChIKey.
    for smi in smiles_to_draw:
        mol = Chem.MolFromSmiles(smi)
        inchi_key = Chem.MolToInchiKey(mol)
        op_path = path.join(OP_path, 'imgs', f'{inchi_key}.svg')
        print(f'Saving {smi} to {op_path}')
        Draw.MolToFile(mol, op_path, size=(200, 200), imageType='svg', useBWAtomPalette=True)
    print('Done!')
|
def get_atom_map_nums(rxn_str) -> typing.Set[int]:
    """Return the atom-mapping numbers of all atoms in the given SMILES string.

    NOTE(review): every atom is assumed to carry a molAtomMapNumber property;
    an unmapped atom would raise KeyError — confirm inputs are fully mapped.
    """
    mol = Chem.MolFromSmiles(rxn_str)
    return {atom.GetPropsAsDict()['molAtomMapNumber'] for atom in mol.GetAtoms()}
|
def get_mol_props(mol: AllChem.Mol):
    """Return [logP, TPSA, Hall-Kier alpha, molar refractivity, Labute ASA] for `mol`."""
    return [
        Descriptors.MolLogP(mol),
        rdMolDescriptors.CalcTPSA(mol),
        rdMolDescriptors.CalcHallKierAlpha(mol),
        Descriptors.MolMR(mol),
        rdMolDescriptors.CalcLabuteASA(mol),
    ]
|
def get_molecule(molecule_strs, kekulize=True) -> AllChem.Mol:
    """Parse a SMILES string into an RDKit molecule, kekulizing it in place by default."""
    mol = Chem.MolFromSmiles(molecule_strs)
    if not kekulize:
        return mol
    Chem.Kekulize(mol)
    return mol
|
def add_atom_mapping(mol) -> typing.Tuple[(AllChem.Mol, dict)]:
    """Set each atom's atom-map number to its index; return (mol, map-number -> index)."""
    mapping = {}
    for (idx, atom) in enumerate(mol.GetAtoms()):
        atom.SetProp('molAtomMapNumber', str(idx))
        mapping[idx] = idx  # map number equals atom index by construction
    return (mol, mapping)
|
def get_atoms_names_charge_and_h_count(mol, atom_mappings: set) -> typing.Mapping[(int, tuple)]:
    """
    For every atom whose map number is in `atom_mappings`, collect
    (symbol, formal charge, total H count), keyed by map number.

    NOTE: consumes `atom_mappings` in place — found entries are removed, and the
    scan stops early once the set is empty.
    """
    found = {}
    for atom in mol.GetAtoms():
        map_num = atom.GetPropsAsDict()['molAtomMapNumber']
        if map_num not in atom_mappings:
            continue
        found[map_num] = (atom.GetSymbol(), atom.GetFormalCharge(), atom.GetTotalNumHs())
        atom_mappings.remove(map_num)
        if not atom_mappings:
            break
    return found
|
def create_atom_map_indcs_map(mol) -> typing.Mapping[(int, int)]:
    """Return a dictionary mapping atom-map number -> rdkit atom index."""
    mapping = {}
    for (idx, atom) in enumerate(mol.GetAtoms()):
        mapping[atom.GetPropsAsDict()['molAtomMapNumber']] = idx
    return mapping
|
def get_bond_double_between_atom_mapped_atoms(mol, am_start, am_end, am_to_indcs_map=None) -> float:
    """
    Return the bond order (as a double) between the atoms with the given
    atom-map numbers, or 0. when no bond exists / a map number is unknown.

    :param am_to_indcs_map: atom-map -> atom-index map (built from `mol` if not given)
    """
    # Bug fix: the original used `am_to_indcs_map or create_...`, which also
    # rebuilt the map when an (intentionally) empty dict was passed in.
    if am_to_indcs_map is None:
        am_to_indcs_map = create_atom_map_indcs_map(mol)
    try:
        new_bond = get_bond_between_indx_atoms(mol, am_to_indcs_map[am_start], am_to_indcs_map[am_end])
    except KeyError:
        # One of the map numbers is not present in the map.
        new_bond = 0.0
    return new_bond
|
def get_bond_between_indx_atoms(mol, idx_start, idx_end) -> float:
    """Return the bond order between the two atoms at the given rdkit indices,
    or 0. if they are not bonded."""
    bond = mol.GetBondBetweenAtoms(idx_start, idx_end)
    if bond is None:
        return 0.0
    return bond.GetBondTypeAsDouble()
|
def return_canoncailised_smiles_str(molecule, remove_am=True, allHsExplicit=False, kekuleSmiles=True) -> str:
    """Serialise an RDKit molecule to a canonical SMILES string, working on a
    copy and optionally stripping atom-map numbers first."""
    working_copy = Chem.RWMol(molecule)
    if remove_am:
        for atom in working_copy.GetAtoms():
            atom.ClearProp('molAtomMapNumber')
    return Chem.MolToSmiles(working_copy, allHsExplicit=allHsExplicit, kekuleSmiles=kekuleSmiles, canonical=True)
|
def canconicalize(mol_smi, remove_am=False):
    """Canonicalise a SMILES string (non-kekulized), optionally removing atom maps."""
    mol = get_molecule(mol_smi, kekulize=False)
    return return_canoncailised_smiles_str(mol, kekuleSmiles=False, remove_am=remove_am)
|
def get_fingerprint_as_array(molecule, radius=4, nbits=2048):
    """Return the Morgan fingerprint of `molecule` as a numpy array."""
    fingerprint = AllChem.GetMorganFingerprintAsBitVect(molecule, radius=radius, nBits=nbits)
    out = np.zeros((1,))
    DataStructs.ConvertToNumpyArray(fingerprint, out)
    return out
|
def split_reagents_out_from_reactants_and_products(reactant_all_str: str, product_all_str: str, action_set: set) -> typing.Tuple[(str, str, str)]:
    """
    Separate true reactants from reagents.

    A candidate reactant is classed as a reagent when (a) none of its atoms
    appear in the products or the reaction centre, or (b) it is itself one of
    the products, in which case that product is also dropped from the returned
    product string.

    :param reactant_all_str: SMILES string of all reactants -- individual reactants seperated by dots.
    :param product_all_str: SMILES string of all products -- individual reactants seperated by dots.
    :param action_set: list of atoms involved in reaction
    :return: (reactants_smiles, reagents_smiles, remaining_products_smiles)
    """
    reactants_str = reactant_all_str.split('.')
    products_str = product_all_str.split('.')
    # Canonical product forms, for the "reactant passes through unchanged" test.
    # (Cleanup: reuse products_str rather than re-splitting; drop the unused
    # product_smiles_set local the original built.)
    products_str_canon_set = set(map(canconicalize, products_str))
    products_to_keep = set(products_str)
    # Union of atom-map numbers across all products.
    product_atom_map_nums = functools.reduce((lambda x, y: (x | y)), (get_atom_map_nums(prod) for prod in products_str))
    actions_atom_map_nums = action_set
    reactants = []
    reagents = []
    for candidate_reactant in reactants_str:
        atom_map_nums = get_atom_map_nums(candidate_reactant)
        in_product = product_atom_map_nums & atom_map_nums
        in_center = actions_atom_map_nums & atom_map_nums
        if (not in_product) and (not in_center):
            # (a) contributes no atoms to the products or the reaction centre.
            reagents.append(candidate_reactant)
        elif canconicalize(candidate_reactant) in products_str_canon_set:
            # (b) comes out the other side unchanged: reagent, and not a real product.
            reagents.append(candidate_reactant)
            products_to_keep -= {candidate_reactant}
        else:
            reactants.append(candidate_reactant)
    product_all_str = '.'.join(products_to_keep)
    return ('.'.join(reactants), '.'.join(reagents), product_all_str)
|
class DatasetPartitions(enum.Enum):
    """The standard dataset splits; values double as the data-file name stems."""
    TRAIN = 'train'
    VALID = 'valid'
    TEST = 'test'
|
class UsptoDataset(data.Dataset):
    """USPTO reaction dataset: each line of the partition file holds a reaction
    SMILES ('reactants>>products') followed by a bond-changes string."""
    def __init__(self, dataset_partition: general.DatasetPartitions, transforms=None):
        """Load the lines of the partition's text file into memory.

        :param dataset_partition: which split to load (its value names the file).
        :param transforms: optional callable applied to (reactants, products, bond_changes).
        """
        uspto_path = path.join(settings.get_repo_path(), settings.get_config().get('DataDirectories', 'uspto'), f'{dataset_partition.value}.txt')
        with open(uspto_path, 'r') as fo:
            data = fo.readlines()
        self.reaction_lines = data
        self.transforms = transforms
    def __getitem__(self, idx: int):
        """Return (reactants, products, bond_changes), transformed if configured."""
        smiles = self.reaction_lines[idx]
        # Line format: '<reactants>>products> <bond_changes>'.
        (rest, bond_changes) = smiles.split()
        (reactants, products) = rest.split('>>')
        return_val: typing.Tuple[str, str, str] = (reactants, products, bond_changes)
        if (self.transforms is not None):
            return_val = self.transforms(*return_val)
        return return_val
    def __len__(self):
        return len(self.reaction_lines)
|
def actionset_from_uspto_line(change_str):
    """Parse a USPTO bond-changes string like '1-2;3-4' into the set of atom
    indices involved, e.g. {1, 2, 3, 4}."""
    return {int(atom) for change in change_str.split(';') for atom in change.split('-')}
|
class AtmFeaturizer():
    '\n    See Table 1 of Gilmer et al,  https://arxiv.org/pdf/1704.01212.pdf\n    '
    def __init__(self, atms: typing.List[str]):
        """:param atms: the atom symbols to one-hot encode (in order)."""
        self.atms_to_idx = dict(zip(atms, range(len(atms))))
        self.number_atom_options = len(self.atms_to_idx)
        # Only SP/SP2/SP3 hybridizations are one-hot encoded; others map to None.
        self.hyb_mapping = {Chem.rdchem.HybridizationType.SP: 0, Chem.rdchem.HybridizationType.SP2: 1, Chem.rdchem.HybridizationType.SP3: 2}
        self.number_hyb_options = len(self.hyb_mapping)
        # RDKit chemical-feature factory, used for acceptor/donor detection.
        self.fdef_name = os.path.join(RDDataDir, 'BaseFeatures.fdef')
        self.feats_factory = ChemicalFeatures.BuildFeatureFactory(self.fdef_name)
    def atom_to_feat(self, atm: Chem.Atom, owning_mol: Chem.Mol, idx):
        """Build the feature vector for one atom.

        Layout: [atom-symbol one-hot | atomic number | is-acceptor | is-donor |
        hybridization one-hot | is-aromatic | num implicit Hs] (see __len__).
        """
        this_atms_idx = atm.GetIdx()
        assert (idx == this_atms_idx)
        feat = torch.zeros(len(self), dtype=torch.float32)
        try:
            feat[self.atms_to_idx[atm.GetSymbol()]] = 1.0
        except KeyError as ex:
            # Unknown symbols simply get an all-zero one-hot section.
            warnings.warn(f'Ignoring the symbol {atm.GetSymbol()}')
        idx_up_to = self.number_atom_options
        feat[idx_up_to] = float(atm.GetAtomicNum())
        idx_up_to += 1
        (acceptor_ids, donor_ids) = self.get_acceptor_and_donor_ids(owning_mol)
        feat[idx_up_to] = float((this_atms_idx in acceptor_ids))
        idx_up_to += 1
        feat[idx_up_to] = float((this_atms_idx in donor_ids))
        idx_up_to += 1
        hyb_idx = self.hybridization(atm.GetHybridization())
        if (hyb_idx is not None):
            feat[(idx_up_to + hyb_idx)] = 1.0
        idx_up_to += self.number_hyb_options
        feat[idx_up_to] = float(atm.GetIsAromatic())
        idx_up_to += 1
        feat[idx_up_to] = float(atm.GetNumImplicitHs())
        idx_up_to += 1
        return feat
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # instances alive for the cache lifetime (ruff B019); presumably acceptable
    # here since featurizers are long-lived — confirm.
    @functools.lru_cache(maxsize=10)
    def get_acceptor_and_donor_ids(self, molecule: Chem.Mol):
        """Return (acceptor_atom_ids, donor_atom_ids) for `molecule`, cached per molecule."""
        feats = self.feats_factory.GetFeaturesForMol(molecule)
        acceptor_ids = set(itertools.chain(*[x.GetAtomIds() for x in feats if (x.GetFamily() == 'Acceptor')]))
        donor_ids = set(itertools.chain(*[x.GetAtomIds() for x in feats if (x.GetFamily() == 'Donor')]))
        return (acceptor_ids, donor_ids)
    def hybridization(self, hybridization_type):
        """Map an RDKit hybridization type to its one-hot slot, or None if unencoded."""
        return self.hyb_mapping.get(hybridization_type, None)
    def __len__(self):
        # +5 scalar slots: atomic number, acceptor, donor, aromatic, implicit Hs.
        return ((self.number_atom_options + 5) + self.number_hyb_options)
|
class BondFeaturizer():
    """One-hot featurizer for bond types (single / double / triple / aromatic)."""

    def __init__(self):
        self.bond_type_to_oh_loc = {Chem.BondType.SINGLE: 0, Chem.BondType.DOUBLE: 1, Chem.BondType.TRIPLE: 2, Chem.BondType.AROMATIC: 3}

    def bond_to_feat(self, bnd: Chem.Bond):
        """Return (endpoint atom-index tensor, one-hot bond-type feature) for *bnd*."""
        endpoints = torch.tensor([bnd.GetBeginAtomIdx(), bnd.GetEndAtomIdx()])
        one_hot = torch.zeros(len(self.bond_type_to_oh_loc), dtype=torch.float32)
        one_hot[self.bond_type_to_oh_loc[bnd.GetBondType()]] = 1.0
        return (endpoints, one_hot)
|
class SmilesFeaturizer():
    """Turns a SMILES string into (atom features, bond endpoint pairs, bond features)."""

    def __init__(self, atm_featurizer: AtmFeaturizer):
        self.atm_featurizer = atm_featurizer
        self.bond_featurizer = BondFeaturizer()

    def smi_to_feats(self, smi: str):
        """Featurize every atom and bond of the molecule parsed from *smi*."""
        mol = Chem.MolFromSmiles(smi)
        per_atom = [self.atm_featurizer.atom_to_feat(atom, mol, i) for (i, atom) in enumerate(mol.GetAtoms())]
        atm_feats = torch.stack(per_atom)
        per_bond = [self.bond_featurizer.bond_to_feat(b) for b in mol.GetBonds()]
        (bonds, bond_features) = zip(*per_bond)
        # Endpoint pairs stacked as a (2, num_bonds) tensor.
        bonds = torch.stack(bonds, dim=1)
        bond_features = torch.stack(bond_features)
        return (atm_feats, bonds, bond_features)
|
class SmilesToGraphAsAdjListFeaturizer():
    """Converts a SMILES string into a ``DirectedGraphAsAdjList`` with per-bond-type edge lists."""
    def __init__(self, atm_featurizer: AtmFeaturizer):
        self.atm_featurizer = atm_featurizer
        self.bond_featurizer = BondFeaturizer()
    def smi_to_feats(self, smi: str):
        """Featurize *smi* into node features plus edges grouped by bond type.

        Returns a single-graph adjacency-list representation (every node gets graph id 0).
        """
        # Order must match the one-hot layout produced by BondFeaturizer.
        bond_names = ['single', 'double', 'triple', 'aromatic']
        mol = Chem.MolFromSmiles(smi)
        atm_feats = torch.stack([self.atm_featurizer.atom_to_feat(atm, mol, i) for (i, atm) in enumerate(mol.GetAtoms())])
        edge_to_bond_type = {k: [] for k in bond_names}
        for (bonds, bond_features) in [self.bond_featurizer.bond_to_feat(bnd) for bnd in mol.GetBonds()]:
            bond_type = bond_names[torch.argmax(bond_features)]
            # Store each bond in both directions so the graph is effectively undirected.
            edge_to_bond_type[bond_type].extend([bonds, bonds[[1, 0]]])
        # Bond types with no occurrences become empty (2, 0) integer tensors.
        edge_to_bond_type = {k: (torch.stack(v, dim=1) if len(v) else torch.tensor([[], []], device=str(atm_feats.device), dtype=settings.TORCH_INT)) for (k, v) in edge_to_bond_type.items()}
        node_to_graph_id = torch.zeros(atm_feats.shape[0], device=str(atm_feats.device), dtype=settings.TORCH_INT)
        return grphs.DirectedGraphAsAdjList(atm_feats, edge_to_bond_type, node_to_graph_id)
|
class AggrType(enum.Enum):
    """How DAG node features are aggregated into a single embedding."""

    # Use the feature vector of the final (product) node only.
    FINAL_NODE = 'final_node'
|
class DAGEmbedder(nn.Module):
    """Embeds a DAG of molecules into a fixed-size vector via a GNN plus an aggregation step."""
    def __init__(self, dag_gnn: ggnn_sparse.GGNNSparse, aggr_type: AggrType, final_dim):
        """
        :param dag_gnn: GNN run over the DAG to compute node representations.
        :param aggr_type: how node representations are aggregated into one vector.
        :param final_dim: output dimensionality of the final linear map.
        """
        super().__init__()
        self.gnn = dag_gnn
        self.aggr_type = aggr_type
        # Single linear layer (MLP with no hidden layers) from GNN size to final_dim.
        self.final_mlp = mlp.get_mlp(mlp.MlpParams(dag_gnn.params.hlayer_size, final_dim, []))
    def forward(self, x_data: synthesis_trees.PredOutBatch):
        """Return one embedding per DAG in the batch *x_data*."""
        dog_in = x_data.dags_for_inputs
        new_features: graph_as_adj_list.DirectedGraphAsAdjList = self.compute_dog_node_feats(dog_in)
        if (self.aggr_type is AggrType.FINAL_NODE):
            # Represent each DAG by the features of its final (product) node.
            out_feats = new_features.node_features[(x_data.final_molecule_indcs, ...)]
        else:
            raise NotImplementedError
        out_feats = self.final_mlp(out_feats)
        return out_feats
    def compute_dog_node_feats(self, dog_in: graph_as_adj_list.DirectedGraphAsAdjList):
        """Run the GNN over the DAG, returning a graph with updated node features."""
        if ((dog_in.node_features.shape[1] == 1) or (dog_in.node_features.dtype == settings.TORCH_INT)):
            # Likely a sign the caller passed raw ids rather than embedded features.
            warnings.warn('Single dimensional or integer features being used for nodes in DAG -- is this meant?')
        new_features: graph_as_adj_list.DirectedGraphAsAdjList = self.gnn(dog_in)
        return new_features
    @property
    def embedding_dim(self):
        # Dimensionality of per-node GNN representations (before the final MLP).
        return self.gnn.params.hlayer_size
|
def get_model(react_pred: reaction_predictors.AbstractReactionPredictor, smi2graph_func, reactant_vocab, params=None):
    """Assemble the DoG-AE model (a Wasserstein autoencoder over DAGs of graphs).

    :param react_pred: reaction predictor used by the decoder when building DAGs.
    :param smi2graph_func: function mapping a SMILES string to graph features.
    :param reactant_vocab: vocabulary of allowed reactant molecules.
    :param params: hyperparameter dict; falls back to module-level ``default_params``.
    :return: tuple of (wae model, params actually used).
    """
    params = (params if (params is not None) else default_params)
    mol_embedder = molecular_graph_embedder.GraphEmbedder(**params['mol_graph_embedder_params'])
    dag_gnn = ggnn_sparse.GGNNSparse(ggnn_base.GGNNParams(**params['dag_graph_embedder_gnn_params']))
    # Encoder outputs mean and variance parameters, hence latent_dim * 2.
    # BUGFIX: this previously read default_params['latent_dim'], which silently
    # disagreed with the rest of the model whenever a custom ``params`` was passed.
    dag_embdr = dag_embedder.DAGEmbedder(dag_gnn, dag_embedder.AggrType[params['dag_embedder_aggr_type_s']], (params['latent_dim'] * 2))
    encoder = nn_paramterised_dists.NNParamterisedDistribution(dag_embdr, final_parameterised_dist=shallow_distributions.IndependentGaussianDistribution())
    latent_prior = shallow_distributions.IndependentGaussianDistribution(nn.Parameter(torch.zeros(1, (params['latent_dim'] * 2), dtype=settings.TORCH_FLT), requires_grad=False))
    # Kernel bandwidth heuristic: c = 2 * latent_dim * sigma^2 with sigma = 1.
    c = ((2 * params['latent_dim']) * (1 ** 2))
    kernel = similarity_funcs.InverseMultiquadraticsKernel(c=c)
    decoder_rnn_hidden_size = params['decoder_params']['gru_hsize']
    decoder_embdg_dim = mol_embedder.embedding_dim
    decoder_nets = dog_decoder.DecoderPreDefinedNetworks(mol_embedder, f_z_to_h0=nn.Linear(params['latent_dim'], decoder_rnn_hidden_size), f_ht_to_e_add=nn.Sequential(nn.Linear(decoder_rnn_hidden_size, 28), nn.ReLU(), nn.Linear(28, decoder_embdg_dim)), f_ht_to_e_reactant=nn.Sequential(nn.Linear(decoder_rnn_hidden_size, 28), nn.ReLU(), nn.Linear(28, decoder_embdg_dim)), f_ht_to_e_edge=nn.Sequential(nn.Linear(decoder_rnn_hidden_size, 28), nn.ReLU(), nn.Linear(28, decoder_embdg_dim)))
    decoder_params = dog_decoder.DecoderParams(**params['decoder_params'])
    decoder = dog_decoder.DOGGenerator(decoder_params, other_nets=decoder_nets, react_pred=react_pred, smi2graph=smi2graph_func, reactant_vocab=reactant_vocab)
    wae = wasserstein.WAEnMMD(encoder=encoder, decoder=decoder, latent_prior=latent_prior, kernel=kernel)
    # Expose the molecule embedder so callers can featurize molecules consistently.
    wae.mol_embdr = mol_embedder
    return (wae, params)
|
class DogGen(nn.Module):
    """Wraps the DoG Generator so it behaves as a plain autoregressive model
    rather than as the decoder half of an autoencoder."""

    def __init__(self, dog_gen: dog_decoder.DOGGenerator, initial_z_size):
        super().__init__()
        self.dog_gen = dog_gen
        self.initial_z_size = initial_z_size
        self.mol_embdr = dog_gen.other_nets.mol_embdr

    def forward(self, obs: synthesis_trees.PredOutBatch):
        """Negative log-likelihood of *obs* via teacher forcing (the training loss)."""
        self._update_gen(obs.batch_size)
        return self.dog_gen.nlog_like_of_obs(obs)

    @torch.no_grad()
    def sample(self, batch_size):
        """Draw ``batch_size`` samples from the generator (gradients disabled)."""
        self._update_gen(batch_size)
        return self.dog_gen.sample_no_grad(1)[0]

    def _update_gen(self, batch_size):
        # Seed the generator with an all-zero latent code of the right shape/device.
        device = next(self.dog_gen.parameters()).device
        zero_latent = torch.zeros((batch_size, self.initial_z_size), dtype=settings.TORCH_FLT, device=device)
        self.dog_gen.update(zero_latent)
|
def get_dog_gen(react_pred: reaction_predictors.AbstractReactionPredictor, smi2graph_func, reactant_vocab, params=None):
    """Assemble a DoG-Gen model (autoregressive generator over DAGs of graphs).

    :return: tuple of (model, params actually used).
    """
    params = default_params if params is None else params
    mol_embedder = molecular_graph_embedder.GraphEmbedder(**params['mol_graph_embedder_params'])
    gru_hidden = params['decoder_params']['gru_hsize']
    embed_dim = mol_embedder.embedding_dim

    def _head():
        # Small MLP mapping the RNN hidden state to a molecule-embedding-sized vector.
        return nn.Sequential(nn.Linear(gru_hidden, 28), nn.ReLU(), nn.Linear(28, embed_dim))

    decoder_nets = dog_decoder.DecoderPreDefinedNetworks(mol_embedder, f_z_to_h0=nn.Linear(params['latent_dim'], gru_hidden), f_ht_to_e_add=_head(), f_ht_to_e_reactant=_head(), f_ht_to_e_edge=_head())
    decoder = dog_decoder.DOGGenerator(dog_decoder.DecoderParams(**params['decoder_params']), other_nets=decoder_nets, react_pred=react_pred, smi2graph=smi2graph_func, reactant_vocab=reactant_vocab)
    model = DogGen(decoder, params['latent_dim'])
    return (model, params)
|
class GraphEmbedder(nn.Module):
    """GNN-based embedder mapping a (batched) molecular graph to fixed-size vectors."""

    def __init__(self, hidden_layer_size, edge_names, embedding_dim, num_layers):
        super().__init__()
        self.ggnn = ggnn_sparse.GGNNSparse(ggnn_base.GGNNParams(hidden_layer_size, edge_names, num_layers))
        # Gated sum over node features, projected up to the embedding size;
        # no further transform afterwards (identity "down" map).
        project_up = mlp.get_mlp(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        gate = mlp.get_mlp(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        identity_down = (lambda x: x)
        self.embedding_dim = embedding_dim
        self.ggnn_top = ggnn_sparse.GraphFeaturesStackIndexAdd(project_up, gate, identity_down)

    def forward(self, g_adjlist: graph_as_adj_list.DirectedGraphAsAdjList):
        """Return one embedding vector per graph in the input."""
        updated = self.ggnn(g_adjlist)
        return self.ggnn_top(updated.node_features, updated.node_to_graph_id)
|
class MolTransformerTokenizer():
    """Tokenizes SMILES strings for the Molecular Transformer (space-separated tokens)."""

    THE_REGEX = '(\\[[^\\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\\(|\\)|\\.|=|#|-|\\+|\\\\|\\/|:|~|@|\\?|>|\\*|\\$|\\%[0-9]{2}|[0-9])'
    REGEX = re.compile(THE_REGEX)

    @classmethod
    def to_tokens(cls, smiles_str_in: str) -> str:
        """Split *smiles_str_in* into SMILES tokens joined by single spaces."""
        tokens = cls.REGEX.findall(smiles_str_in)
        return ' '.join(tokens)

    @classmethod
    def from_tokens(cls, tokenized_str_in: str) -> str:
        """Inverse of ``to_tokens``: strip all whitespace back out."""
        return ''.join(tokenized_str_in.split())
|
class AbstractReactionPredictor(metaclass=abc.ABCMeta):
    """Base class for reaction-outcome predictors, with an LRU-style cache of results."""
    def __init__(self, size_of_cache=10000):
        # Maps frozen multiset of reactant SMILES -> multiset of product SMILES,
        # ordered least-recently-used first.
        self.cached_results = collections.OrderedDict()
        self.size_of_cache = size_of_cache
    @abc.abstractmethod
    def _run_list_of_reactant_sets(self, list_of_reactant_sets: typing.List[multiset.FrozenMultiset]) -> typing.List[multiset.Multiset]:
        # Subclasses perform the actual prediction for cache misses.
        raise NotImplementedError
    def __call__(self, list_of_reactant_sets: typing.List[multiset.FrozenMultiset]) -> typing.List[multiset.Multiset]:
        """Predict products for each reactant set, consulting/refreshing the cache.

        :param list_of_reactant_sets: each element is a multiset of reactant SMILES strings.
        :return: list of multisets holding the predicted product SMILES, aligned with the input.
        """
        set_of_already_have = set(self.cached_results.keys())
        reactant_sets_new_needed = list((set(list_of_reactant_sets) - set_of_already_have))
        if len(reactant_sets_new_needed):
            # Only the cache misses hit the underlying predictor.
            reactants_products_new = list(zip(reactant_sets_new_needed, self._run_list_of_reactant_sets(reactant_sets_new_needed)))
        else:
            reactants_products_new = []
        # pop() removes cache hits so the update() below re-inserts them at the end of
        # the OrderedDict, refreshing their LRU position.
        reactants_products_new.extend([(item, self.cached_results.pop(item)) for item in (set(list_of_reactant_sets) & set_of_already_have)])
        reactant_products_map = collections.OrderedDict(reactants_products_new)
        out = [reactant_products_map[item] for item in list_of_reactant_sets]
        self.cached_results.update(reactant_products_map)
        # Evict least-recently-used entries until the cache is back under its size limit.
        for _ in range(max(0, (len(self.cached_results) - self.size_of_cache))):
            self.cached_results.popitem(last=False)
        return out
|
class OpenNMTServerPredictor(AbstractReactionPredictor):
    """Reaction predictor that queries an OpenNMT (Molecular Transformer) REST server."""
    logger = logging.getLogger('transformer-react-predict')
    def __init__(self, model_id: int=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Server address comes from the [Transformer] section of the project config.
        self.server_address = settings.get_config().get('Transformer', 'address')
        print(f'OpenNMTServerPredictor: server_address is {self.server_address}')
        self.model_id = model_id
    def _run_list_of_reactant_sets(self, list_of_reactant_sets: typing.List[multiset.FrozenMultiset]) -> typing.List[multiset.Multiset]:
        """Predict products for every reactant multiset via one batched server request.

        Falls back to one arbitrary reactant as the "product" when the server
        returns nothing canonicalizable for a reaction.
        """
        def input_mapper(reactant_multiset_in):
            # Join reactants into one SMILES string and tokenize for the transformer.
            smiles_str_in = '.'.join(reactant_multiset_in)
            tokenstr = MolTransformerTokenizer.to_tokens(smiles_str_in)
            out_ = {'src': tokenstr, 'id': self.model_id}
            return out_
        json_to_send = json.dumps([input_mapper(elem) for elem in list_of_reactant_sets])
        def request_func():
            r = requests.post(self.server_address, data=json_to_send, timeout=180)
            r.raise_for_status()
            return r.json()
        # Retry the whole request up to 3 times before giving up.
        return_list = misc.retry_n_times(request_func, 3, Exception, interval=0.5, on_exception=(lambda ex: print(ex)))
        def output_mapper(dict_in):
            # De-tokenize the prediction and keep only the first canonicalizable product.
            prediction_tokenized = dict_in['tgt']
            prediction = MolTransformerTokenizer.from_tokens(prediction_tokenized)
            prediction_split = prediction.split('.')
            pred_res = []
            for smi in prediction_split:
                try:
                    pred_res.append(rdkit_general_ops.canconicalize(smi))
                except Exception:
                    # Invalid SMILES emitted by the model are silently dropped.
                    pass
            pred_res = pred_res[:1]
            out = multiset.Multiset(pred_res)
            return out
        try:
            op_back = return_list[0]
        except KeyError as ex:
            # NOTE(review): ``return_list`` is indexed like a list here, which raises
            # IndexError/TypeError rather than KeyError on failure -- confirm whether
            # the server's error responses are dicts if this handler should catch them.
            print(return_list)
            raise ex
        output = [output_mapper(elem) for elem in op_back]
        for (in_, out_) in zip(list_of_reactant_sets, output):
            self.logger.debug(f"{'.'.join(in_)}>>{'.'.join(out_)}")
        def select_input_if_op_none(input_, output_):
            # If the model produced nothing usable, fall back to one arbitrary reactant.
            output_ = [e for e in output_ if (e != '')]
            if len(output_):
                return output_
            else:
                return multiset.Multiset(list(input_)[:1])
        output = [select_input_if_op_none(input_elem, op_elem) for (input_elem, op_elem) in zip(list_of_reactant_sets, output)]
        return output
|
def _canonicalise_and_remove_am(molecules_in):
    """Canonicalize each SMILES in *molecules_in*, stripping atom-map numbers."""
    cleaned = []
    for smi_str in molecules_in:
        cleaned.append(rdkit_general_ops.canconicalize(smi_str, remove_am=True))
    return cleaned
|
class _LoopException(Exception):
    """Raised while sampling a synthesis route when a molecule is its own ancestor (a cycle)."""
    pass
|
class NodeType(enum.Enum):
    """Kinds of nodes in the reaction mega-graph."""

    MOLECULE = 'MOLECULE'
    REACTION = 'REACTION'
|
class Reaction(typing.NamedTuple):
    """A single-step reaction as frozensets of canonical reactant/product SMILES."""

    reactants: typing.FrozenSet[str]
    products: typing.FrozenSet[str]
|
def is_node_ancestor_of_node(plausible_ancestor_smi, dag, smis_to_check):
    """True iff the node ``(plausible_ancestor_smi,)`` is an ancestor in *dag* of any of *smis_to_check*."""
    per_node_ancestors = [nx.ancestors(dag, (smi,)) for smi in smis_to_check]
    combined = functools.reduce((lambda acc, more: (acc | more)), per_node_ancestors)
    return (plausible_ancestor_smi,) in combined
|
def extract_reactions(reaction_dataset) -> typing.Tuple[(typing.List[Reaction], typing.Set[str], dict)]:
    """
    Note:
    1. All molecules coming out of this function will be canonicalised (atom maps removed).
    2. Reagents will be removed from the reaction (reagents classified by not being involved
       in the final bond changes).
    3. Currently skipping all the reactions that have multiple products or result in overlap.

    :param reaction_dataset: A dataset that returns reactions as a tuple of:
        (reactants, products, bond_changes)
        eg extracted from the USPTO data format from Jin et al available here:
        https://github.com/wengong-jin/nips17-rexgen/tree/master/USPTO
    :return: list of reactions extracted; a set of all molecules seen; dict of extraction details.
    """
    reactions = []
    logger.debug('Extracting reactions')
    # Counters describing why reactions were kept/skipped; returned to the caller.
    run_through_stats = dict(num_skipped_due_to_multiple_products=0, num_multiple_same_reactants=0, num_multiple_same_products=0, num_overlap_between_reactants_and_products=0, num_skipped_as_already_seen=0, num_total_molecules=0, num_kept=0, num_total=len(reaction_dataset))
    all_molecules = set()
    reactant_products_tuples_seen = set()
    for (reactants, products, bond_changes) in tqdm.tqdm(reaction_dataset, desc='extracting reactions...'):
        # Reagents take no part in the bond changes, so split them out first
        # (the ``reagents`` element of the returned tuple is discarded).
        action_set = uspto_ds.actionset_from_uspto_line(bond_changes)
        (reactants, reagents, products) = rdkit_reaction_ops.split_reagents_out_from_reactants_and_products(reactants, products, action_set)
        reactants_split = reactants.split('.')
        reactants_split = _canonicalise_and_remove_am(reactants_split)
        reactants_split_set = frozenset(reactants_split)
        if (len(reactants_split) != len(reactants_split_set)):
            # Duplicate reactants collapse in the frozenset; only counted, not skipped.
            run_through_stats['num_multiple_same_reactants'] += 1
        products_split = products.split('.')
        products_split = _canonicalise_and_remove_am(products_split)
        products_split_set = frozenset(products_split)
        reaction = Reaction(reactants_split_set, products_split_set)
        if (len(products_split) != len(products_split_set)):
            run_through_stats['num_multiple_same_products'] += 1
        if len((products_split_set & reactants_split_set)):
            # A molecule appearing on both sides would create a trivial loop -- skip.
            run_through_stats['num_overlap_between_reactants_and_products'] += 1
            continue
        if (len(products_split_set) > 1):
            run_through_stats['num_skipped_due_to_multiple_products'] += 1
            continue
        if (reaction in reactant_products_tuples_seen):
            run_through_stats['num_skipped_as_already_seen'] += 1
            continue
        reactant_products_tuples_seen.add(reaction)
        reactions.append(reaction)
        all_molecules.update(reactants_split_set)
        all_molecules.update(products_split_set)
    run_through_stats['num_kept'] = len(reactions)
    run_through_stats['num_total_molecules'] = len(all_molecules)
    logger.info(f'''Extracting reactions done:
{tabulate.tabulate([[name, value] for (name, value) in run_through_stats.items()])}''')
    logger.debug('Creating tree dict')
    # Deduplicate (a no-op given the seen-set above) and sort for determinism.
    num_reactions_before = len(reactions)
    reactions = sorted(list(set(reactions)))
    num_reactions_after = len(reactions)
    logger.info(f'Removing duplicated reactions {(num_reactions_before - num_reactions_after)}. leaving: {num_reactions_after}')
    logger.info(f'Number of reactions {len(reactions)}, number of molecules {len(all_molecules)}')
    return (reactions, all_molecules, run_through_stats)
|
def create_mega_graph(reactions: typing.List[Reaction], reactants_to_reactant_id: dict) -> nx.DiGraph:
    """
    Create NetworkX Graph
    Reactions represented as tuples holding frozensets of canonical SMILES
    Molecules Represented as one element tuple of canonical SMILES
    :param reactions: list of reactions from the dataset.
    :param reactants_to_reactant_id: dictionary mapping from reactant SMILES string to integer ID
    """
    available_reactions = collections.deque(reactions)
    # Molecules reachable so far; grows as reactions are added.
    reactant_set = set(reactants_to_reactant_id.keys())
    # Fixed set of starting (purchasable) reactants.
    initial_reactant_set = set(reactants_to_reactant_id.keys())
    # Reactions parked until a particular missing reactant becomes reachable.
    rxns_later = collections.defaultdict(list)
    mega_graph = nx.DiGraph()
    for initial_compound in reactant_set:
        mega_graph.add_node((initial_compound,))
    number_times_gone_through = 0
    num_alternative_routes_to_product = 0
    num_added = 0
    num_skipped_due_to_loop = 0
    while len(available_reactions):
        logger.debug(f'Starting run through {number_times_gone_through}')
        for _ in tqdm.tqdm(range(len(available_reactions))):
            reaction_: Reaction = available_reactions.popleft()
            assert (len(reaction_.products) == 1), 'currently only using one product reactions'
            # Add only when every reactant is reachable and the product is not itself
            # one of the initial reactants (which would short-circuit the DAG).
            okay_to_add = (reaction_.reactants.issubset(reactant_set) and (not reaction_.products.issubset(initial_reactant_set)))
            if okay_to_add:
                reaction_node_representation = (reaction_.reactants, reaction_.products)
                mega_graph.add_node(reaction_node_representation)
                reactant_set.update(reaction_.products)
                for prod in reaction_.products:
                    prod_repr = (prod,)
                    if (prod_repr not in mega_graph):
                        mega_graph.add_node(prod_repr)
                    else:
                        # Product node already exists -> another route makes it.
                        num_alternative_routes_to_product += 1
                    mega_graph.add_edge(reaction_node_representation, prod_repr)
                    # The product is now reachable: un-park reactions waiting on it.
                    available_reactions.extend(rxns_later[prod])
                    rxns_later[prod] = []
                for react_ in reaction_.reactants:
                    react_repr = (react_,)
                    mega_graph.add_edge(react_repr, reaction_node_representation)
                num_added += 1
            elif (reaction_.reactants.issubset(reactant_set) and reaction_.products.issubset(initial_reactant_set)):
                # Product is an initial reactant: adding this reaction would loop back
                # into the starting materials, so it is discarded.
                pass
                num_skipped_due_to_loop += 1
            else:
                # Missing reactant(s): park the reaction keyed on one arbitrary missing
                # reactant; it is re-queued if that reactant is ever produced.
                reactants_missing = set((reaction_.reactants - reactant_set))
                one_arbitrary_reactant_missing = reactants_missing.pop()
                rxns_later[one_arbitrary_reactant_missing].append(reaction_)
        number_times_gone_through += 1
    number_discarded = sum([len(el) for el in rxns_later.values()])
    table = [('Total num times gone through', number_times_gone_through), ('Number added', num_added), ('Number discarded due to not connecting', number_discarded), ('Number discarded due to forming loop', num_skipped_due_to_loop), ('Number alterative routes to products found', num_alternative_routes_to_product)]
    logger.info(f'''Extracting Mega-DAG done:
{tabulate.tabulate(table)}''')
    return mega_graph
|
def _recursive_sample_from_dag_starting_at_node(rng: np.random.RandomState, dag, node_smi, smiles_seen, ancestor_smiles, connect_flag, max_depth):
    """Sample one synthesis route (as a tuple tree) for *node_smi* from the reaction DAG.

    Recursively picks, in random order, one of the reactions producing *node_smi* and
    expands its reactants, backtracking via ``_LoopException`` when a choice revisits
    an ancestor molecule (i.e. would form a cycle).

    :returns: (tuple_tree, max_depth, connect_flag), where ``connect_flag`` indicates
        some molecule appears more than once in the sampled structure (a DAG, not a tree).
    """
    if (node_smi in ancestor_smiles):
        raise _LoopException
    else:
        # Fresh set so sibling branches do not see this branch's ancestors.
        ancestor_smiles = (ancestor_smiles | {node_smi})
    connect_flag = (connect_flag or (node_smi in smiles_seen))
    smiles_seen.add(node_smi)
    max_depth += 1
    # Incoming edges are the reactions that produce this molecule.
    in_edges = list(dag.in_edges((node_smi,)))
    if (len(in_edges) == 0):
        # Base case: an initial reactant -- a leaf of the tuple tree.
        tuple_tree = (node_smi, [])
    else:
        # Try the producing reactions in random order until one expands without a loop.
        in_edge_possible_indices = rng.permutation(len(in_edges))
        for idx in in_edge_possible_indices:
            last_possible_idx_flag = (idx == in_edge_possible_indices[(- 1)])
            in_edge = in_edges[idx]
            reaction = in_edge[0]
            reactants = reaction[0]
            assert (node_smi in reaction[1]), 'not in products...?'
            try:
                (tuple_tree_down, max_depth, connect_flag) = zip(*[_recursive_sample_from_dag_starting_at_node(rng, dag, n, smiles_seen, ancestor_smiles, connect_flag, max_depth) for n in reactants])
            except _LoopException as ex:
                if last_possible_idx_flag:
                    # Every alternative loops back -- propagate so the caller backtracks.
                    raise ex
                else:
                    continue
            else:
                tuple_tree = (node_smi, list(tuple_tree_down))
                # Depth of this node is the deepest child branch.
                max_depth = np.max(max_depth)
                connect_flag = any(connect_flag)
                break
    return (tuple_tree, max_depth, connect_flag)
|
def extract_tuple_trees_from_mega_dag(mega_graph: nx.DiGraph, reactants_to_reactant_id: dict) -> typing.Tuple[(typing.List[typing.Tuple[(int, tuple)]], dict)]:
    """
    :param mega_graph: The DAG which contains the whole reaction network.
    :param reactants_to_reactant_id: dictionary mapping from reactant SMILES string to integer ID
    :return: a list of extracted trees/DAGs leading to one particular product, table of details about them.
    """
    reactant_set = set(reactants_to_reactant_id.keys())
    # Root candidates: molecule nodes (1-tuples) that are NOT initial reactants,
    # i.e. products that had to be synthesized.
    interesting_possible_root_nodes = set()
    for node in mega_graph:
        if ((len(node) == 1) and (node[0] not in reactant_set)):
            interesting_possible_root_nodes.add(node[0])
    logger.info(f'Number of possible starting nodes: {len(interesting_possible_root_nodes)}')
    depth_and_tree_tuples = []
    number_dags_of_these = 0
    max_depth = collections.Counter()
    # Fixed seed so the sampled routes are reproducible.
    rng = np.random.RandomState(100)
    for r_node in tqdm.tqdm(interesting_possible_root_nodes, desc='going through all possible final nodes.'):
        (tuple_tree, depth, connect_flag) = _recursive_sample_from_dag_starting_at_node(rng, mega_graph, r_node, set(), set(), False, 0)
        max_depth.update([depth])
        # connect_flag means some molecule was reused, so the route is a DAG, not a tree.
        number_dags_of_these += int(connect_flag)
        depth_and_tree_tuples.append((depth, tuple_tree))
    stats = ([['Number root nodes', len(interesting_possible_root_nodes)], ['Number which have repeated nodes', number_dags_of_these]] + [[f'Number that have {k} levels', v] for (k, v) in max_depth.items()])
    logger.info(f'''Extracting final trees done:
{tabulate.tabulate(stats)}''')
    return (depth_and_tree_tuples, dict(stats))
|
@dataclass(order=True, frozen=True)
class ScoredTupleTree():
    """A tuple tree paired with its score; ordering/equality compare the score only."""

    tuple_tree: tuple = field(compare=False)  # excluded from ordering and equality
    score_to_maximize: float

    @property
    def root_smi(self):
        """SMILES string of the root (final product) molecule."""
        return self.tuple_tree[0]
|
@dataclass
class DogGenHillclimbingParams():
    """Hyperparameters controlling the DoG-Gen hill-climbing loop."""

    n_rounds: int = 30  # number of sample/fine-tune rounds
    n_samples_per_round: int = 7000  # samples drawn from the model each round
    n_samples_to_keep_per_round: int = 1500  # top-scoring trees kept for fine-tuning
    n_epochs_for_finetuning: int = 2  # training epochs per round
    batch_size: int = 64  # fine-tuning batch size
    break_down_tuple_tree_into_parts: bool = False
    sample_batch_size: int = 200  # batch size used when sampling from the model
    learning_rate: float = 0.001
    clip_gradients: bool = True  # clip grad norm to 1.0 during fine-tuning
|
@dataclass
class DogGenHillclimberParts():
    """Pluggable components needed by ``DogGenHillclimber``."""
    # The DoG-Gen model being fine-tuned.
    model: doggen.DogGen
    # Memoizing oracle wrapper that scores product molecules.
    scorer: opt_utils.PropertyEvaluator
    # Canonical SMILES of the allowed reactant building blocks.
    reactant_vocab_set: typing.Set[str]
    rng: np.random.RandomState
    # Called as dataloader_factory(tuple_trees=..., batch_size=...) -> iterable of batches.
    dataloader_factory: typing.Callable
    # Called as prepare_batch(data, device) -> model inputs.
    prepare_batch: typing.Callable
    # Called as loss_fn(model, *batch) -> scalar training loss.
    loss_fn: typing.Callable
    device: typing.Union[(str, torch.device)]
|
class DogGenHillclimber():
    """Hill-climbing optimizer over synthesis DAGs using a DoG-Gen model.

    Alternates sampling synthesis trees from the model, scoring their product
    molecules, and fine-tuning the model on the best scorers.
    """
    def __init__(self, parts: DogGenHillclimberParts, params: DogGenHillclimbingParams):
        self.parts = parts
        self.hparams = params
        # Both are set up at the start of run_hillclimbing.
        self.optimizer = None
        self._num_total_train_steps_for_hc = None
    def run_hillclimbing(self, initial_tuple_trees, tb_logger: SummaryWriter):
        """
        See Alg.2 of our paper.
        :param initial_tuple_trees: seed tuple trees (NB all SMILES should be in canonical form already.)
        :param tb_logger: Tensorboard logger
        :return: all scored tuple trees seen, best first.
        """
        # Score the (cleaned) seed population first so an initial best is logged.
        seen_tts: typing.List[tuple] = self.filter_out_uninteresting_trees_and_clean(initial_tuple_trees, set())
        sorted_tts: typing.List[ScoredTupleTree] = self.score_new_trees_and_sort(seen_tts, [])
        self._report_best(sorted_tts, tb_logger, 0)
        self.optimizer = optim.Adam(self.parts.model.parameters(), lr=self.hparams.learning_rate)
        self._num_total_train_steps_for_hc = 0
        print('## Sampling before tuning...')
        sampled_dirty_tts = self.sample_from_model()
        sampled_clean_tts = self.filter_out_uninteresting_trees_and_clean(sampled_dirty_tts, sorted_tts)
        sorted_tts: typing.List[ScoredTupleTree] = self.score_new_trees_and_sort(sampled_clean_tts, sorted_tts)
        self._report_best(sorted_tts, tb_logger, 0)
        for round in range(self.hparams.n_rounds):
            print(f'# Starting round {round}')
            print('## Setting up new batch for training...')
            # Fine-tune on the current top-k tuple trees.
            new_batch_for_fine_tuning = [e.tuple_tree for e in sorted_tts[:self.hparams.n_samples_to_keep_per_round]]
            print('## Starting dog_gen on new batch...')
            self.train_one_round(new_batch_for_fine_tuning, tb_logger)
            print('## Sampling...')
            sampled_dirty_tts = self.sample_from_model()
            sampled_clean_tts = self.filter_out_uninteresting_trees_and_clean(sampled_dirty_tts, sorted_tts)
            sorted_tts: typing.List[ScoredTupleTree] = self.score_new_trees_and_sort(sampled_clean_tts, sorted_tts)
            self._report_best(sorted_tts, tb_logger, round)
        return sorted_tts
    def train_one_round(self, tuple_trees_to_train_on: typing.List[tuple], tb_logger: SummaryWriter):
        """Fine-tune the model for a few epochs on the given tuple trees."""
        self.parts.model.train()
        train_dataloader = self.parts.dataloader_factory(tuple_trees=tuple_trees_to_train_on, batch_size=self.hparams.batch_size)
        for epoch in range(self.hparams.n_epochs_for_finetuning):
            print(f'### Training epoch {epoch}')
            loss = 0.0
            for data in tqdm(train_dataloader, desc='training'):
                self.optimizer.zero_grad()
                batch = self.parts.prepare_batch(data, self.parts.device)
                loss = self.parts.loss_fn(self.parts.model, *batch)
                loss.backward()
                tb_logger.add_scalar('train_one_round_loss', loss.item(), self._num_total_train_steps_for_hc)
                if self.hparams.clip_gradients:
                    nn.utils.clip_grad_norm_(self.parts.model.parameters(), 1.0)
                self.optimizer.step()
                self._num_total_train_steps_for_hc += 1
            print(f'loss, last batch: {loss.item()}')
    @staticmethod
    def _report_best(sorted_list: typing.List[ScoredTupleTree], tb_logger: SummaryWriter, step_num):
        # Log the top-3 scores and the mean of the top-50 to Tensorboard.
        # NOTE(review): assumes at least 50 scored trees exist -- IndexError otherwise.
        print(f'Step {step_num}, Best 3 TTs so far are {sorted_list[:3]}')
        tb_logger.add_scalar('Best score So Far', sorted_list[0].score_to_maximize, global_step=step_num)
        tb_logger.add_scalar('Second best score So Far', sorted_list[1].score_to_maximize, global_step=step_num)
        tb_logger.add_scalar('Third best score So Far', sorted_list[2].score_to_maximize, global_step=step_num)
        tb_logger.add_scalar('Mean of top 50 scores', np.mean([sorted_list[i].score_to_maximize for i in range(50)]), global_step=step_num)
    def sample_from_model(self) -> typing.List[tuple]:
        """Sample roughly n_samples_per_round synthesis trees; return their tuple-tree representations."""
        self.parts.model.eval()
        out_list: typing.List[synthesis_trees.SynthesisTree] = []
        for _ in tqdm(range(int(np.ceil((self.hparams.n_samples_per_round / self.hparams.sample_batch_size)))), desc='sampling from model'):
            (syn_trees, _) = self.parts.model.sample(batch_size=self.hparams.sample_batch_size)
            out_list.extend(syn_trees)
        out_tts = [e.tuple_tree_repr() for e in out_list]
        return out_tts
    def score_new_trees_and_sort(self, new_tts: typing.List[tuple], existing_tree_scores: typing.List[ScoredTupleTree]) -> typing.List[ScoredTupleTree]:
        """Score the root molecule of each new tree and merge into a sorted list (best first)."""
        # Shallow copy so the caller's list is not mutated.
        existing_tree_scores = copy.copy(existing_tree_scores)
        scores = self.parts.scorer.evaluate_molecules([e[0] for e in new_tts])
        new_scored_tts = [ScoredTupleTree(tt, score) for (tt, score) in zip(new_tts, scores)]
        existing_tree_scores.extend(new_scored_tts)
        return sorted(existing_tree_scores, reverse=True)
    def filter_out_uninteresting_trees_and_clean(self, list_of_tuple_trees: typing.List[tuple], seen_tt_scores: typing.Iterable[ScoredTupleTree]) -> typing.List[tuple]:
        """
        Filters out tuple trees that have either been seen or lead to an item in the reactant set.
        Cleans tuple trees such that any unnecessary parts (ie any parts that lead to a reactant and so are
        superfluous) are removed.
        """
        out = []
        # Invariant (canonical) representations of trees already seen, for dedup.
        invariant_seen_tts = set([synthesis_trees.SynthesisTree.make_tuple_tree_invariant(elem.tuple_tree) for elem in seen_tt_scores])
        for tt in tqdm(list_of_tuple_trees, desc='cleaning trees'):
            # Skip trees whose final product is already a purchasable reactant.
            if (tt[0] in self.parts.reactant_vocab_set):
                continue
            invariant_rep = synthesis_trees.SynthesisTree.make_tuple_tree_invariant(tt)
            if (invariant_rep in invariant_seen_tts):
                continue
            else:
                invariant_seen_tts.add(invariant_rep)
            clean_tt = synthesis_trees.SynthesisTree.clean_dirty_tuple_tree(tt, self.parts.reactant_vocab_set)
            out.append(clean_tt)
        return out
|
@dataclass
class DoggenTrainDetails():
    """Details needed to construct a fresh DoG-Gen model (as opposed to loading one)."""

    starting_reactants: list  # SMILES strings of the reactant building blocks
    params: dict  # model hyperparameters
|
def load_doggen_model(device, log_path_for_react_predict, *, weight_path=None, doggen_train_details: DoggenTrainDetails=None):
    """
    This utility function loads the DoG-Gen model, setting up the reaction predictor, loggers etc.
    It can be called either at initial training time or after training with a weight path in which case it will obtain
    the required parameters from the checkpoint.
    See the load_doggae_model function which works similarly for the DoG-AE model.

    :param device: eg cpu or cuda
    :param log_path_for_react_predict: where to write out the reaction predictors log
    :param weight_path: if already have a trained version of the model give path here...
    :param doggen_train_details: ... or if not provide the parameter details/starting reactants to create a _new_ model.
    """
    assert ((doggen_train_details is None) or (weight_path is None)), 'Should either create a new model or load an existing. Not both!'
    if (weight_path is not None):
        # Restore hyperparameters and reactant vocabulary from the checkpoint.
        chkpt = torch.load(weight_path, device)
        print(f'Loading an existing model from {weight_path}.')
        _doggen_params = chkpt['model_params']
        starting_reactants = list(chkpt['mol_to_graph_idx_for_reactants'].keys())
    else:
        print('Creating a new dog gen model')
        chkpt = None
        _doggen_params = doggen_train_details.params
        print(_doggen_params)
        starting_reactants = doggen_train_details.starting_reactants
    collate_func = synthesis_trees.CollateWithLargestFirstReordering(starting_reactants)
    mol_to_graph_idx_for_reactants = collate_func.base_mol_to_idx_dict.copy()
    # Copy so moving the graphs to ``device`` does not mutate the collate function's copy.
    reactant_graphs = copy.copy(collate_func.reactant_graphs)
    reactant_graphs.inplace_torch_to(device)
    reactant_vocab = dog_decoder.DOGGenerator.ReactantVocab(reactant_graphs, mol_to_graph_idx_for_reactants)
    smi2graph_func = (lambda smi: smiles_to_feats.DEFAULT_SMILES_FEATURIZER.smi_to_feats(smi))
    # Reaction predictor logs to its own file; propagation disabled to avoid duplicates.
    reaction_predictor = reaction_predictors.OpenNMTServerPredictor()
    log_hndlr = logging.FileHandler(log_path_for_react_predict)
    log_hndlr.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_hndlr.setFormatter(formatter)
    reaction_predictor.logger.addHandler(log_hndlr)
    reaction_predictor.logger.setLevel(logging.DEBUG)
    reaction_predictor.logger.propagate = False
    (model, hparams) = doggen.get_dog_gen(reaction_predictor, smi2graph_func, reactant_vocab, _doggen_params)
    model = model.to(device)
    if (chkpt is not None):
        print('loading weights into model...')
        model.load_state_dict(chkpt['model'])
    other_parts = dict(log_hndlr=log_hndlr, hparams=hparams, chkpt=chkpt, mol_to_graph_idx_for_reactants=mol_to_graph_idx_for_reactants)
    return (model, collate_func, other_parts)
|
class PropertyEvaluator():
    """Memoizing wrapper around a molecular-property oracle.

    Tracks the order molecules are first queried in, and the number of unique
    molecules evaluated.
    """

    def __init__(self, property_calculator, dim=1):
        # Insertion order records the order in which molecules were first queried.
        self.seen_molecules = collections.OrderedDict()
        self.property_calculator = property_calculator
        self.dim = dim

    @property
    def num_evaluated(self):
        """Number of unique molecules queried so far."""
        return len(self.seen_molecules)

    @property
    def best_seen(self):
        """The (smiles, value) pair with the highest value seen so far."""
        return max(self.seen_molecules.items(), key=(lambda kv: kv[1]))

    def evaluate_molecules(self, list_of_smiles: typing.List[str]):
        """Evaluate each SMILES (canonicalized first), reusing cached oracle values."""
        results = []
        for raw_smi in list_of_smiles:
            canon_smi = rdkit_general_ops.canconicalize(raw_smi)
            if canon_smi not in self.seen_molecules:
                self.seen_molecules[canon_smi] = self.property_calculator(canon_smi)
            results.append(self.seen_molecules[canon_smi])
        return np.array(results)
|
def qed(smi):
    """Return ``[QED]`` (quantitative estimate of drug-likeness) for the molecule *smi*."""
    mol = rdkit_general_ops.get_molecule(smi, kekulize=False)
    qed_value = QED.qed(mol)  # renamed local: avoid shadowing this function's own name
    return [qed_value]
|
def get_penalized_logp():
    """Return a ``PropertyEvaluator`` for the normalized penalized-logP objective."""
    def reward_penalized_log_p_gcpn(smiles):
        """
        Reward that consists of log p penalized by SA and # long cycles,
        as described in (Kusner et al. 2017). Scores are normalized based on the
        statistics of 250k_rndm_zinc_drugs_clean.smi dataset
        :param smiles: SMILES string of the molecule to score
        :return: single-element list with the normalized score
        """
        mol = Chem.MolFromSmiles(smiles)
        # Normalization statistics computed on the ZINC 250k dataset.
        logP_mean = 2.4570953396190123
        logP_std = 1.434324401111988
        SA_mean = (- 3.0525811293166134)
        SA_std = 0.8335207024513095
        cycle_mean = (- 0.0485696876403053)
        cycle_std = 0.2860212110245455
        log_p = MolLogP(mol)
        # Synthetic accessibility score, negated so that higher is better.
        SA = (- sascorer.calculateScore(mol))
        # Penalize rings longer than 6 atoms by their excess length.
        cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
        if (len(cycle_list) == 0):
            cycle_length = 0
        else:
            cycle_length = max([len(j) for j in cycle_list])
        if (cycle_length <= 6):
            cycle_length = 0
        else:
            cycle_length = (cycle_length - 6)
        cycle_score = (- cycle_length)
        normalized_log_p = ((log_p - logP_mean) / logP_std)
        normalized_SA = ((SA - SA_mean) / SA_std)
        normalized_cycle = ((cycle_score - cycle_mean) / cycle_std)
        return [((normalized_log_p + normalized_SA) + normalized_cycle)]
    return PropertyEvaluator(reward_penalized_log_p_gcpn)
|
class GuacTask(enum.Enum):
    """
    We wrap the Guacamole tasks in this class (redefining several of them below) so that we can call the tasks
    individually rather than running a pre-built 'suite'
    """
    ARIPIPRAZOLE = 'Aripiprazole_similarity'
    OSIMERTINIB = 'Osimertinib_MPO'
    RANOLAZINE = 'Ranolazine_MPO'
    ZALEPLON = 'Zaleplon_MPO'
    VALSARTAN = 'Valsartan_SMARTS'
    DECO = 'decoration_hop'
    SCAFFOLD = 'scaffold_hop'
    PERINDOPRIL = 'Perindopril_MPO'
    AMLODIPINE = 'Amlodipine_MPO'
    SITAGLIPTIN = 'Sitagliptin_MPO'
    CELECOXIB = 'Celecoxib_rediscovery'
    TROGLITAZONE = 'Troglitazone_rediscovery'
    THIOTHIXENE = 'Thiothixene_rediscovery'
    ALBUTEROL = 'Albuterol_similarity'
    MESTRANOL = 'Mestranol_similarity'
    FEXOFENADINE = 'Fexofenadine_MPO'

    @classmethod
    def get_guac_property_eval(cls, task):
        """Create a PropertyEvaluator scoring molecules against the given Guacamol task.

        :param task: a GuacTask member.
        :raises NotImplementedError: if `task` is not a recognized member.
        """
        # Dispatch table of zero-argument factories so that only the requested
        # benchmark is actually constructed (replaces the former if/elif chain).
        factories = {
            cls.CELECOXIB: lambda: standard_benchmarks.similarity(smiles='CC1=CC=C(C=C1)C1=CC(=NN1C1=CC=C(C=C1)S(N)(=O)=O)C(F)(F)F', name='Celecoxib', fp_type='ECFP4', threshold=1.0, rediscovery=True),
            cls.TROGLITAZONE: lambda: standard_benchmarks.similarity(smiles='Cc1c(C)c2OC(C)(COc3ccc(CC4SC(=O)NC4=O)cc3)CCc2c(C)c1O', name='Troglitazone', fp_type='ECFP4', threshold=1.0, rediscovery=True),
            cls.THIOTHIXENE: lambda: standard_benchmarks.similarity(smiles='CN(C)S(=O)(=O)c1ccc2Sc3ccccc3C(=CCCN4CCN(C)CC4)c2c1', name='Thiothixene', fp_type='ECFP4', threshold=1.0, rediscovery=True),
            cls.ARIPIPRAZOLE: lambda: standard_benchmarks.similarity(smiles='Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl', name='Aripiprazole', fp_type='ECFP4', threshold=0.75),
            cls.ALBUTEROL: lambda: standard_benchmarks.similarity(smiles='CC(C)(C)NCC(O)c1ccc(O)c(CO)c1', name='Albuterol', fp_type='FCFP4', threshold=0.75),
            cls.MESTRANOL: lambda: standard_benchmarks.similarity(smiles='COc1ccc2[C@H]3CC[C@@]4(C)[C@@H](CC[C@@]4(O)C#C)[C@@H]3CCc2c1', name='Mestranol', fp_type='AP', threshold=0.75),
            cls.OSIMERTINIB: lambda: standard_benchmarks.hard_osimertinib(),
            cls.RANOLAZINE: lambda: standard_benchmarks.ranolazine_mpo(),
            cls.ZALEPLON: lambda: standard_benchmarks.zaleplon_with_other_formula(),
            cls.VALSARTAN: lambda: standard_benchmarks.valsartan_smarts(),
            cls.DECO: lambda: standard_benchmarks.decoration_hop(),
            cls.SCAFFOLD: lambda: standard_benchmarks.scaffold_hop(),
            cls.PERINDOPRIL: lambda: standard_benchmarks.perindopril_rings(),
            cls.AMLODIPINE: lambda: standard_benchmarks.amlodipine_rings(),
            cls.SITAGLIPTIN: lambda: standard_benchmarks.sitagliptin_replacement(),
            cls.FEXOFENADINE: lambda: standard_benchmarks.hard_fexofenadine(),
        }
        if task not in factories:
            raise NotImplementedError
        bench = factories[task]()
        # Each score is wrapped in a single-element list, matching PropertyEvaluator's convention.
        smi2score = lambda smi: [bench.objective.score(smi)]
        return PropertyEvaluator(smi2score)

    @classmethod
    def get_name_to_enum(cls) -> dict:
        """Map each task's string value (e.g. 'Zaleplon_MPO') to its enum member."""
        return {member.value: member for member in cls}
|
def get_task(name_of_task: str):
    """
    Given a task name (eg handed in as an argument to a script call) return the relevant PropertyEvaluator.
    See code for definition of class names. NB that Guacamol names are given by 'guac_<name>'
    """
    if name_of_task == 'qed':
        return PropertyEvaluator(qed)
    if name_of_task == 'sas':
        return PropertyEvaluator(lambda smiles: [sascorer.calculateScore(Chem.MolFromSmiles(smiles))])
    if name_of_task == 'pen_logp':
        # NOTE(review): despite the name this scores plain logP, not the penalized
        # variant built by get_penalized_logp -- confirm this is intentional.
        return PropertyEvaluator(lambda smiles: [MolLogP(Chem.MolFromSmiles(smiles))])
    if name_of_task.startswith('guac_'):
        # Strip the 'guac_' prefix and look the remainder up among the Guacamol tasks.
        guac_task = GuacTask.get_name_to_enum()[name_of_task[len('guac_'):]]
        return GuacTask.get_guac_property_eval(guac_task)
    raise NotImplementedError(f'{name_of_task} is not implemented.')
|
def get_tb_writer(path):
    """Return a cached SummaryWriter for `path`, patched so events default to the writer's step.

    The writer's file_writer.add_event is wrapped so that a missing `step` argument
    falls back to the writer's `global_step` attribute (if one has been set on it).
    Bug fix: the wrapper used to be re-applied on EVERY call, stacking one wrapper
    per call on cached writers; it is now applied exactly once, at creation time.
    """
    global _tb_writer
    if path not in _tb_writer:
        writer = SummaryWriter(path)

        def _wrap_add_event(wrapped):
            @functools.wraps(wrapped)
            def add_event(*args, **kwargs):
                # Convert positional args to keywords using the wrapped function's
                # parameter names (skipping `self`), so `step` can be inspected.
                kwargs.update(zip(wrapped.__code__.co_varnames[1:], args))
                step = kwargs.get('step', None)
                if step is None:
                    try:
                        # Fall back to a user-maintained global_step counter, if any.
                        step = writer.global_step
                    except AttributeError:
                        pass
                kwargs['step'] = step
                return wrapped(**kwargs)
            return add_event

        writer.file_writer.add_event = _wrap_add_event(writer.file_writer.add_event)
        _tb_writer[path] = writer
    return _tb_writer[path]
|
def load_tuple_trees(path_to_trees: str, rng: np.random.RandomState):
    """
    :param path_to_trees: This will point to a pickle file containing a list of tuples. First element of tuple is depth,
    second is the tuple tree.
    """
    # NOTE(review): rng is currently unused -- kept for interface compatibility.
    with open(path_to_trees, 'rb') as fo:
        data = pickle.load(fo)
    # Discard the depth (first tuple element); keep only the tuple tree.
    return [tree for (_, tree) in tqdm.tqdm(data, desc='Loading in tuple trees')]
|
def load_reactant_vocab(path_to_json: str) -> typing.List[str]:
    """Load a reactant vocabulary from a JSON file mapping SMILES -> integer index.

    :return: the SMILES keys ordered by their index values.
    """
    with open(path_to_json, 'r') as fo:
        smiles_to_index = json.load(fo)
    # Iterating the dict yields its keys; sort them by their mapped index.
    return sorted(smiles_to_index, key=smiles_to_index.get)
|
class AverageMeter(object):
    """Computes and stores the average and current value.

    Taken from https://github.com/pytorch/examples/blob/master/imagenet/main.py, license below.

    Pytorch examples license.
    BSD 3-Clause License
    Copyright (c) 2017,
    All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright notice, this
    list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.
    * Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `n` observations with value `val` and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
class AvgMeterHolder():
    """Bundle of AverageMeters timing the phases of a training step."""

    def __init__(self):
        self.time_to_get_batch = AverageMeter()
        self.time_to_forward = AverageMeter()
        self.time_to_step = AverageMeter()

    def reset(self):
        """Reset every meter this holder owns; non-meter attributes are left alone."""
        for meter in vars(self).values():
            if isinstance(meter, AverageMeter):
                meter.reset()

    def __str__(self):
        rows = [('Time to batch', self.time_to_get_batch.avg),
                ('Time to loss', self.time_to_forward.avg),
                ('Time for all update', self.time_to_step.avg)]
        return str(tabulate.tabulate(rows))
|
def create_unsupervised_trainer_timers(model, optimizer, loss_fn, device, prepare_batch, output_transform=(lambda x, loss: loss.item()), max_norm=np.inf):
    """
    Factory function for creating a trainer for unsupervised models.
    Adapted from the regular one in Pytorch


    Note: `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
    of the processed batch by default.

    Returns:
        Engine: a trainer engine with unsupervised update function.
    """
    if device:
        model.to(device)
    timings = AvgMeterHolder()

    def _update(engine, batch):
        # All three timers share one start time, so each meter records the
        # cumulative time since the step began (batch prep < forward < full update).
        start = time.time()
        model.train()
        optimizer.zero_grad()
        x = prepare_batch(batch, device=device)
        timings.time_to_get_batch.update(time.time() - start)
        loss = loss_fn(model, x)
        timings.time_to_forward.update(time.time() - start)
        loss.backward()
        if max_norm != np.inf:
            # Optional gradient clipping; skipped entirely at the default max_norm=inf.
            clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        timings.time_to_step.update(time.time() - start)
        return output_transform(x, loss)

    return Engine(_update), timings
|
def try_but_pass(fn, exception, print_flag: bool=True):
    """Call `fn()` and swallow only the given exception type(s).

    :param fn: zero-argument callable to run.
    :param exception: exception class (or tuple of classes) to catch; anything else propagates.
    :param print_flag: if True, print the caught exception.
    :return: fn()'s return value, or None if `exception` was raised.
    """
    try:
        return fn()
    except exception as ex:
        # Bug fix: previously caught bare `Exception`, silently ignoring the
        # `exception` argument and swallowing unrelated errors.
        if print_flag:
            print(ex)
        return None
|
def retry_n_times(fn, n, exception=Exception, interval=0, on_exception=None, args=(), kwargs=None):
    """Call fn(*args, **kwargs), retrying up to `n` times on `exception`.

    The final attempt is made outside the try block, so its exception propagates
    to the caller. Returns None without calling fn when n <= 0.

    :param interval: seconds to sleep after each failed (non-final) attempt.
    :param on_exception: optional callback invoked with each caught exception.
    """
    kwargs = {} if kwargs is None else kwargs
    for attempt in range(n):
        if attempt == n - 1:
            # Last chance: let any exception escape instead of swallowing it.
            return fn(*args, **kwargs)
        try:
            return fn(*args, **kwargs)
        except exception as err:
            if interval > 0:
                time.sleep(interval)
            if on_exception:
                on_exception(err)
|
def unpack_class_into_params_dict(params_in, prepender=''):
    """Flatten an object's attributes (or a dict) into a single-level string-keyed dict.

    Nested dicts are flattened with ':'-joined key paths; lists are stringified;
    values of any other type (None, objects, ...) are silently dropped.
    """
    if not isinstance(params_in, dict):
        params_in = vars(params_in)
    flat = {}

    def _walk(mapping, prefix):
        for key, value in mapping.items():
            name = prefix + str(key)
            if isinstance(value, (int, float, str)):
                flat[name] = value
            elif isinstance(value, dict):
                _walk(value, f'{name}:')
            elif isinstance(value, list):
                flat[name] = str(value)
            # anything else is intentionally skipped

    _walk(params_in, prepender)
    return flat
|
def to_pickle(data, filepath):
    """Serialize `data` to `filepath` using pickle."""
    with open(filepath, 'wb') as file_handle:
        pickle.dump(data, file_handle)
|
def from_pickle(filepath):
    """Load and return a pickled object from `filepath`."""
    with open(filepath, 'rb') as file_handle:
        return pickle.load(file_handle)
|
def load_json(path):
    """Read `path` as JSON and return the parsed object."""
    with open(path, 'r') as file_handle:
        return json.load(file_handle)
|
def get_repo_path():
    """Return the repository root path, two levels above this file's directory."""
    here = path.dirname(__file__)
    return path.join(here, '../../')
|
def get_config() -> configparser.ConfigParser:
    """Lazily parse and cache the repository's synthesis-dags-config.ini."""
    global _config
    if _config is None:
        # Read the ini file only on first access; reuse the parsed result afterwards.
        parsed = configparser.ConfigParser()
        parsed.read(path.join(get_repo_path(), 'synthesis-dags-config.ini'))
        _config = parsed
    return _config
|
def torch_device():
    """Return the preferred torch device: CUDA when available, otherwise CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
def test_open_nmt_run_list_of_reactant_sets_output(monkeypatch):
    # Checks that the predictor de-tokenizes the server's 'tgt' strings
    # (space-separated SMILES tokens) back into plain product SMILES strings.
    def mock_post(*args, **kwargs):
        # Stand-in for requests.post: returns a canned OpenNMT-server payload
        # with one prediction entry per submitted reactant set.
        class MockResponse():
            @staticmethod
            def json():
                return [[{'n_best': 1, 'pred_score': (- 0.0002288818359375), 'src': 'C [S-] . [Mg+] c 1 c c c ( Cl ) c c 1', 'tgt': 'C S c 1 c c c ( Cl ) c c 1'}, {'n_best': 1, 'pred_score': (- 0.004589080810546875), 'src': 'C C O C ( = O ) C 1 C C N ( C ( = O ) O C ( C ) ( C ) C ) C C 1 . C C ( C ) ( C ) O C ( = O ) N 1 C C N C C 1', 'tgt': 'C C ( C ) ( C ) O C ( = O ) N 1 C C N ( C ( = O ) C 2 C C N ( C ( = O ) O C ( C ) ( C ) C ) C C 2 ) C C 1'}]]
            def raise_for_status(self):
                pass
        return MockResponse()
    monkeypatch.setattr(requests, 'post', mock_post)
    nmt_pred = reaction_predictors.OpenNMTServerPredictor()
    out = nmt_pred._run_list_of_reactant_sets([multiset.FrozenMultiset(['C[S-]', '[Mg+]c1ccc(Cl)cc1']), multiset.FrozenMultiset(['CCOC(=O)C1CCN(C(=O)OC(C)(C)C)CC1', 'CC(C)(C)OC(=O)N1CCNCC1'])])
    # The predictor should strip the tokenization whitespace from each product.
    assert (out[0] == ['CSc1ccc(Cl)cc1'])
    assert (out[1] == ['CC(C)(C)OC(=O)N1CCC(C(=O)N2CCN(C(=O)OC(C)(C)C)CC2)CC1'])
|
def test_open_nmt_run_list_of_reactant_sets_input(monkeypatch):
    # Checks the request the predictor SENDS: the assertion lives inside the
    # mocked requests.post and pins the exact serialized (tokenized) payload.
    def mock_post(add, data, **kwargs):
        # Each reactant set should be space-tokenized, '.'-joined, and sent as
        # a JSON list of {"src": ..., "id": 0} entries.
        assert (data == '[{"src": "C [S-] . [Mg+] c 1 c c c ( Cl ) c c 1", "id": 0}, {"src": "C C O C ( = O ) C 1 C C N ( C ( = O ) O C ( C ) ( C ) C ) C C 1 . C C ( C ) ( C ) O C ( = O ) N 1 C C N C C 1", "id": 0}]')
        class MockResponse():
            @staticmethod
            def json():
                return [[{'n_best': 1, 'pred_score': (- 0.0002288818359375), 'src': 'C [S-] . [Mg+] c 1 c c c ( Cl ) c c 1', 'tgt': 'C S c 1 c c c ( Cl ) c c 1'}, {'n_best': 1, 'pred_score': (- 0.004589080810546875), 'src': 'C C O C ( = O ) C 1 C C N ( C ( = O ) O C ( C ) ( C ) C ) C C 1 . C C ( C ) ( C ) O C ( = O ) N 1 C C N C C 1', 'tgt': 'C C ( C ) ( C ) O C ( = O ) N 1 C C N ( C ( = O ) C 2 C C N ( C ( = O ) O C ( C ) ( C ) C ) C C 2 ) C C 1'}]]
            def raise_for_status(self):
                pass
        return MockResponse()
    monkeypatch.setattr(requests, 'post', mock_post)
    nmt_pred = reaction_predictors.OpenNMTServerPredictor()
    out = nmt_pred._run_list_of_reactant_sets([multiset.FrozenMultiset(['C[S-]', '[Mg+]c1ccc(Cl)cc1']), multiset.FrozenMultiset(['CCOC(=O)C1CCN(C(=O)OC(C)(C)C)CC1', 'CC(C)(C)OC(=O)N1CCNCC1'])])
|
# NOTE: the three lines below were a non-Python artifact from a dataset/web
# scrape ("Subsets and Splits ..."); commented out so the module stays importable.