"""Sanitize SMILES strings from the A2A RO4 dataset and export them to Parquet.

Reads a gzipped TSV of (smiles, id, value) rows, standardizes each SMILES in
parallel with MolVS/RDKit, drops rows whose sanitization failed, and writes
the cleaned table to a date-coded Parquet file under product/.
"""

import molvs
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import rdkit
import yaml
from concurrent.futures import ProcessPoolExecutor
from rdkit import Chem
from tqdm import tqdm

# Module-level so worker processes (which re-import this module under the
# "spawn" start method) have them in scope for sanitize_smiles().
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()


def sanitize_smiles(smiles_raw):
    """Return a standardized, fragment-free canonical SMILES, or None on failure.

    Parameters
    ----------
    smiles_raw : str
        Raw SMILES string as read from the input file.

    Returns
    -------
    str or None
        Canonical SMILES after MolVS standardization and fragment removal,
        or None if parsing or standardization fails for any reason.
    """
    try:
        mol = Chem.MolFromSmiles(smiles_raw)
        # MolFromSmiles signals an unparsable SMILES by returning None
        # (no exception) — check explicitly instead of relying on a later
        # AttributeError.
        if mol is None:
            return None
        mol = standardizer.standardize(mol)
        mol = fragment_remover.remove(mol)
        return Chem.MolToSmiles(mol)
    except Exception:
        # Any chemistry failure maps to "drop this row"; never use a bare
        # except (it would also swallow KeyboardInterrupt/SystemExit).
        return None


def parallel_sanitize(smiles_list, n_jobs=10):
    """Sanitize a list of SMILES strings across worker processes.

    Parameters
    ----------
    smiles_list : list of str
        Raw SMILES strings to sanitize.
    n_jobs : int, optional
        Number of worker processes (default 10).

    Returns
    -------
    list of (str or None)
        Sanitized SMILES in input order; None where sanitization failed.
    """
    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        return list(
            tqdm(executor.map(sanitize_smiles, smiles_list), total=len(smiles_list))
        )


def main():
    """Run the full sanitize-and-export pipeline."""
    with open("parameters.yaml") as parameters_file:
        parameters = yaml.safe_load(parameters_file)

    data = pd.read_csv(
        filepath_or_buffer="data/ro4/a2a.ro4.tsv.gz",
        sep="\t",
        compression="gzip",
        header=None,
        names=["smiles", "id", "value"],
    )
    # Convert the 'value' column to float, coercing bad entries to NaN.
    data["value"] = pd.to_numeric(data["value"], errors="coerce")

    data["clean_smiles"] = parallel_sanitize(data["smiles"].tolist(), n_jobs=10)

    # Drop failed rows (where sanitization returned None).
    data = data[data["clean_smiles"].notnull()].copy()

    output_path = f"product/a2a_ro4_sanitized_{parameters['date_code']}.parquet"
    table = pa.Table.from_pandas(data[["clean_smiles", "id", "value"]])
    pq.write_table(table, output_path, compression="snappy")


if __name__ == "__main__":
    # Guard is required: ProcessPoolExecutor re-imports this module in each
    # worker under the spawn start method; without the guard the pool would
    # recursively re-launch the whole pipeline on Windows/macOS.
    main()