File size: 1,988 Bytes
73a57ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62

import rdkit
import pandas as pd
import molvs
from rdkit import Chem
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import pyarrow as pa
import pyarrow.parquet as pq
import yaml

# Load run parameters (e.g. the date_code used to stamp the output filename).
with open("parameters.yaml") as parameters_file:
    parameters = yaml.safe_load(parameters_file)

# Raw A2A dataset: gzip-compressed TSV with no header row; columns are
# assigned explicitly as smiles / id / value.
data = pd.read_csv(
    filepath_or_buffer = "data/ro4/a2a.ro4.tsv.gz", sep = "\t", compression='gzip', header=None, names=['smiles','id','value'])

# Convert the 'value' column to float, coercing errors to NaN
data['value'] = pd.to_numeric(data['value'], errors='coerce')

# MolVS helpers used by sanitize_smiles(); built once at module level so
# worker processes see them as globals (presumably inherited on fork —
# TODO confirm the multiprocessing start method on the target platform).
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()

def sanitize_smiles(smiles_raw):
    """Standardize a raw SMILES string and strip salt/solvent fragments.

    Parameters
    ----------
    smiles_raw : str
        Input SMILES string as read from the source file.

    Returns
    -------
    str or None
        Canonical SMILES of the standardized, fragment-free molecule, or
        None when parsing or standardization fails.
    """
    try:
        mol = rdkit.Chem.MolFromSmiles(smiles_raw)
        if mol is None:
            # MolFromSmiles signals unparseable input by returning None
            # (no exception) — bail out explicitly instead of letting the
            # next call blow up on a None molecule.
            return None
        mol = standardizer.standardize(mol)
        mol = fragment_remover.remove(mol)
        return rdkit.Chem.MolToSmiles(mol)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; any RDKit/MolVS failure still maps to None so the
        # caller can drop the row.
        return None

#print(data.dtypes)
#print(data.info())
#types_in_value = set(type(x) for x in data['value'])
#print(types_in_value)
#num_str = data['value'].apply(lambda x: isinstance(x, str)).sum()
#print(f"Number of string entries in 'value': {num_str}")

def parallel_sanitize(smiles_list, n_jobs=10, chunksize=64):
    """Sanitize a list of SMILES strings in parallel worker processes.

    Parameters
    ----------
    smiles_list : list of str
        Raw SMILES strings to sanitize.
    n_jobs : int, default 10
        Number of worker processes.
    chunksize : int, default 64
        Number of items handed to a worker per dispatch. The previous
        implicit value of 1 costs one IPC round-trip per molecule, which
        dominates runtime on large datasets; batching amortizes it.
        Results are returned in input order either way.

    Returns
    -------
    list
        Sanitized canonical SMILES (or None for failed rows), aligned
        with the input order.
    """
    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        results = executor.map(sanitize_smiles, smiles_list, chunksize=chunksize)
        sanitized = list(tqdm(results, total=len(smiles_list)))
    return sanitized
# Sanitize every SMILES in parallel; output list is aligned with the rows.
data['clean_smiles'] = parallel_sanitize(data['smiles'].tolist(), n_jobs=10)

# Drop failed rows (where sanitization failed)
data = data[data['clean_smiles'].notnull()].copy()

# Persist the cleaned subset (clean_smiles / id / value) as snappy-compressed
# Parquet, stamped with the configured date code.
# NOTE(review): assumes the product/ directory already exists — confirm.
output_path = f"product/a2a_ro4_sanitized_{parameters['date_code']}.parquet"
table = pa.Table.from_pandas(data[['clean_smiles', 'id', 'value']])
pq.write_table(table, output_path, compression='snappy')


#data['smiles'] = data['smiles'].apply(sanitize_smiles)

#data.to_csv(
 #   path_or_buf = f"product/d2_ro4_sanitized_{parameters['date_code']}.tsv",
  #  sep = "\t",
   # index = False)