Hani Park committed on
Commit cea0baa · 1 Parent(s): d7d2d06

Remove old files

CHAFF_processing_scripts/st1_download_pubchem.py DELETED
@@ -1,99 +0,0 @@
- #!/usr/bin/env python3
- """
- Download canonical SMILES and assay outcomes for all compounds in a PubChem BioAssay (AID).
- Uses the ListKey interface to handle large assays (>10,000 compounds).
-
- Usage:
-     python st1_download_pubchem.py 584 --output smiles_with_outcome.csv
- """
-
- import argparse
- import csv
- import sys
- import time
- import requests
- from typing import Dict, Tuple
- from io import StringIO
-
- PUG_BASE = "https://pubchem.ncbi.nlm.nih.gov/rest/pug"
-
- def get_listkey_for_aid(aid: str) -> Tuple[str, int]:
-     """Get ListKey and size for a given AID (used to retrieve large CID sets)."""
-     url = f"{PUG_BASE}/assay/aid/{aid}/cids/JSON?list_return=listkey"
-     try:
-         resp = requests.get(url, timeout=20)
-         resp.raise_for_status()
-         data = resp.json()["IdentifierList"]
-         listkey = data["ListKey"]
-         size = data["Size"]
-         print(f"Retrieved ListKey: {listkey} with {size} CIDs.")
-         return listkey, size
-     except Exception as e:
-         print(f"Error retrieving ListKey for AID {aid}: {e}", file=sys.stderr)
-         sys.exit(1)
-
- def fetch_smiles_from_listkey(listkey: str) -> Dict[int, str]:
-     """Fetch SMILES for all CIDs behind a ListKey in a single request."""
-     smiles_dict = {}
-     url = f"{PUG_BASE}/compound/listkey/{listkey}/property/SMILES/CSV"
-     try:
-         resp = requests.get(url, timeout=30)
-         resp.raise_for_status()
-         reader = csv.DictReader(StringIO(resp.text))
-         count = 0
-         for row in reader:
-             cid = int(row["CID"])
-             smiles = row["SMILES"]
-             smiles_dict[cid] = smiles
-             count += 1
-         print(f"Fetched {count} SMILES")
-     except Exception as e:
-         print(f"Error fetching SMILES: {e}", file=sys.stderr)
-     return smiles_dict
-
- def fetch_outcomes_from_listkey(aid: str, listkey: str, size: int, batch_size: int, delay: float) -> Dict[int, str]:
-     """Fetch assay outcomes in batches using the ListKey."""
-     outcomes = {}
-     for start in range(0, size, batch_size):
-         url = f"{PUG_BASE}/assay/aid/{aid}/CSV?cid=listkey&listkey={listkey}&listkey_start={start}&listkey_count={batch_size}"
-         try:
-             resp = requests.get(url, timeout=30)
-             resp.raise_for_status()
-             reader = csv.DictReader(StringIO(resp.text))
-             for row in reader:
-                 if "PUBCHEM_CID" in row and row["PUBCHEM_CID"].isdigit():
-                     cid = int(row["PUBCHEM_CID"])
-                     outcome = row.get("PUBCHEM_ACTIVITY_OUTCOME", "Unavailable")
-                     outcomes[cid] = outcome
-             print(f"Fetched outcomes for {len(outcomes)} compounds.")
-             time.sleep(delay)
-         except Exception as e:
-             print(f"Error fetching assay outcomes: {e}", file=sys.stderr)
-             sys.exit(1)
-     return outcomes
-
- def write_to_csv(smiles_dict: Dict[int, str], outcomes: Dict[int, str], output_file: str):
-     """Write combined CID, SMILES, and outcome rows to a CSV file."""
-     with open(output_file, "w", newline='') as f:
-         writer = csv.writer(f)
-         writer.writerow(["CID", "CanonicalSMILES", "AssayOutcome"])
-         for cid, smiles in smiles_dict.items():
-             outcome = outcomes.get(cid, "Unavailable")
-             writer.writerow([cid, smiles, outcome])
-     print(f"Saved {len(smiles_dict)} entries to {output_file}")
-
- def main():
-     parser = argparse.ArgumentParser(description="Download SMILES and outcomes using a ListKey for a PubChem assay")
-     parser.add_argument("aid", help="PubChem Assay ID (AID)")
-     parser.add_argument("--output", "-o", default="smiles_with_outcome.csv", help="Output CSV file")
-     parser.add_argument("--batch-size", type=int, default=10000, help="Batch size for ListKey download")
-     parser.add_argument("--delay", type=float, default=0.5, help="Delay between batches (in seconds)")
-     args = parser.parse_args()
-
-     listkey, size = get_listkey_for_aid(args.aid)
-     smiles_dict = fetch_smiles_from_listkey(listkey)
-     outcomes = fetch_outcomes_from_listkey(args.aid, listkey, size, batch_size=args.batch_size, delay=args.delay)
-     write_to_csv(smiles_dict, outcomes, args.output)
-
- if __name__ == "__main__":
-     main()
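
As written, fetch_smiles_from_listkey pulls every SMILES in a single request, which can time out on very large assays. A minimal batched sketch, assuming the compound endpoint honors the same listkey_start/listkey_count paging options that fetch_outcomes_from_listkey already uses (fetch_smiles_batched is a hypothetical helper, not part of the original script):

import csv
import time
from io import StringIO
from typing import Dict

import requests

PUG_BASE = "https://pubchem.ncbi.nlm.nih.gov/rest/pug"

def fetch_smiles_batched(listkey: str, size: int,
                         batch_size: int = 10000, delay: float = 0.5) -> Dict[int, str]:
    # Hypothetical batched variant; pages through the ListKey the same way
    # fetch_outcomes_from_listkey does above.
    smiles_dict: Dict[int, str] = {}
    for start in range(0, size, batch_size):
        url = (f"{PUG_BASE}/compound/listkey/{listkey}/property/SMILES/CSV"
               f"?listkey_start={start}&listkey_count={batch_size}")
        resp = requests.get(url, timeout=30)
        resp.raise_for_status()
        for row in csv.DictReader(StringIO(resp.text)):
            smiles_dict[int(row["CID"])] = row["SMILES"]
        time.sleep(delay)  # stay under PubChem's request-rate limits
    return smiles_dict
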
CHAFF_processing_scripts/st2_run_download_pubchem.py DELETED
@@ -1,20 +0,0 @@
- import os
- import subprocess
-
- aid_list = [
-     632, 1641, 1730, 1857, 1926, 435026, 504689, 720541, 1159604,
-     587, 588, 589, 590, 591, 592, 593, 594, 709, 923, 1480, 1483, 1696, 1775, 1776, 2124, 2757,
-     588517, 588620, 624483, 720675, 720678, 720680, 720681, 720682, 720686, 720687,
-     584, 585, 1476, 1478, 485294, 485341,  # Detergents
-     411, 1006, 1269, 1379, 1891, 2515, 2530, 366887, 366889, 366891, 488838, 493175,
-     588342, 588498, 602357, 602358, 602364, 602474, 602475, 602476, 602477,
-     624030, 652016, 720522, 720835, 1224835, 1347047,
-     672, 682, 936,
-     878, 888, 929, 1234
- ]
-
- for aid in aid_list:
-     output_file = f"./raw/pubchem_aid_{aid}.csv"
-     command = ["python", "st1_download_pubchem.py", str(aid), "--output", output_file]
-     print(f"Running: {' '.join(command)}")
-     subprocess.run(command)
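
Two fragile points in this driver: subprocess.run(command) ignores non-zero exit codes, and ./raw must already exist before st1 writes into it. A hedged variant using only standard-library behavior (the abbreviated AID list is for illustration):

import os
import subprocess

os.makedirs("./raw", exist_ok=True)  # st1 writes its CSVs here

for aid in [584, 585]:  # abbreviated list; the real script loops over aid_list
    output_file = f"./raw/pubchem_aid_{aid}.csv"
    command = ["python", "st1_download_pubchem.py", str(aid), "--output", output_file]
    print(f"Running: {' '.join(command)}")
    # check=True raises CalledProcessError instead of silently continuing
    subprocess.run(command, check=True)
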
CHAFF_processing_scripts/st3_extract_active_compounds.py DELETED
@@ -1,25 +0,0 @@
- import os
- import pandas as pd
- import glob
-
- file_path = "./raw/*.csv"
-
- # Extract active compounds from the datasets
- for file in glob.glob(file_path):
-     name = os.path.splitext(os.path.basename(file))[0]
-     output_path = f"./active/{name}_active.csv"
-
-     if os.path.exists(output_path):
-         print(f"Skipping {name}, already exists.")
-         continue
-
-     raw_df = pd.read_csv(file)
-     active_df = raw_df[raw_df['AssayOutcome'] == 'Active']
-
-     # Save new CSV file
-     active_df.to_csv(output_path, index=False)
-     print(f"Saved {len(active_df)} active compounds out of {len(raw_df)} to {output_path}")
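
As with the download driver, ./active must exist before the first write. A minimal guard, plus a quick sanity check of the outcome labels before filtering (the value_counts inspection is illustrative, not part of the original script):

import os
import pandas as pd

os.makedirs("./active", exist_ok=True)

# PubChem outcome labels are typically Active / Inactive / Inconclusive / Unspecified;
# inspecting one raw file confirms the 'Active' filter matches the data.
df = pd.read_csv("./raw/pubchem_aid_584.csv")
print(df["AssayOutcome"].value_counts())
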
CHAFF_processing_scripts/st4_smiles_curation.py DELETED
@@ -1,91 +0,0 @@
- import os
- import glob
- import pandas as pd
- import molvs
- from rdkit import Chem
-
- standardizer = molvs.Standardizer()
- fragment_remover = molvs.fragment.FragmentRemover()
-
- excluded_aids = {"584", "585", "1478", "1476", "485294", "485341"}  # with/without-detergent pairs, handled in st5
-
- file_path = "./active/*.csv"
-
- for file in glob.glob(file_path):
-     file_name = os.path.splitext(os.path.basename(file))[0]
-     aid = file_name.replace("pubchem_aid_", "").replace("_active", "")
-
-     if aid in excluded_aids:
-         print(f"Skipping {aid} (excluded).")
-         continue
-
-     output_path = f"./curated/{file_name}_curated.csv"
-     if os.path.exists(output_path):
-         print(f"Skipping {file_name}, already exists.")
-         continue
-
-     active_df = pd.read_csv(file)
-     smiles_series = active_df["CanonicalSMILES"]
-     active_df["curated_SMILES"] = None
-     cid = active_df["CID"]
-
-     # --------- SMILES sanitization ---------
-     valid_indices = []
-     invalid_smiles = []
-     warning_smiles = []
-
-     for idx, smiles in smiles_series.items():
-         mol = Chem.MolFromSmiles(smiles)
-         compound_cid = cid.loc[idx]
-
-         if mol is None:
-             invalid_smiles.append({
-                 'CID': compound_cid,
-                 'SMILES': smiles,
-                 'Reason': "MolFromSmiles returned None"
-             })
-             continue
-
-         results = molvs.validate_smiles(smiles)
-
-         if len(results) > 0:
-             warning_smiles.append({
-                 'CID': compound_cid,
-                 'SMILES': smiles,
-                 'Reason': results
-             })
-             continue
-
-         mol = standardizer.standardize(mol)
-         mol = fragment_remover.remove(mol)
-         standardized = Chem.MolToSmiles(mol)
-
-         active_df.at[idx, "curated_SMILES"] = standardized
-         valid_indices.append(idx)
-
-     # Save valid entries
-     valid_df = active_df.loc[valid_indices].reset_index(drop=True)
-     valid_df = valid_df.drop(columns=["CanonicalSMILES"])
-     valid_df = valid_df.rename(columns={"curated_SMILES": "SMILES"})
-
-     # Create DataFrames for invalid and warning entries
-     invalid_df = pd.DataFrame(invalid_smiles)
-     warning_df = pd.DataFrame(warning_smiles)
-
-     # Add AID column to all three DataFrames
-     valid_df.insert(0, "AID", aid)
-     invalid_df.insert(0, "AID", aid)
-     warning_df.insert(0, "AID", aid)
-
-     # Save CSV files
-     valid_df.to_csv(output_path, index=False)
-     invalid_df.to_csv(f'./curated/{file_name}_invalid_smiles.csv', index=False)
-     warning_df.to_csv(f'./curated/{file_name}_molvs_validation.csv', index=False)
-
-     print(f"Number of compounds in {file_name}:", len(active_df))
-     print(f"Number of invalid SMILES in {file_name}: {len(invalid_df)}")
-     print(f"Number of warning SMILES in {file_name}: {len(warning_df)}\n")
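
The per-row pipeline above (parse, validate, standardize, strip salts/fragments, canonicalize) is the heart of the curation step. A condensed sketch of the same sequence as a reusable helper (the function name is illustrative; Standardizer, FragmentRemover, and validate_smiles are the MolVS calls used in the script):

from typing import Optional

import molvs
from rdkit import Chem

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()

def curate_smiles(smiles: str) -> Optional[str]:
    """Return a curated canonical SMILES, or None if parsing/validation fails."""
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None                      # unparsable SMILES
    if molvs.validate_smiles(smiles):    # any validation message -> skip, as in the script
        return None
    mol = standardizer.standardize(mol)  # normalize functional groups and charges
    mol = fragment_remover.remove(mol)   # drop salts/solvents, keep the parent fragment
    return Chem.MolToSmiles(mol)         # RDKit canonical SMILES

# e.g. curate_smiles("c1ccccc1O") -> "Oc1ccccc1"
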
CHAFF_processing_scripts/st5_detergent_smiles_curation.py DELETED
@@ -1,121 +0,0 @@
- import os
- import pandas as pd
- import molvs
- from rdkit import Chem
-
- standardizer = molvs.Standardizer()
- fragment_remover = molvs.fragment.FragmentRemover()
-
- # AID mapping {without detergent: with detergent}
- filter_map = {
-     "585": "584",
-     "1476": "1478",
-     "485341": "485294"
- }
-
- # AID -> CID set mapping
- cid_sets = {}
-
- # Gather CIDs from the with-detergent assays
- for target_aid in filter_map.values():
-     file_name = f"pubchem_aid_{target_aid}_active.csv"
-     file_full_path = os.path.join("./active", file_name)
-
-     if os.path.exists(file_full_path):
-         df = pd.read_csv(file_full_path)
-         cid_sets[target_aid] = set(df["CID"].tolist())
-     else:
-         print(f"Warning: file for AID {target_aid} not found!")
-
- # Filter: drop compounds also active in the paired with-detergent assay
- for target_aid, filter_aid in filter_map.items():
-     file_name = f"pubchem_aid_{target_aid}_active.csv"
-     file_full_path = os.path.join("./active", file_name)
-
-     if not os.path.exists(file_full_path):
-         print(f"Skipping {target_aid}, file not found.")
-         continue
-
-     df = pd.read_csv(file_full_path)
-
-     before = len(df)
-     df = df[~df["CID"].isin(cid_sets[filter_aid])]
-     after = len(df)
-
-     output_path = f"./active/pubchem_aid_{target_aid}_active_filtered.csv"
-     df.to_csv(output_path, index=False)
-
-     print(f"{target_aid}: removed {before - after} compounds from {before}, saved to {output_path}")
-
- filtered_files = [
-     "pubchem_aid_585_active_filtered.csv",
-     "pubchem_aid_1476_active_filtered.csv",
-     "pubchem_aid_485341_active_filtered.csv"
- ]
-
- for file in filtered_files:
-     file_path = os.path.join("./active", file)
-     file_name = os.path.splitext(file)[0]
-
-     # Read file
-     active_df = pd.read_csv(file_path)
-     smiles_series = active_df["CanonicalSMILES"]
-     active_df["curated_SMILES"] = None
-     cid = active_df["CID"]
-
-     valid_indices = []
-     invalid_smiles = []
-     warning_smiles = []
-
-     for idx, smiles in smiles_series.items():
-         mol = Chem.MolFromSmiles(smiles)
-         compound_cid = cid.loc[idx]
-
-         if mol is None:
-             invalid_smiles.append({
-                 'CID': compound_cid,
-                 'SMILES': smiles,
-                 'Reason': "MolFromSmiles returned None"
-             })
-             continue
-
-         results = molvs.validate_smiles(smiles)
-         if len(results) > 0:
-             warning_smiles.append({
-                 'CID': compound_cid,
-                 'SMILES': smiles,
-                 'Reason': results
-             })
-             continue
-
-         mol = standardizer.standardize(mol)
-         mol = fragment_remover.remove(mol)
-         standardized = Chem.MolToSmiles(mol)
-
-         active_df.at[idx, "curated_SMILES"] = standardized
-         valid_indices.append(idx)
-
-     # Save outputs
-     valid_df = active_df.loc[valid_indices].reset_index(drop=True)
-     valid_df = valid_df.drop(columns=["CanonicalSMILES"])
-     valid_df = valid_df.rename(columns={"curated_SMILES": "SMILES"})
-     aid = file_name.replace("pubchem_aid_", "").replace("_active_filtered", "")
-     valid_df.insert(0, "AID", aid)
-
-     invalid_df = pd.DataFrame(invalid_smiles)
-     warning_df = pd.DataFrame(warning_smiles)
-     invalid_df.insert(0, "AID", aid)
-     warning_df.insert(0, "AID", aid)
-
-     valid_df.to_csv(f'./curated/{file_name}_curated.csv', index=False)
-     invalid_df.to_csv(f'./curated/{file_name}_invalid_smiles.csv', index=False)
-     warning_df.to_csv(f'./curated/{file_name}_molvs_validation.csv', index=False)
-
-     print(f"Finished curation for {file_name}")
-     print(f"  Valid: {len(valid_df)}, Invalid: {len(invalid_df)}, Warnings: {len(warning_df)}\n")
prepare_hf_upload.py DELETED
@@ -1,20 +0,0 @@
- import os
- import shutil
-
- # Paths
- csv_source_folder = "./HuggingFaceFinal"
- hf_upload_folder = "./HuggingFaceUpload"
- data_folder = os.path.join(hf_upload_folder, "data")
-
- os.makedirs(data_folder, exist_ok=True)
-
- # Copy curated CSV files into the upload layout
- csv_files = []
- for file in os.listdir(csv_source_folder):
-     if file.endswith(".csv"):
-         src = os.path.join(csv_source_folder, file)
-         dst = os.path.join(data_folder, file)
-         shutil.copy(src, dst)
-         csv_files.append(file)
-
- print(f"Number of copied CSV files: {len(csv_files)}")