File size: 6,623 Bytes
ae9909c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
"""
Build MolEdit dataset v2 — balanced, 20 samples per op type (13 types = 260 total).

Changes from v1:
  - target_per_op is a fixed CLI arg (default 20), not derived from n_samples
  - Uses a much larger candidate pool to handle rare op types
  - Multi-pass: exhausts full pool before giving up on any op type
  - Saves to a new output directory (default: ./data/pubchem_dataset_v2)

Usage:
    python build_dataset_v2.py \
        --input  /home/dataset-assist-0/usr/lh/mzm/data/pubchem/CID-SMILES.gz.1 \
        --output ./data/pubchem_dataset_v2 \
        --target_per_op 20 \
        --pool   20000 \
        --seed   42
"""

import os, sys, json, random, argparse, subprocess, signal
sys.path.insert(0, os.path.dirname(__file__))

from rdkit import Chem
from rdkit.Chem import Draw, AllChem
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")

from mol_corrupt import corrupt_molecule, CORRUPTION_TYPES


# ---------------------------------------------------------------------------
# Molecule filtering
# ---------------------------------------------------------------------------

def is_valid(mol, smiles, min_atoms=5, max_atoms=60):
    """Return True if *mol* is a usable single-fragment molecule for the dataset.

    Rejects:
      - unparsable input (``mol is None``) or multi-fragment SMILES
        (a ``.`` separator in *smiles*);
      - molecules whose atom count falls outside ``[min_atoms, max_atoms]``;
      - molecules containing no carbon atom.

    Args:
        mol: RDKit Mol object, or None if SMILES parsing failed.
        smiles: the SMILES string *mol* was parsed from.
        min_atoms: minimum allowed atom count (default 5).
        max_atoms: maximum allowed atom count (default 60).

    Returns:
        bool: whether the molecule passes all filters.
    """
    if mol is None or "." in smiles:
        return False
    n = mol.GetNumAtoms()
    if n < min_atoms or n > max_atoms:
        return False
    # Require at least one carbon atom to filter out inorganic entries.
    if not any(atom.GetAtomicNum() == 6 for atom in mol.GetAtoms()):
        return False
    return True


# ---------------------------------------------------------------------------
# Streaming sample from gzip file
# ---------------------------------------------------------------------------

def stream_sample(filepath, pool_size, seed=42):
    """Stream ``(cid, smiles)`` pairs from a gzipped CID-SMILES file.

    Reads the file sequentially, keeping lines of the form
    ``<numeric CID>\\t<SMILES>`` until *pool_size* pairs are collected
    (or the file is exhausted), then shuffles them with a local RNG
    seeded by *seed* so module-level ``random`` state is untouched.

    Uses the stdlib ``gzip`` module instead of piping through an external
    ``zcat`` process: no dependency on a system binary, no SIGPIPE
    handling via ``preexec_fn``, and the file handle is closed cleanly
    on early exit.

    Args:
        filepath: path to the gzip-compressed input file.
        pool_size: maximum number of pairs to collect.
        seed: shuffle seed (default 42).

    Returns:
        list[tuple[str, str]]: shuffled ``(cid, smiles)`` pairs.
    """
    import gzip  # local import: only this helper needs it

    rng = random.Random(seed)
    pool = []
    with gzip.open(filepath, "rb") as fh:
        for raw in fh:
            # Mirror the original decoding: ASCII with undecodable bytes dropped.
            line = raw.decode("ascii", errors="ignore").strip()
            parts = line.split("\t")
            if len(parts) >= 2 and parts[0].isdigit():
                pool.append((parts[0], parts[1]))
            if len(pool) >= pool_size:
                break
    rng.shuffle(pool)
    return pool


def render(smiles, path, size=(400, 300)):
    """Render *smiles* as a 2D depiction image at *path*.

    Returns False when the SMILES cannot be parsed; otherwise computes
    2D coordinates, writes the image, and returns True.
    """
    molecule = Chem.MolFromSmiles(smiles)
    drawable = molecule is not None
    if drawable:
        AllChem.Compute2DCoords(molecule)
        Draw.MolToFile(molecule, path, size=size)
    return drawable


# ---------------------------------------------------------------------------
# Main builder
# ---------------------------------------------------------------------------

def build(input_path, output_dir, target_per_op=20, pool_size=20000, seed=42):
    """Build the balanced MolEdit v2 dataset.

    Streams a candidate pool from *input_path*, filters it with
    ``is_valid``, then sweeps the filtered pool up to ``max_passes``
    times, corrupting molecules with ``corrupt_molecule`` until every op
    type in ``CORRUPTION_TYPES`` has *target_per_op* samples. Renders a
    PNG of each kept molecule into ``<output_dir>/images`` and writes
    all records to ``<output_dir>/dataset.json``.

    Args:
        input_path: gzipped CID-SMILES file to sample from.
        output_dir: directory for images and dataset.json (created if needed).
        target_per_op: samples to collect per corruption op type.
        pool_size: number of candidate lines to stream from the input.
        seed: base RNG seed (each pass reseeds with ``seed + pass_num``).

    Returns:
        list[dict]: the dataset records that were written to dataset.json.
    """
    img_dir = os.path.join(output_dir, "images")
    os.makedirs(img_dir, exist_ok=True)

    print(f"Sampling {pool_size} candidates from {input_path} ...")
    raw_pool = stream_sample(input_path, pool_size, seed=seed)
    print(f"  Got {len(raw_pool)} candidates, filtering ...")

    # Keep only parseable, filtered molecules; store the canonical SMILES.
    valid = []
    for cid, smi in raw_pool:
        mol = Chem.MolFromSmiles(smi)
        if is_valid(mol, smi):
            valid.append((cid, Chem.MolToSmiles(mol), mol))
    print(f"  {len(valid)} molecules passed filtering")

    collected = {op: 0 for op in CORRUPTION_TYPES}  # per-op sample counts
    records = []
    idx = 0      # global sample index, used for ids and image filenames
    skipped = 0  # corruption failures + render failures

    # Pass through the pool repeatedly until all op types hit target
    # or we've done max_passes full sweeps (to avoid infinite loop)
    max_passes = 5
    for pass_num in range(1, max_passes + 1):
        remaining_ops = [op for op in CORRUPTION_TYPES if collected[op] < target_per_op]
        if not remaining_ops:
            break
        print(f"\nPass {pass_num}: {len(remaining_ops)} op types still need samples")

        # Reseed the global RNG per pass: deterministic overall, but each
        # pass visits the pool in a different order.
        random.seed(seed + pass_num)
        random.shuffle(valid)

        for cid, canonical, mol in valid:
            if not remaining_ops:
                break

            # Iterate a copy: remaining_ops is mutated inside the loop.
            # Note: one molecule may contribute samples to several op types.
            for op_type in list(remaining_ops):
                if collected[op_type] >= target_per_op:
                    remaining_ops.remove(op_type)
                    continue

                # corrupt_molecule returns None when no corruption of this
                # type could be applied within max_retries attempts.
                result = corrupt_molecule(canonical, op_type=op_type, max_retries=15)
                if result is None:
                    skipped += 1
                    continue

                wrong_smi, wrong_smi_mapped, correction = result
                item_id = f"{idx:06d}"
                img_rel = os.path.join("images", f"{item_id}.png")
                abs_img = os.path.join(output_dir, img_rel)

                # Render the CORRECT structure; the corruption lives only in
                # the SMILES fields of the record.
                if not render(canonical, abs_img):
                    skipped += 1
                    continue

                records.append({
                    "id":                  item_id,
                    "cid":                 cid,
                    "correct_smiles":      canonical,
                    "wrong_smiles":        wrong_smi,
                    "wrong_smiles_mapped": wrong_smi_mapped,
                    "image_path":          img_rel,
                    "operation":           correction,
                })
                collected[op_type] += 1
                idx += 1

                if idx % 50 == 0:
                    print(f"  {idx} samples collected ...")

        remaining_ops = [op for op in CORRUPTION_TYPES if collected[op] < target_per_op]

    # Summary: which op types reached the target.
    print("\nOp type coverage:")
    for op in CORRUPTION_TYPES:
        mark = "✓" if collected[op] >= target_per_op else f"✗ (only {collected[op]})"
        print(f"  {op:<35} {collected[op]:>4}  {mark}")

    json_path = os.path.join(output_dir, "dataset.json")
    with open(json_path, "w") as f:
        json.dump(records, f, indent=2, ensure_ascii=False)

    print(f"\nDone: {len(records)} samples → {json_path}  (skipped {skipped})")
    return records


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------

if __name__ == "__main__":
    # CLI entry point: parse arguments and run the dataset builder.
    cli = argparse.ArgumentParser()
    cli.add_argument("--input",
                     default="/home/dataset-assist-0/usr/lh/mzm/data/pubchem/CID-SMILES.gz.1")
    cli.add_argument("--output",
                     default="./data/pubchem_dataset_v2")
    cli.add_argument("--target_per_op", type=int, default=20,
                     help="Samples per op type")
    cli.add_argument("--pool", type=int, default=20000,
                     help="Candidate pool size from input file")
    cli.add_argument("--seed", type=int, default=42)
    opts = cli.parse_args()

    build(
        opts.input,
        opts.output,
        target_per_op=opts.target_per_op,
        pool_size=opts.pool,
        seed=opts.seed,
    )