File size: 6,844 Bytes
bf004e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
"""
pull_structures.py
----------------------------
Pulls all glycan structure files from GlycoShape API and uploads
to HuggingFace as a Parquet dataset.

Requirements:
    pip install requests huggingface_hub pyarrow tqdm

Usage:
    python pull_structures.py --repo your-username/GlycoShape
"""

import argparse, io, json, os, time, zipfile, requests
from pathlib import Path
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
from huggingface_hub import HfApi

# ── Config ─────────────────────────────────────────────────────────────────────
BASE_URL   = "https://glycoshape.org/api"  # GlycoShape REST API root
BATCH_SIZE = 50        # write to parquet every N glycans (memory control)
                       # NOTE(review): main() actually flushes when the record
                       # batch reaches BATCH_SIZE * 10 structure FILES, not per
                       # glycan — the comment above is approximate.
SLEEP_SEC  = 0.3       # polite delay between requests
TIMEOUT    = 30        # seconds per request (download_structures uses 60s for
                       # the larger ZIP transfers instead of this constant)


# ── Fetch the full list of GlyTouCan IDs ───────────────────────────────
def fetch_available_ids() -> list[str]:
    """Return every GlyTouCan accession currently served by the API.

    Queries the /available endpoint and returns its JSON payload (a list
    of accession strings). Raises requests.HTTPError on a non-2xx reply.
    """
    print("Fetching GlyTouCan ID list from /api/available …")
    response = requests.get(f"{BASE_URL}/available", timeout=TIMEOUT)
    response.raise_for_status()
    id_list = response.json()
    print(f"  → {len(id_list)} glycans found")
    return id_list


# ── Download ZIP and extract PDB files for a glycan ──────────────────
def download_structures(glycan_id: str) -> list[dict]:
    """
    Fetch one glycan's structure ZIP and unpack it into flat records.

    Returns a list of records, one per structure file inside the ZIP:
    {
        glytoucan_id : str   – GlyTouCan accession (e.g. "G00028MO")
        filename     : str   – original filename inside the ZIP
        file_type    : str   – extension: "pdb", "mol2", etc.
        cluster      : str   – cluster label parsed from filename (if present)
        pdb_content  : str   – full text content of the file
    }

    Any download error or corrupt archive is reported to stdout and the
    function returns an empty list instead of raising.
    """
    url = f"{BASE_URL}/download/{glycan_id}"
    try:
        resp = requests.get(url, timeout=60)
        resp.raise_for_status()
    except requests.HTTPError as exc:
        print(f"  HTTP error for {glycan_id}: {exc}")
        return []
    except requests.RequestException as exc:
        print(f"  Network error for {glycan_id}: {exc}")
        return []

    structure_exts = {"pdb", "mol2", "cif", "xyz"}
    records: list[dict] = []
    try:
        with zipfile.ZipFile(io.BytesIO(resp.content)) as archive:
            for member in archive.namelist():
                ext = Path(member).suffix.lstrip(".").lower()
                if ext not in structure_exts:
                    continue  # skip non-structure files (logs, etc.)

                raw_bytes = archive.read(member)
                try:
                    text = raw_bytes.decode("utf-8")
                except UnicodeDecodeError:
                    # Fall back to latin-1, which never fails to decode.
                    text = raw_bytes.decode("latin-1")

                # Cluster label = first all-digit token of the stem,
                # e.g. "cluster_1.pdb" → "1" ("" when none is found).
                stem_tokens = Path(member).stem.replace("-", "_").split("_")
                cluster = next((tok for tok in stem_tokens if tok.isdigit()), "")

                records.append({
                    "glytoucan_id": glycan_id,
                    "filename":     member,
                    "file_type":    ext,
                    "cluster":      cluster,
                    "pdb_content":  text,
                })
    except zipfile.BadZipFile:
        print(f"  Bad ZIP for {glycan_id}")

    return records


# ── Write a batch of records to a Parquet shard ─────────────────────────
def write_parquet(records: list[dict], out_dir: Path, shard_idx: int) -> Path:
    """Serialize *records* into a zstd-compressed Parquet shard in *out_dir*.

    Shard files are named "shard-00000.parquet", "shard-00001.parquet", …
    so they sort lexicographically. Returns the path that was written.
    """
    # Column layout mirrors the record dicts built by download_structures;
    # large_string because PDB text content can exceed 2 GiB string limits.
    column_types = [
        ("glytoucan_id", pa.string()),
        ("filename",     pa.string()),
        ("file_type",    pa.string()),
        ("cluster",      pa.string()),
        ("pdb_content",  pa.large_string()),
    ]
    schema = pa.schema([pa.field(col, dtype) for col, dtype in column_types])
    out_path = out_dir / f"shard-{shard_idx:05d}.parquet"
    pq.write_table(pa.Table.from_pylist(records, schema=schema),
                   out_path, compression="zstd")
    print(f"  → wrote {out_path.name}  ({len(records)} structure files)")
    return out_path

# ── Main ───────────────────────────────────────────────────────────────────────
def main():
    """CLI entry point: download GlycoShape structures into Parquet shards.

    Workflow:
      1. Resolve the list of GlyTouCan IDs — the full API list, or the IDs
         saved in failed_ids.json when --retry-failed is given.
      2. Download each glycan's structure ZIP, accumulating record dicts.
      3. Flush accumulated records to numbered Parquet shards and record
         any failed IDs for a later retry pass.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--out-dir",      default="../data/structure_parquets/", help="Output dir for parquet shards (default: ../data/structure_parquets/)")
    parser.add_argument("--limit",        type=int, default=None,                help="Only process first N glycans (for testing)")
    parser.add_argument("--retry-failed", action="store_true",                   help="Retry only the IDs listed in failed_ids.json")
    args = parser.parse_args()

    out_dir = Path(args.out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    # 1. Get IDs — either from failed_ids.json or the full API list
    if args.retry_failed:
        fail_path = out_dir / "failed_ids.json"
        if not fail_path.exists():
            print(f"No failed_ids.json found in {out_dir}")
            return
        all_ids = json.loads(fail_path.read_text())
        print(f"Retrying {len(all_ids)} previously failed glycans …")
    else:
        all_ids = fetch_available_ids()
        # Fix: compare against None so "--limit 0" is honored; the previous
        # truthiness test silently ignored a zero limit and processed all IDs.
        if args.limit is not None:
            all_ids = all_ids[: args.limit]
            print(f"  (limited to first {args.limit} for testing)")

    # 2. Download + write in batches.
    # Start shard numbering after any shards already on disk so a retry or
    # resumed run appends new shards instead of overwriting existing ones.
    batch:     list[dict] = []
    shard_idx: int        = len(list(out_dir.glob("shard-*.parquet")))
    failed:    list[str]  = []

    for glycan_id in tqdm(all_ids, desc="Downloading"):
        records = download_structures(glycan_id)
        if records:
            batch.extend(records)
        else:
            # NOTE: a glycan whose ZIP holds no structure files is counted as
            # failed too — indistinguishable from a download error here.
            failed.append(glycan_id)

        # Flush on record count (structure FILES), not glycan count: one
        # glycan can contribute many files per ZIP.
        if len(batch) >= BATCH_SIZE * 10:
            write_parquet(batch, out_dir, shard_idx)
            shard_idx += 1
            batch = []

        time.sleep(SLEEP_SEC)  # be polite to the API

    # Flush whatever remains in the final partial batch.
    if batch:
        write_parquet(batch, out_dir, shard_idx)

    if failed:
        fail_path = out_dir / "failed_ids.json"
        fail_path.write_text(json.dumps(failed, indent=2))
        print(f"\n⚠ {len(failed)} glycans failed — saved to {fail_path}")
    elif args.retry_failed:
        # Everything succeeded this time — the stale retry list can go.
        (out_dir / "failed_ids.json").unlink(missing_ok=True)
        print("\n✓ All previously failed glycans downloaded successfully")


# Run the downloader only when executed as a script, not on import.
if __name__ == "__main__":
    main()