#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 80_20_proportion.py: stratified 80/20 splits with an optional Bacteria filter, grouped by base accession (GCA/GCF).
# Input: --ndjson (NDJSON), --supp1 (optional Supp1.csv filter: domain B by assembly), --supp2 (Supp2.xlsx for labels)
# Output: subsetXX/train.jsonl, subsetXX/test.jsonl, subsetXX/manifest.json
#
# CHANGE: Remove contaminated assemblies listed in Supp2.xlsx where
# "Evidence of assembly contamination with alt gen code" == "yes"
# (based on the "assembly" column).
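#
# Example invocation (illustrative; the file names and parameter values below are
# hypothetical, not taken from this repository):
#   python 80_20_proportion.py \
#       --ndjson genomes.ndjson --supp1 Supp1.csv --supp2 Supp2.xlsx \
#       --outdir splits --n_splits 5 --seed 42
# which writes splits/subsetNN/{train.jsonl,test.jsonl,manifest.json} for NN = 01..05.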
import argparse, json, re, sys
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedGroupKFold
def extract_acc_base(acc: str) -> str:
"""Strip the assembly version suffix, e.g. 'GCA_000005845.2' -> 'GCA_000005845'."""
m = re.match(r'^(GC[AF]_\d+)', str(acc))
return m.group(1) if m else str(acc).split('.')[0]
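# Assumed shape of Supp1.csv for the Bacteria filter below (column names are matched
# case-insensitively; the rows are made-up examples):
#   assembly,domain of life
#   GCA_000005845.2,B    <- kept (Bacteria)
#   GCF_000146045.2,E    <- dropped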
def load_bacterial_bases_from_supp1(supp1_csv: str) -> set:
"""Return base accessions from Supp1.csv whose 'domain of life' column is 'B' (Bacteria)."""
df = pd.read_csv(supp1_csv)
cols = {c.lower().strip(): c for c in df.columns}
dom_col = cols.get('domain of life') or cols.get('domain_of_life') or cols.get('domain') or 'domain of life'
asm_col = cols.get('assembly') or cols.get('assembly accession') or 'assembly'
if dom_col not in df.columns or asm_col not in df.columns:
raise ValueError(f"Supp1.csv must contain columns similar to 'domain of life' and 'assembly'. Found: {list(df.columns)}")
mask = df[dom_col].astype(str).str.strip().str.upper().eq('B')
df_b = df.loc[mask, [asm_col]].dropna()
bases = set(extract_acc_base(a) for a in df_b[asm_col].astype(str))
return bases
def load_alt_bases_from_supp2_legacy(supp2_xlsx: str) -> set:
"""
ORIGINAL behavior (kept): column index-based extraction.
WARNING: This assumes the 4th column (usecols=[3]) contains assembly IDs for the ALT label.
"""
df = pd.read_excel(supp2_xlsx, header=None, usecols=[3])
alt = (
df.iloc[:, 0]
.dropna()
.astype(str)
.unique()
.tolist()
)
return set(extract_acc_base(x) for x in alt)
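# Assumed shape of the Supp2.xlsx columns used by the contamination filter below
# (column names are matched case-insensitively; the rows are made-up examples):
#   assembly          | Evidence of assembly contamination with alt gen code
#   GCA_000005845.2   | yes    <- removed from the dataset
#   GCF_000146045.2   | no     <- kept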
def load_contam_bases_from_supp2(supp2_xlsx: str) -> set:
"""
NEW: assemblies to REMOVE (contaminated), where:
Evidence of assembly contamination with alt gen code == 'yes'
Uses named columns: 'assembly' + 'Evidence of assembly contamination with alt gen code'
"""
df = pd.read_excel(supp2_xlsx)
cols = {c.lower().strip(): c for c in df.columns}
asm_col = cols.get('assembly')
ev_col = cols.get('evidence of assembly contamination with alt gen code')
if asm_col is None or ev_col is None:
raise ValueError(
"Supp2.xlsx must contain columns 'assembly' and "
"'Evidence of assembly contamination with alt gen code' to filter contaminated rows. "
f"Found columns: {list(df.columns)}"
)
mask = (
df[ev_col]
.astype(str)
.str.strip()
.str.lower()
.eq('yes')
)
contam_bases = (
df.loc[mask, asm_col]
.dropna()
.astype(str)
.apply(extract_acc_base)
.unique()
.tolist()
)
return set(contam_bases)
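# Illustrative NDJSON record: "acc" is the only key this script inspects; every other
# key is passed through to train.jsonl/test.jsonl unchanged (the extra keys shown here
# are hypothetical):
#   {"acc": "GCA_000005845.2", "features": [0.12, 0.3], "genome_length": 4641652}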
def read_ndjson_records(path: str):
"""Yield one parsed JSON object per non-empty NDJSON line; malformed lines are skipped."""
with open(path, 'r') as fh:
for line in fh:
line = line.strip()
if not line:
continue
try:
yield json.loads(line)
except Exception:
continue
def main():
ap = argparse.ArgumentParser(description="Create grouped stratified 80/20 splits compatible with Mass_models.py")
ap.add_argument('--ndjson', required=True, help='Input NDJSON file (one JSON per line)')
ap.add_argument('--supp1', required=False, help='Optional Supp1.csv filter: keep only Bacteria (domain==B) by assembly')
ap.add_argument('--supp2', required=True, help='Supp2.xlsx (used for legacy alt labels + contamination filter)')
ap.add_argument('--outdir', required=True, help='Output directory root for subsets')
ap.add_argument('--n_splits', type=int, default=1, help='Number of replicate 80/20 splits')
ap.add_argument('--seed', type=int, default=42, help='Random seed')
args = ap.parse_args()
ndjson_path = Path(args.ndjson)
if not ndjson_path.exists():
sys.exit(f"[ERR ] NDJSON not found: {ndjson_path}")
bacteria_bases = None
if args.supp1:
print(f"[FILTER] Loading Supp1 (Bacteria-only by 'domain of life' & 'assembly')…")
bacteria_bases = load_bacterial_bases_from_supp1(args.supp1)
print(f"[FILTER] Allowed assembly bases: {len(bacteria_bases)}")
# Original label behavior preserved
alt_bases = load_alt_bases_from_supp2_legacy(args.supp2)
print(f"[LABEL] Alt bases from Supp2 (legacy col[3]): {len(alt_bases)}")
# NEW: contaminated -> remove
contam_bases = load_contam_bases_from_supp2(args.supp2)
print(f"[FILTER] Contaminated bases from Supp2 where evidence == 'yes': {len(contam_bases)}")
# If something is both "alt" (legacy) and contaminated, it MUST be removed.
overlap = len(alt_bases & contam_bases)
if overlap:
print(f"[WARN ] Overlap alt vs contaminated: {overlap} bases (will be REMOVED from dataset)")
print(f"[LOAD ] Reading NDJSON: {ndjson_path}")
records, groups, y = [], [], []
dropped_contam = 0
dropped_supp1 = 0
for obj in read_ndjson_records(str(ndjson_path)):
acc = obj.get("acc")
if not acc:
continue
base = extract_acc_base(acc)
# Optional bacteria-only filter
if bacteria_bases is not None and base not in bacteria_bases:
dropped_supp1 += 1
continue
# NEW contamination filter (REMOVE)
if base in contam_bases:
dropped_contam += 1
continue
records.append(obj)
groups.append(base)
y.append(1 if base in alt_bases else 0)
if not records:
sys.exit("[ERR ] No records after filtering. Check Supp1 filter / Supp2 contamination filter / NDJSON.")
y = np.array(y, dtype=int)
pos = int(y.sum())
print(
f"[DATA ] kept={len(records)} | positives={pos} ({100.0*pos/len(records):.2f}%) | "
f"groups={len(set(groups))} | dropped_contam={dropped_contam} | dropped_supp1={dropped_supp1}"
)
outroot = Path(args.outdir)
outroot.mkdir(parents=True, exist_ok=True)
for k in range(args.n_splits):
subset_dir = outroot / f"subset{k+1:02d}"
subset_dir.mkdir(parents=True, exist_ok=True)
idx = np.arange(len(records))
# Build a fresh splitter per replicate, seeded with args.seed + k, so each subset is
# a distinct 80/20 split; reusing one splitter would return the identical first fold
# for every replicate. n_splits=5 makes the held-out fold ~20% of the grouped data.
sgkf = StratifiedGroupKFold(n_splits=5, shuffle=True, random_state=args.seed + k)
tr_idx, te_idx = next(sgkf.split(idx, y, groups))
train_records = [records[i] for i in tr_idx]
test_records = [records[i] for i in te_idx]
with open(subset_dir / "train.jsonl", "w") as ftr:
for r in train_records:
ftr.write(json.dumps(r, separators=(',', ':')) + "\n")
with open(subset_dir / "test.jsonl", "w") as fte:
for r in test_records:
fte.write(json.dumps(r, separators=(',', ':')) + "\n")
y_tr = y[tr_idx]; y_te = y[te_idx]
manifest = {
"n_total": int(len(records)),
"n_train": int(len(train_records)),
"n_test": int(len(test_records)),
"positives_total": int(y.sum()),
"positives_train": int(y_tr.sum()),
"positives_test": int(y_te.sum()),
"pct_pos_total": float(100.0 * y.sum() / len(records)),
"pct_pos_train": float(100.0 * y_tr.sum() / len(train_records)),
"pct_pos_test": float(100.0 * y_te.sum() / len(test_records)),
"groups_total": int(len(set(groups))),
"seed": int(args.seed + k),
"source_ndjson": str(ndjson_path.resolve()),
"supp2_contam_removed": int(len(contam_bases)),
}
(subset_dir / "manifest.json").write_text(json.dumps(manifest, indent=2))
print(f"[WRITE] {subset_dir} | train={len(train_records)} test={len(test_records)} | pos_tr={int(y_tr.sum())} pos_te={int(y_te.sum())}")
print("[DONE ] All subsets written.")
if __name__ == "__main__":
main()