QPromaQ committed on
Commit
f8a4853
·
verified ·
1 Parent(s): f1e8970

Upload 125 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. 80_20_split_fixed.py +210 -0
  3. FEATURES_ALL.ndjson +3 -0
  4. Mass_models.py +987 -0
  5. Supp1.csv +3 -0
  6. Supp2.xlsx +3 -0
  7. genomes-all_metadata_with_genetic_code_id_noNA.tsv +0 -0
  8. metrics_summary.csv +10 -0
  9. models_logs.txt +0 -0
  10. predict_dir.py +557 -0
  11. predictions_truth/all_models_predictions_long.csv +3 -0
  12. predictions_truth/model_ET_pu_normal_F1_64log_gc_tetra__pred.csv +0 -0
  13. predictions_truth/model_ET_pu_normal_FB_64log_gc_tetra__pred.csv +0 -0
  14. predictions_truth/model_ET_pu_normal_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  15. predictions_truth/model_ET_pu_weighted_F1_64log_gc_tetra__pred.csv +0 -0
  16. predictions_truth/model_ET_pu_weighted_FB_64log_gc_tetra__pred.csv +0 -0
  17. predictions_truth/model_ET_pu_weighted_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  18. predictions_truth/model_ET_supervised_normal_F1_64log_gc_tetra__pred.csv +0 -0
  19. predictions_truth/model_ET_supervised_normal_FB_64log_gc_tetra__pred.csv +0 -0
  20. predictions_truth/model_ET_supervised_normal_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  21. predictions_truth/model_ET_supervised_weighted_F1_64log_gc_tetra__pred.csv +0 -0
  22. predictions_truth/model_ET_supervised_weighted_FB_64log_gc_tetra__pred.csv +0 -0
  23. predictions_truth/model_ET_supervised_weighted_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  24. predictions_truth/model_RF_pu_normal_F1_64log_gc_tetra__pred.csv +0 -0
  25. predictions_truth/model_RF_pu_normal_FB_64log_gc_tetra__pred.csv +0 -0
  26. predictions_truth/model_RF_pu_normal_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  27. predictions_truth/model_RF_pu_weighted_F1_64log_gc_tetra__pred.csv +0 -0
  28. predictions_truth/model_RF_pu_weighted_FB_64log_gc_tetra__pred.csv +0 -0
  29. predictions_truth/model_RF_pu_weighted_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  30. predictions_truth/model_RF_supervised_normal_F1_64log_gc_tetra__pred.csv +0 -0
  31. predictions_truth/model_RF_supervised_normal_FB_64log_gc_tetra__pred.csv +0 -0
  32. predictions_truth/model_RF_supervised_normal_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  33. predictions_truth/model_RF_supervised_weighted_F1_64log_gc_tetra__pred.csv +0 -0
  34. predictions_truth/model_RF_supervised_weighted_FB_64log_gc_tetra__pred.csv +0 -0
  35. predictions_truth/model_RF_supervised_weighted_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  36. predictions_truth/model_XGB_pu_normal_F1_64log_gc_tetra__pred.csv +0 -0
  37. predictions_truth/model_XGB_pu_normal_FB_64log_gc_tetra__pred.csv +0 -0
  38. predictions_truth/model_XGB_pu_normal_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  39. predictions_truth/model_XGB_pu_weighted_F1_64log_gc_tetra__pred.csv +0 -0
  40. predictions_truth/model_XGB_pu_weighted_FB_64log_gc_tetra__pred.csv +0 -0
  41. predictions_truth/model_XGB_pu_weighted_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  42. predictions_truth/model_XGB_supervised_normal_F1_64log_gc_tetra__pred.csv +0 -0
  43. predictions_truth/model_XGB_supervised_normal_FB_64log_gc_tetra__pred.csv +0 -0
  44. predictions_truth/model_XGB_supervised_normal_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  45. predictions_truth/model_XGB_supervised_weighted_F1_64log_gc_tetra__pred.csv +0 -0
  46. predictions_truth/model_XGB_supervised_weighted_FB_64log_gc_tetra__pred.csv +0 -0
  47. predictions_truth/model_XGB_supervised_weighted_PR_AUC_64log_gc_tetra__pred.csv +0 -0
  48. predictions_truth/prediction_summary.csv +37 -0
  49. results_models/best_params_ET_pu_normal_F1_64log_gc_tetra.json +8 -0
  50. results_models/best_params_ET_pu_normal_FB_64log_gc_tetra.json +8 -0
.gitattributes CHANGED
@@ -57,3 +57,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ FEATURES_ALL.ndjson filter=lfs diff=lfs merge=lfs -text
61
+ predictions_truth/all_models_predictions_long.csv filter=lfs diff=lfs merge=lfs -text
62
+ Supp1.csv filter=lfs diff=lfs merge=lfs -text
63
+ Supp2.xlsx filter=lfs diff=lfs merge=lfs -text
80_20_split_fixed.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # 80_20_split_fixed.py — stratified 80/20 split with Bacteria filter & grouped by base accession (GCA/GCF).
4
+ # Input: --ndjson (NDJSON), --supp1 (optional Supp1.csv filter: domain B by assembly), --supp2 (Supp2.xlsx for labels)
5
+ # Output: subsetXX/train.jsonl, subsetXX/test.jsonl, subsetXX/manifest.json
6
+ #
7
+ # CHANGE: Remove contaminated assemblies listed in Supp2.xlsx where
8
+ # "Evidence of assembly contamination with alt gen code" == "yes"
9
+ # (based on the "assembly" column).
10
+
11
+ import argparse, json, re, sys
12
+ from pathlib import Path
13
+ import numpy as np
14
+ import pandas as pd
15
+ from sklearn.model_selection import StratifiedGroupKFold
16
+
17
def extract_acc_base(acc: str) -> str:
    """Return the versionless base accession (e.g. 'GCA_000123456').

    FIX: the original pattern ``^(G[CF]A_\\d+)`` matched 'GCA_'/'GFA_' but
    never 'GCF_' (RefSeq) accessions; ``^(GC[AF]_\\d+)`` covers both GCA_ and
    GCF_. Anything that does not look like a GCA/GCF accession falls back to
    stripping everything after the first '.' (the assembly version suffix).
    """
    m = re.match(r'^(GC[AF]_\d+)', str(acc))
    return m.group(1) if m else str(acc).split('.')[0]
20
+
21
def load_bacterial_bases_from_supp1(supp1_csv: str) -> set:
    """Return base accessions of rows marked domain 'B' (Bacteria) in Supp1.csv.

    Column names are resolved case-insensitively, accepting a few spellings
    for the domain and assembly columns; raises ValueError when neither
    resolves to a real column.
    """
    table = pd.read_csv(supp1_csv)
    lowered = {c.lower().strip(): c for c in table.columns}

    dom_col = (lowered.get('domain of life')
               or lowered.get('domain_of_life')
               or lowered.get('domain')
               or 'domain of life')
    asm_col = lowered.get('assembly') or lowered.get('assembly accession') or 'assembly'

    if dom_col not in table.columns or asm_col not in table.columns:
        raise ValueError(f"Supp1.csv must contain columns similar to 'domain of life' and 'assembly'. Found: {list(table.columns)}")

    is_bacteria = table[dom_col].astype(str).str.strip().str.upper().eq('B')
    assemblies = table.loc[is_bacteria, [asm_col]].dropna()[asm_col].astype(str)
    return {extract_acc_base(a) for a in assemblies}
32
+
33
def load_alt_bases_from_supp2_legacy(supp2_xlsx: str) -> set:
    """
    ORIGINAL behavior (kept): column index-based extraction.
    WARNING: This assumes the 4th column (usecols=[3]) contains assembly IDs
    for the ALT label — positional, not name-based.
    """
    raw = pd.read_excel(supp2_xlsx, header=None, usecols=[3]).iloc[:, 0]
    distinct_ids = raw.dropna().astype(str).unique()
    return {extract_acc_base(val) for val in distinct_ids}
47
+
48
def load_contam_bases_from_supp2(supp2_xlsx: str) -> set:
    """Return base accessions flagged as contaminated in Supp2.xlsx.

    A row counts as contaminated when the column
    'Evidence of assembly contamination with alt gen code' equals 'yes'
    (case/whitespace-insensitive). Accessions come from the 'assembly'
    column; both columns are located case-insensitively by name.
    """
    table = pd.read_excel(supp2_xlsx)

    by_lower = {c.lower().strip(): c for c in table.columns}
    assembly_col = by_lower.get('assembly')
    evidence_col = by_lower.get('evidence of assembly contamination with alt gen code')

    if assembly_col is None or evidence_col is None:
        raise ValueError(
            "Supp2.xlsx must contain columns 'assembly' and "
            "'Evidence of assembly contamination with alt gen code' to filter contaminated rows. "
            f"Found columns: {list(table.columns)}"
        )

    evidence_yes = table[evidence_col].astype(str).str.strip().str.lower().eq('yes')
    flagged = table.loc[evidence_yes, assembly_col].dropna().astype(str)
    return set(flagged.apply(extract_acc_base).unique())
85
+
86
def read_ndjson_records(path: str):
    """Yield parsed JSON objects from an NDJSON file, one per non-empty line.

    Malformed lines are skipped (best-effort parsing, as before), but the
    except clause is now narrowed to json.JSONDecodeError so unrelated
    errors (e.g. I/O failures mid-read) are no longer silently hidden.
    The file is opened with an explicit UTF-8 encoding.
    """
    with open(path, 'r', encoding='utf-8') as fh:
        for raw in fh:
            raw = raw.strip()
            if not raw:
                continue
            try:
                yield json.loads(raw)
            except json.JSONDecodeError:
                # deliberate best-effort skip of bad lines
                continue
96
+
97
def main():
    """CLI entry point: build replicate grouped-stratified 80/20 splits.

    Pipeline: optionally keep only Bacteria (Supp1), remove contaminated
    assemblies (Supp2 evidence == 'yes'), label positives from legacy Supp2
    col[3], then for each replicate take the first fold of a 5-fold
    StratifiedGroupKFold (groups = base accessions, so no assembly spans
    train and test) as the 20% test set.

    FIX: the splitter is now re-created per replicate with random_state =
    seed + k. Previously a single splitter (seeded once) was reused and
    `next(sgkf.split(...))` returned the identical first fold every
    iteration, so all --n_splits replicates were the same split even though
    each manifest recorded a distinct "seed": seed + k.
    """
    ap = argparse.ArgumentParser(description="Create grouped stratified 80/20 splits compatible with Mass_models.py")
    ap.add_argument('--ndjson', required=True, help='Input NDJSON file (one JSON per line)')
    ap.add_argument('--supp1', required=False, help='Optional Supp1.csv filter: keep only Bacteria (domain==B) by assembly')
    ap.add_argument('--supp2', required=True, help='Supp2.xlsx (used for legacy alt labels + contamination filter)')
    ap.add_argument('--outdir', required=True, help='Output directory root for subsets')
    ap.add_argument('--n_splits', type=int, default=1, help='Number of replicate 80/20 splits')
    ap.add_argument('--seed', type=int, default=42, help='Random seed')
    args = ap.parse_args()

    ndjson_path = Path(args.ndjson)
    if not ndjson_path.exists():
        sys.exit(f"[ERR ] NDJSON not found: {ndjson_path}")

    # Optional Bacteria-only allow-list derived from Supp1.
    bacteria_bases = None
    if args.supp1:
        print(f"[FILTER] Loading Supp1 (Bacteria-only by 'domain of life' & 'assembly')…")
        bacteria_bases = load_bacterial_bases_from_supp1(args.supp1)
        print(f"[FILTER] Allowed assembly bases: {len(bacteria_bases)}")

    # Original label behavior preserved (positional col[3] of Supp2).
    alt_bases = load_alt_bases_from_supp2_legacy(args.supp2)
    print(f"[LABEL] Alt bases from Supp2 (legacy col[3]): {len(alt_bases)}")

    # Contaminated assemblies are removed from the dataset entirely.
    contam_bases = load_contam_bases_from_supp2(args.supp2)
    print(f"[FILTER] Contaminated bases from Supp2 where evidence == 'yes': {len(contam_bases)}")

    # If something is both "alt" (legacy) and contaminated, it MUST be removed.
    overlap = len(alt_bases & contam_bases)
    if overlap:
        print(f"[WARN ] Overlap alt vs contaminated: {overlap} bases (will be REMOVED from dataset)")

    print(f"[LOAD ] Reading NDJSON: {ndjson_path}")
    records, groups, y = [], [], []
    dropped_contam = 0
    dropped_supp1 = 0

    for obj in read_ndjson_records(str(ndjson_path)):
        acc = obj.get("acc")
        if not acc:
            continue
        base = extract_acc_base(acc)

        # Optional bacteria-only filter
        if bacteria_bases is not None and base not in bacteria_bases:
            dropped_supp1 += 1
            continue

        # Contamination filter (REMOVE)
        if base in contam_bases:
            dropped_contam += 1
            continue

        records.append(obj)
        groups.append(base)
        y.append(1 if base in alt_bases else 0)

    if not records:
        sys.exit("[ERR ] No records after filtering. Check Supp1 filter / Supp2 contamination filter / NDJSON.")

    y = np.array(y, dtype=int)
    pos = int(y.sum())
    print(
        f"[DATA ] kept={len(records)} | positives={pos} ({100.0*pos/len(records):.2f}%) | "
        f"groups={len(set(groups))} | dropped_contam={dropped_contam} | dropped_supp1={dropped_supp1}"
    )

    outroot = Path(args.outdir)
    outroot.mkdir(parents=True, exist_ok=True)

    idx = np.arange(len(records))  # loop-invariant, hoisted out of the replicate loop

    for k in range(args.n_splits):
        subset_dir = outroot / f"subset{k+1:02d}"
        subset_dir.mkdir(parents=True, exist_ok=True)

        # One splitter per replicate, seeded with seed + k (matches manifest).
        sgkf = StratifiedGroupKFold(n_splits=5, shuffle=True, random_state=args.seed + k)
        tr_idx, te_idx = next(sgkf.split(idx, y, groups))
        train_records = [records[i] for i in tr_idx]
        test_records = [records[i] for i in te_idx]

        with open(subset_dir / "train.jsonl", "w") as ftr:
            for r in train_records:
                ftr.write(json.dumps(r, separators=(',', ':')) + "\n")
        with open(subset_dir / "test.jsonl", "w") as fte:
            for r in test_records:
                fte.write(json.dumps(r, separators=(',', ':')) + "\n")

        y_tr = y[tr_idx]; y_te = y[te_idx]
        manifest = {
            "n_total": int(len(records)),
            "n_train": int(len(train_records)),
            "n_test": int(len(test_records)),
            "positives_total": int(y.sum()),
            "positives_train": int(y_tr.sum()),
            "positives_test": int(y_te.sum()),
            "pct_pos_total": float(100.0 * y.sum() / len(records)),
            "pct_pos_train": float(100.0 * y_tr.sum() / len(train_records)),
            "pct_pos_test": float(100.0 * y_te.sum() / len(test_records)),
            "groups_total": int(len(set(groups))),
            "seed": int(args.seed + k),
            "source_ndjson": str(ndjson_path.resolve()),
            "supp2_contam_removed": int(len(contam_bases)),
        }
        (subset_dir / "manifest.json").write_text(json.dumps(manifest, indent=2))

        print(f"[WRITE] {subset_dir} | train={len(train_records)} test={len(test_records)} | pos_tr={int(y_tr.sum())} pos_te={int(y_te.sum())}")

    print("[DONE ] All subsets written.")

if __name__ == "__main__":
    main()
210
+
FEATURES_ALL.ndjson ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2316d3eab9d4900a9902eafb7c6112e3bb412c32d7a80816b0ce7a8e4634313a
3
+ size 1751164568
Mass_models.py ADDED
@@ -0,0 +1,987 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Mass_models.py Grouped-CV training with calibration & thresholding on tRNA profiles.
4
+ # Supports:
5
+ # - train_mode: supervised / pu / both
6
+ # - weight_mode: normal / weighted / both
7
+ # - models: RF / XGB / ET (ExtraTrees)
8
+ # - metrics: F1 / PR_AUC / FB
9
+ #
10
+ # Input: ndjson train/test with 64-log + GC + tetra_norm (full row from ndjson).
11
+ # Labels: positives are assemblies present in Supp2 (NOT treated as certain ALT, only candidates).
12
+ # Weighted mode: assigns confidence weights ONLY to Supp2 genomes using Supp1 expected vs inferred,
13
+ # with gentle GC gating.
14
+ #
15
+ # NEW FIXES:
16
+ # - Proper sample_weight routing into sklearn Pipeline: clf__sample_weight
17
+ # - PU classifier marked as classifier for sklearn calibration (_estimator_type + classifier tags)
18
+ # - Optional calibration auto-disabled for PU by default (still possible with --force_calibration_pu)
19
+ # - Skip already-trained model variants based on existing artifacts in results_models/
20
+ # - Crash-safe: each run saved immediately, and metrics_summary.csv updated after each run
21
+ #
22
+ # NOTE:
23
+ # - For PU, calibration can be extremely expensive (cv * n_bags refits). Default: off for PU unless forced.
24
+
25
+ import argparse, json, os, re, time, glob
26
+ from pathlib import Path
27
+ from collections import Counter
28
+
29
+ import numpy as np
30
+ import pandas as pd
31
+
32
+ from sklearn.model_selection import StratifiedGroupKFold
33
+ from sklearn.metrics import (
34
+ f1_score, confusion_matrix, accuracy_score, precision_score, recall_score,
35
+ fbeta_score, roc_auc_score, average_precision_score, precision_recall_curve
36
+ )
37
+ from sklearn.preprocessing import StandardScaler
38
+ from sklearn.pipeline import Pipeline
39
+ from sklearn.compose import ColumnTransformer
40
+ from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
41
+ from sklearn.impute import SimpleImputer
42
+ from sklearn.calibration import CalibratedClassifierCV
43
+ from sklearn.base import BaseEstimator, ClassifierMixin, clone
44
+
45
+ import joblib
46
+
47
+ try:
48
+ from xgboost import XGBClassifier
49
+ _HAS_XGB = True
50
+ except Exception:
51
+ _HAS_XGB = False
52
+
53
+ import optuna
54
+ from optuna.pruners import MedianPruner
55
+
56
+ ANTICODONS64 = [a+b+c for a in "ACGT" for b in "ACGT" for c in "ACGT"]
57
+ TETRA_KEYS = [a+b+c+d for a in "ACGT" for b in "ACGT" for c in "ACGT" for d in "ACGT"]
58
+
59
+ AA_SET = set(list("ACDEFGHIKLMNPQRSTVWY"))
60
+ STAR = "*"
61
+ QMARK = "?"
62
+
63
+ # =========================
64
+ # Helpers: crash-safe fit with sample_weight
65
+ # =========================
66
def fit_pipeline(model, X, y, sample_weight=None):
    """Fit helper that routes sample_weight correctly for any estimator.

    sklearn Pipelines do not accept a bare ``sample_weight`` kwarg — it must
    be addressed to a named step as ``<step>__sample_weight``.

    GENERALIZATION: the step name is taken from the pipeline's actual final
    step (``model.steps[-1][0]``) instead of being hard-coded to 'clf', so
    this also works for pipelines whose estimator step has another name.
    Behavior is unchanged for this file's pipelines (final step is 'clf').

    Plain estimators get ``sample_weight`` directly; estimators that do not
    support it at all (TypeError) are fitted unweighted as a fallback.
    """
    if sample_weight is None:
        return model.fit(X, y)

    sw = np.asarray(sample_weight)

    if isinstance(model, Pipeline):
        final_step = model.steps[-1][0]
        return model.fit(X, y, **{f"{final_step}__sample_weight": sw})

    # many estimators accept sample_weight directly
    try:
        return model.fit(X, y, sample_weight=sw)
    except TypeError:
        return model.fit(X, y)
85
+
86
+ # =========================
87
+ # PU Bagging Meta-Estimator
88
+ # =========================
89
class PUBaggingClassifier(BaseEstimator, ClassifierMixin):
    """
    PU-learning via bagging:
      - y==1: positives (P) [assemblies from Supp2]
      - y==0: unlabeled (U) [everything else]
    For each bag, a clone of base_estimator is trained on all P plus a random
    subset of U treated as pseudo-negatives; predict_proba averages over bags.

    sklearn-calibration compatibility:
      - _estimator_type='classifier' and _more_tags make CalibratedClassifierCV
        accept this wrapper as a binary classifier with predict_proba.
    """
    # Class-level marker read by sklearn's is_classifier(); do not remove.
    _estimator_type = "classifier"

    def __init__(self, base_estimator, n_bags=15, u_ratio=3.0, random_state=42):
        # u_ratio: unlabeled-to-positive sampling ratio per bag.
        self.base_estimator = base_estimator
        self.n_bags = int(n_bags)
        self.u_ratio = float(u_ratio)
        self.random_state = int(random_state)
        self.models_ = None
        self.classes_ = np.array([0, 1], dtype=int)

    def _more_tags(self):
        # Helps sklearn treat this as a binary classifier with predict_proba.
        return {"requires_y": True, "binary_only": True}

    def fit(self, X, y, sample_weight=None):
        """Fit n_bags clones; each sees all positives + a sampled U subset."""
        y = np.asarray(y).astype(int)
        self.classes_ = np.array([0, 1], dtype=int)

        pos_idx = np.where(y == 1)[0]
        unl_idx = np.where(y == 0)[0]

        if pos_idx.size == 0:
            raise ValueError("PU training requires at least one positive sample (y==1).")

        rng = np.random.RandomState(self.random_state)
        self.models_ = []

        # Degenerate case: no unlabeled samples — just fit a single clone.
        if unl_idx.size == 0:
            m = clone(self.base_estimator)
            fit_pipeline(m, X, y, sample_weight)
            self.models_.append(m)
            return self

        # Per-bag unlabeled sample size, capped at the available pool.
        k_u = int(min(unl_idx.size, max(1, round(self.u_ratio * pos_idx.size))))

        for _ in range(self.n_bags):
            # replace=False is always reachable given the cap above; the
            # replace=True branch is defensive.
            if k_u <= unl_idx.size:
                u_b = rng.choice(unl_idx, size=k_u, replace=False)
            else:
                u_b = rng.choice(unl_idx, size=k_u, replace=True)

            idx_b = np.concatenate([pos_idx, u_b])

            # Support both pandas DataFrames (iloc) and plain arrays.
            X_b = X.iloc[idx_b] if hasattr(X, "iloc") else X[idx_b]
            y_b = y[idx_b]

            # Slice per-sample weights consistently with the bag indices.
            sw_b = None
            if sample_weight is not None:
                sw_b = np.asarray(sample_weight)[idx_b]

            m = clone(self.base_estimator)
            fit_pipeline(m, X_b, y_b, sw_b)
            self.models_.append(m)

        return self

    def predict_proba(self, X):
        """Average class probabilities over all fitted bags."""
        if not self.models_:
            raise RuntimeError("PUBaggingClassifier not fitted")
        probs = [m.predict_proba(X) for m in self.models_]
        return np.mean(np.stack(probs, axis=0), axis=0)

    def predict(self, X):
        # Hard labels at the default 0.5 probability threshold.
        return (self.predict_proba(X)[:, 1] >= 0.5).astype(int)
167
+
168
+ # =========================
169
+ # Helpers: IDs, NDJSON features
170
+ # =========================
171
def extract_acc_base(acc: str) -> str:
    """Return the versionless base accession (e.g. 'GCF_000123456').

    FIX (kept consistent with 80_20_split_fixed.py): the original pattern
    ``^(G[CF]A_\\d+)`` matched 'GCA_' but never 'GCF_' accessions; corrected
    to ``^(GC[AF]_\\d+)``. Non-matching inputs fall back to stripping the
    '.version' suffix.
    """
    m = re.match(r'^(GC[AF]_\d+)', str(acc))
    return m.group(1) if m else str(acc).split('.')[0]
174
+
175
def load_alt_list_from_supp2(supp2_xlsx: str) -> set:
    """Read the 4th column (positional) of Supp2.xlsx and return its unique
    base accessions — the legacy ALT-candidate list."""
    raw = pd.read_excel(supp2_xlsx, header=None, usecols=[3]).iloc[:, 0]
    distinct = raw.dropna().astype(str).unique()
    return {extract_acc_base(v) for v in distinct}
179
+
180
def label_from_supp2(meta: pd.DataFrame, supp2_xlsx: str) -> pd.Series:
    """Return 0/1 labels: 1 when meta['acc_base'] appears in Supp2's list."""
    alt = load_alt_list_from_supp2(supp2_xlsx)
    return meta["acc_base"].isin(alt).astype(int)
183
+
184
def parse_anticodon_from_label(label_tail: str) -> str:
    """Extract the anticodon from a label tail, uppercased with U -> T.

    If the tail contains '_', everything after the first '_' is the
    anticodon; otherwise the whole tail is used.
    """
    _, sep, after = label_tail.partition("_")
    candidate = after if sep else label_tail
    return candidate.upper().replace("U", "T")
188
+
189
def build_vec64_from_record(rec: dict) -> np.ndarray:
    """Aggregate per-accession tRNA counts into a fixed 64-anticodon vector.

    Each entry's label is reduced to the part after '_genome_' and parsed to
    an anticodon; only clean 3-letter ACGT triplets contribute. The output
    order follows ANTICODONS64.
    """
    totals = Counter()
    for entry in rec.get("trna_type_per_acc", []):
        n = int(entry.get("count", 0) or 0)
        tail = str(entry.get("label", "")).split("_genome_")[-1]
        anticodon = parse_anticodon_from_label(tail)
        if len(anticodon) == 3 and set(anticodon) <= {"A", "C", "G", "T"}:
            totals[anticodon] += n
    return np.array([totals.get(ac, 0) for ac in ANTICODONS64], dtype=float)
198
+
199
def build_feature_matrices(ndjson_path: str, include_gc_len_tetra: bool = True):
    """Parse an NDJSON file into (X, meta) DataFrames.

    X columns: 64 log1p anticodon counts ('ac_*'), plus — when
    include_gc_len_tetra — 'gc_percent', 'genome_length' and 256 'tetra_*'
    normalized tetranucleotide features. meta has 'acc' and 'acc_base'.
    Records without an 'acc' key and unparseable lines are skipped silently
    (best-effort load).
    """
    rows = []
    with open(ndjson_path, "r") as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            try:
                rows.append(json.loads(line))
            except Exception:
                # malformed NDJSON line: skip (best-effort)
                continue

    feat_rows, meta_rows = [], []
    for rec in rows:
        acc = rec.get("acc")
        if not acc:
            continue
        acc_base = extract_acc_base(acc)
        gc = rec.get("gc", {})
        tetra = rec.get("tetra_norm", {})

        # 64-dim anticodon counts, log-compressed for the models.
        v64 = build_vec64_from_record(rec)
        vals = np.log1p(v64)
        cols = [f"ac_{k}" for k in ANTICODONS64]
        trna_dict = {c: float(v) for c, v in zip(cols, vals)}

        extra = {}
        if include_gc_len_tetra:
            # Missing gc/tetra values become NaN and are imputed downstream.
            extra["gc_percent"] = float(gc.get("percent", np.nan))
            extra["genome_length"] = int(gc.get("length", 0) or 0)
            for k in TETRA_KEYS:
                extra[f"tetra_{k}"] = float(tetra.get(k, np.nan))

        feat_rows.append({**trna_dict, **extra})
        meta_rows.append({"acc": acc, "acc_base": acc_base})

    X = pd.DataFrame(feat_rows)
    meta = pd.DataFrame(meta_rows)
    return X, meta
238
+
239
+ def _count_lines(path: Path) -> int:
240
+ try:
241
+ return sum(1 for _ in open(path, 'r'))
242
+ except Exception:
243
+ return -1
244
+
245
def _load_train_test(path_str: str, supp2_path: str):
    """Load a split directory into feature/label/group structures.

    Expects path_str to be a directory containing train.jsonl and test.jsonl
    (as produced by the 80/20 split script). Returns
    (Xtr, ytr, gtr, mtr, Xte, yte, gte, mte) where groups are base
    accessions and labels mark Supp2 membership (P=1, U=0).

    Raises FileNotFoundError when the directory or either jsonl is missing.
    """
    p = Path(path_str)
    if not p.is_dir():
        raise FileNotFoundError(f"--ndjson must be a directory containing train.jsonl and test.jsonl: {p}")
    tr = p / "train.jsonl"
    te = p / "test.jsonl"
    if not tr.exists() or not te.exists():
        raise FileNotFoundError(f"{p} must contain train.jsonl and test.jsonl")

    # Line counts are approximate (blank/bad lines are skipped by the parser).
    print(f"[LOAD] train: {tr} (lines≈{_count_lines(tr)})")
    Xtr, mtr = build_feature_matrices(str(tr), True)
    print(f"[LOAD] test: {te} (lines≈{_count_lines(te)})")
    Xte, mte = build_feature_matrices(str(te), True)

    ytr = label_from_supp2(mtr, supp2_path)  # P=1 (Supp2), U=0 otherwise
    yte = label_from_supp2(mte, supp2_path)

    # Group by base accession so CV never splits one assembly across folds.
    gtr = mtr["acc_base"].tolist()
    gte = mte["acc_base"].tolist()
    return Xtr, ytr, gtr, mtr, Xte, yte, gte, mte
265
+
266
def make_preprocess_pipeline(X: pd.DataFrame) -> ColumnTransformer:
    """Median-impute then standardize every column of X (all numeric)."""
    numeric_steps = Pipeline([
        ("imputer", SimpleImputer(strategy="median")),
        ("scaler", StandardScaler(with_mean=True, with_std=True)),
    ])
    return ColumnTransformer(
        [("num", numeric_steps, list(X.columns))],
        remainder="drop",
        verbose_feature_names_out=False,
    )
277
+
278
+ # =========================
279
+ # Supp1 parsing -> weights for Supp2 only
280
+ # =========================
281
+ def _norm_code_str(x) -> str:
282
+ if pd.isna(x):
283
+ return ""
284
+ return str(x).replace(",", "").replace(" ", "").strip().upper()
285
+
286
def _analyze_expected_inferred(expected: str, inferred: str):
    """
    Compare two 64-character genetic-code strings position by position.

    Return counts:
      aa_aa:   AA<->AA different
      aa_q:    AA<->?
      stop_aa: *<->AA
      stop_q:  *<->?
      total_q: positions with '?' in either string (including matching '?')
      valid:   both normalized strings have length 64
    """
    e = _norm_code_str(expected)
    i = _norm_code_str(inferred)
    # Anything that doesn't normalize to exactly 64 symbols is unusable.
    if len(e) != 64 or len(i) != 64:
        return dict(valid=False, aa_aa=0, aa_q=0, stop_aa=0, stop_q=0, total_q=0)

    aa_aa = aa_q = stop_aa = stop_q = total_q = 0

    for a, b in zip(e, i):
        # total_q is counted BEFORE the equality check, so a position where
        # both strings are '?' still increments it.
        if a == QMARK or b == QMARK:
            total_q += 1

        if a == b:
            continue

        a_is_aa = a in AA_SET
        b_is_aa = b in AA_SET
        a_is_q = a == QMARK
        b_is_q = b == QMARK
        a_is_s = a == STAR
        b_is_s = b == STAR

        if a_is_aa and b_is_aa:
            aa_aa += 1
        elif (a_is_aa and b_is_q) or (a_is_q and b_is_aa):
            aa_q += 1
        elif (a_is_s and b_is_aa) or (a_is_aa and b_is_s):
            stop_aa += 1
        elif (a_is_s and b_is_q) or (a_is_q and b_is_s):
            stop_q += 1
        else:
            # Mismatches involving any other symbol are deliberately ignored.
            pass

    return dict(valid=True, aa_aa=aa_aa, aa_q=aa_q, stop_aa=stop_aa, stop_q=stop_q, total_q=total_q)
329
+
330
def load_supp1_code_map(supp1_csv: str):
    """
    Returns dict base_assembly -> (expected_str, inferred_str)
    Uses columns:
      - assembly
      - expected genetic code
      - Codetta inferred genetic code

    Column matching is case-insensitive: exact (lowercased) name first, then
    first column whose lowercased name CONTAINS the key — so the result can
    depend on column order when several names contain the key. Later rows
    with the same base accession overwrite earlier ones.
    """
    df = pd.read_csv(supp1_csv)
    cols = {c.lower(): c for c in df.columns}

    def pick(key_sub):
        # Exact match wins; otherwise the first substring match in dict order.
        for k in cols:
            if k == key_sub:
                return cols[k]
        for k in cols:
            if key_sub in k:
                return cols[k]
        return None

    asm_col = pick("assembly")
    exp_col = pick("expected genetic code")
    inf_col = pick("codetta inferred genetic code")

    if not (asm_col and exp_col and inf_col):
        raise ValueError(f"[ERROR] Supp1.csv missing required columns. Found: {list(df.columns)}")

    out = {}
    for _, row in df.iterrows():
        asm = str(row[asm_col])
        base = extract_acc_base(asm)
        out[base] = (row[exp_col], row[inf_col])
    return out
363
+
364
def gentle_gc_penalty(gc_val, gc_median, gc_iqr):
    """Very gentle GC-based weight penalty, clamped to [0.90, 1.00].

    Uses a robust z-score z = |gc - median| / IQR and returns exp(-0.08*z),
    floored at 0.90. When the GC value or the robust statistics are unusable
    (non-finite, or IQR ~ 0) no penalty is applied (returns 1.0).
    """
    stats_unusable = (not np.isfinite(gc_val)
                      or not np.isfinite(gc_median)
                      or gc_iqr <= 1e-9)
    if stats_unusable:
        return 1.0

    robust_z = abs(float(gc_val) - float(gc_median)) / float(gc_iqr)
    penalty = float(np.exp(-0.08 * robust_z))
    if penalty < 0.90:
        penalty = 0.90
    elif penalty > 1.0:
        penalty = 1.0
    return float(penalty)
375
+
376
def weight_for_supp2_genome(base, gc_percent, supp1_map, gc_median, gc_iqr):
    """
    Confidence weight for a Supp2 (candidate-ALT) genome, in (0, 1].

    Policy, from Supp1 expected-vs-inferred code comparison:
      - STOP<->AA mismatch => very confident ALT signal (≈1.0)
      - AA<->AA (and STOP<->?, treated the same) => confident (≈0.98)
      - mixed with AA<->? => downweighted geometrically by total '?' count
      - only AA<->? => least confident (0.35–0.75 band)
      - no mismatches at all => moderate fallback (≈0.80)
    A gentle GC penalty (gentle_gc_penalty) multiplies every branch, and
    each branch clamps its result into its own band.
    If Supp1 has no (valid) entry for this base: return 0.85 — still a
    candidate, but less trusted.
    """
    if base not in supp1_map:
        return 0.85

    exp, inf = supp1_map[base]
    a = _analyze_expected_inferred(exp, inf)
    if not a["valid"]:
        return 0.85

    aa_aa = a["aa_aa"]
    aa_q = a["aa_q"]
    stop_aa = a["stop_aa"]
    stop_q = a["stop_q"]
    total_q = a["total_q"]

    gc_pen = gentle_gc_penalty(gc_percent, gc_median, gc_iqr)

    # Strongest evidence: a stop codon reassigned to an amino acid.
    if stop_aa > 0:
        w = 1.00 * gc_pen
        return float(max(0.90, min(1.00, w)))

    # treat STOP<->? like AA<->AA
    aa_like = aa_aa + stop_q

    if aa_like > 0 and aa_q == 0:
        w = 0.98 * gc_pen
        return float(max(0.90, min(1.00, w)))

    # Mixed evidence: confident mismatches plus ambiguous ones — decay with
    # the number of '?' positions.
    if aa_like > 0 and aa_q > 0:
        w = 0.95 * (0.97 ** max(total_q, 0)) * gc_pen
        return float(max(0.75, min(0.98, w)))

    # Only ambiguous mismatches: weakest evidence, steeper decay.
    if aa_like == 0 and aa_q > 0:
        w = 0.70 * (0.95 ** max(total_q, 0)) * gc_pen
        return float(max(0.35, min(0.75, w)))

    # No mismatch at all between expected and inferred: moderate fallback.
    w = 0.80 * gc_pen
    return float(max(0.50, min(0.90, w)))
422
+
423
def build_sample_weights_for_dataset(meta_df, X_df, y_series, supp2_set, supp1_map):
    """
    Returns np.array weights (len == n_samples).
    Only genomes in Supp2 get graded weights (weight_for_supp2_genome);
    all others get 1.0.

    NOTE(review): y_series is currently unused — kept for interface
    stability with callers.
    """
    # GC column may be absent when features were built without gc/len/tetra.
    gc_vals = X_df["gc_percent"].astype(float).values if "gc_percent" in X_df.columns else np.full(len(X_df), np.nan)

    # Robust location/scale of GC for the gentle penalty; with <10 finite
    # values the median is NaN, which effectively disables the penalty.
    gc_clean = gc_vals[np.isfinite(gc_vals)]
    if gc_clean.size >= 10:
        gc_median = float(np.median(gc_clean))
        q1 = float(np.quantile(gc_clean, 0.25))
        q3 = float(np.quantile(gc_clean, 0.75))
        gc_iqr = float(max(1e-6, q3 - q1))
    else:
        gc_median, gc_iqr = float("nan"), 1.0

    weights = np.ones(len(X_df), dtype=float)
    bases = meta_df["acc_base"].astype(str).tolist()

    for i, base in enumerate(bases):
        if base in supp2_set:
            gc = gc_vals[i]
            weights[i] = weight_for_supp2_genome(base, gc, supp1_map, gc_median, gc_iqr)
        else:
            weights[i] = 1.0
    return weights
449
+
450
+ # =========================
451
+ # Metrics utilities
452
+ # =========================
453
def metrics_from_pred(y_true, y_pred, y_proba=None):
    """Compute confusion counts plus threshold and ranking metrics.

    Returns a flat dict with tn/fp/fn/tp, n, positives, accuracy, precision,
    recall, specificity, f1, and roc_auc/pr_auc. The AUC fields are NaN when
    y_proba is None, all-NaN, or the score computation fails.
    """
    yt = np.asarray(y_true)
    yp = np.asarray(y_pred)

    cm = confusion_matrix(yt, yp, labels=[0, 1])
    tn, fp = int(cm[0, 0]), int(cm[0, 1])
    fn, tp = int(cm[1, 0]), int(cm[1, 1])

    row = {
        "tn": tn, "fp": fp, "fn": fn, "tp": tp,
        "n": int(len(yt)),
        "positives": int(yt.sum()),
        "accuracy": float(accuracy_score(yt, yp)),
        "precision": float(precision_score(yt, yp, pos_label=1, zero_division=0)),
        "recall": float(recall_score(yt, yp, pos_label=1, zero_division=0)),
        "specificity": float(tn / (tn + fp)) if (tn + fp) > 0 else 0.0,
        "f1": float(f1_score(yt, yp, pos_label=1, zero_division=0)),
    }

    have_scores = y_proba is not None and not np.isnan(y_proba).all()
    if have_scores:
        try:
            row["roc_auc"] = float(roc_auc_score(yt, y_proba))
        except Exception:
            row["roc_auc"] = float("nan")
        try:
            row["pr_auc"] = float(average_precision_score(yt, y_proba))
        except Exception:
            row["pr_auc"] = float("nan")
    else:
        row["roc_auc"] = float("nan")
        row["pr_auc"] = float("nan")
    return row
472
+
473
def best_threshold_f1(y_true, y_score):
    """Pick the decision threshold that maximizes F1 along the PR curve."""
    prec, rec, thr = precision_recall_curve(y_true, y_score)
    f1_vals = (2 * prec * rec) / np.maximum(prec + rec, 1e-12)
    best = int(np.nanargmax(f1_vals))
    # precision_recall_curve returns one more (p, r) point than thresholds;
    # if the argmax is that trailing point, fall back to the last threshold.
    if best == len(thr):
        return float(thr[max(0, best - 1)])
    return float(thr[best])
478
+
479
def best_threshold_fbeta(y_true, y_score, beta=2.0):
    """Pick the decision threshold that maximizes F-beta along the PR curve.

    Falls back to the median score when no usable threshold remains.
    """
    prec, rec, thr = precision_recall_curve(y_true, y_score)
    if thr.size == 0:
        return float(np.median(y_score))

    # Drop the synthetic first PR point, which has no associated threshold.
    prec = prec[1:].astype(float)
    rec = rec[1:].astype(float)
    thr = thr.astype(float)

    keep = np.isfinite(prec) & np.isfinite(rec) & np.isfinite(thr)
    prec, rec, thr = prec[keep], rec[keep], thr[keep]
    if thr.size == 0:
        return float(np.median(y_score))

    beta_sq = beta**2
    fbeta_vals = (1.0 + beta_sq) * prec * rec / np.maximum(beta_sq * prec + rec, 1e-12)
    return float(thr[int(np.nanargmax(fbeta_vals))])
493
+
494
+ # =========================
495
+ # Optuna objective
496
+ # =========================
497
def make_objective(model_type: str,
                   train_mode: str,
                   X: pd.DataFrame, y: pd.Series, groups,
                   metric_obj: str,
                   n_splits: int, seed: int, threads: int,
                   pu_bags: int, pu_u_ratio: float,
                   fb_beta: float,
                   sample_weight: np.ndarray | None):
    """Build an Optuna objective for grouped-CV tuning of one model variant.

    Returns a pair ``(objective, pre)``: the per-trial callable and the
    preprocessing pipeline prototype (reused later by ``fit_best_model``).

    model_type: "RF" or "ET"; any other value selects XGBoost (which must be
        installed, otherwise a RuntimeError is raised inside the trial).
    train_mode: "pu" wraps the pipeline in PUBaggingClassifier; anything
        else trains the plain supervised pipeline.
    metric_obj: "F1" (mean of per-fold F1 at the per-fold best threshold),
        "PR_AUC" (average precision over pooled out-of-fold predictions),
        or "FB" (F-beta at the pooled best threshold).
    sample_weight: optional per-row weights, sliced per fold and forwarded
        to fit_pipeline; None disables weighting.
    """
    # Grouped + stratified folds: samples sharing a group never cross folds.
    sgkf = StratifiedGroupKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    pre = make_preprocess_pipeline(X)

    def build_base_estimator(trial):
        # Hyperparameter search space per model family.
        if model_type == "RF":
            params = dict(
                n_estimators = trial.suggest_int("n_estimators", 400, 1400),
                max_depth = trial.suggest_int("max_depth", 6, 16),
                min_samples_split = trial.suggest_int("min_samples_split", 2, 20),
                min_samples_leaf = trial.suggest_int("min_samples_leaf", 2, 12),
                max_features = trial.suggest_categorical("max_features", ["sqrt"]),
                class_weight = trial.suggest_categorical("class_weight", [None, "balanced", "balanced_subsample"]),
                n_jobs = threads,
                random_state = seed,
            )
            return RandomForestClassifier(**params)

        if model_type == "ET":
            params = dict(
                n_estimators = trial.suggest_int("n_estimators", 600, 2000),
                max_depth = trial.suggest_int("max_depth", 6, 18),
                min_samples_split = trial.suggest_int("min_samples_split", 2, 20),
                min_samples_leaf = trial.suggest_int("min_samples_leaf", 2, 12),
                max_features = trial.suggest_categorical("max_features", ["sqrt"]),
                class_weight = trial.suggest_categorical("class_weight", [None, "balanced"]),
                n_jobs = threads,
                random_state = seed,
            )
            return ExtraTreesClassifier(**params)

        # Fallthrough: XGBoost. Fail fast if the optional dependency is absent.
        if not _HAS_XGB:
            raise RuntimeError("xgboost not installed")
        params = dict(
            n_estimators = trial.suggest_int("n_estimators", 400, 2000),
            max_depth = trial.suggest_int("max_depth", 3, 9),
            learning_rate= trial.suggest_float("learning_rate", 1e-3, 0.15, log=True),
            subsample = trial.suggest_float("subsample", 0.6, 1.0),
            colsample_bytree = trial.suggest_float("colsample_bytree", 0.5, 1.0),
            reg_alpha = trial.suggest_float("reg_alpha", 1e-4, 10.0, log=True),
            reg_lambda = trial.suggest_float("reg_lambda", 1e-3, 10.0, log=True),
            gamma = trial.suggest_float("gamma", 0.0, 5.0),
            min_child_weight = trial.suggest_float("min_child_weight", 1e-3, 10.0, log=True),
            n_jobs = threads,
            random_state = seed,
            tree_method = trial.suggest_categorical("tree_method", ["hist", "approx"]),
            objective = "binary:logistic",
            eval_metric = "logloss",
        )
        return XGBClassifier(**params)

    def objective(trial):
        clf = build_base_estimator(trial)
        base_pipe = Pipeline([("pre", pre), ("clf", clf)])

        if train_mode == "pu":
            model = PUBaggingClassifier(
                base_estimator=base_pipe,
                n_bags=pu_bags,
                u_ratio=pu_u_ratio,
                random_state=seed
            )
        else:
            model = base_pipe

        # Out-of-fold truths/probabilities are pooled for PR_AUC / FB;
        # F1 is instead averaged over the per-fold scores collected below.
        y_true_all = []
        proba_all = []
        f_scores = []

        for tr_idx, va_idx in sgkf.split(X, y, groups):
            Xtr, Xva = X.iloc[tr_idx], X.iloc[va_idx]
            ytr, yva = y.iloc[tr_idx], y.iloc[va_idx]

            sw_tr = None
            if sample_weight is not None:
                sw_tr = np.asarray(sample_weight)[tr_idx]

            # Fresh clone per fold so no fitted state leaks between folds.
            model_fold = clone(model)
            fit_pipeline(model_fold, Xtr, ytr, sw_tr)

            proba = model_fold.predict_proba(Xva)[:, 1]
            y_true_all.append(yva.values)
            proba_all.append(proba)

            if metric_obj == "F1":
                # Threshold is re-tuned on each validation fold.
                thr = best_threshold_f1(yva.values, proba)
                yhat = (proba >= thr).astype(int)
                f_scores.append(f1_score(yva.values, yhat, zero_division=0))

        y_true_all = np.concatenate(y_true_all)
        proba_all = np.concatenate(proba_all)

        if metric_obj == "F1":
            return float(np.mean(f_scores))
        if metric_obj == "PR_AUC":
            return float(average_precision_score(y_true_all, proba_all))
        if metric_obj == "FB":
            thr = best_threshold_fbeta(y_true_all, proba_all, beta=fb_beta)
            yhat = (proba_all >= thr).astype(int)
            return float(fbeta_score(y_true_all, yhat, beta=fb_beta, zero_division=0))

        raise ValueError("metric_obj must be one of: F1, PR_AUC, FB")

    return objective, pre
608
+
609
+ # =========================
610
+ # Fit best model + calibration + threshold + test metrics
611
+ # =========================
612
def fit_best_model(model_type: str,
                   train_mode: str,
                   weight_mode: str,
                   metric_obj: str,
                   X: pd.DataFrame, y: pd.Series, groups,
                   seed: int, timeout: int, n_trials: int, outdir: Path, tag: str,
                   X_test: pd.DataFrame = None, y_test: pd.Series = None,
                   threads: int = 1,
                   pu_bags: int = 15, pu_u_ratio: float = 3.0,
                   fb_beta: float = 2.0,
                   calibrate: bool = True,
                   sample_weight: np.ndarray | None = None,
                   cv_folds: int = 5):
    """Run the Optuna search, refit the best model, calibrate, pick a
    threshold, and compute train/test metrics.

    Returns ``(final_model, best_params, metrics, study)`` where
    ``final_model`` is the (optionally isotonic-calibrated) fitted model,
    ``best_params`` the winning hyperparameters, ``metrics`` a flat dict
    with run config plus ``train_*``/``test_*`` metric keys, and ``study``
    the Optuna study object.

    Note: ``outdir`` is accepted for interface symmetry but not used here;
    the caller is responsible for persisting artifacts.
    """

    t0 = time.time()

    def _make_eta_cb(t0, timeout, n_trials):
        # Progress callback: prints per-trial value plus a crude ETA derived
        # from both the remaining-trials and remaining-timeout budgets.
        def _cb(study, trial):
            try:
                elapsed = time.time() - t0
                done = trial.number + 1
                avg = elapsed / max(1, done)
                rem_trials = max(0, (n_trials or 0) - done)
                eta_trials = rem_trials * avg if n_trials else float('inf')
                eta_timeout = max(0.0, (timeout or 0) - elapsed) if timeout else float('inf')
                eta = min(eta_trials, eta_timeout)
                print(f"[TRIAL] #{trial.number:03d} value={trial.value:.5f} | best={study.best_value:.5f} | elapsed={elapsed/60:.1f}m | ETA~{eta/60:.1f}m")
            except Exception:
                # Keep optimization alive even if formatting fails (e.g. pruned trial).
                print(f"[TRIAL] #{trial.number:03d} done")
        return _cb

    study = optuna.create_study(
        direction="maximize",
        pruner=MedianPruner(n_startup_trials=8, n_warmup_steps=2),
        study_name=f"{model_type}_{train_mode}_{weight_mode}_{metric_obj}_{tag}"
    )

    objective, pre = make_objective(
        model_type=model_type,
        train_mode=train_mode,
        X=X, y=y, groups=groups,
        metric_obj=metric_obj,
        n_splits=cv_folds,
        seed=seed, threads=threads,
        pu_bags=pu_bags, pu_u_ratio=pu_u_ratio,
        fb_beta=fb_beta,
        sample_weight=sample_weight
    )

    study.optimize(
        objective,
        timeout=timeout,
        n_trials=n_trials,
        gc_after_trial=True,
        callbacks=[_make_eta_cb(t0, timeout, n_trials)]
    )

    best_params = dict(study.best_params)

    # Rebuild the best estimator with the winning hyperparameters.
    if model_type == "RF":
        clf = RandomForestClassifier(**{**best_params, "n_jobs": threads, "random_state": seed})
    elif model_type == "ET":
        clf = ExtraTreesClassifier(**{**best_params, "n_jobs": threads, "random_state": seed})
    else:
        if not _HAS_XGB:
            raise RuntimeError("xgboost not installed")
        clf = XGBClassifier(**{
            **best_params,
            "n_jobs": threads,
            "random_state": seed,
            "objective": "binary:logistic",
            "eval_metric": "logloss",
        })

    base_pipe = Pipeline([("pre", pre), ("clf", clf)])

    if train_mode == "pu":
        model = PUBaggingClassifier(
            base_estimator=base_pipe,
            n_bags=pu_bags,
            u_ratio=pu_u_ratio,
            random_state=seed
        )
    else:
        model = base_pipe

    # fit final model (weighted or not)
    fit_pipeline(model, X, y, sample_weight)

    # optional calibration
    final_model = model
    if calibrate:
        # CalibratedClassifierCV will refit the estimator cv times; for PU this is heavy.
        # try/except handles the sklearn rename base_estimator -> estimator.
        try:
            calib = CalibratedClassifierCV(estimator=model, method="isotonic", cv=cv_folds)
        except TypeError:
            calib = CalibratedClassifierCV(base_estimator=model, method="isotonic", cv=cv_folds)

        # We avoid passing weights here for robustness across sklearn versions.
        calib.fit(X, y)
        final_model = calib

    # Threshold selection on training predictions (PR_AUC runs fall back to
    # the F1-optimal threshold since PR_AUC itself is threshold-free).
    proba_tr = final_model.predict_proba(X)[:, 1]
    if metric_obj == "F1":
        tau = best_threshold_f1(y.values, proba_tr)
    elif metric_obj == "FB":
        tau = best_threshold_fbeta(y.values, proba_tr, beta=fb_beta)
    else:
        tau = best_threshold_f1(y.values, proba_tr)

    yhat_tr = (proba_tr >= tau).astype(int)
    train_row = metrics_from_pred(y.values, yhat_tr, proba_tr)
    train_row = {f"train_{k}": v for k, v in train_row.items()}

    metrics = dict(
        threshold_used=float(tau),
        study_best=float(study.best_value),
        model_type=model_type,
        train_mode=train_mode,
        weight_mode=weight_mode,
        metric_obj=metric_obj,
        fb_beta=float(fb_beta),
        pu_bags=int(pu_bags),
        pu_u_ratio=float(pu_u_ratio),
        calibrated=bool(calibrate),
        **train_row
    )

    # Test metrics, using the train-selected threshold; columns are aligned
    # to the training feature order before predicting.
    if X_test is not None and y_test is not None:
        X_te = X_test.reindex(columns=list(X.columns))
        proba_te = final_model.predict_proba(X_te)[:, 1]
        yhat_te = (proba_te >= tau).astype(int)
        test_row = metrics_from_pred(y_test.values, yhat_te, proba_te)
        metrics.update({f"test_{k}": v for k, v in test_row.items()})

    return final_model, best_params, metrics, study
751
+
752
+ # =========================
753
+ # Results folder detection / skipping already trained variants
754
+ # =========================
755
def model_run_id(model_type, train_mode, weight_mode, metric_obj, tag):
    """Build the canonical run identifier embedded in all artifact filenames."""
    parts = (f"{model_type}", f"{train_mode}", f"{weight_mode}", f"{metric_obj}", f"{tag}")
    return "_".join(parts)
757
+
758
def artifact_paths(results_dir: Path, run_id: str):
    """Map artifact kind -> expected file path for a given run id."""
    filenames = {
        "model": f"model_{run_id}.joblib",
        "params": f"best_params_{run_id}.json",
        "metrics": f"metrics_{run_id}.json",
    }
    return {kind: results_dir / fname for kind, fname in filenames.items()}
764
+
765
def is_run_complete(results_dir: Path, run_id: str) -> bool:
    """True when model, metrics and params artifacts all exist for run_id."""
    paths = artifact_paths(results_dir, run_id)
    return all(paths[kind].exists() for kind in ("model", "metrics", "params"))
768
+
769
def load_existing_metrics(results_dir: Path) -> list:
    """Parse every metrics_*.json in results_dir, skipping unreadable files."""
    rows = []
    for path in sorted(results_dir.glob("metrics_*.json")):
        try:
            rows.append(json.loads(path.read_text()))
        except Exception:
            # Corrupt / partially written file: ignore and continue.
            pass
    return rows
778
+
779
def append_metrics_summary(results_dir: Path, rows: list):
    """(Re)write metrics_summary.csv from the accumulated metric rows.

    Despite the name, this overwrites the whole file on every call;
    it is a no-op when rows is empty.
    """
    if rows:
        pd.DataFrame(rows).to_csv(results_dir / "metrics_summary.csv", index=False)
784
+
785
+ # =========================
786
+ # Main
787
+ # =========================
788
def main():
    """CLI driver: iterate over every requested (train_mode, weight_mode,
    metric, model) combination, tune + fit each variant, and save
    crash-safe artifacts plus a running metrics_summary.csv.

    NOTE(review): indentation of the weights-snapshot section below was
    reconstructed; it sits inside the weighted branch because it reads
    ``weights_train_weighted``, which is None otherwise.
    """
    ap = argparse.ArgumentParser(description="Train RF/XGB/ET with grouped CV. supervised + PU. normal + weighted.")
    ap.add_argument("--ndjson", required=True, help="Folder with train.jsonl and test.jsonl")
    ap.add_argument("--supp2", required=True, help="Supp2.xlsx (positives list for PU; label=1)")
    ap.add_argument("--supp1", required=False, default=None, help="Supp1.csv (for weighted grading of Supp2 genomes)")
    ap.add_argument("--outdir", required=True, help="Output directory root (will create results_models/)")

    ap.add_argument("--train_mode", choices=["supervised","pu","both"], default="both",
                    help="Training mode: classic supervised vs PU-bagging vs both")

    ap.add_argument("--weight_mode", choices=["normal","weighted","both"], default="both",
                    help="normal: all weights=1; weighted: grade only Supp2 genomes via Supp1 expected/inferred; both: run both")

    ap.add_argument("--model", choices=["RF","XGB","ET","all"], default="all",
                    help="Model(s): RF, XGB, ET (ExtraTrees baseline), or all")

    ap.add_argument("--metric", choices=["F1","PR_AUC","FB","all"], default="all",
                    help="Optuna objective metric")

    ap.add_argument("--fb_beta", type=float, default=2.0, help="Beta for F-beta metric (FB objective)")

    ap.add_argument("--timeout", type=int, default=3600, help="Optuna time budget per run (seconds)")
    ap.add_argument("--n_trials", type=int, default=60, help="Upper limit of trials if timeout not reached")
    ap.add_argument("--threads", type=int, default=0, help="Threads (0=auto cpu_count()-4)")
    ap.add_argument("--seed", type=int, default=42, help="Random seed")

    ap.add_argument("--pu_bags", type=int, default=15, help="PU bagging: number of bags")
    ap.add_argument("--pu_u_ratio", type=float, default=3.0, help="PU bagging: U sampled per bag = ratio * |P|")

    ap.add_argument("--no_calibration", action="store_true",
                    help="Disable isotonic calibration (use raw predict_proba from base model / PU ensemble).")

    ap.add_argument("--force_calibration_pu", action="store_true",
                    help="Force calibration even for PU (can be extremely slow: cv_folds * pu_bags fits).")

    ap.add_argument("--cv", type=int, default=5, help="Grouped CV folds for objective & calibration")
    ap.add_argument("--overwrite", action="store_true",
                    help="Re-train even if artifacts already exist for a run_id (otherwise skipped).")

    args = ap.parse_args()

    # Thread budget: leave 4 cores free by default.
    cpu_total = os.cpu_count() or 8
    eff_threads = max(1, cpu_total - 4) if args.threads in (None, 0, -1) else max(1, args.threads)
    print(f"[CPU ] total={cpu_total} using={eff_threads} (flag={args.threads})")

    # Propagate the budget to BLAS/OpenMP backends.
    os.environ['OMP_NUM_THREADS'] = str(eff_threads)
    os.environ['OPENBLAS_NUM_THREADS'] = str(eff_threads)
    os.environ['MKL_NUM_THREADS'] = str(eff_threads)
    os.environ['VECLIB_MAXIMUM_THREADS'] = str(eff_threads)
    os.environ['NUMEXPR_NUM_THREADS'] = str(eff_threads)

    out_root = Path(args.outdir)
    out_root.mkdir(parents=True, exist_ok=True)

    results_dir = out_root / "results_models"
    results_dir.mkdir(parents=True, exist_ok=True)

    # load once
    Xtr, ytr, gtr, mtr, Xte, yte, gte, mte = _load_train_test(args.ndjson, args.supp2)
    tag = "64log_gc_tetra"

    supp2_set = load_alt_list_from_supp2(args.supp2)

    # weights (only if needed)
    supp1_map = {}
    if args.supp1:
        print("[LOAD] Supp1 mapping for weights:", args.supp1)
        supp1_map = load_supp1_code_map(args.supp1)

    weights_train_weighted = None
    weights_test_weighted = None
    if args.weight_mode in ("weighted", "both"):
        if not args.supp1:
            raise ValueError("--weight_mode weighted/both requires --supp1 Supp1.csv")
        weights_train_weighted = build_sample_weights_for_dataset(mtr, Xtr, ytr, supp2_set, supp1_map)
        weights_test_weighted = build_sample_weights_for_dataset(mte, Xte, yte, supp2_set, supp1_map)

        # snapshot for debugging
        snap_tr = pd.DataFrame({
            "acc_base": mtr["acc_base"].astype(str).values,
            "y": ytr.astype(int).values,
            "gc_percent": Xtr["gc_percent"].astype(float).values,
            "weight": weights_train_weighted
        })
        snap_te = pd.DataFrame({
            "acc_base": mte["acc_base"].astype(str).values,
            "y": yte.astype(int).values,
            "gc_percent": Xte["gc_percent"].astype(float).values,
            "weight": weights_test_weighted
        })
        snap_tr.to_csv(results_dir / "weights_snapshot_train.tsv", sep="\t", index=False)
        snap_te.to_csv(results_dir / "weights_snapshot_test.tsv", sep="\t", index=False)
        print("[WRITE] weights_snapshot_train.tsv / weights_snapshot_test.tsv")

        # Quick sanity print of the positive-class weight distribution.
        wP = weights_train_weighted[ytr.values == 1]
        if wP.size:
            print(f"[WEIGHTS] Supp2(P) weights: n={wP.size} mean={wP.mean():.3f} sd={wP.std():.3f} min={wP.min():.3f} max={wP.max():.3f}")

    print("\n" + "="*72)
    print(f"[DATA] train={Xtr.shape} P(from Supp2)={int(ytr.sum())} ({100.0*float(ytr.mean()):.3f}%)")
    print(f"[DATA] test={Xte.shape} P(from Supp2)={int(yte.sum())} ({100.0*float(yte.mean()):.3f}%)")
    print("="*72)

    # Expand "both"/"all" flags into the concrete grids to iterate over.
    train_modes = ["supervised","pu"] if args.train_mode == "both" else [args.train_mode]
    weight_modes = ["normal","weighted"] if args.weight_mode == "both" else [args.weight_mode]
    models = ["RF","XGB","ET"] if args.model == "all" else [args.model]
    metrics = ["F1","PR_AUC","FB"] if args.metric == "all" else [args.metric]

    # Load any already-existing metrics into summary rows
    global_rows = load_existing_metrics(results_dir)
    if global_rows:
        append_metrics_summary(results_dir, global_rows)
        print(f"[INFO] Found existing metrics: {len(global_rows)} runs. metrics_summary.csv refreshed.")

    for tm in train_modes:
        for wm in weight_modes:
            for met in metrics:
                for mdl in models:
                    run_id = model_run_id(mdl, tm, wm, met, tag)

                    # Decide calibration: PU default off unless forced
                    calibrate = (not args.no_calibration)
                    if tm == "pu" and calibrate and not args.force_calibration_pu:
                        print(f"[INFO] Auto-disabling calibration for PU run {run_id} (use --force_calibration_pu to override).")
                        calibrate = False

                    # Skip if complete and not overwrite
                    if (not args.overwrite) and is_run_complete(results_dir, run_id):
                        print(f"[SKIP] already exists: {run_id}")
                        continue

                    print("\n" + "-"*72)
                    print(f"[RUN ] mode={tm} | weights={wm} | metric={met} | model={mdl} | timeout={args.timeout}s | trials={args.n_trials}")
                    print(f"[RUN ] run_id={run_id} | calibrate={calibrate}")
                    print("-"*72)

                    # pick weights
                    if wm == "weighted":
                        sw_tr = weights_train_weighted
                        sw_te = weights_test_weighted
                    else:
                        sw_tr = None
                        sw_te = None

                    t0 = time.time()
                    final_model, best_params, met_dict, study = fit_best_model(
                        model_type=mdl,
                        train_mode=tm,
                        weight_mode=wm,
                        metric_obj=met,
                        X=Xtr, y=ytr, groups=gtr,
                        seed=args.seed,
                        timeout=args.timeout,
                        n_trials=args.n_trials,
                        outdir=results_dir,
                        tag=tag,
                        X_test=Xte,
                        y_test=yte,
                        threads=eff_threads,
                        pu_bags=args.pu_bags,
                        pu_u_ratio=args.pu_u_ratio,
                        fb_beta=args.fb_beta,
                        calibrate=calibrate,
                        sample_weight=sw_tr,
                        cv_folds=args.cv
                    )

                    dt = time.time() - t0
                    print(f"[DONE] {mdl} | {tm} | {wm} | {met} in {dt/60:.1f} min best={study.best_value:.5f}")

                    # crash-safe save immediately
                    model_path = results_dir / f"model_{run_id}.joblib"
                    params_path = results_dir / f"best_params_{run_id}.json"
                    metrics_path = results_dir / f"metrics_{run_id}.json"
                    cols_path = results_dir / f"feature_columns_{tag}.json"

                    joblib.dump(final_model, model_path)
                    params_path.write_text(json.dumps(best_params, indent=2))
                    metrics_path.write_text(json.dumps(met_dict, indent=2))
                    if not cols_path.exists():
                        cols_path.write_text(json.dumps(list(Xtr.columns), indent=2))

                    # Update in-memory summary list:
                    # remove previous row with same run_id if overwrite
                    global_rows = [r for r in global_rows if not (
                        r.get("model_type")==mdl and r.get("train_mode")==tm and r.get("weight_mode")==wm
                        and r.get("metric_obj")==met
                    )]
                    met_row = dict(met_dict)
                    met_row["elapsed_min"] = float(dt/60.0)
                    global_rows.append(met_row)

                    append_metrics_summary(results_dir, global_rows)
                    print("[WRITE] metrics_summary.csv updated")

    print("[DONE] Saved artifacts to", results_dir.resolve())
984
+
985
+ if __name__ == "__main__":
986
+ main()
987
+
Supp1.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a812e57415028390f2839a6ec89e3023c29545a6dbe7c03d0498b83fe6640fc
3
+ size 51347196
Supp2.xlsx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47567efc7b4b09f753d131f8c229e1d89087a03b2ce8b2f92d3226be8e6f8b11
3
+ size 2186240
genomes-all_metadata_with_genetic_code_id_noNA.tsv ADDED
The diff for this file is too large to render. See raw diff
 
metrics_summary.csv ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ threshold_used,study_best,model_type,train_mode,weight_mode,metric_obj,fb_beta,pu_bags,pu_u_ratio,calibrated,train_tn,train_fp,train_fn,train_tp,train_n,train_positives,train_accuracy,train_precision,train_recall,train_specificity,train_f1,train_roc_auc,train_pr_auc,test_tn,test_fp,test_fn,test_tp,test_n,test_positives,test_accuracy,test_precision,test_recall,test_specificity,test_f1,test_roc_auc,test_pr_auc,elapsed_min
2
+ 0.28160005812047,0.7170877819106146,RF,supervised,weighted,F1,2.0,15,3.0,True,192327,599,1445,3845,198216,5290,0.9896880171126448,0.8652115211521152,0.7268431001890359,0.996895182608876,0.7900143825765359,0.989440669602949,0.8648721301935423,47948,223,516,868,49555,1384,0.9850872767631924,0.7956003666361137,0.6271676300578035,0.9953706586950655,0.7014141414141414,0.9417760493895602,0.7089960012695522,100.32298675378163
3
+ 0.270977745577693,0.7316308621469358,XGB,supervised,weighted,F1,2.0,15,3.0,True,192282,644,1329,3961,198216,5290,0.9900462122129394,0.8601520086862107,0.7487712665406427,0.9966619325544509,0.8006063668519454,0.9912934246099276,0.8724422883868594,47958,213,489,895,49555,1384,0.9858339219049541,0.8077617328519856,0.6466763005780347,0.9955782524755559,0.7182985553772071,0.9480600136219919,0.7391698893672186,116.04822653134664
4
+ 0.26884064330614116,0.7142723913988928,ET,supervised,weighted,F1,2.0,15,3.0,True,192018,908,1096,4194,198216,5290,0.9898898171691488,0.82203057624461,0.7928166351606806,0.9952935322351575,0.8071593533487298,0.9957403224449537,0.8900330617078354,47854,317,481,903,49555,1384,0.9838966804560589,0.7401639344262295,0.6524566473988439,0.9934192771584563,0.6935483870967742,0.947862627035694,0.7126230593189249,95.2558631738027
5
+ 0.3056620102323837,0.7323725486714675,RF,supervised,weighted,PR_AUC,2.0,15,3.0,True,192450,476,1429,3861,198216,5290,0.9903892723089962,0.8902467143186534,0.729867674858223,0.9975327327576377,0.802119040199439,0.9904682181539894,0.8741500528850537,47969,202,524,860,49555,1384,0.9853496115427303,0.8097928436911488,0.6213872832369942,0.9958066056340952,0.7031888798037612,0.9405524235493905,0.7071107938720944,109.88682698011398
6
+ 0.2763037309981883,0.7739642015387473,XGB,supervised,weighted,PR_AUC,2.0,15,3.0,True,192350,576,837,4453,198216,5290,0.9928714130039956,0.8854643070192881,0.8417769376181474,0.9970143993033599,0.8630681267564686,0.9954336537391821,0.9287123546097,47936,235,445,939,49555,1384,0.9862778730703259,0.7998296422487223,0.6784682080924855,0.9951215461584771,0.7341673182173573,0.9540925553870405,0.7605201702162149,129.1129001100858
7
+ 0.2528274437858936,0.733124843786015,ET,supervised,weighted,PR_AUC,2.0,15,3.0,True,191998,928,1001,4289,198216,5290,0.9902681922750939,0.8221199923327583,0.8107750472589792,0.995189865544302,0.8164081088797944,0.9960269471274596,0.8960748662603205,47833,338,480,904,49555,1384,0.9834930884875391,0.7278582930756844,0.653179190751445,0.9929833302194266,0.6884996191926885,0.9482122815600444,0.714426289534482,98.56812449296315
8
+ 0.22519098961318726,0.7034497459764806,RF,supervised,weighted,FB,2.0,15,3.0,True,191371,1555,136,5154,198216,5290,0.9914689026112927,0.7682217916231927,0.9742911153119093,0.9919399147859801,0.8590715892991082,0.9978907399914563,0.9084804543955299,47725,446,432,952,49555,1384,0.9822823125819796,0.6809728183118741,0.6878612716763006,0.9907413173901309,0.6843997124370956,0.9534422648697445,0.7103612744548206,103.77016698916754
9
+ 0.3765930473804474,0.7424014112977324,XGB,supervised,weighted,FB,2.0,15,3.0,True,192750,176,41,5249,198216,5290,0.9989052346934657,0.9675576036866359,0.9922495274102079,0.999087733120471,0.9797480167988801,0.9998785149842558,0.9948955292297433,48029,142,472,912,49555,1384,0.9876097265664413,0.8652751423149905,0.6589595375722543,0.9970521683170372,0.7481542247744053,0.9543304332602195,0.7737554628839785,142.53574102719625
10
+ 0.1603667577708411,0.7068965517241379,ET,supervised,weighted,FB,2.0,15,3.0,True,191735,1191,425,4865,198216,5290,0.9918472777172378,0.803335535006605,0.9196597353497165,0.9938266485595514,0.8575709501145778,0.997956241564711,0.9410532422004124,47683,488,403,981,49555,1384,0.9820199778024418,0.6678012253233492,0.7088150289017341,0.9898694235120716,0.6876971608832808,0.9484654214759726,0.7242309832041485,96.69467223485312
models_logs.txt ADDED
The diff for this file is too large to render. See raw diff
 
predict_dir.py ADDED
@@ -0,0 +1,557 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+
4
+ """
5
+ predict_models_dir_with_truth.py
6
+
7
+ Predykcja dla wszystkich modeli w results_models/ na folderze genomów FASTA,
8
+ z adnotacją ground truth z pliku TSV (genomes-all_metadata_with_genetic_code_id_noNA.tsv).
9
+
10
+ Ground truth:
11
+ ALT = Genetic_code_ID != 11
12
+ STD = Genetic_code_ID == 11
13
+
14
+ Dodatkowo rozbicie metryk dla:
15
+ - Genome_type == "Isolate"
16
+ - Genome_type == "MAG"
17
+
18
+ Wyniki:
19
+ - predictions/<model>__pred.csv (per model, per genome)
20
+ - predictions/all_models_predictions_long.csv (long: model x genome)
21
+ - predictions/prediction_summary.csv (overall + Isolate + MAG + AUC)
22
+
23
+ Wymaga: aragorn w PATH (lub --aragorn).
24
+ """
25
+
26
+ import os
27
+ import re
28
+ import json
29
+ import time
30
+ import argparse
31
+ import subprocess
32
+ from pathlib import Path
33
+ from collections import Counter
34
+
35
+ import numpy as np
36
+ import pandas as pd
37
+ from joblib import load as joblib_load
38
+
39
+ # For metrics
40
+ from sklearn.metrics import (
41
+ confusion_matrix,
42
+ accuracy_score,
43
+ precision_score,
44
+ recall_score,
45
+ f1_score,
46
+ roc_auc_score,
47
+ average_precision_score,
48
+ )
49
+
50
+ # =========================
51
+ # PU class (for joblib load)
52
+ # =========================
53
+ from sklearn.base import BaseEstimator, ClassifierMixin, clone
54
+
55
+ class PUBaggingClassifier(BaseEstimator, ClassifierMixin):
56
+ def __init__(self, base_estimator, n_bags=15, u_ratio=3.0, random_state=42):
57
+ self.base_estimator = base_estimator
58
+ self.n_bags = int(n_bags)
59
+ self.u_ratio = float(u_ratio)
60
+ self.random_state = int(random_state)
61
+ self.models_ = None
62
+ self.classes_ = np.array([0, 1], dtype=int)
63
+
64
+ def fit(self, X, y, sample_weight=None):
65
+ y = np.asarray(y).astype(int)
66
+ pos_idx = np.where(y == 1)[0]
67
+ unl_idx = np.where(y == 0)[0]
68
+ if pos_idx.size == 0:
69
+ raise ValueError("PU training requires at least one positive sample (y==1).")
70
+
71
+ rng = np.random.RandomState(self.random_state)
72
+ self.models_ = []
73
+
74
+ if unl_idx.size == 0:
75
+ m = clone(self.base_estimator)
76
+ try:
77
+ if sample_weight is not None:
78
+ m.fit(X, y, sample_weight=np.asarray(sample_weight))
79
+ else:
80
+ m.fit(X, y)
81
+ except TypeError:
82
+ m.fit(X, y)
83
+ self.models_.append(m)
84
+ return self
85
+
86
+ k_u = int(min(unl_idx.size, max(1, round(self.u_ratio * pos_idx.size))))
87
+ for _ in range(self.n_bags):
88
+ u_b = rng.choice(unl_idx, size=k_u, replace=(k_u > unl_idx.size))
89
+ idx_b = np.concatenate([pos_idx, u_b])
90
+ X_b = X.iloc[idx_b] if hasattr(X, "iloc") else X[idx_b]
91
+ y_b = y[idx_b]
92
+
93
+ sw_b = None
94
+ if sample_weight is not None:
95
+ sw_b = np.asarray(sample_weight)[idx_b]
96
+
97
+ m = clone(self.base_estimator)
98
+ try:
99
+ if sw_b is not None:
100
+ m.fit(X_b, y_b, sample_weight=sw_b)
101
+ else:
102
+ m.fit(X_b, y_b)
103
+ except TypeError:
104
+ m.fit(X_b, y_b)
105
+
106
+ self.models_.append(m)
107
+ return self
108
+
109
+ def predict_proba(self, X):
110
+ if not self.models_:
111
+ raise RuntimeError("PUBaggingClassifier not fitted")
112
+ probs = [m.predict_proba(X) for m in self.models_]
113
+ return np.mean(np.stack(probs, axis=0), axis=0)
114
+
115
+ def predict(self, X):
116
+ return (self.predict_proba(X)[:, 1] >= 0.5).astype(int)
117
+
118
+
119
+ # =========================
120
+ # Feature extraction
121
+ # =========================
122
+ CODON_RE = re.compile(r"\(([ACGTUacgtu]{3})\)")
123
+
124
def set_single_thread_env():
    """Pin the common BLAS/OpenMP thread-pool env vars to a single thread."""
    for var in (
        "OMP_NUM_THREADS",
        "OPENBLAS_NUM_THREADS",
        "MKL_NUM_THREADS",
        "VECLIB_MAXIMUM_THREADS",
        "NUMEXPR_NUM_THREADS",
    ):
        os.environ[var] = "1"
130
+
131
def list_fasta_files(genomes_dir: str):
    """Return sorted paths of regular *.fna/*.fa/*.fasta files in genomes_dir.

    Non-recursive: only direct children of the directory are considered.
    """
    suffixes = (".fna", ".fa", ".fasta")
    matches = [
        os.path.join(genomes_dir, name)
        for name in os.listdir(genomes_dir)
        if name.endswith(suffixes) and os.path.isfile(os.path.join(genomes_dir, name))
    ]
    return sorted(matches)
141
+
142
def calc_gc_and_tetra(fasta_path):
    """Stream a FASTA file and compute GC%, genome length (ACGT count) and
    normalized tetranucleotide frequencies (tetra_* keys, 256 entries).

    Sequences are uppercased, U is mapped to T, and any other character is
    masked as N; 4-mers containing N are skipped. A 3-character ``tail`` is
    carried between lines so 4-mer windows spanning line breaks are counted
    exactly once.

    NOTE(review): the tail is NOT reset on '>' header lines, so 4-mer
    windows can span adjacent records of a multi-FASTA — confirm this
    matches the behavior used when the models were trained before changing.
    """
    bases = ["A", "C", "G", "T"]
    # Fixed 256-key table so the output always has every tetramer.
    all_kmers = ["".join([a, b, c, d]) for a in bases for b in bases for c in bases for d in bases]
    tetra_counts = {k: 0 for k in all_kmers}

    A = C = G = T = 0
    tail = ""  # last 3 chars of the previous chunk, for cross-line windows

    with open(fasta_path, "r") as fh:
        for line in fh:
            if line.startswith(">"):
                continue
            s = line.strip().upper().replace("U", "T")
            s = re.sub(r"[^ACGT]", "N", s)
            if not s:
                continue

            seq = tail + s

            # Base counts use only the new characters (tail already counted).
            for ch in s:
                if ch == "A": A += 1
                elif ch == "C": C += 1
                elif ch == "G": G += 1
                elif ch == "T": T += 1

            # Windows whose start lies in the final 3 positions are deferred
            # to the next iteration via `tail`, avoiding double counting.
            for i in range(len(seq) - 3):
                k = seq[i:i+4]
                if "N" in k:
                    continue
                tetra_counts[k] += 1

            tail = seq[-3:] if len(seq) >= 3 else seq

    total_acgt = A + C + G + T
    gc_percent = (float(G + C) / float(total_acgt) * 100.0) if total_acgt > 0 else 0.0

    # Normalize counts to frequencies; denom=1 guards the empty-genome case.
    windows_total = sum(tetra_counts.values())
    denom = float(windows_total) if windows_total > 0 else 1.0
    tetra_freq = {f"tetra_{k}": float(v) / denom for k, v in tetra_counts.items()}

    features = {
        "gc_percent": float(gc_percent),
        "genome_length": float(total_acgt),
    }
    features.update(tetra_freq)
    return features
188
+
189
def run_aragorn(aragorn_bin, fasta_path, out_txt):
    """Run ARAGORN (tRNA scan, -t -l, genetic code 1) on fasta_path, writing to out_txt.

    stdout/stderr are discarded and a non-zero exit status is deliberately
    ignored (best-effort: the parser tolerates a missing/empty output file).
    """
    cmd = [aragorn_bin, "-t", "-l", "-gc1", "-w", "-o", out_txt, fasta_path]
    subprocess.run(
        cmd,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        check=False,
    )
193
+
194
def parse_anticodons_from_aragorn(aragorn_txt):
    """Count codon triplets written as "(xyz)" in an ARAGORN output file.

    U/u is normalized to T and only clean ACGT triplets are kept.
    A missing file yields an empty Counter (best-effort behaviour).
    """
    triplet_re = re.compile(r"\(([ACGTUacgtu]{3})\)")
    strict_acgt = re.compile(r"[ACGT]{3}")
    counts = Counter()
    if not os.path.isfile(aragorn_txt):
        return counts
    with open(aragorn_txt, "r") as fh:
        for line in fh:
            for match in triplet_re.finditer(line):
                codon = match.group(1).upper().replace("U", "T")
                if strict_acgt.fullmatch(codon):
                    counts[codon] += 1
    return counts
205
+
206
def build_ac_features(anticodon_counts):
    """Expand anticodon counts into the full 64-key 'ac_<codon>' feature dict.

    Every DNA triplet gets an entry; codons absent from anticodon_counts
    default to 0.0.
    """
    alphabet = "ACGT"
    return {
        f"ac_{a}{b}{c}": float(anticodon_counts.get(f"{a}{b}{c}", 0))
        for a in alphabet
        for b in alphabet
        for c in alphabet
    }
215
+
216
def build_plr_features(ac_features, needed_plr_cols, eps=0.5):
    """Compute pseudo-log-ratio features: plr_<X>__<Y> = ln((ac_X + eps) / (ac_Y + eps)).

    eps is a pseudocount keeping the ratio finite when either count is zero;
    ac_* values missing from ac_features default to 0.
    """
    def _log_ratio(col):
        numerator_codon, denominator_codon = col[len("plr_"):].split("__")
        num = ac_features.get(f"ac_{numerator_codon}", 0.0)
        den = ac_features.get(f"ac_{denominator_codon}", 0.0)
        return float(np.log((num + eps) / (den + eps)))

    return {col: _log_ratio(col) for col in needed_plr_cols}
225
+
226
def build_features_for_genome(fasta_path, aragorn_bin, feature_columns, reuse_aragorn=True):
    """Build the complete feature row for one genome FASTA.

    Combines GC/tetranucleotide features, ARAGORN anticodon counts (ac_*) and
    derived pseudo-log-ratios (plr_*), then restricts the result to
    feature_columns (unknown columns default to 0.0).

    Returns (accession, row_dict), where accession is the FASTA basename
    without extension. ARAGORN output is cached as <fasta>.aragorn.txt and
    reused when reuse_aragorn is set and the cache is not older than the FASTA.
    """
    accession = os.path.splitext(os.path.basename(fasta_path))[0]

    gc_tetra = calc_gc_and_tetra(fasta_path)

    aragorn_out = fasta_path + ".aragorn.txt"
    needs_run = True
    if reuse_aragorn and os.path.isfile(aragorn_out):
        try:
            # Reuse only a cache at least as new as the FASTA itself.
            needs_run = os.path.getmtime(aragorn_out) < os.path.getmtime(fasta_path)
        except Exception:
            needs_run = True
    if needs_run:
        run_aragorn(aragorn_bin, fasta_path, aragorn_out)

    ac_feats = build_ac_features(parse_anticodons_from_aragorn(aragorn_out))

    plr_cols = [c for c in feature_columns if c.startswith("plr_")]
    plr_feats = build_plr_features(ac_feats, plr_cols) if plr_cols else {}

    # Later updates win: gc/tetra values take precedence on key collisions,
    # matching the original update order (ac -> plr -> gc/tetra).
    merged = {**ac_feats, **plr_feats, **gc_tetra}
    row = {col: float(merged.get(col, 0.0)) for col in feature_columns}
    return accession, row
254
+
255
+
256
+ # =========================
257
+ # Ground truth from TSV
258
+ # =========================
259
def load_truth_tsv(tsv_path: str) -> pd.DataFrame:
    """Load the metadata TSV and derive the ALT/STD ground truth.

    A genome is ALT (y_true_alt=1) when Genetic_code_ID is present and
    different from the standard bacterial code 11; otherwise STD.

    Returns columns: Genome, Genome_type, Genetic_code_ID, y_true_alt,
    true_label. Raises ValueError when a required column is absent.
    """
    df = pd.read_csv(tsv_path, sep="\t", dtype=str)

    # Validate the key columns before any processing.
    if "Genome" not in df.columns:
        raise ValueError(f"TSV missing column 'Genome'. Columns: {list(df.columns)}")
    if "Genome_type" not in df.columns:
        raise ValueError(f"TSV missing column 'Genome_type'. Columns: {list(df.columns)}")
    if "Genetic_code_ID" not in df.columns:
        raise ValueError(f"TSV missing column 'Genetic_code_ID'. Columns: {list(df.columns)}")

    df["Genome"] = df["Genome"].astype(str)
    df["Genome_type"] = df["Genome_type"].astype(str)

    # Nullable integer so unparsable IDs become <NA> rather than crashing.
    df["Genetic_code_ID"] = pd.to_numeric(df["Genetic_code_ID"], errors="coerce").astype("Int64")

    # ALT ground truth: any known code other than 11 (missing -> STD).
    df["y_true_alt"] = (
        df["Genetic_code_ID"]
        .apply(lambda code: (pd.notna(code) and int(code) != 11))
        .astype(int)
    )
    df["true_label"] = df["y_true_alt"].map({0: "STD", 1: "ALT"})

    return df[["Genome", "Genome_type", "Genetic_code_ID", "y_true_alt", "true_label"]]
280
+
281
+
282
+ # =========================
283
+ # Metrics
284
+ # =========================
285
def safe_confusion(y_true, y_pred):
    """Return binary confusion counts (tn, fp, fn, tp), with labels pinned to [0, 1].

    Pinning the label order keeps the 2x2 layout stable even when one class
    is absent from the inputs.
    """
    flat = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    return int(flat[0]), int(flat[1]), int(flat[2]), int(flat[3])
289
+
290
def compute_metrics_block(y_true, y_pred, y_score=None):
    """Compute confusion counts plus threshold and ranking metrics for binary labels.

    y_score is the positive-class score; ROC AUC and PR AUC are computed only
    when scores are supplied AND both classes occur in y_true — otherwise they
    are NaN. Rate metrics are NaN when their denominator is zero.
    """
    y_true = np.asarray(y_true, dtype=int)
    y_pred = np.asarray(y_pred, dtype=int)

    tn, fp, fn, tp = safe_confusion(y_true, y_pred)
    n = int(len(y_true))

    metrics = {
        "n": n,
        "positives": int(np.sum(y_true == 1)),
        "tn": tn, "fp": fp, "fn": fn, "tp": tp,
        "accuracy": float(accuracy_score(y_true, y_pred)) if n else np.nan,
        "precision": float(precision_score(y_true, y_pred, zero_division=0)) if n else np.nan,
        "recall": float(recall_score(y_true, y_pred, zero_division=0)) if n else np.nan,
        "f1": float(f1_score(y_true, y_pred, zero_division=0)) if n else np.nan,
        "specificity": float(tn / (tn + fp)) if (tn + fp) > 0 else np.nan,
        "fn_rate": float(fn / (fn + tp)) if (fn + tp) > 0 else np.nan,  # miss rate = 1 - recall
        "fp_rate": float(fp / (fp + tn)) if (fp + tn) > 0 else np.nan,  # fall-out
    }

    # AUCs default to NaN; overwritten only when computable.
    metrics["roc_auc"] = np.nan
    metrics["pr_auc"] = np.nan
    if y_score is not None:
        y_score = np.asarray(y_score, dtype=float)
        if n > 0 and len(np.unique(y_true)) == 2:
            try:
                metrics["roc_auc"] = float(roc_auc_score(y_true, y_score))
            except Exception:
                pass
            try:
                metrics["pr_auc"] = float(average_precision_score(y_true, y_score))
            except Exception:
                pass

    return metrics
331
+
332
+
333
+ # =========================
334
+ # Model discovery
335
+ # =========================
336
def find_models(models_dir: Path):
    """List every serialized classifier (model_*.joblib) in models_dir, name-sorted."""
    matches = models_dir.glob("model_*.joblib")
    return sorted(matches)
338
+
339
+ def pick_feature_cols(models_dir: Path, feature_cols_arg: str | None):
340
+ if feature_cols_arg:
341
+ return Path(feature_cols_arg)
342
+ p = models_dir / "feature_columns_64log_gc_tetra.json"
343
+ if p.exists():
344
+ return p
345
+ candidates = sorted(models_dir.glob("feature_columns_*.json"))
346
+ if not candidates:
347
+ raise FileNotFoundError(f"No feature_columns_*.json found in {models_dir}")
348
+ return candidates[0]
349
+
350
+
351
+ # =========================
352
+ # Main
353
+ # =========================
354
def main() -> None:
    """CLI entry point: predict with every model in a directory and score against truth.

    Pipeline:
      1. Parse arguments; force single-threaded numeric backends.
      2. Load the ground-truth TSV (ALT = Genetic_code_ID != 11).
      3. Build ONE shared feature matrix X for all FASTA genomes (GC/tetra +
         ARAGORN anticodon features), aligned to the models' feature-columns JSON.
      4. For each model_*.joblib: predict, write a per-model CSV annotated with
         truth, and collect long-format rows plus summary metrics for the
         overall / Isolate / MAG subsets.
      5. Write all_models_predictions_long.csv and prediction_summary.csv, then
         print the top models ranked by overall PR-AUC.
    """
    ap = argparse.ArgumentParser(description="Predict for all models in a directory + annotate truth from TSV.")
    ap.add_argument("--genomes_dir", required=True, help="Folder z genomami FASTA (.fna/.fa/.fasta).")
    ap.add_argument("--models_dir", required=True, help="Folder z modelami (.joblib) + feature_columns_*.json.")
    ap.add_argument("--outdir", required=True, help="Folder wyjściowy na predykcje CSV.")
    ap.add_argument("--aragorn", default="aragorn", help="Ścieżka do binarki ARAGORN.")
    ap.add_argument("--feature_cols", default=None, help="Opcjonalnie: wymuś konkretny feature_columns_*.json.")
    ap.add_argument("--reuse_aragorn", action="store_true", help="Jeśli istnieje *.aragorn.txt i jest świeży, użyj go.")
    ap.add_argument("--truth_tsv", required=True, help="genomes-all_metadata_with_genetic_code_id_noNA.tsv")
    args = ap.parse_args()

    set_single_thread_env()

    genomes_dir = Path(args.genomes_dir)
    models_dir = Path(args.models_dir)
    outdir = Path(args.outdir)
    outdir.mkdir(parents=True, exist_ok=True)

    # Load truth
    truth = load_truth_tsv(args.truth_tsv)
    # NOTE(review): truth_map is built but never read below — `truth` itself is
    # merged later; candidate for removal.
    truth_map = truth.set_index("Genome")

    # Input genomes
    fasta_files = list_fasta_files(str(genomes_dir))
    if not fasta_files:
        raise SystemExit(f"Brak FASTA w {genomes_dir}")

    models = find_models(models_dir)
    if not models:
        raise SystemExit(f"Brak model_*.joblib w {models_dir}")

    feat_cols_path = pick_feature_cols(models_dir, args.feature_cols)

    print(f"[INFO] Genomes: {len(fasta_files)} in {genomes_dir}")
    print(f"[INFO] Models : {len(models)} in {models_dir}")
    print(f"[INFO] Truth : {args.truth_tsv}")
    print(f"[INFO] FeatCols: {feat_cols_path}")

    # Load feature columns
    with open(feat_cols_path, "r") as fh:
        feature_columns = json.load(fh)

    # Build features once (shared across all models)
    t_feat0 = time.time()
    rows, accs = [], []
    for i, fasta in enumerate(fasta_files, 1):
        # Progress log: first, last, and every 50th genome.
        if i % 50 == 0 or i == 1 or i == len(fasta_files):
            print(f"[FEAT] {i}/{len(fasta_files)} {os.path.basename(fasta)}")
        acc, feats = build_features_for_genome(
            fasta_path=fasta,
            aragorn_bin=args.aragorn,
            feature_columns=feature_columns,
            reuse_aragorn=args.reuse_aragorn
        )
        accs.append(acc)
        rows.append(feats)

    # Column selection enforces the exact training-time feature order.
    X = pd.DataFrame(rows, index=accs)[feature_columns]
    print(f"[FEAT] Built X={X.shape} in {(time.time()-t_feat0):.1f}s")

    # Merge truth annotation for these genomes (left join: genomes absent from
    # the TSV keep NaN truth and are excluded from metrics below).
    ann = pd.DataFrame({"Genome": accs}).merge(truth, how="left", left_on="Genome", right_on="Genome")
    n_annot = int(ann["y_true_alt"].notna().sum())
    n_missing = int(len(ann) - n_annot)
    print(f"[TRUTH] Annotated: {n_annot}/{len(ann)} Missing_in_TSV: {n_missing}")

    # Prepare outputs
    long_rows = []
    summary_rows = []

    for mi, model_path in enumerate(models, 1):
        model_name = model_path.stem
        print("\n" + "="*80)
        print(f"[{mi}/{len(models)}] MODEL: {model_path.name}")
        print("="*80)

        t0 = time.time()
        model = joblib_load(model_path)

        # Positive-class (ALT) probability, when the estimator provides it.
        if hasattr(model, "predict_proba"):
            proba = model.predict_proba(X)[:, 1]
        else:
            proba = None

        # Hard labels: prefer predict(); fall back to thresholding proba at 0.5.
        # NOTE(review): when both predict() and predict_proba() are unavailable
        # or predict() raises with proba None, all predictions default to 0 (STD).
        if hasattr(model, "predict"):
            try:
                yhat = model.predict(X)
            except Exception:
                yhat = (proba >= 0.5).astype(int) if proba is not None else np.zeros(len(X), dtype=int)
        else:
            yhat = (proba >= 0.5).astype(int) if proba is not None else np.zeros(len(X), dtype=int)

        elapsed = time.time() - t0

        # Build per-model table with annotations
        df_pred = ann.copy()
        df_pred["model"] = model_name
        df_pred["y_pred_alt"] = np.asarray(yhat).astype(int)
        df_pred["pred_label"] = df_pred["y_pred_alt"].map({0: "STD", 1: "ALT"})
        if proba is not None:
            df_pred["proba_alt"] = np.asarray(proba, dtype=float)
        else:
            df_pred["proba_alt"] = np.nan

        out_csv = outdir / f"{model_name}__pred.csv"
        df_pred.to_csv(out_csv, index=False)
        print(f"[WRITE] {out_csv} rows={len(df_pred)} time={(elapsed/60):.2f} min")

        # Long output (one row per genome x model, accumulated over all models)
        keep_cols = ["model", "Genome", "Genome_type", "Genetic_code_ID", "y_true_alt", "y_pred_alt", "proba_alt"]
        for row in df_pred[keep_cols].itertuples(index=False):
            long_rows.append({
                "model": row.model,
                "Genome": row.Genome,
                "Genome_type": row.Genome_type,
                "Genetic_code_ID": row.Genetic_code_ID,
                "y_true_alt": row.y_true_alt,
                "y_pred_alt": row.y_pred_alt,
                "proba_alt": row.proba_alt
            })

        # Summary metrics ONLY on annotated subset
        df_eval = df_pred[df_pred["y_true_alt"].notna()].copy()
        y_true = df_eval["y_true_alt"].astype(int).values
        y_pred = df_eval["y_pred_alt"].astype(int).values
        y_score = df_eval["proba_alt"].astype(float).values if proba is not None else None

        overall = compute_metrics_block(y_true, y_pred, y_score=y_score)

        # Isolate / MAG splits
        def subset_metrics(gen_type: str):
            # Metrics restricted to one Genome_type; an empty subset yields
            # all-NaN values with the same keys as a real metrics block.
            sub = df_eval[df_eval["Genome_type"] == gen_type]
            if sub.shape[0] == 0:
                return {k: np.nan for k in compute_metrics_block([0,1],[0,1],y_score=np.array([0.1,0.9])).keys()}
            yt = sub["y_true_alt"].astype(int).values
            yp = sub["y_pred_alt"].astype(int).values
            ys = sub["proba_alt"].astype(float).values if proba is not None else None
            return compute_metrics_block(yt, yp, y_score=ys)

        iso = subset_metrics("Isolate")
        mag = subset_metrics("MAG")

        # Pack summary row
        srow = {
            "model": model_name,
            "model_file": str(model_path),
            "feature_cols": str(feat_cols_path),
            "n_genomes_total": int(len(df_pred)),
            "n_annotated": int(df_eval.shape[0]),
            "n_missing_truth": int(len(df_pred) - df_eval.shape[0]),
            "elapsed_sec": float(elapsed),
            "elapsed_min": float(elapsed/60.0),
        }

        # overall prefixed
        for k, v in overall.items():
            srow[f"overall_{k}"] = v

        # isolate prefixed
        for k, v in iso.items():
            srow[f"isolate_{k}"] = v

        # mag prefixed
        for k, v in mag.items():
            srow[f"mag_{k}"] = v

        summary_rows.append(srow)

    # Write combined outputs
    long_csv = outdir / "all_models_predictions_long.csv"
    pd.DataFrame(long_rows).to_csv(long_csv, index=False)
    print(f"\n[WRITE] {long_csv} rows={len(long_rows)}")

    summary_csv = outdir / "prediction_summary.csv"
    df_sum = pd.DataFrame(summary_rows)
    df_sum.to_csv(summary_csv, index=False)
    print(f"[WRITE] {summary_csv} rows={len(df_sum)}")

    # End report: best PR-AUC overall (if available)
    if "overall_pr_auc" in df_sum.columns:
        df_rank = df_sum.sort_values(["overall_pr_auc", "overall_roc_auc"], ascending=False, na_position="last")
        print("\n" + "="*80)
        print("[REPORT] Top models by overall PR-AUC (ground truth ALT = Genetic_code_ID != 11):")
        cols = [
            "model",
            "n_annotated",
            "overall_positives",
            "overall_precision",
            "overall_recall",
            "overall_f1",
            "overall_pr_auc",
            "overall_roc_auc",
            "isolate_fn", "isolate_fp", "mag_fn", "mag_fp",
            "elapsed_min",
        ]
        cols = [c for c in cols if c in df_rank.columns]
        print(df_rank[cols].head(15).to_string(index=False))
        print("="*80)

    print("[DONE]")
555
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
557
+
predictions_truth/all_models_predictions_long.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1ff896223d03dfb2a08d11ea69a938e07df40d496235f7e794b4d0382471419
3
+ size 15681101
predictions_truth/model_ET_pu_normal_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_pu_normal_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_pu_normal_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_pu_weighted_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_pu_weighted_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_pu_weighted_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_supervised_normal_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_supervised_normal_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_supervised_normal_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_supervised_weighted_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_supervised_weighted_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_ET_supervised_weighted_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_pu_normal_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_pu_normal_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_pu_normal_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_pu_weighted_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_pu_weighted_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_pu_weighted_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_supervised_normal_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_supervised_normal_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_supervised_normal_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_supervised_weighted_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_supervised_weighted_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_RF_supervised_weighted_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_pu_normal_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_pu_normal_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_pu_normal_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_pu_weighted_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_pu_weighted_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_pu_weighted_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_supervised_normal_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_supervised_normal_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_supervised_normal_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_supervised_weighted_F1_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_supervised_weighted_FB_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/model_XGB_supervised_weighted_PR_AUC_64log_gc_tetra__pred.csv ADDED
The diff for this file is too large to render. See raw diff
 
predictions_truth/prediction_summary.csv ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model,model_file,feature_cols,n_genomes_total,n_annotated,n_missing_truth,elapsed_sec,elapsed_min,overall_n,overall_positives,overall_tn,overall_fp,overall_fn,overall_tp,overall_accuracy,overall_precision,overall_recall,overall_f1,overall_specificity,overall_fn_rate,overall_fp_rate,overall_roc_auc,overall_pr_auc,isolate_n,isolate_positives,isolate_tn,isolate_fp,isolate_fn,isolate_tp,isolate_accuracy,isolate_precision,isolate_recall,isolate_f1,isolate_specificity,isolate_fn_rate,isolate_fp_rate,isolate_roc_auc,isolate_pr_auc,mag_n,mag_positives,mag_tn,mag_fp,mag_fn,mag_tp,mag_accuracy,mag_precision,mag_recall,mag_f1,mag_specificity,mag_fn_rate,mag_fp_rate,mag_roc_auc,mag_pr_auc
2
+ model_ET_pu_normal_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_pu_normal_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,50.55208730697632,0.8425347884496053,4728,71,4596,61,36,35,0.9794839255499154,0.3645833333333333,0.49295774647887325,0.41916167664670656,0.9869014386944385,0.5070422535211268,0.01309856130556152,0.9452921091072957,0.39827940648466603,904,9,880,15,5,4,0.9778761061946902,0.21052631578947367,0.4444444444444444,0.2857142857142857,0.9832402234636871,0.5555555555555556,0.01675977653631285,0.91495965238982,0.3824080976567331,3824,62,3716,46,31,31,0.9798640167364017,0.4025974025974026,0.5,0.4460431654676259,0.987772461456672,0.5,0.012227538543328018,0.9500908919414862,0.43006088211550036
3
+ model_ET_pu_normal_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_pu_normal_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,77.69140934944153,1.2948568224906922,4728,71,4571,86,34,37,0.9746192893401016,0.3008130081300813,0.5211267605633803,0.38144329896907214,0.9815331758642903,0.4788732394366197,0.018466824135709683,0.9428968053543507,0.37988079349086107,904,9,876,19,5,4,0.9734513274336283,0.17391304347826086,0.4444444444444444,0.25,0.9787709497206704,0.5555555555555556,0.021229050279329607,0.9160769708255743,0.41276268011543327,3824,62,3695,67,29,33,0.9748953974895398,0.33,0.532258064516129,0.4074074074074074,0.9821903242955875,0.46774193548387094,0.017809675704412546,0.9466695820685633,0.3918992950735184
4
+ model_ET_pu_normal_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_pu_normal_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,120.88866925239563,2.0148111542065936,4728,71,4571,86,35,36,0.9744077834179357,0.29508196721311475,0.5070422535211268,0.37305699481865284,0.9815331758642903,0.49295774647887325,0.018466824135709683,0.9432688032856793,0.35632532830639385,904,9,879,16,5,4,0.9767699115044248,0.2,0.4444444444444444,0.27586206896551724,0.982122905027933,0.5555555555555556,0.017877094972067038,0.9217877094972068,0.38450041503321486,3824,62,3692,70,30,32,0.9738493723849372,0.3137254901960784,0.5161290322580645,0.3902439024390244,0.9813928761297183,0.4838709677419355,0.018607123870281767,0.9466095590883368,0.36977013156762745
5
+ model_ET_pu_weighted_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_pu_weighted_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,106.17404961585999,1.7695674935976664,4728,71,4641,16,54,17,0.9851945854483926,0.5151515151515151,0.23943661971830985,0.3269230769230769,0.9965643117887052,0.7605633802816901,0.003435688211294825,0.9455612783421595,0.3715650874601461,904,9,892,3,5,4,0.9911504424778761,0.5714285714285714,0.4444444444444444,0.5,0.9966480446927374,0.5555555555555556,0.0033519553072625698,0.9353196772191186,0.42842752986492355,3824,62,3749,13,49,13,0.983786610878661,0.5,0.20967741935483872,0.29545454545454547,0.9965443912812334,0.7903225806451613,0.0034556087187666137,0.9474155819656669,0.36356733351960374
6
+ model_ET_pu_weighted_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_pu_weighted_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,82.65518403053284,1.3775864005088807,4728,71,4580,77,35,36,0.9763113367174281,0.3185840707964602,0.5070422535211268,0.391304347826087,0.9834657504831437,0.49295774647887325,0.016534249516856347,0.9428544641263946,0.35436137678040497,904,9,880,15,5,4,0.9778761061946902,0.21052631578947367,0.4444444444444444,0.2857142857142857,0.9832402234636871,0.5555555555555556,0.01675977653631285,0.924643078833023,0.38470043564313927,3824,62,3700,62,30,32,0.9759414225941423,0.3404255319148936,0.5161290322580645,0.41025641025641024,0.9835194045720361,0.4838709677419355,0.01648059542796385,0.9460307660647219,0.3687535529399977
7
+ model_ET_pu_weighted_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_pu_weighted_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,123.70275974273682,2.061712662378947,4728,71,4635,22,47,24,0.9854060913705583,0.5217391304347826,0.3380281690140845,0.41025641025641024,0.9952759287094696,0.6619718309859155,0.004724071290530384,0.9487005779577616,0.4259564813417571,904,9,890,5,5,4,0.9889380530973452,0.4444444444444444,0.4444444444444444,0.4444444444444444,0.994413407821229,0.5555555555555556,0.00558659217877095,0.9303538175046555,0.5056068757279014,3824,62,3745,17,42,20,0.984571129707113,0.5405405405405406,0.3225806451612903,0.40404040404040403,0.9954811270600744,0.6774193548387096,0.004518872939925572,0.9517801100992952,0.4147358376878987
8
+ model_ET_supervised_normal_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_supervised_normal_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,63.84041881561279,1.0640069802602132,4728,71,4657,0,66,5,0.9860406091370558,1.0,0.07042253521126761,0.13157894736842105,1.0,0.9295774647887324,0.0,0.9553995650951015,0.44559891436675103,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.9279329608938547,0.5109520240817257,3824,62,3762,0,59,3,0.984571129707113,1.0,0.04838709677419355,0.09230769230769231,1.0,0.9516129032258065,0.0,0.9591607929893159,0.43312121548020954
9
+ model_ET_supervised_normal_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_supervised_normal_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,26.79050636291504,0.4465084393819173,4728,71,4657,0,65,6,0.9862521150592216,1.0,0.08450704225352113,0.15584415584415584,1.0,0.9154929577464789,0.0,0.9546011305107864,0.4622851850458132,904,9,895,0,6,3,0.9933628318584071,1.0,0.3333333333333333,0.5,1.0,0.6666666666666666,0.0,0.9201738050900061,0.516873261585494,3824,62,3762,0,59,3,0.984571129707113,1.0,0.04838709677419355,0.09230769230769231,1.0,0.9516129032258065,0.0,0.9592015228687555,0.4515174716019063
10
+ model_ET_supervised_normal_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_supervised_normal_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,45.076682806015015,0.7512780467669169,4728,71,4657,0,69,2,0.9854060913705583,1.0,0.028169014084507043,0.0547945205479452,1.0,0.971830985915493,0.0,0.9525657271954682,0.4356412526112598,904,9,895,0,8,1,0.9911504424778761,1.0,0.1111111111111111,0.2,1.0,0.8888888888888888,0.0,0.9317815021725636,0.5251450492609465,3824,62,3762,0,61,1,0.9840481171548117,1.0,0.016129032258064516,0.031746031746031744,1.0,0.9838709677419355,0.0,0.9553836325907633,0.41843805776851867
11
+ model_ET_supervised_weighted_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_supervised_weighted_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,18.306095123291016,0.3051015853881836,4728,71,4657,0,65,6,0.9862521150592216,1.0,0.08450704225352113,0.15584415584415584,1.0,0.9154929577464789,0.0,0.9572277988307772,0.4579961401580452,904,9,895,0,6,3,0.9933628318584071,1.0,0.3333333333333333,0.5,1.0,0.6666666666666666,0.0,0.9270018621973929,0.5219244087813817,3824,62,3762,0,59,3,0.984571129707113,1.0,0.04838709677419355,0.09230769230769231,1.0,0.9516129032258065,0.0,0.96116298811545,0.44718476427977677
12
+ model_ET_supervised_weighted_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_supervised_weighted_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,33.15771460533142,0.5526285767555237,4728,71,4654,3,58,13,0.987098138747885,0.8125,0.18309859154929578,0.2988505747126437,0.9993558084603822,0.8169014084507042,0.0006441915396177797,0.953108602225334,0.4648988205928246,904,9,893,2,6,3,0.9911504424778761,0.6,0.3333333333333333,0.42857142857142855,0.9977653631284916,0.6666666666666666,0.0022346368715083797,0.9025450031036624,0.4252996463057907,3824,62,3761,1,52,10,0.9861401673640168,0.9090909090909091,0.16129032258064516,0.273972602739726,0.9997341839447103,0.8387096774193549,0.0002658160552897395,0.9596645572876473,0.4868861186802772
13
+ model_ET_supervised_weighted_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_ET_supervised_weighted_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,37.03258180618286,0.6172096967697144,4728,71,4657,0,65,6,0.9862521150592216,1.0,0.08450704225352113,0.15584415584415584,1.0,0.9154929577464789,0.0,0.9543697659437406,0.45731081328192,904,9,895,0,6,3,0.9933628318584071,1.0,0.3333333333333333,0.5,1.0,0.6666666666666666,0.0,0.9200496585971446,0.5170537927389657,3824,62,3762,0,59,3,0.984571129707113,1.0,0.04838709677419355,0.09230769230769231,1.0,0.9516129032258065,0.0,0.958867109121778,0.446047263192599
14
+ model_RF_pu_normal_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_pu_normal_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,42.984647035598755,0.7164107839266459,4728,71,4595,62,38,33,0.9788494077834179,0.3473684210526316,0.4647887323943662,0.39759036144578314,0.9866867081812326,0.5352112676056338,0.013313291818767448,0.944502747643257,0.41314388689946535,904,9,884,11,5,4,0.9823008849557522,0.26666666666666666,0.4444444444444444,0.3333333333333333,0.9877094972067039,0.5555555555555556,0.012290502793296089,0.9138423339540658,0.4470001697242661,3824,62,3711,51,33,29,0.9780334728033473,0.3625,0.46774193548387094,0.4084507042253521,0.9864433811802232,0.532258064516129,0.013556618819776715,0.9487832484436899,0.4191021823945725
15
+ model_RF_pu_normal_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_pu_normal_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,29.3424015045166,0.4890400250752767,4728,71,4596,61,38,33,0.9790609137055838,0.35106382978723405,0.4647887323943662,0.4,0.9869014386944385,0.5352112676056338,0.01309856130556152,0.9444422601747483,0.4089677391813839,904,9,885,10,5,4,0.9834070796460177,0.2857142857142857,0.4444444444444444,0.34782608695652173,0.9888268156424581,0.5555555555555556,0.0111731843575419,0.9144630664183737,0.44677775412233117,3824,62,3711,51,33,29,0.9780334728033473,0.3625,0.46774193548387094,0.4084507042253521,0.9864433811802232,0.532258064516129,0.013556618819776715,0.9485517312342439,0.4153182068657609
16
+ model_RF_pu_normal_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_pu_normal_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,52.97184228897095,0.8828640381495158,4728,71,4631,26,50,21,0.9839255499153976,0.44680851063829785,0.29577464788732394,0.3559322033898305,0.9944170066566459,0.704225352112676,0.0055829933433540905,0.9439765066672312,0.36971768238282215,904,9,891,4,5,4,0.9900442477876106,0.5,0.4444444444444444,0.47058823529411764,0.9955307262569832,0.5555555555555556,0.004469273743016759,0.933457479826195,0.43976839784190314,3824,62,3740,22,45,17,0.9824790794979079,0.4358974358974359,0.27419354838709675,0.33663366336633666,0.9941520467836257,0.7258064516129032,0.005847953216374269,0.9456148925588654,0.3590319158994692
17
+ model_RF_pu_weighted_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_pu_weighted_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,56.54481363296509,0.9424135605494182,4728,71,4632,25,49,22,0.9843485617597293,0.46808510638297873,0.30985915492957744,0.3728813559322034,0.9946317371698519,0.6901408450704225,0.005368262830148164,0.9436710449512622,0.3725512281474701,904,9,891,4,5,4,0.9900442477876106,0.5,0.4444444444444444,0.47058823529411764,0.9955307262569832,0.5555555555555556,0.004469273743016759,0.9330850403476102,0.42972094634184643,3824,62,3741,21,44,18,0.9830020920502092,0.46153846153846156,0.2903225806451613,0.3564356435643564,0.9944178628389154,0.7096774193548387,0.005582137161084529,0.9456234672703263,0.36556161220560734
18
+ model_RF_pu_weighted_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_pu_weighted_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,28.662107467651367,0.4777017911275228,4728,71,4591,66,37,34,0.9782148900169205,0.34,0.4788732394366197,0.39766081871345027,0.9858277861284088,0.5211267605633803,0.014172213871591153,0.9458485938175759,0.3909344311525199,904,9,885,10,5,4,0.9834070796460177,0.2857142857142857,0.4444444444444444,0.34782608695652173,0.9888268156424581,0.5555555555555556,0.0111731843575419,0.9251396648044693,0.45292861132542767,3824,62,3706,56,32,30,0.9769874476987448,0.3488372093023256,0.4838709677419355,0.40540540540540543,0.9851143009037746,0.5161290322580645,0.014885699096225412,0.9487918231551509,0.3868216640580115
19
+ model_RF_pu_weighted_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_pu_weighted_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,41.19235801696777,0.6865393002827962,4728,71,4636,21,53,18,0.9843485617597293,0.46153846153846156,0.2535211267605634,0.32727272727272727,0.9954906592226755,0.7464788732394366,0.004509340777324458,0.9420197370609744,0.3505845016170188,904,9,892,3,5,4,0.9911504424778761,0.5714285714285714,0.4444444444444444,0.5,0.9966480446927374,0.5555555555555556,0.0033519553072625698,0.9354438237119801,0.4183311734629107,3824,62,3744,18,48,14,0.9827405857740585,0.4375,0.22580645161290322,0.2978723404255319,0.9952153110047847,0.7741935483870968,0.004784688995215311,0.9429910308518118,0.33967107904845484
20
+ model_RF_supervised_normal_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_supervised_normal_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,13.129471063613892,0.2188245177268982,4728,71,4656,1,67,4,0.9856175972927242,0.8,0.056338028169014086,0.10526315789473684,0.999785269486794,0.9436619718309859,0.00021473051320592657,0.9040396555843544,0.3099394130002518,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.9389199255121042,0.45773587880573713,3824,62,3761,1,60,2,0.9840481171548117,0.6666666666666666,0.03225806451612903,0.06153846153846154,0.9997341839447103,0.967741935483871,0.0002658160552897395,0.9033887259693711,0.28449460411540883
21
+ model_RF_supervised_normal_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_supervised_normal_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,13.361087560653687,0.22268479267756144,4728,71,4656,1,68,3,0.9854060913705583,0.75,0.04225352112676056,0.08,0.999785269486794,0.9577464788732394,0.00021473051320592657,0.9388335596572781,0.42883871512801697,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.8685288640595903,0.42362481833477084,3824,62,3761,1,61,1,0.983786610878661,0.5,0.016129032258064516,0.03125,0.9997341839447103,0.9838709677419355,0.0002658160552897395,0.9462558522405721,0.47207855674456456
22
+ model_RF_supervised_normal_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_supervised_normal_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,9.392892360687256,0.15654820601145428,4728,71,4656,1,68,3,0.9854060913705583,0.75,0.04225352112676056,0.08,0.999785269486794,0.9577464788732394,0.00021473051320592657,0.9094003574809388,0.34860273594436314,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.9057107386716324,0.5125317061876677,3824,62,3761,1,61,1,0.983786610878661,0.5,0.016129032258064516,0.03125,0.9997341839447103,0.9838709677419355,0.0002658160552897395,0.91186911560426,0.3174744471105539
23
+ model_RF_supervised_weighted_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_supervised_weighted_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,8.055122375488281,0.13425203959147136,4728,71,4657,0,68,3,0.9856175972927242,1.0,0.04225352112676056,0.08108108108108109,1.0,0.9577464788732394,0.0,0.9123551703175894,0.33441108769844863,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.9345127250155183,0.48490971565684204,3824,62,3762,0,61,1,0.9840481171548117,1.0,0.016129032258064516,0.031746031746031744,1.0,0.9838709677419355,0.0,0.9108015640273703,0.30325839249215525
24
+ model_RF_supervised_weighted_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_supervised_weighted_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,9.112311601638794,0.15187186002731323,4728,71,4655,2,68,3,0.9851945854483926,0.6,0.04225352112676056,0.07894736842105263,0.9995705389735882,0.9577464788732394,0.00042946102641185313,0.9355642119843821,0.43779528201008766,904,9,894,1,7,2,0.9911504424778761,0.6666666666666666,0.2222222222222222,0.3333333333333333,0.9988826815642458,0.7777777777777778,0.0011173184357541898,0.8546865301055245,0.4123708848159698,3824,62,3761,1,61,1,0.983786610878661,0.5,0.016129032258064516,0.03125,0.9997341839447103,0.9838709677419355,0.0002658160552897395,0.9448946167961448,0.4836024823405589
25
+ model_RF_supervised_weighted_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_RF_supervised_weighted_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,17.61688995361328,0.29361483256022136,4728,71,4656,1,68,3,0.9854060913705583,0.75,0.04225352112676056,0.08,0.999785269486794,0.9577464788732394,0.00021473051320592657,0.897792812274117,0.31668159947087104,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.9248913718187461,0.45728721163207403,3824,62,3761,1,61,1,0.983786610878661,0.5,0.016129032258064516,0.03125,0.9997341839447103,0.9838709677419355,0.0002658160552897395,0.8991163759839482,0.2900293205193154
26
+ model_XGB_pu_normal_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_pu_normal_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,4.651256084442139,0.0775209347407023,4728,71,4587,70,34,37,0.9780033840947546,0.34579439252336447,0.5211267605633803,0.4157303370786517,0.9849688640755851,0.4788732394366197,0.01503113592441486,0.9288183470589481,0.37253149430834054,904,9,888,7,4,5,0.9878318584070797,0.4166666666666667,0.5555555555555556,0.47619047619047616,0.9921787709497206,0.4444444444444444,0.00782122905027933,0.9565487274984481,0.5443184781420076,3824,62,3699,63,30,32,0.9756799163179917,0.3368421052631579,0.5161290322580645,0.40764331210191085,0.9832535885167464,0.4838709677419355,0.01674641148325359,0.9261331481195658,0.3538834608749478
27
+ model_XGB_pu_normal_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_pu_normal_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,3.669872999191284,0.061164549986521405,4728,71,4576,81,29,42,0.9767343485617598,0.34146341463414637,0.5915492957746479,0.4329896907216495,0.98260682843032,0.4084507042253521,0.017393171569680052,0.9310382371532178,0.44220584603783913,904,9,883,12,4,5,0.9823008849557522,0.29411764705882354,0.5555555555555556,0.38461538461538464,0.9865921787709497,0.4444444444444444,0.013407821229050279,0.9348230912476723,0.5182603645516074,3824,62,3693,69,25,37,0.975418410041841,0.3490566037735849,0.5967741935483871,0.44047619047619047,0.981658692185008,0.4032258064516129,0.018341307814992026,0.9303819176484709,0.4251840272211057
28
+ model_XGB_pu_normal_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_pu_normal_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,3.8302955627441406,0.063838259379069,4728,71,4586,71,34,37,0.9777918781725888,0.3425925925925926,0.5211267605633803,0.4134078212290503,0.9847541335623792,0.4788732394366197,0.015245866437620785,0.9348640695363937,0.4245710492021031,904,9,885,10,4,5,0.9845132743362832,0.3333333333333333,0.5555555555555556,0.4166666666666667,0.9888268156424581,0.4444444444444444,0.0111731843575419,0.945747982619491,0.49644777720049926,3824,62,3701,61,30,32,0.9762029288702929,0.34408602150537637,0.5161290322580645,0.4129032258064516,0.9837852206273259,0.4838709677419355,0.01621477937267411,0.9335974344463309,0.4145513820351087
29
+ model_XGB_pu_weighted_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_pu_weighted_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,9.871084928512573,0.16451808214187622,4728,71,4616,41,39,32,0.9830795262267343,0.4383561643835616,0.4507042253521127,0.4444444444444444,0.9911960489585571,0.5492957746478874,0.008803951041442989,0.932553448239361,0.37901311493458495,904,9,888,7,5,4,0.9867256637168141,0.36363636363636365,0.4444444444444444,0.4,0.9921787709497206,0.5555555555555556,0.00782122905027933,0.930477963997517,0.4319554057567376,3824,62,3728,34,34,28,0.9822175732217573,0.45161290322580644,0.45161290322580644,0.45161290322580644,0.9909622541201488,0.5483870967741935,0.009037745879851143,0.932937181663837,0.37055748704402675
30
+ model_XGB_pu_weighted_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_pu_weighted_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,4.087388038635254,0.06812313397725424,4728,71,4576,81,31,40,0.9763113367174281,0.3305785123966942,0.5633802816901409,0.4166666666666667,0.98260682843032,0.43661971830985913,0.017393171569680052,0.9332913953551674,0.3907542031182485,904,9,886,9,4,5,0.9856194690265486,0.35714285714285715,0.5555555555555556,0.43478260869565216,0.9899441340782122,0.4444444444444444,0.01005586592178771,0.9437616387337058,0.5918783382934327,3824,62,3690,72,27,35,0.9741108786610879,0.32710280373831774,0.5645161290322581,0.41420118343195267,0.9808612440191388,0.43548387096774194,0.019138755980861243,0.9319596645572876,0.3688010414478227
31
+ model_XGB_pu_weighted_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_pu_weighted_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,7.8919336795806885,0.13153222799301148,4728,71,4588,69,34,37,0.9782148900169205,0.3490566037735849,0.5211267605633803,0.4180790960451977,0.985183594588791,0.4788732394366197,0.014816405411208933,0.9344013404023022,0.41140616698019167,904,9,886,9,4,5,0.9856194690265486,0.35714285714285715,0.5555555555555556,0.43478260869565216,0.9899441340782122,0.4444444444444444,0.01005586592178771,0.9502172563625078,0.5298304430027967,3824,62,3702,60,30,32,0.9764644351464435,0.34782608695652173,0.5161290322580645,0.4155844155844156,0.9840510366826156,0.4838709677419355,0.01594896331738437,0.9332201471420487,0.3948293467173613
32
+ model_XGB_supervised_normal_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_supervised_normal_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,5.092371225357056,0.08487285375595092,4728,71,4653,4,55,16,0.9875211505922166,0.8,0.22535211267605634,0.3516483516483517,0.9991410779471763,0.7746478873239436,0.0008589220528237063,0.9292432715252218,0.46633194184550697,904,9,893,2,6,3,0.9911504424778761,0.6,0.3333333333333333,0.42857142857142855,0.9977653631284916,0.6666666666666666,0.0022346368715083797,0.9484792054624457,0.46459973178105496,3824,62,3760,2,49,13,0.986663179916318,0.8666666666666667,0.20967741935483872,0.33766233766233766,0.9994683678894205,0.7903225806451613,0.000531632110579479,0.9270849410917322,0.4824182792587978
33
+ model_XGB_supervised_normal_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_supervised_normal_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,3.9309542179107666,0.0655159036318461,4728,71,4655,2,63,8,0.9862521150592216,0.8,0.11267605633802817,0.19753086419753085,0.9995705389735882,0.8873239436619719,0.00042946102641185313,0.9275345005398508,0.44439749793785804,904,9,895,0,6,3,0.9933628318584071,1.0,0.3333333333333333,0.5,1.0,0.6666666666666666,0.0,0.9566728739913097,0.4886770131440351,3824,62,3760,2,57,5,0.984571129707113,0.7142857142857143,0.08064516129032258,0.14492753623188406,0.9994683678894205,0.9193548387096774,0.000531632110579479,0.9252070792817821,0.45146045643566773
34
+ model_XGB_supervised_normal_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_supervised_normal_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,6.661342144012451,0.11102236906687418,4728,71,4654,3,56,15,0.9875211505922166,0.8333333333333334,0.2112676056338028,0.33707865168539325,0.9993558084603822,0.7887323943661971,0.0006441915396177797,0.927768889480322,0.43621452803897554,904,9,893,2,6,3,0.9911504424778761,0.6,0.3333333333333333,0.42857142857142855,0.9977653631284916,0.6666666666666666,0.0022346368715083797,0.939913097454997,0.4239432138100045,3824,62,3761,1,50,12,0.986663179916318,0.9230769230769231,0.1935483870967742,0.32,0.9997341839447103,0.8064516129032258,0.0002658160552897395,0.9282168030045789,0.46947082191995665
35
+ model_XGB_supervised_weighted_F1_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_supervised_weighted_F1_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,1.3843553066253662,0.023072588443756103,4728,71,4646,11,58,13,0.9854060913705583,0.5416666666666666,0.18309859154929578,0.2736842105263158,0.9976379643547348,0.8169014084507042,0.002362035645265192,0.9126833148342492,0.3560702303037348,904,9,894,1,7,2,0.9911504424778761,0.6666666666666666,0.2222222222222222,0.3333333333333333,0.9988826815642458,0.7777777777777778,0.0011173184357541898,0.9776536312849162,0.47891203197466886,3824,62,3752,10,51,11,0.9840481171548117,0.5238095238095238,0.1774193548387097,0.26506024096385544,0.9973418394471026,0.8225806451612904,0.002658160552897395,0.9114961156557082,0.342459594653059
36
+ model_XGB_supervised_weighted_FB_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_supervised_weighted_FB_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,2.361746072769165,0.03936243454615275,4728,71,4650,7,55,16,0.9868866328257191,0.6956521739130435,0.22535211267605634,0.3404255319148936,0.9984968864075585,0.7746478873239436,0.001503113592441486,0.929102638160939,0.4639064536999952,904,9,892,3,6,3,0.9900442477876106,0.5,0.3333333333333333,0.4,0.9966480446927374,0.6666666666666666,0.0033519553072625698,0.9422718808193669,0.4547986659037171,3824,62,3758,4,49,13,0.9861401673640168,0.7647058823529411,0.20967741935483872,0.3291139240506329,0.998936735778841,0.7903225806451613,0.001063264221158958,0.9271020905146543,0.4775050650943073
37
+ model_XGB_supervised_weighted_PR_AUC_64log_gc_tetra,/TomaszLab/tRNA/Modele_final/results_models/model_XGB_supervised_weighted_PR_AUC_64log_gc_tetra.joblib,/TomaszLab/tRNA/Modele_final/results_models/feature_columns_64log_gc_tetra.json,4744,4728,16,1.4389727115631104,0.02398287852605184,4728,71,4654,3,63,8,0.9860406091370558,0.7272727272727273,0.11267605633802817,0.1951219512195122,0.9993558084603822,0.8873239436619719,0.0006441915396177797,0.9258302661146176,0.4537539075820669,904,9,895,0,7,2,0.9922566371681416,1.0,0.2222222222222222,0.36363636363636365,1.0,0.7777777777777778,0.0,0.9633147113594042,0.5472066604419547,3824,62,3759,3,56,6,0.984571129707113,0.6666666666666666,0.0967741935483871,0.16901408450704225,0.9992025518341308,0.9032258064516129,0.0007974481658692185,0.9215735453002006,0.43755002037403923
results_models/best_params_ET_pu_normal_F1_64log_gc_tetra.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n_estimators": 833,
3
+ "max_depth": 13,
4
+ "min_samples_split": 6,
5
+ "min_samples_leaf": 2,
6
+ "max_features": "sqrt",
7
+ "class_weight": "balanced"
8
+ }
results_models/best_params_ET_pu_normal_FB_64log_gc_tetra.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n_estimators": 950,
3
+ "max_depth": 18,
4
+ "min_samples_split": 9,
5
+ "min_samples_leaf": 6,
6
+ "max_features": "sqrt",
7
+ "class_weight": "balanced"
8
+ }