QPromaQ committed on
Commit
b6d1ec8
·
verified ·
1 Parent(s): bd0542e

Create predict_dir.py

Browse files
Files changed (1) hide show
  1. predict_dir.py +154 -143
predict_dir.py CHANGED
@@ -2,25 +2,31 @@
2
  # -*- coding: utf-8 -*-
3
 
4
  """
5
- predict_models_dir_with_truth.py
6
 
7
- Predykcja dla wszystkich modeli w results_models/ na folderze genomów FASTA,
8
- z adnotacją ground truth z pliku TSV (genomes-all_metadata_with_genetic_code_id_noNA.tsv).
 
9
 
10
- Ground truth:
 
 
 
 
 
 
 
11
  ALT = Genetic_code_ID != 11
12
  STD = Genetic_code_ID == 11
13
 
14
- Dodatkowo rozbicie metryk dla:
15
- - Genome_type == "Isolate"
16
- - Genome_type == "MAG"
17
-
18
- Wyniki:
19
- - predictions/<model>__pred.csv (per model, per genome)
20
- - predictions/all_models_predictions_long.csv (long: model x genome)
21
- - predictions/prediction_summary.csv (overall + Isolate + MAG + AUC)
22
 
23
- Wymaga: aragorn w PATH (lub --aragorn).
 
24
  """
25
 
26
  import os
@@ -36,7 +42,6 @@ import numpy as np
36
  import pandas as pd
37
  from joblib import load as joblib_load
38
 
39
- # For metrics
40
  from sklearn.metrics import (
41
  confusion_matrix,
42
  accuracy_score,
@@ -47,11 +52,12 @@ from sklearn.metrics import (
47
  average_precision_score,
48
  )
49
 
 
 
 
50
  # =========================
51
  # PU class (for joblib load)
52
  # =========================
53
- from sklearn.base import BaseEstimator, ClassifierMixin, clone
54
-
55
  class PUBaggingClassifier(BaseEstimator, ClassifierMixin):
56
  def __init__(self, base_estimator, n_bags=15, u_ratio=3.0, random_state=42):
57
  self.base_estimator = base_estimator
@@ -254,22 +260,17 @@ def build_features_for_genome(fasta_path, aragorn_bin, feature_columns, reuse_ar
254
 
255
 
256
  # =========================
257
- # Ground truth from TSV
258
  # =========================
259
  def load_truth_tsv(tsv_path: str) -> pd.DataFrame:
260
  df = pd.read_csv(tsv_path, sep="\t", dtype=str)
261
- # Normalize key columns
262
- if "Genome" not in df.columns:
263
- raise ValueError(f"TSV missing column 'Genome'. Columns: {list(df.columns)}")
264
- if "Genome_type" not in df.columns:
265
- raise ValueError(f"TSV missing column 'Genome_type'. Columns: {list(df.columns)}")
266
- if "Genetic_code_ID" not in df.columns:
267
- raise ValueError(f"TSV missing column 'Genetic_code_ID'. Columns: {list(df.columns)}")
268
 
269
  df["Genome"] = df["Genome"].astype(str)
270
  df["Genome_type"] = df["Genome_type"].astype(str)
271
-
272
- # Genetic_code_ID -> int
273
  df["Genetic_code_ID"] = pd.to_numeric(df["Genetic_code_ID"], errors="coerce").astype("Int64")
274
 
275
  # ALT ground truth: != 11
@@ -280,7 +281,7 @@ def load_truth_tsv(tsv_path: str) -> pd.DataFrame:
280
 
281
 
282
  # =========================
283
- # Metrics
284
  # =========================
285
  def safe_confusion(y_true, y_pred):
286
  cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
@@ -308,7 +309,6 @@ def compute_metrics_block(y_true, y_pred, y_score=None):
308
  "fp_rate": float(fp / (fp + tn)) if (fp + tn) > 0 else np.nan,
309
  }
310
 
311
- # AUCs only if y_score provided and both classes exist
312
  if y_score is not None:
313
  y_score = np.asarray(y_score, dtype=float)
314
  if n > 0 and len(np.unique(y_true)) == 2:
@@ -352,14 +352,20 @@ def pick_feature_cols(models_dir: Path, feature_cols_arg: str | None):
352
  # Main
353
  # =========================
354
  def main():
355
- ap = argparse.ArgumentParser(description="Predict for all models in a directory + annotate truth from TSV.")
356
- ap.add_argument("--genomes_dir", required=True, help="Folder z genomami FASTA (.fna/.fa/.fasta).")
357
- ap.add_argument("--models_dir", required=True, help="Folder z modelami (.joblib) + feature_columns_*.json.")
358
- ap.add_argument("--outdir", required=True, help="Folder wyjściowy na predykcje CSV.")
359
- ap.add_argument("--aragorn", default="aragorn", help="Ścieżka do binarki ARAGORN.")
360
- ap.add_argument("--feature_cols", default=None, help="Opcjonalnie: wymuś konkretny feature_columns_*.json.")
361
- ap.add_argument("--reuse_aragorn", action="store_true", help="Jeśli istnieje *.aragorn.txt i jest świeży, użyj go.")
362
- ap.add_argument("--truth_tsv", required=True, help="genomes-all_metadata_with_genetic_code_id_noNA.tsv")
 
 
 
 
 
 
363
  args = ap.parse_args()
364
 
365
  set_single_thread_env()
@@ -369,30 +375,30 @@ def main():
369
  outdir = Path(args.outdir)
370
  outdir.mkdir(parents=True, exist_ok=True)
371
 
372
- # Load truth
373
- truth = load_truth_tsv(args.truth_tsv)
374
- truth_map = truth.set_index("Genome")
375
-
376
- # Input genomes
377
  fasta_files = list_fasta_files(str(genomes_dir))
378
  if not fasta_files:
379
- raise SystemExit(f"Brak FASTA w {genomes_dir}")
380
 
381
  models = find_models(models_dir)
382
  if not models:
383
- raise SystemExit(f"Brak model_*.joblib w {models_dir}")
384
 
385
  feat_cols_path = pick_feature_cols(models_dir, args.feature_cols)
386
 
387
- print(f"[INFO] Genomes: {len(fasta_files)} in {genomes_dir}")
388
- print(f"[INFO] Models : {len(models)} in {models_dir}")
389
- print(f"[INFO] Truth : {args.truth_tsv}")
390
  print(f"[INFO] FeatCols: {feat_cols_path}")
 
391
 
392
  # Load feature columns
393
  with open(feat_cols_path, "r") as fh:
394
  feature_columns = json.load(fh)
395
 
 
 
 
 
 
396
  # Build features once (shared across all models)
397
  t_feat0 = time.time()
398
  rows, accs = [], []
@@ -411,13 +417,21 @@ def main():
411
  X = pd.DataFrame(rows, index=accs)[feature_columns]
412
  print(f"[FEAT] Built X={X.shape} in {(time.time()-t_feat0):.1f}s")
413
 
414
- # Merge truth annotation for these genomes
415
- ann = pd.DataFrame({"Genome": accs}).merge(truth, how="left", left_on="Genome", right_on="Genome")
416
- n_annot = int(ann["y_true_alt"].notna().sum())
417
- n_missing = int(len(ann) - n_annot)
418
- print(f"[TRUTH] Annotated: {n_annot}/{len(ann)} Missing_in_TSV: {n_missing}")
 
 
 
 
 
 
 
 
 
419
 
420
- # Prepare outputs
421
  long_rows = []
422
  summary_rows = []
423
 
@@ -430,11 +444,15 @@ def main():
430
  t0 = time.time()
431
  model = joblib_load(model_path)
432
 
 
 
433
  if hasattr(model, "predict_proba"):
434
- proba = model.predict_proba(X)[:, 1]
435
- else:
436
- proba = None
 
437
 
 
438
  if hasattr(model, "predict"):
439
  try:
440
  yhat = model.predict(X)
@@ -445,15 +463,11 @@ def main():
445
 
446
  elapsed = time.time() - t0
447
 
448
- # Build per-model table with annotations
449
  df_pred = ann.copy()
450
  df_pred["model"] = model_name
451
  df_pred["y_pred_alt"] = np.asarray(yhat).astype(int)
452
  df_pred["pred_label"] = df_pred["y_pred_alt"].map({0: "STD", 1: "ALT"})
453
- if proba is not None:
454
- df_pred["proba_alt"] = np.asarray(proba, dtype=float)
455
- else:
456
- df_pred["proba_alt"] = np.nan
457
 
458
  out_csv = outdir / f"{model_name}__pred.csv"
459
  df_pred.to_csv(out_csv, index=False)
@@ -461,97 +475,94 @@ def main():
461
 
462
  # Long output
463
  keep_cols = ["model", "Genome", "Genome_type", "Genetic_code_ID", "y_true_alt", "y_pred_alt", "proba_alt"]
464
- for row in df_pred[keep_cols].itertuples(index=False):
465
- long_rows.append({
466
- "model": row.model,
467
- "Genome": row.Genome,
468
- "Genome_type": row.Genome_type,
469
- "Genetic_code_ID": row.Genetic_code_ID,
470
- "y_true_alt": row.y_true_alt,
471
- "y_pred_alt": row.y_pred_alt,
472
- "proba_alt": row.proba_alt
473
- })
474
-
475
- # Summary metrics ONLY on annotated subset
476
- df_eval = df_pred[df_pred["y_true_alt"].notna()].copy()
477
- y_true = df_eval["y_true_alt"].astype(int).values
478
- y_pred = df_eval["y_pred_alt"].astype(int).values
479
- y_score = df_eval["proba_alt"].astype(float).values if proba is not None else None
480
-
481
- overall = compute_metrics_block(y_true, y_pred, y_score=y_score)
482
-
483
- # Isolate / MAG splits
484
- def subset_metrics(gen_type: str):
485
- sub = df_eval[df_eval["Genome_type"] == gen_type]
486
- if sub.shape[0] == 0:
487
- return {k: np.nan for k in compute_metrics_block([0,1],[0,1],y_score=np.array([0.1,0.9])).keys()}
488
- yt = sub["y_true_alt"].astype(int).values
489
- yp = sub["y_pred_alt"].astype(int).values
490
- ys = sub["proba_alt"].astype(float).values if proba is not None else None
491
- return compute_metrics_block(yt, yp, y_score=ys)
492
-
493
- iso = subset_metrics("Isolate")
494
- mag = subset_metrics("MAG")
495
-
496
- # Pack summary row
497
- srow = {
498
- "model": model_name,
499
- "model_file": str(model_path),
500
- "feature_cols": str(feat_cols_path),
501
- "n_genomes_total": int(len(df_pred)),
502
- "n_annotated": int(df_eval.shape[0]),
503
- "n_missing_truth": int(len(df_pred) - df_eval.shape[0]),
504
- "elapsed_sec": float(elapsed),
505
- "elapsed_min": float(elapsed/60.0),
506
- }
507
-
508
- # overall prefixed
509
- for k, v in overall.items():
510
- srow[f"overall_{k}"] = v
511
-
512
- # isolate prefixed
513
- for k, v in iso.items():
514
- srow[f"isolate_{k}"] = v
515
-
516
- # mag prefixed
517
- for k, v in mag.items():
518
- srow[f"mag_{k}"] = v
519
-
520
- summary_rows.append(srow)
521
 
522
  # Write combined outputs
523
  long_csv = outdir / "all_models_predictions_long.csv"
524
  pd.DataFrame(long_rows).to_csv(long_csv, index=False)
525
  print(f"\n[WRITE] {long_csv} rows={len(long_rows)}")
526
 
527
- summary_csv = outdir / "prediction_summary.csv"
528
- df_sum = pd.DataFrame(summary_rows)
529
- df_sum.to_csv(summary_csv, index=False)
530
- print(f"[WRITE] {summary_csv} rows={len(df_sum)}")
531
-
532
- # End report: best PR-AUC overall (if available)
533
- if "overall_pr_auc" in df_sum.columns:
534
- df_rank = df_sum.sort_values(["overall_pr_auc", "overall_roc_auc"], ascending=False, na_position="last")
535
- print("\n" + "="*80)
536
- print("[REPORT] Top models by overall PR-AUC (ground truth ALT = Genetic_code_ID != 11):")
537
- cols = [
538
- "model",
539
- "n_annotated",
540
- "overall_positives",
541
- "overall_precision",
542
- "overall_recall",
543
- "overall_f1",
544
- "overall_pr_auc",
545
- "overall_roc_auc",
546
- "isolate_fn", "isolate_fp", "mag_fn", "mag_fp",
547
- "elapsed_min",
548
- ]
549
- cols = [c for c in cols if c in df_rank.columns]
550
- print(df_rank[cols].head(15).to_string(index=False))
551
- print("="*80)
 
 
 
552
 
553
  print("[DONE]")
554
 
 
555
  if __name__ == "__main__":
556
  main()
557
-
 
2
  # -*- coding: utf-8 -*-
3
 
4
  """
5
+ predict_models_dir.py
6
 
7
+ Predict for all models in models_dir on a folder of FASTA genomes.
8
+ Optionally annotate with ground truth from a TSV and compute the same metrics
9
+ as in your original script (overall + Isolate + MAG + AUC).
10
 
11
+ Inputs:
12
+ --genomes_dir Folder with FASTA files (.fna/.fa/.fasta)
13
+ --models_dir Folder with model_*.joblib + feature_columns_*.json
14
+ --outdir Output folder
15
+ --truth_tsv OPTIONAL: genomes-all_metadata_with_genetic_code_id_noNA.tsv
16
+ (must contain Genome, Genome_type, Genetic_code_ID)
17
+
18
+ Ground truth (if provided):
19
  ALT = Genetic_code_ID != 11
20
  STD = Genetic_code_ID == 11
21
 
22
+ Outputs:
23
+ - <outdir>/<model>__pred.csv (per model, per genome)
24
+ - <outdir>/all_models_predictions_long.csv (long: model x genome)
25
+ - <outdir>/prediction_summary.csv (ONLY if truth_tsv is provided)
26
+ - <outdir>/top_models_by_pr_auc.txt (ONLY if truth_tsv is provided)
 
 
 
27
 
28
+ Requires:
29
+ - aragorn in PATH (or pass --aragorn)
30
  """
31
 
32
  import os
 
42
  import pandas as pd
43
  from joblib import load as joblib_load
44
 
 
45
  from sklearn.metrics import (
46
  confusion_matrix,
47
  accuracy_score,
 
52
  average_precision_score,
53
  )
54
 
55
+ from sklearn.base import BaseEstimator, ClassifierMixin, clone
56
+
57
+
58
  # =========================
59
  # PU class (for joblib load)
60
  # =========================
 
 
61
  class PUBaggingClassifier(BaseEstimator, ClassifierMixin):
62
  def __init__(self, base_estimator, n_bags=15, u_ratio=3.0, random_state=42):
63
  self.base_estimator = base_estimator
 
260
 
261
 
262
  # =========================
263
+ # Ground truth from TSV (optional)
264
  # =========================
265
  def load_truth_tsv(tsv_path: str) -> pd.DataFrame:
266
  df = pd.read_csv(tsv_path, sep="\t", dtype=str)
267
+
268
+ for col in ["Genome", "Genome_type", "Genetic_code_ID"]:
269
+ if col not in df.columns:
270
+ raise ValueError(f"TSV missing column '{col}'. Columns: {list(df.columns)}")
 
 
 
271
 
272
  df["Genome"] = df["Genome"].astype(str)
273
  df["Genome_type"] = df["Genome_type"].astype(str)
 
 
274
  df["Genetic_code_ID"] = pd.to_numeric(df["Genetic_code_ID"], errors="coerce").astype("Int64")
275
 
276
  # ALT ground truth: != 11
 
281
 
282
 
283
  # =========================
284
+ # Metrics (only if truth exists)
285
  # =========================
286
  def safe_confusion(y_true, y_pred):
287
  cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
 
309
  "fp_rate": float(fp / (fp + tn)) if (fp + tn) > 0 else np.nan,
310
  }
311
 
 
312
  if y_score is not None:
313
  y_score = np.asarray(y_score, dtype=float)
314
  if n > 0 and len(np.unique(y_true)) == 2:
 
352
  # Main
353
  # =========================
354
  def main():
355
+ ap = argparse.ArgumentParser(
356
+ description="Predict for all models in a directory; optionally annotate truth from TSV and compute metrics."
357
+ )
358
+ ap.add_argument("--genomes_dir", required=True, help="Folder with FASTA genomes (.fna/.fa/.fasta).")
359
+ ap.add_argument("--models_dir", required=True, help="Folder with model_*.joblib + feature_columns_*.json.")
360
+ ap.add_argument("--outdir", required=True, help="Output folder for CSV predictions.")
361
+ ap.add_argument("--aragorn", default="aragorn", help="Path to ARAGORN binary.")
362
+ ap.add_argument("--feature_cols", default=None, help="Optional: force a specific feature_columns_*.json.")
363
+ ap.add_argument("--reuse_aragorn", action="store_true", help="Reuse *.aragorn.txt if it exists and is fresh.")
364
+ ap.add_argument(
365
+ "--truth_tsv",
366
+ default=None,
367
+ help="OPTIONAL: genomes-all_metadata_with_genetic_code_id_noNA.tsv (Genome, Genome_type, Genetic_code_ID).",
368
+ )
369
  args = ap.parse_args()
370
 
371
  set_single_thread_env()
 
375
  outdir = Path(args.outdir)
376
  outdir.mkdir(parents=True, exist_ok=True)
377
 
 
 
 
 
 
378
  fasta_files = list_fasta_files(str(genomes_dir))
379
  if not fasta_files:
380
+ raise SystemExit(f"No FASTA files found in {genomes_dir}")
381
 
382
  models = find_models(models_dir)
383
  if not models:
384
+ raise SystemExit(f"No model_*.joblib found in {models_dir}")
385
 
386
  feat_cols_path = pick_feature_cols(models_dir, args.feature_cols)
387
 
388
+ print(f"[INFO] Genomes : {len(fasta_files)} in {genomes_dir}")
389
+ print(f"[INFO] Models : {len(models)} in {models_dir}")
 
390
  print(f"[INFO] FeatCols: {feat_cols_path}")
391
+ print(f"[INFO] Truth : {args.truth_tsv if args.truth_tsv else '(none)'}")
392
 
393
  # Load feature columns
394
  with open(feat_cols_path, "r") as fh:
395
  feature_columns = json.load(fh)
396
 
397
+ # Optional truth
398
+ truth = None
399
+ if args.truth_tsv:
400
+ truth = load_truth_tsv(args.truth_tsv)
401
+
402
  # Build features once (shared across all models)
403
  t_feat0 = time.time()
404
  rows, accs = [], []
 
417
  X = pd.DataFrame(rows, index=accs)[feature_columns]
418
  print(f"[FEAT] Built X={X.shape} in {(time.time()-t_feat0):.1f}s")
419
 
420
+ # Annotation base table (always exists)
421
+ ann = pd.DataFrame({"Genome": accs})
422
+
423
+ if truth is not None:
424
+ ann = ann.merge(truth, how="left", on="Genome")
425
+ n_annot = int(ann["y_true_alt"].notna().sum())
426
+ n_missing = int(len(ann) - n_annot)
427
+ print(f"[TRUTH] Annotated: {n_annot}/{len(ann)} Missing_in_TSV: {n_missing}")
428
+ else:
429
+ # Ensure columns exist for consistent outputs
430
+ ann["Genome_type"] = pd.NA
431
+ ann["Genetic_code_ID"] = pd.NA
432
+ ann["y_true_alt"] = pd.NA
433
+ ann["true_label"] = pd.NA
434
 
 
435
  long_rows = []
436
  summary_rows = []
437
 
 
444
  t0 = time.time()
445
  model = joblib_load(model_path)
446
 
447
+ # Probabilities (if possible)
448
+ proba = None
449
  if hasattr(model, "predict_proba"):
450
+ try:
451
+ proba = model.predict_proba(X)[:, 1]
452
+ except Exception:
453
+ proba = None
454
 
455
+ # Pred class
456
  if hasattr(model, "predict"):
457
  try:
458
  yhat = model.predict(X)
 
463
 
464
  elapsed = time.time() - t0
465
 
 
466
  df_pred = ann.copy()
467
  df_pred["model"] = model_name
468
  df_pred["y_pred_alt"] = np.asarray(yhat).astype(int)
469
  df_pred["pred_label"] = df_pred["y_pred_alt"].map({0: "STD", 1: "ALT"})
470
+ df_pred["proba_alt"] = np.asarray(proba, dtype=float) if proba is not None else np.nan
 
 
 
471
 
472
  out_csv = outdir / f"{model_name}__pred.csv"
473
  df_pred.to_csv(out_csv, index=False)
 
475
 
476
  # Long output
477
  keep_cols = ["model", "Genome", "Genome_type", "Genetic_code_ID", "y_true_alt", "y_pred_alt", "proba_alt"]
478
+ keep_cols = [c for c in keep_cols if c in df_pred.columns]
479
+ long_rows.extend(df_pred[keep_cols].to_dict(orient="records"))
480
+
481
+ # Metrics only if we have truth annotations
482
+ if truth is not None:
483
+ df_eval = df_pred[df_pred["y_true_alt"].notna()].copy()
484
+ if df_eval.shape[0] == 0:
485
+ print("[METRICS] No annotated genomes for this run (truth TSV did not match Genome names).")
486
+ continue
487
+
488
+ y_true = df_eval["y_true_alt"].astype(int).values
489
+ y_pred = df_eval["y_pred_alt"].astype(int).values
490
+ y_score = df_eval["proba_alt"].astype(float).values if proba is not None else None
491
+
492
+ overall = compute_metrics_block(y_true, y_pred, y_score=y_score)
493
+
494
+ def subset_metrics(gen_type: str):
495
+ sub = df_eval[df_eval["Genome_type"] == gen_type]
496
+ if sub.shape[0] == 0:
497
+ return None
498
+ yt = sub["y_true_alt"].astype(int).values
499
+ yp = sub["y_pred_alt"].astype(int).values
500
+ ys = sub["proba_alt"].astype(float).values if proba is not None else None
501
+ return compute_metrics_block(yt, yp, y_score=ys)
502
+
503
+ iso = subset_metrics("Isolate")
504
+ mag = subset_metrics("MAG")
505
+
506
+ srow = {
507
+ "model": model_name,
508
+ "model_file": str(model_path),
509
+ "feature_cols": str(feat_cols_path),
510
+ "n_genomes_total": int(len(df_pred)),
511
+ "n_annotated": int(df_eval.shape[0]),
512
+ "n_missing_truth": int(len(df_pred) - df_eval.shape[0]),
513
+ "elapsed_sec": float(elapsed),
514
+ "elapsed_min": float(elapsed/60.0),
515
+ }
516
+
517
+ for k, v in overall.items():
518
+ srow[f"overall_{k}"] = v
519
+
520
+ if iso is not None:
521
+ for k, v in iso.items():
522
+ srow[f"isolate_{k}"] = v
523
+
524
+ if mag is not None:
525
+ for k, v in mag.items():
526
+ srow[f"mag_{k}"] = v
527
+
528
+ summary_rows.append(srow)
 
 
 
 
 
 
529
 
530
  # Write combined outputs
531
  long_csv = outdir / "all_models_predictions_long.csv"
532
  pd.DataFrame(long_rows).to_csv(long_csv, index=False)
533
  print(f"\n[WRITE] {long_csv} rows={len(long_rows)}")
534
 
535
+ if truth is not None:
536
+ summary_csv = outdir / "prediction_summary.csv"
537
+ df_sum = pd.DataFrame(summary_rows)
538
+ df_sum.to_csv(summary_csv, index=False)
539
+ print(f"[WRITE] {summary_csv} rows={len(df_sum)}")
540
+
541
+ # Top models report
542
+ if not df_sum.empty and "overall_pr_auc" in df_sum.columns:
543
+ df_rank = df_sum.sort_values(["overall_pr_auc", "overall_roc_auc"], ascending=False, na_position="last")
544
+ report_path = outdir / "top_models_by_pr_auc.txt"
545
+ cols = [
546
+ "model",
547
+ "n_annotated",
548
+ "overall_positives",
549
+ "overall_precision",
550
+ "overall_recall",
551
+ "overall_f1",
552
+ "overall_pr_auc",
553
+ "overall_roc_auc",
554
+ "isolate_fn", "isolate_fp", "mag_fn", "mag_fp",
555
+ "elapsed_min",
556
+ ]
557
+ cols = [c for c in cols if c in df_rank.columns]
558
+ with open(report_path, "w", encoding="utf-8") as f:
559
+ f.write("Top models by overall PR-AUC (ALT = Genetic_code_ID != 11)\n")
560
+ f.write(df_rank[cols].head(25).to_string(index=False))
561
+ f.write("\n")
562
+ print(f"[WRITE] {report_path}")
563
 
564
  print("[DONE]")
565
 
566
+
567
  if __name__ == "__main__":
568
  main()