#!/usr/bin/env bash
set -euo pipefail

# eDNA metabarcoding pipeline for paired-end 12S reads:
#   primer removal -> QC + merging -> quality filtering -> dereplication
#   -> clustering (97% OTUs, SWARM, UNOISE3) -> chimera removal
#   -> taxonomy (BLAST + vsearch global search, LCA consensus)
#   -> community analyses -> final report.

# Cap worker threads at 8 even on larger machines.
THREADS=$(( $(nproc) > 8 ? 8 : $(nproc) ))
WORKDIR="$(cd "$(dirname "$0")" && pwd)"
DATA="${WORKDIR}/data"
REF="${WORKDIR}/reference"
OUT="${WORKDIR}/outputs"
RESULTS="${WORKDIR}/results"

# The embedded Python and R steps below address files relative to the
# script directory, so run from there regardless of the caller's cwd.
cd "${WORKDIR}"

mkdir -p "${OUT}"/{trimmed,merged,filtered,qc,derep,clusters,chimera,taxonomy,community}
mkdir -p "${RESULTS}"

SAMPLES=(DRR205394 DRR205395 DRR205396 DRR205397 DRR205398 DRR205399)

FWD_PRIMER="GTCGGTAAAACTCGTGCCAGC"
REV_PRIMER="CATAGTGGGGTATCTAATCCCAGTTTG"
# Reverse complement of the reverse primer; kept available for the optional
# read-through trimming noted at Level 1.
REV_PRIMER_RC=$(echo "$REV_PRIMER" | tr ACGTacgt TGCAtgca | rev)
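
# The primer pair above appears to be the widely used MiFish-U 12S assay
# (Miya et al. 2015), though that attribution is an assumption here.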

echo "=== Level 1: Primer removal ==="
for S in "${SAMPLES[@]}"; do
  if [ ! -f "${OUT}/trimmed/${S}_R1.fastq.gz" ]; then
    # Optional read-through trimming (a sketch, not enabled here): when the
    # amplicon can be shorter than the read, also pass
    #   -a "${REV_PRIMER_RC}" -A "$(echo "$FWD_PRIMER" | tr ACGTacgt TGCAtgca | rev)"
    cutadapt \
      -g "${FWD_PRIMER}" \
      -G "${REV_PRIMER}" \
      --discard-untrimmed \
      --minimum-length 50 \
      -j "${THREADS}" \
      -o "${OUT}/trimmed/${S}_R1.fastq.gz" \
      -p "${OUT}/trimmed/${S}_R2.fastq.gz" \
      "${DATA}/${S}_R1.fastq.gz" \
      "${DATA}/${S}_R2.fastq.gz" \
      > "${OUT}/trimmed/${S}_cutadapt.log" 2>&1
    echo "  ${S}: trimmed"
  fi
done
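
# Optional spot check (a sketch; assumes cutadapt's standard paired-end
# summary line "Pairs written (passing filters)" appears in each log):
grep -H "Pairs written" "${OUT}/trimmed/"*_cutadapt.log 2>/dev/null || true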

echo "=== Level 2: QC + merge + stats ==="

if [ ! -f "${OUT}/qc/fastqc_done" ]; then
  for S in "${SAMPLES[@]}"; do
    fastqc -t "${THREADS}" -o "${OUT}/qc/" \
      "${OUT}/trimmed/${S}_R1.fastq.gz" \
      "${OUT}/trimmed/${S}_R2.fastq.gz" \
      > /dev/null 2>&1
  done
  touch "${OUT}/qc/fastqc_done"
  echo "  FastQC done"
fi

if [ ! -f "${OUT}/qc/seqkit_stats.tsv" ]; then
  seqkit stats -T -j "${THREADS}" "${OUT}/trimmed/"*.fastq.gz > "${OUT}/qc/seqkit_stats.tsv" 2>/dev/null
  echo "  seqkit stats done"
fi

for S in "${SAMPLES[@]}"; do
  if [ ! -f "${OUT}/merged/${S}.fastq" ]; then
    vsearch --fastq_mergepairs "${OUT}/trimmed/${S}_R1.fastq.gz" \
      --reverse "${OUT}/trimmed/${S}_R2.fastq.gz" \
      --fastqout "${OUT}/merged/${S}.fastq" \
      --fastq_maxdiffs 10 \
      --fastq_minovlen 50 \
      --threads "${THREADS}" \
      --label_suffix ";sample=${S}" \
      > "${OUT}/merged/${S}_merge.log" 2>&1
    echo "  ${S}: merged"
  fi
done
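
# Per-sample merge tally (a quick sketch; FASTQ records are 4 lines each):
for S in "${SAMPLES[@]}"; do
  echo "  ${S}: $(( $(wc -l < "${OUT}/merged/${S}.fastq") / 4 )) merged reads"
done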

echo "=== Level 3: Convergence 1 (QC + merged) ==="

if [ ! -f "${OUT}/qc/multiqc_report.html" ]; then
  multiqc "${OUT}/qc/" "${OUT}/trimmed/" -o "${OUT}/qc/" --force > /dev/null 2>&1 || true
  echo "  MultiQC done"
fi

echo "=== Level 4: Quality filtering ==="
for S in "${SAMPLES[@]}"; do
  if [ ! -f "${OUT}/filtered/${S}.fasta" ]; then
    # --relabel produces headers like "<sample>.<n>"; that sample prefix is
    # what --otutabout relies on later to assign reads to samples.
    vsearch --fastq_filter "${OUT}/merged/${S}.fastq" \
      --fastq_maxee 1.0 \
      --fastq_minlen 100 \
      --fastq_maxlen 300 \
      --fastaout "${OUT}/filtered/${S}.fasta" \
      --relabel "${S}." \
      > "${OUT}/filtered/${S}_filter.log" 2>&1
    echo "  ${S}: filtered"
  fi
done

echo "=== Level 5: Pool + dereplicate ==="
if [ ! -f "${OUT}/derep/all_derep.fasta" ]; then
  cat "${OUT}/filtered/"*.fasta > "${OUT}/derep/all_pooled.fasta"

  # --minuniquesize 2 drops singletons; abundances travel in ";size=" labels.
  vsearch --derep_fulllength "${OUT}/derep/all_pooled.fasta" \
    --output "${OUT}/derep/all_derep.fasta" \
    --sizein --sizeout \
    --minuniquesize 2 \
    --uc "${OUT}/derep/all_derep.uc" \
    > "${OUT}/derep/derep.log" 2>&1
  echo "  Dereplication done"
fi

UNIQUE_COUNT=$(grep -c "^>" "${OUT}/derep/all_derep.fasta" || true)
echo "  Unique sequences: ${UNIQUE_COUNT}"

echo "=== Level 6: Clustering (3 methods) ==="

# Method 1: greedy centroid clustering at 97% identity.
if [ ! -f "${OUT}/clusters/otu97_centroids.fasta" ]; then
  vsearch --cluster_size "${OUT}/derep/all_derep.fasta" \
    --id 0.97 \
    --centroids "${OUT}/clusters/otu97_centroids.fasta" \
    --uc "${OUT}/clusters/otu97.uc" \
    --sizein --sizeout \
    --threads "${THREADS}" \
    > "${OUT}/clusters/otu97.log" 2>&1
  echo "  OTU 97% clustering done"
fi

# Method 2: SWARM agglomerative clustering (d=1; -z reads ";size=" labels).
if [ ! -f "${OUT}/clusters/swarm_centroids.fasta" ]; then
  vsearch --sortbysize "${OUT}/derep/all_derep.fasta" \
    --output "${OUT}/clusters/sorted_for_swarm.fasta" \
    --sizein --sizeout 2>/dev/null

  swarm -d 1 -z \
    -w "${OUT}/clusters/swarm_centroids.fasta" \
    -o "${OUT}/clusters/swarm_otus.txt" \
    -s "${OUT}/clusters/swarm_stats.txt" \
    -t "${THREADS}" \
    "${OUT}/clusters/sorted_for_swarm.fasta" \
    > "${OUT}/clusters/swarm.log" 2>&1
  echo "  SWARM clustering done"
fi

# Method 3: UNOISE3 denoising to ASVs.
if [ ! -f "${OUT}/clusters/unoise3_asvs.fasta" ]; then
  vsearch --cluster_unoise "${OUT}/derep/all_derep.fasta" \
    --centroids "${OUT}/clusters/unoise3_asvs.fasta" \
    --sizein --sizeout \
    --minsize 2 \
    > "${OUT}/clusters/unoise3.log" 2>&1
  echo "  UNOISE3 denoising done"
fi

OTU97_COUNT=$(grep -c "^>" "${OUT}/clusters/otu97_centroids.fasta" || true)
SWARM_COUNT=$(grep -c "^>" "${OUT}/clusters/swarm_centroids.fasta" || true)
UNOISE3_COUNT=$(grep -c "^>" "${OUT}/clusters/unoise3_asvs.fasta" || true)
echo "  OTU97: ${OTU97_COUNT}, SWARM: ${SWARM_COUNT}, UNOISE3: ${UNOISE3_COUNT}"

echo "=== Level 7: Convergence 2 (consensus + chimera removal) ==="

# De novo chimera detection on the denoised ASVs; the abundance annotations
# read via --sizein drive the uchime parent/child logic.
if [ ! -f "${OUT}/chimera/clean_asvs.fasta" ]; then
  vsearch --uchime_denovo "${OUT}/clusters/unoise3_asvs.fasta" \
    --nonchimeras "${OUT}/chimera/clean_asvs.fasta" \
    --chimeras "${OUT}/chimera/chimeras.fasta" \
    --sizein --sizeout \
    > "${OUT}/chimera/chimera.log" 2>&1
  echo "  Chimera removal done"
fi

CLEAN_COUNT=$(grep -c "^>" "${OUT}/chimera/clean_asvs.fasta" || true)
CHIMERA_COUNT=$(grep -c "^>" "${OUT}/chimera/chimeras.fasta" 2>/dev/null || true)
CHIMERA_COUNT=${CHIMERA_COUNT:-0}
echo "  Clean ASVs: ${CLEAN_COUNT}, Chimeras removed: ${CHIMERA_COUNT}"

echo "=== Building BLAST database ==="
if [ ! -f "${REF}/mitofish_12S.ndb" ]; then
  makeblastdb -in "${REF}/mitofish_12S.fasta" \
    -dbtype nucl \
    -out "${REF}/mitofish_12S" \
    -parse_seqids \
    > /dev/null 2>&1
  echo "  BLAST DB built"
fi

echo "=== Level 8: Taxonomy assignment ==="

# Strip ";size=" annotations once so query IDs match across BLAST, vsearch,
# and the downstream taxonomy and OTU tables. Done outside the BLAST guard
# so a resumed run still has the file available for the vsearch step.
if [ ! -f "${OUT}/taxonomy/query.fasta" ]; then
  sed 's/;size=[0-9]*//' "${OUT}/chimera/clean_asvs.fasta" > "${OUT}/taxonomy/query.fasta"
fi

if [ ! -f "${OUT}/taxonomy/blast_hits.tsv" ]; then
  blastn -query "${OUT}/taxonomy/query.fasta" \
    -db "${REF}/mitofish_12S" \
    -out "${OUT}/taxonomy/blast_hits.tsv" \
    -outfmt "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore" \
    -evalue 1e-10 \
    -max_target_seqs 10 \
    -num_threads "${THREADS}" \
    > /dev/null 2>&1
  echo "  BLAST done"
fi

if [ ! -f "${OUT}/taxonomy/vsearch_hits.tsv" ]; then
  vsearch --usearch_global "${OUT}/taxonomy/query.fasta" \
    --db "${REF}/mitofish_12S.fasta" \
    --id 0.80 \
    --maxaccepts 10 \
    --blast6out "${OUT}/taxonomy/vsearch_hits.tsv" \
    --threads "${THREADS}" \
    > "${OUT}/taxonomy/vsearch.log" 2>&1
  echo "  vsearch global search done"
fi
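
# The two searches are complementary: blastn scores local alignments while
# --usearch_global computes identity over the full query; taking the LCA of
# their combined hits (next step) is the conservative way to merge them.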

echo "=== Level 9: Convergence 3 (LCA taxonomy) ==="

if [ ! -f "${OUT}/taxonomy/lca_taxonomy.tsv" ]; then
  python3 << 'PYEOF'
import csv
from collections import defaultdict

# Reference taxonomy: one row per accession with ranked lineage columns.
tax_lookup = {}
with open("reference/mitofish_12S_taxonomy.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        acc = row['Accession']
        tax_lookup[acc] = {
            'superkingdom': row.get('Superkingdom', ''),
            'phylum': row.get('Phylum', ''),
            'class': row.get('Class', ''),
            'order': row.get('Order', ''),
            'family': row.get('Family', ''),
            'genus': row.get('Genus', ''),
            'species': row.get('Species', '')
        }

# Collect BLAST hits at >= 97% identity per query.
blast_tax = defaultdict(list)
with open("outputs/taxonomy/blast_hits.tsv") as f:
    for line in f:
        parts = line.strip().split('\t')
        qid, sid, pident = parts[0], parts[1], float(parts[2])
        if pident >= 97.0 and sid in tax_lookup:
            blast_tax[qid].append(tax_lookup[sid])

# Same for the vsearch global-alignment hits.
vsearch_tax = defaultdict(list)
with open("outputs/taxonomy/vsearch_hits.tsv") as f:
    for line in f:
        parts = line.strip().split('\t')
        qid, sid, pident = parts[0], parts[1], float(parts[2])
        if pident >= 97.0 and sid in tax_lookup:
            vsearch_tax[qid].append(tax_lookup[sid])

# Lowest common ancestor across hits: keep a rank only while all hits
# agree; blank out everything from the first disagreement downward.
RANKS = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
def lca(tax_list):
    if not tax_list:
        return {r: '' for r in RANKS}
    result = {}
    for rank in RANKS:
        values = set(t[rank] for t in tax_list if t[rank])
        if len(values) == 1:
            result[rank] = values.pop()
        else:
            result[rank] = ''
            break
    for rank in RANKS:
        if rank not in result:
            result[rank] = ''
    return result
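
# Worked example (illustrative only): two hits agreeing at genus but
# differing at species,
#   lca([{..., 'genus': 'Salmo', 'species': 'Salmo salar'},
#        {..., 'genus': 'Salmo', 'species': 'Salmo trutta'}])
# keeps the shared ranks through genus 'Salmo' and blanks 'species'.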

# Union of queries hit by either method; LCA over the combined hit lists.
all_queries = set(blast_tax) | set(vsearch_tax)
with open("outputs/taxonomy/lca_taxonomy.tsv", 'w') as f:
    f.write("asv_id\tsuperkingdom\tphylum\tclass\torder\tfamily\tgenus\tspecies\n")
    for qid in sorted(all_queries):
        combined = blast_tax.get(qid, []) + vsearch_tax.get(qid, [])
        tax = lca(combined)
        f.write(f"{qid}\t{tax['superkingdom']}\t{tax['phylum']}\t{tax['class']}\t{tax['order']}\t{tax['family']}\t{tax['genus']}\t{tax['species']}\n")

print(f"LCA taxonomy assigned to {len(all_queries)} ASVs")
PYEOF
fi

echo "=== Level 9: Community analyses ==="

if [ ! -f "${OUT}/community/otu_table.tsv" ]; then
  # Map all pooled reads back onto the clean ASVs. The size-stripped copy
  # (query.fasta) serves as the database so OTU IDs in the table match the
  # IDs in lca_taxonomy.tsv; --otutabout derives each read's sample from its
  # "<sample>.<n>" label prefix (set at Level 4).
  vsearch --usearch_global "${OUT}/derep/all_pooled.fasta" \
    --db "${OUT}/taxonomy/query.fasta" \
    --id 0.97 \
    --otutabout "${OUT}/community/otu_table.tsv" \
    --threads "${THREADS}" \
    > "${OUT}/community/map.log" 2>&1
  echo "  OTU table built"
fi
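
# Quick shape check (a sketch): rows are ASVs, columns are samples.
# head -n 3 "${OUT}/community/otu_table.tsv" | cut -f 1-4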

if [ ! -f "${OUT}/community/diversity_results.tsv" ]; then
  python3 << 'PYEOF'
import csv
import math
from collections import defaultdict

# Load the OTU table: rows are ASVs, columns are samples.
otu_table = {}
samples = []
with open("outputs/community/otu_table.tsv") as f:
    header = f.readline().strip().split('\t')
    samples = header[1:]
    for line in f:
        parts = line.strip().split('\t')
        asv_id = parts[0]
        counts = [int(x) for x in parts[1:]]
        otu_table[asv_id] = dict(zip(samples, counts))

# LCA taxonomy per ASV.
taxonomy = {}
with open("outputs/taxonomy/lca_taxonomy.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        taxonomy[row['asv_id']] = row

# Unique taxa at each rank.
species_set = set()
genus_set = set()
family_set = set()
order_set = set()
for asv_id, tax in taxonomy.items():
    if tax['species']:
        species_set.add(tax['species'])
    if tax['genus']:
        genus_set.add(tax['genus'])
    if tax['family']:
        family_set.add(tax['family'])
    if tax['order']:
        order_set.add(tax['order'])

with open("outputs/community/species_list.tsv", 'w') as f:
    f.write("species\tgenus\tfamily\torder\n")
    for sp in sorted(species_set):
        # Report the lineage of the first ASV assigned to this species.
        for asv_id, tax in taxonomy.items():
            if tax['species'] == sp:
                f.write(f"{sp}\t{tax['genus']}\t{tax['family']}\t{tax['order']}\n")
                break
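
# Shannon index per sample (standard form): H' = -sum_i p_i * ln(p_i),
# where p_i is the read share of ASV i in the sample. Read counts are an
# imperfect abundance proxy in eDNA data, so H' is best read comparatively.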
shannon_per_sample = {}
richness_per_sample = {}
for s in samples:
    counts = [otu_table[asv][s] for asv in otu_table if otu_table[asv].get(s, 0) > 0]
    total = sum(counts)
    if total == 0:
        shannon_per_sample[s] = 0.0
        richness_per_sample[s] = 0
        continue
    # Richness here counts ASVs with nonzero reads, not species.
    richness_per_sample[s] = len(counts)
    shannon = 0.0
    for c in counts:
        p = c / total
        shannon -= p * math.log(p)
    shannon_per_sample[s] = round(shannon, 4)

with open("outputs/community/diversity_results.tsv", 'w') as f:
    f.write("sample\tshannon_diversity\tasv_richness\n")
    for s in samples:
        f.write(f"{s}\t{shannon_per_sample[s]}\t{richness_per_sample[s]}\n")
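
# Naive detection rate per species: (# samples with >= 1 read) / (# samples).
# This ignores imperfect detection; a formal occupancy model would estimate
# detection probability separately.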
species_samples = defaultdict(set)   # samples in which each species appears
species_total_reads = defaultdict(int)
for asv_id in otu_table:
    sp = taxonomy.get(asv_id, {}).get('species', '')
    if not sp:
        continue
    for s in samples:
        if otu_table[asv_id].get(s, 0) > 0:
            # A set avoids double-counting a sample when several ASVs map to
            # the same species (the original int counter could exceed 1.0).
            species_samples[sp].add(s)
            species_total_reads[sp] += otu_table[asv_id][s]

with open("outputs/community/detection_probability.tsv", 'w') as f:
    f.write("species\tsamples_detected\tdetection_rate\ttotal_reads\n")
    for sp in sorted(species_samples.keys()):
        n_det = len(species_samples[sp])
        det_rate = round(n_det / len(samples), 4)
        f.write(f"{sp}\t{n_det}\t{det_rate}\t{species_total_reads[sp]}\n")

print(f"Species: {len(species_set)}, Genera: {len(genus_set)}, Families: {len(family_set)}, Orders: {len(order_set)}")
print(f"Shannon range: {min(shannon_per_sample.values()):.4f} - {max(shannon_per_sample.values()):.4f}")
PYEOF
  echo "  Python community analysis done"
fi

# Beta diversity via R/vegan (Bray-Curtis between samples).
if [ ! -f "${OUT}/community/beta_diversity.tsv" ]; then
  cat > "${OUT}/community/run_vegan.R" << 'REOF'
library(vegan)
# Transpose so samples become rows, as vegdist expects.
otu <- read.delim("outputs/community/otu_table.tsv", row.names=1, check.names=FALSE)
otu_t <- t(otu)
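
# Bray-Curtis dissimilarity between samples x and y ("bray" in vegan):
#   BC(x, y) = sum_i |x_i - y_i| / sum_i (x_i + y_i)
# 0 means identical composition; 1 means no shared ASVs.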
bc <- as.matrix(vegdist(otu_t, method="bray"))
write.table(bc, "outputs/community/beta_diversity.tsv", sep="\t", quote=FALSE)
cat("Beta diversity (Bray-Curtis) range:", range(bc[lower.tri(bc)]), "\n")
cat("Mean Bray-Curtis:", mean(bc[lower.tri(bc)]), "\n")
REOF
  Rscript "${OUT}/community/run_vegan.R"
  echo "  R/vegan beta diversity done"
fi

echo "=== Level 10: Final report ==="

python3 << 'PYEOF'
import csv
import os

SAMPLES = ["DRR205394", "DRR205395", "DRR205396",
           "DRR205397", "DRR205398", "DRR205399"]

metrics = {}

# Total reads after primer trimming (seqkit stats covers both mates of
# every trimmed file, so this counts R1 + R2).
total_trimmed = 0
with open("outputs/qc/seqkit_stats.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        total_trimmed += int(row['num_seqs'].replace(',', ''))
metrics['total_trimmed_reads'] = total_trimmed

# Total merged read pairs: parse the vsearch merge logs, falling back to
# counting FASTQ records (4 lines each) if parsing yields nothing. The log
# line looks like "  12345  Merged (95.0%)" and never contains "pairs", so
# the original '"pairs" in line' test silently matched nothing.
total_merged = 0
for s in SAMPLES:
    logf = f"outputs/merged/{s}_merge.log"
    if os.path.exists(logf):
        with open(logf) as f:
            for line in f:
                if "Merged" in line:
                    for p in line.strip().split():
                        if p.replace(',', '').isdigit():
                            total_merged += int(p.replace(',', ''))
                            break
                    break

if total_merged == 0:
    for s in SAMPLES:
        mf = f"outputs/merged/{s}.fastq"
        if os.path.exists(mf):
            with open(mf) as f:
                total_merged += sum(1 for _ in f) // 4
metrics['total_merged_reads'] = total_merged

# Merge rate relative to trimmed pairs (the trimmed total includes both
# mates, hence the division by 2).
if total_trimmed > 0:
    metrics['merge_rate'] = round(total_merged / (total_trimmed / 2) * 100, 2)
else:
    metrics['merge_rate'] = 0.0

def count_fasta(path):
    """Count FASTA records (header lines) in a file."""
    with open(path) as fh:
        return sum(1 for line in fh if line.startswith('>'))

metrics['unique_sequences'] = count_fasta("outputs/derep/all_derep.fasta")

for method, fname in [("clusters_otu97", "outputs/clusters/otu97_centroids.fasta"),
                      ("clusters_swarm", "outputs/clusters/swarm_centroids.fasta"),
                      ("clusters_denoised", "outputs/clusters/unoise3_asvs.fasta")]:
    metrics[method] = count_fasta(fname)

metrics['clean_sequence_count'] = count_fasta("outputs/chimera/clean_asvs.fasta")
chimera_fasta = "outputs/chimera/chimeras.fasta"
metrics['chimera_count'] = count_fasta(chimera_fasta) if os.path.exists(chimera_fasta) else 0

# An ASV counts as assigned if the LCA reached at least family level. Note
# that ASVs with no hits at all never enter the LCA table, so total_asvs
# reflects ASVs with at least one >= 97% hit.
assigned = 0
total_asvs = 0
with open("outputs/taxonomy/lca_taxonomy.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        total_asvs += 1
        if row['species'] or row['genus'] or row['family']:
            assigned += 1
metrics['assigned_sequences'] = assigned
metrics['unassigned_sequences'] = total_asvs - assigned

# Unique taxa recovered at each rank.
species_set = set()
genus_set = set()
family_set = set()
order_set = set()
with open("outputs/taxonomy/lca_taxonomy.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        if row['species']: species_set.add(row['species'])
        if row['genus']: genus_set.add(row['genus'])
        if row['family']: family_set.add(row['family'])
        if row['order']: order_set.add(row['order'])
metrics['species_count'] = len(species_set)
metrics['genus_count'] = len(genus_set)
metrics['family_count'] = len(family_set)
metrics['order_count'] = len(order_set)

# Alpha diversity summaries from the community step.
shannon_vals = []
richness_vals = []
with open("outputs/community/diversity_results.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        shannon_vals.append(float(row['shannon_diversity']))
        richness_vals.append(int(row['asv_richness']))
metrics['mean_shannon_diversity'] = round(sum(shannon_vals)/len(shannon_vals), 4)
metrics['min_asv_richness'] = min(richness_vals)
metrics['max_asv_richness'] = max(richness_vals)

# Beta diversity: read the Bray-Curtis matrix written by R and summarize
# the upper triangle (each sample pair once).
bc_vals = []
with open("outputs/community/beta_diversity.tsv") as f:
    f.readline()  # header row of sample names
    rows = []
    for line in f:
        parts = line.strip().split('\t')
        rows.append([float(x) for x in parts[1:]])
for i in range(len(rows)):
    for j in range(i + 1, len(rows)):
        bc_vals.append(rows[i][j])
if bc_vals:
    metrics['mean_beta_diversity'] = round(sum(bc_vals)/len(bc_vals), 4)
    metrics['min_beta_diversity'] = round(min(bc_vals), 4)
    metrics['max_beta_diversity'] = round(max(bc_vals), 4)

det_rates = []
with open("outputs/community/detection_probability.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        det_rates.append(float(row['detection_rate']))
if det_rates:
    metrics['mean_detection_rate'] = round(sum(det_rates)/len(det_rates), 4)

with open("results/report.csv", 'w') as f:
    f.write("metric,value\n")
    for k, v in metrics.items():
        f.write(f"{k},{v}\n")

print("=== Report generated ===")
for k, v in metrics.items():
    print(f"  {k} = {v}")
PYEOF

echo "=== Pipeline complete ==="