lingzhi227 committed on
Commit
b247629
·
verified ·
1 Parent(s): a4388e7

Upload tasks/edna-metabarcoding/run_script.sh with huggingface_hub

Browse files
tasks/edna-metabarcoding/run_script.sh ADDED
@@ -0,0 +1,670 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
set -euo pipefail

# ============================================================
# eDNA Metabarcoding Pipeline: Aquatic Biodiversity Assessment
# ============================================================
# DAG structure (depth=10, convergence=4), per 6 paired-end samples:
#   Level 1 : cutadapt primer removal (per sample)
#   Level 2 : vsearch merge pairs | FastQC | seqkit stats     (parallel)
#   Level 3 : CONVERGENCE 1 - MultiQC over QC + merged reads
#   Level 4 : vsearch quality filter (per sample)
#   Level 5 : pool samples + vsearch dereplicate
#   Level 6 : vsearch 97% OTU | SWARM | vsearch UNOISE3       (parallel)
#   Level 7 : CONVERGENCE 2 - consensus selection + chimera removal
#   Level 8 : BLAST taxonomy | vsearch usearch_global         (parallel)
#   Level 9 : CONVERGENCE 3 - LCA consensus taxonomy, then
#             species list | diversity | detection probability (parallel)
#   Level 10: CONVERGENCE 4 - final report with QC
#
# Longest path: primer_removal -> merge -> QC_convergence ->
#   quality_filter -> dereplicate -> cluster -> chimera_removal ->
#   BLAST -> LCA -> diversity -> report = depth 10
# ============================================================

# Cap worker threads at 8 (or the machine's core count if lower).
THREADS=$(( $(nproc) > 8 ? 8 : $(nproc) ))
WORKDIR="$(cd "$(dirname "$0")" && pwd)"
DATA="${WORKDIR}/data"
REF="${WORKDIR}/reference"
OUT="${WORKDIR}/outputs"
RESULTS="${WORKDIR}/results"

# FIX: the python3/Rscript heredocs further down address files by the
# RELATIVE paths "outputs/...", "reference/..." and "results/...", so the
# pipeline only worked when launched from the script directory. Make the
# working directory explicit instead of relying on the caller's cwd.
cd "${WORKDIR}"

mkdir -p "${OUT}"/{trimmed,merged,filtered,qc,derep,clusters,chimera,taxonomy,community}
mkdir -p "${RESULTS}"

# Sample list (DDBJ run accessions)
SAMPLES=(DRR205394 DRR205395 DRR205396 DRR205397 DRR205398 DRR205399)

# MiFish-U primer sequences (Miya et al. 2015)
FWD_PRIMER="GTCGGTAAAACTCGTGCCAGC"
REV_PRIMER="CATAGTGGGGTATCTAATCCCAGTTTG"
# Reverse complement of the reverse primer (complement via tr, then reverse).
# NOTE(review): not passed to cutadapt below; kept for read-through trimming.
REV_PRIMER_RC=$(echo "$REV_PRIMER" | tr ACGTacgt TGCAtgca | rev)
80
# ------------------------------------------------------------
# Level 1: Primer removal with cutadapt (per sample).
# The R1 output doubles as a checkpoint: samples already trimmed
# on a previous run are skipped.
# ------------------------------------------------------------
echo "=== Level 1: Primer removal ==="
for sample in "${SAMPLES[@]}"; do
  [ -f "${OUT}/trimmed/${sample}_R1.fastq.gz" ] && continue
  cutadapt \
    -g "${FWD_PRIMER}" \
    -G "${REV_PRIMER}" \
    --discard-untrimmed \
    --minimum-length 50 \
    -j ${THREADS} \
    -o "${OUT}/trimmed/${sample}_R1.fastq.gz" \
    -p "${OUT}/trimmed/${sample}_R2.fastq.gz" \
    "${DATA}/${sample}_R1.fastq.gz" \
    "${DATA}/${sample}_R2.fastq.gz" \
    > "${OUT}/trimmed/${sample}_cutadapt.log" 2>&1
  echo " ${sample}: trimmed"
done
101
# ------------------------------------------------------------
# Level 2: QC + merge + stats (three independent branches)
# ------------------------------------------------------------
echo "=== Level 2: QC + merge + stats ==="

# Branch 2a: FastQC on trimmed reads; a sentinel file marks completion.
if ! [ -f "${OUT}/qc/fastqc_done" ]; then
  for sample in "${SAMPLES[@]}"; do
    fastqc -t ${THREADS} -o "${OUT}/qc/" \
      "${OUT}/trimmed/${sample}_R1.fastq.gz" \
      "${OUT}/trimmed/${sample}_R2.fastq.gz" \
      > /dev/null 2>&1
  done
  touch "${OUT}/qc/fastqc_done"
  echo " FastQC done"
fi

# Branch 2b: tab-separated seqkit read statistics over all trimmed files.
if ! [ -f "${OUT}/qc/seqkit_stats.tsv" ]; then
  seqkit stats -T -j ${THREADS} "${OUT}/trimmed/"*.fastq.gz > "${OUT}/qc/seqkit_stats.tsv" 2>/dev/null
  echo " seqkit stats done"
fi

# Branch 2c: vsearch paired-end merging, one sample at a time; the
# ";sample=NAME" suffix tags each merged read with its origin.
for sample in "${SAMPLES[@]}"; do
  [ -f "${OUT}/merged/${sample}.fastq" ] && continue
  vsearch --fastq_mergepairs "${OUT}/trimmed/${sample}_R1.fastq.gz" \
    --reverse "${OUT}/trimmed/${sample}_R2.fastq.gz" \
    --fastqout "${OUT}/merged/${sample}.fastq" \
    --fastq_maxdiffs 10 \
    --fastq_minovlen 50 \
    --threads ${THREADS} \
    --label_suffix ";sample=${sample}" \
    > "${OUT}/merged/${sample}_merge.log" 2>&1
  echo " ${sample}: merged"
done
139
# ------------------------------------------------------------
# Level 3: CONVERGENCE 1 — QC reports and merged reads both exist
# ------------------------------------------------------------
echo "=== Level 3: Convergence 1 (QC + merged) ==="
# Aggregate FastQC/cutadapt outputs. MultiQC is best-effort (|| true):
# its absence or failure must not abort the pipeline under set -e.
if ! [ -f "${OUT}/qc/multiqc_report.html" ]; then
  multiqc "${OUT}/qc/" "${OUT}/trimmed/" -o "${OUT}/qc/" --force > /dev/null 2>&1 || true
  echo " MultiQC done"
fi
149
# ------------------------------------------------------------
# Level 4: Quality filtering (per sample, expected-error based).
# Converts merged FASTQ to FASTA, relabelling reads "SAMPLE.N".
# ------------------------------------------------------------
echo "=== Level 4: Quality filtering ==="
for sample in "${SAMPLES[@]}"; do
  [ -f "${OUT}/filtered/${sample}.fasta" ] && continue
  vsearch --fastq_filter "${OUT}/merged/${sample}.fastq" \
    --fastq_maxee 1.0 \
    --fastq_minlen 100 \
    --fastq_maxlen 300 \
    --fastaout "${OUT}/filtered/${sample}.fasta" \
    --relabel "${sample}." \
    > "${OUT}/filtered/${sample}_filter.log" 2>&1
  echo " ${sample}: filtered"
done
166
# ------------------------------------------------------------
# Level 5: Pool all samples, then collapse exact duplicates
# ------------------------------------------------------------
echo "=== Level 5: Pool + dereplicate ==="
if ! [ -f "${OUT}/derep/all_derep.fasta" ]; then
  # Concatenate every per-sample filtered FASTA into one pool
  cat "${OUT}/filtered/"*.fasta > "${OUT}/derep/all_pooled.fasta"

  # Full-length dereplication; singletons (--minuniquesize 2) are dropped
  vsearch --derep_fulllength "${OUT}/derep/all_pooled.fasta" \
    --output "${OUT}/derep/all_derep.fasta" \
    --sizein --sizeout \
    --minuniquesize 2 \
    --uc "${OUT}/derep/all_derep.uc" \
    > "${OUT}/derep/derep.log" 2>&1
  echo " Dereplication done"
fi

# grep -c exits 1 when there are no matches; || true keeps set -e happy.
UNIQUE_COUNT=$(grep -c "^>" "${OUT}/derep/all_derep.fasta" || true)
echo " Unique sequences: ${UNIQUE_COUNT}"
187
# ------------------------------------------------------------
# Level 6: Three parallel clustering methods over the same
# dereplicated pool; each writes its own centroid FASTA.
# ------------------------------------------------------------
echo "=== Level 6: Clustering (3 methods) ==="

# Method 6a: classic OTU clustering at 97% identity (vsearch)
if ! [ -f "${OUT}/clusters/otu97_centroids.fasta" ]; then
  vsearch --cluster_size "${OUT}/derep/all_derep.fasta" \
    --id 0.97 \
    --centroids "${OUT}/clusters/otu97_centroids.fasta" \
    --uc "${OUT}/clusters/otu97.uc" \
    --sizein --sizeout \
    --threads ${THREADS} \
    > "${OUT}/clusters/otu97.log" 2>&1
  echo " OTU 97% clustering done"
fi

# Method 6b: SWARM clustering (d=1, -z for usearch-style ;size= tags)
if ! [ -f "${OUT}/clusters/swarm_centroids.fasta" ]; then
  # swarm expects abundance-sorted, dereplicated input
  vsearch --sortbysize "${OUT}/derep/all_derep.fasta" \
    --output "${OUT}/clusters/sorted_for_swarm.fasta" \
    --sizein --sizeout 2>/dev/null

  swarm -d 1 -z \
    -w "${OUT}/clusters/swarm_centroids.fasta" \
    -o "${OUT}/clusters/swarm_otus.txt" \
    -s "${OUT}/clusters/swarm_stats.txt" \
    -t ${THREADS} \
    "${OUT}/clusters/sorted_for_swarm.fasta" \
    > "${OUT}/clusters/swarm.log" 2>&1
  echo " SWARM clustering done"
fi

# Method 6c: UNOISE3 denoising into ASVs (vsearch)
if ! [ -f "${OUT}/clusters/unoise3_asvs.fasta" ]; then
  vsearch --cluster_unoise "${OUT}/derep/all_derep.fasta" \
    --centroids "${OUT}/clusters/unoise3_asvs.fasta" \
    --sizein --sizeout \
    --minsize 2 \
    > "${OUT}/clusters/unoise3.log" 2>&1
  echo " UNOISE3 denoising done"
fi

# Centroid counts per method (|| true: grep -c exits 1 on zero matches)
OTU97_COUNT=$(grep -c "^>" "${OUT}/clusters/otu97_centroids.fasta" || true)
SWARM_COUNT=$(grep -c "^>" "${OUT}/clusters/swarm_centroids.fasta" || true)
UNOISE3_COUNT=$(grep -c "^>" "${OUT}/clusters/unoise3_asvs.fasta" || true)
echo " OTU97: ${OTU97_COUNT}, SWARM: ${SWARM_COUNT}, UNOISE3: ${UNOISE3_COUNT}"
236
# ------------------------------------------------------------
# Level 7: CONVERGENCE 2 — consensus selection + chimera removal.
# The UNOISE3 ASVs are taken as the consensus set (most conservative
# denoising method); de novo chimera detection is applied on top.
# ------------------------------------------------------------
echo "=== Level 7: Convergence 2 (consensus + chimera removal) ==="

if ! [ -f "${OUT}/chimera/clean_asvs.fasta" ]; then
  vsearch --uchime_denovo "${OUT}/clusters/unoise3_asvs.fasta" \
    --nonchimeras "${OUT}/chimera/clean_asvs.fasta" \
    --chimeras "${OUT}/chimera/chimeras.fasta" \
    --sizein --sizeout \
    > "${OUT}/chimera/chimera.log" 2>&1
  echo " Chimera removal done"
fi

CLEAN_COUNT=$(grep -c "^>" "${OUT}/chimera/clean_asvs.fasta" || true)
# chimeras.fasta may not exist when nothing was flagged; default to 0.
CHIMERA_COUNT=$(grep -c "^>" "${OUT}/chimera/chimeras.fasta" 2>/dev/null || true)
CHIMERA_COUNT=${CHIMERA_COUNT:-0}
echo " Clean ASVs: ${CLEAN_COUNT}, Chimeras removed: ${CHIMERA_COUNT}"
257
# ------------------------------------------------------------
# Level 7.5: Build a nucleotide BLAST database from the MitoFish
# 12S reference FASTA (skipped if the .ndb index already exists).
# ------------------------------------------------------------
echo "=== Building BLAST database ==="
if ! [ -f "${REF}/mitofish_12S.ndb" ]; then
  makeblastdb -in "${REF}/mitofish_12S.fasta" \
    -dbtype nucl \
    -out "${REF}/mitofish_12S" \
    -parse_seqids \
    > /dev/null 2>&1
  echo " BLAST DB built"
fi
270
# ------------------------------------------------------------
# Level 8: Taxonomy assignment (two parallel methods)
# ------------------------------------------------------------
echo "=== Level 8: Taxonomy assignment ==="

# Both methods query the chimera-free ASVs with the ";size=N" abundance
# annotations stripped. FIX: build the query file independently of either
# method — previously it was created only inside the BLAST guard, so a
# resumed run where blast_hits.tsv already existed but query.fasta did
# not would break the vsearch branch below.
if [ ! -f "${OUT}/taxonomy/query.fasta" ]; then
  sed 's/;size=[0-9]*//' "${OUT}/chimera/clean_asvs.fasta" > "${OUT}/taxonomy/query.fasta"
fi

# Method 8a: BLAST against MitoFish 12S (tabular output, format 6)
if [ ! -f "${OUT}/taxonomy/blast_hits.tsv" ]; then
  blastn -query "${OUT}/taxonomy/query.fasta" \
    -db "${REF}/mitofish_12S" \
    -out "${OUT}/taxonomy/blast_hits.tsv" \
    -outfmt "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore" \
    -evalue 1e-10 \
    -max_target_seqs 10 \
    -num_threads ${THREADS} \
    > /dev/null 2>&1
  echo " BLAST done"
fi

# Method 8b: vsearch global alignment against the same reference,
# emitting a blast6-compatible hit table for the LCA step.
if [ ! -f "${OUT}/taxonomy/vsearch_hits.tsv" ]; then
  vsearch --usearch_global "${OUT}/taxonomy/query.fasta" \
    --db "${REF}/mitofish_12S.fasta" \
    --id 0.80 \
    --maxaccepts 10 \
    --blast6out "${OUT}/taxonomy/vsearch_hits.tsv" \
    --threads ${THREADS} \
    > "${OUT}/taxonomy/vsearch.log" 2>&1
  echo " vsearch global search done"
fi
303
# ------------------------------------------------------------
# Level 9: CONVERGENCE 3 — LCA consensus taxonomy.
# Merges the BLAST and vsearch hit tables and keeps, per ASV, the
# deepest taxonomy on which all >=97%-identity hits agree.
# ------------------------------------------------------------
echo "=== Level 9: Convergence 3 (LCA taxonomy) ==="

if [ ! -f "${OUT}/taxonomy/lca_taxonomy.tsv" ]; then
  python3 << 'PYEOF'
import csv
from collections import defaultdict

RANKS = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']

# Load MitoFish accession -> taxonomy lookup.
tax_lookup = {}
with open("reference/mitofish_12S_taxonomy.tsv") as f:
    reader = csv.DictReader(f, delimiter='\t')
    for row in reader:
        tax_lookup[row['Accession']] = {
            'superkingdom': row.get('Superkingdom', ''),
            'phylum': row.get('Phylum', ''),
            'class': row.get('Class', ''),
            'order': row.get('Order', ''),
            'family': row.get('Family', ''),
            'genus': row.get('Genus', ''),
            'species': row.get('Species', ''),
        }

def load_hits(path):
    """Parse a blast6 hit table into {query_id: [taxonomy dicts]}.

    Keeps only hits with >= 97% identity to a reference present in
    tax_lookup. FIX: blank or truncated lines are skipped; the original
    code raised IndexError/ValueError on them."""
    hits = defaultdict(list)
    with open(path) as f:
        for line in f:
            parts = line.rstrip('\n').split('\t')
            if len(parts) < 3:
                continue
            qid, sid = parts[0], parts[1]
            try:
                pident = float(parts[2])
            except ValueError:
                continue
            if pident >= 97.0 and sid in tax_lookup:
                hits[qid].append(tax_lookup[sid])
    return hits

blast_tax = load_hits("outputs/taxonomy/blast_hits.tsv")
vsearch_tax = load_hits("outputs/taxonomy/vsearch_hits.tsv")

def lca(tax_list):
    """Lowest common ancestor over RANKS: descend while all candidate
    taxonomies agree; deeper ranks are left blank after the first
    disagreement (or missing rank)."""
    result = {r: '' for r in RANKS}
    if not tax_list:
        return result
    for rank in RANKS:
        values = set(t[rank] for t in tax_list if t[rank])
        if len(values) == 1:
            result[rank] = values.pop()
        else:
            break  # stop at first disagreement
    return result

# Merge BLAST + vsearch evidence per query via LCA.
all_queries = set(blast_tax) | set(vsearch_tax)
with open("outputs/taxonomy/lca_taxonomy.tsv", 'w') as f:
    f.write("asv_id\tsuperkingdom\tphylum\tclass\torder\tfamily\tgenus\tspecies\n")
    for qid in sorted(all_queries):
        combined = blast_tax.get(qid, []) + vsearch_tax.get(qid, [])
        tax = lca(combined)
        f.write(qid + "\t" + "\t".join(tax[r] for r in RANKS) + "\n")

print(f"LCA taxonomy assigned to {len(all_queries)} ASVs")
PYEOF
fi
379
# ------------------------------------------------------------
# Level 9 continued: community analyses (three parallel branches)
# ------------------------------------------------------------
echo "=== Level 9: Community analyses ==="

# Build the ASV x sample abundance table by mapping the pooled,
# filtered reads back onto the chimera-free ASVs at 97% identity.
if ! [ -f "${OUT}/community/otu_table.tsv" ]; then
  vsearch --usearch_global "${OUT}/derep/all_pooled.fasta" \
    --db "${OUT}/chimera/clean_asvs.fasta" \
    --id 0.97 \
    --otutabout "${OUT}/community/otu_table.tsv" \
    --threads ${THREADS} \
    > "${OUT}/community/map.log" 2>&1
  echo " OTU table built"
fi
396
# Branch 9a: Species list
# Branch 9b: Alpha diversity (Shannon index + richness per sample)
# Branch 9c: Detection probability per species
if [ ! -f "${OUT}/community/diversity_results.tsv" ]; then
  python3 << 'PYEOF'
import csv
import math
from collections import defaultdict

# Load OTU table: {asv_id: {sample: count}}; first column is the ASV id.
otu_table = {}
samples = []
with open("outputs/community/otu_table.tsv") as f:
    header = f.readline().strip().split('\t')
    samples = header[1:]
    for line in f:
        parts = line.strip().split('\t')
        otu_table[parts[0]] = dict(zip(samples, [int(x) for x in parts[1:]]))

# Load LCA taxonomy per ASV.
taxonomy = {}
with open("outputs/taxonomy/lca_taxonomy.tsv") as f:
    for row in csv.DictReader(f, delimiter='\t'):
        taxonomy[row['asv_id']] = row

# === Branch 9a: Species list ===
# One pass builds the rank sets AND a species -> representative-taxonomy
# map (first ASV seen), replacing the original O(n^2) rescan per species.
species_set, genus_set, family_set, order_set = set(), set(), set(), set()
species_tax = {}
for asv_id, tax in taxonomy.items():
    if tax['species']:
        species_set.add(tax['species'])
        species_tax.setdefault(tax['species'], tax)
    if tax['genus']:
        genus_set.add(tax['genus'])
    if tax['family']:
        family_set.add(tax['family'])
    if tax['order']:
        order_set.add(tax['order'])

with open("outputs/community/species_list.tsv", 'w') as f:
    f.write("species\tgenus\tfamily\torder\n")
    for sp in sorted(species_set):
        tax = species_tax[sp]
        f.write(f"{sp}\t{tax['genus']}\t{tax['family']}\t{tax['order']}\n")

# === Branch 9b: Alpha diversity (Shannon index per sample) ===
shannon_per_sample = {}
richness_per_sample = {}
for s in samples:
    counts = [otu_table[asv][s] for asv in otu_table if otu_table[asv].get(s, 0) > 0]
    total = sum(counts)
    if total == 0:
        shannon_per_sample[s] = 0.0
        richness_per_sample[s] = 0
        continue
    richness_per_sample[s] = len(counts)
    shannon = 0.0
    for c in counts:
        p = c / total
        if p > 0:
            shannon -= p * math.log(p)
    shannon_per_sample[s] = round(shannon, 4)

with open("outputs/community/diversity_results.tsv", 'w') as f:
    f.write("sample\tshannon_diversity\tspecies_richness\n")
    for s in samples:
        f.write(f"{s}\t{shannon_per_sample[s]}\t{richness_per_sample[s]}\n")

# === Branch 9c: Detection probability ===
# detection_rate = fraction of samples in which the species was detected.
# BUG FIX: the original counted (ASV, sample) pairs, so a species with
# several ASVs present in one sample could report a rate > 1.0; count
# distinct samples per species instead.
species_samples = defaultdict(set)
species_total_reads = defaultdict(int)
for asv_id in otu_table:
    sp = taxonomy.get(asv_id, {}).get('species', '')
    if not sp:
        continue
    for s in samples:
        c = otu_table[asv_id].get(s, 0)
        if c > 0:
            species_samples[sp].add(s)
            species_total_reads[sp] += c

with open("outputs/community/detection_probability.tsv", 'w') as f:
    f.write("species\tsamples_detected\tdetection_rate\ttotal_reads\n")
    for sp in sorted(species_samples.keys()):
        n_det = len(species_samples[sp])
        det_rate = round(n_det / len(samples), 4) if samples else 0.0
        f.write(f"{sp}\t{n_det}\t{det_rate}\t{species_total_reads[sp]}\n")

print(f"Species: {len(species_set)}, Genera: {len(genus_set)}, Families: {len(family_set)}, Orders: {len(order_set)}")
# Guard: min()/max() raise on an empty table (no samples).
if shannon_per_sample:
    print(f"Shannon range: {min(shannon_per_sample.values()):.4f} - {max(shannon_per_sample.values()):.4f}")
PYEOF
  echo " Python community analysis done"
fi
496
# Beta diversity with R/vegan: Bray-Curtis dissimilarity between
# samples, computed on the transposed OTU table (samples as rows).
if ! [ -f "${OUT}/community/beta_diversity.tsv" ]; then
  cat > "${OUT}/community/run_vegan.R" << 'REOF'
library(vegan)
otu <- read.delim("outputs/community/otu_table.tsv", row.names=1, check.names=FALSE)
otu_t <- t(otu)
bc <- as.matrix(vegdist(otu_t, method="bray"))
write.table(bc, "outputs/community/beta_diversity.tsv", sep="\t", quote=FALSE)
cat("Beta diversity (Bray-Curtis) range:", range(bc[lower.tri(bc)]), "\n")
cat("Mean Bray-Curtis:", mean(bc[lower.tri(bc)]), "\n")
REOF
  Rscript "${OUT}/community/run_vegan.R"
  echo " R/vegan beta diversity done"
fi
511
# ------------------------------------------------------------
# Level 10: CONVERGENCE 4 — Final report.
# Aggregates QC, merging, clustering, taxonomy, diversity and
# detection metrics from every upstream level into results/report.csv.
# ------------------------------------------------------------
echo "=== Level 10: Final report ==="

python3 << 'PYEOF'
import csv
import os

SAMPLES = ["DRR205394", "DRR205395", "DRR205396",
           "DRR205397", "DRR205398", "DRR205399"]

def count_fasta_headers(path):
    """Number of '>' header lines in a FASTA file; 0 if the file is absent."""
    if not os.path.exists(path):
        return 0
    with open(path) as fh:
        return sum(1 for ln in fh if ln.startswith('>'))

metrics = {}

# --- Raw read counts: sum num_seqs over every trimmed R1/R2 file ---
# (The original had a dead no-op loop over samples here; removed.)
total_raw = 0
with open("outputs/qc/seqkit_stats.tsv") as f:
    for row in csv.DictReader(f, delimiter='\t'):
        total_raw += int(row['num_seqs'].replace(',', ''))
metrics['total_raw_reads'] = total_raw

# --- Merged read counts: parse the vsearch merge logs ---
total_merged = 0
for s in SAMPLES:
    logf = f"outputs/merged/{s}_merge.log"
    if os.path.exists(logf):
        with open(logf) as f:
            for line in f:
                if "Merged" in line and "pairs" in line:
                    # First numeric token on the "Merged ... pairs" line.
                    for p in line.strip().split():
                        if p.replace(',', '').isdigit():
                            total_merged += int(p.replace(',', ''))
                            break
                    break

# Fallback: count records (4 lines each) in the merged FASTQ files.
if total_merged == 0:
    for s in SAMPLES:
        mf = f"outputs/merged/{s}.fastq"
        if os.path.exists(mf):
            with open(mf) as fh:
                total_merged += sum(1 for _ in fh) // 4
metrics['total_merged_reads'] = total_merged

# Merge rate: total_raw counts R1+R2 reads, so pairs = total_raw / 2.
if total_raw > 0:
    metrics['merge_rate'] = round(total_merged / (total_raw / 2) * 100, 2)
else:
    metrics['merge_rate'] = 0.0

# --- Unique sequences after dereplication ---
metrics['unique_sequences'] = count_fasta_headers("outputs/derep/all_derep.fasta")

# --- Clustering results (three parallel methods) ---
for key, fname in [("clusters_otu97", "outputs/clusters/otu97_centroids.fasta"),
                   ("clusters_swarm", "outputs/clusters/swarm_centroids.fasta"),
                   ("clusters_denoised", "outputs/clusters/unoise3_asvs.fasta")]:
    metrics[key] = count_fasta_headers(fname)

# --- Chimera removal ---
metrics['clean_sequence_count'] = count_fasta_headers("outputs/chimera/clean_asvs.fasta")
metrics['chimera_count'] = count_fasta_headers("outputs/chimera/chimeras.fasta")

# --- Taxonomy assignment + unique taxa, in a single pass over the LCA table ---
assigned = 0
total_asvs = 0
species_set, genus_set, family_set, order_set = set(), set(), set(), set()
with open("outputs/taxonomy/lca_taxonomy.tsv") as f:
    for row in csv.DictReader(f, delimiter='\t'):
        total_asvs += 1
        if row['species'] or row['genus'] or row['family']:
            assigned += 1
        if row['species']: species_set.add(row['species'])
        if row['genus']: genus_set.add(row['genus'])
        if row['family']: family_set.add(row['family'])
        if row['order']: order_set.add(row['order'])
metrics['assigned_sequences'] = assigned
metrics['unassigned_sequences'] = total_asvs - assigned
metrics['species_count'] = len(species_set)
metrics['genus_count'] = len(genus_set)
metrics['family_count'] = len(family_set)
metrics['order_count'] = len(order_set)

# --- Alpha diversity summary ---
shannon_vals = []
richness_vals = []
with open("outputs/community/diversity_results.tsv") as f:
    for row in csv.DictReader(f, delimiter='\t'):
        shannon_vals.append(float(row['shannon_diversity']))
        richness_vals.append(int(row['species_richness']))
# Guard: the original divided by len(shannon_vals) unconditionally and
# crashed on an empty diversity table.
if shannon_vals:
    metrics['mean_shannon_diversity'] = round(sum(shannon_vals) / len(shannon_vals), 4)
    metrics['min_species_richness'] = min(richness_vals)
    metrics['max_species_richness'] = max(richness_vals)

# --- Beta diversity: upper triangle of the Bray-Curtis matrix ---
bc_vals = []
with open("outputs/community/beta_diversity.tsv") as f:
    f.readline()  # header row (sample names)
    rows = []
    for line in f:
        parts = line.strip().split('\t')
        rows.append([float(x) for x in parts[1:]])  # parts[0] is the row name
    for i in range(len(rows)):
        for j in range(i + 1, len(rows)):
            bc_vals.append(rows[i][j])
if bc_vals:
    metrics['mean_beta_diversity'] = round(sum(bc_vals) / len(bc_vals), 4)
    metrics['min_beta_diversity'] = round(min(bc_vals), 4)
    metrics['max_beta_diversity'] = round(max(bc_vals), 4)

# --- Mean detection rate across species ---
det_rates = []
with open("outputs/community/detection_probability.tsv") as f:
    for row in csv.DictReader(f, delimiter='\t'):
        det_rates.append(float(row['detection_rate']))
if det_rates:
    metrics['mean_detection_rate'] = round(sum(det_rates) / len(det_rates), 4)

# === Write report ===
with open("results/report.csv", 'w') as f:
    f.write("metric,value\n")
    for k, v in metrics.items():
        f.write(f"{k},{v}\n")

print("=== Report generated ===")
for k, v in metrics.items():
    print(f" {k} = {v}")
PYEOF

echo "=== Pipeline complete ==="