File size: 1,443 Bytes
b5159c0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
from contextlib import ExitStack

import pandas as pd
from Bio import SeqIO

def run_full_split_workflow(tsv_path, fasta_path, train_pct=0.6, val_pct=0.2, test_pct=0.2):
    """Split clustered sequences into train/val/test FASTA files.

    Reads an mmseqs2-style cluster TSV (two tab-separated columns:
    representative id, member id; no header row) and greedily assigns
    whole clusters -- largest first -- to whichever split is furthest
    below its target size, so no cluster is ever divided across splits.
    Records from *fasta_path* whose id appears in the TSV are then
    written to ``train.fasta`` / ``val.fasta`` / ``test.fasta`` in the
    current directory.

    Parameters
    ----------
    tsv_path : str
        Path to the cluster TSV produced by mmseqs2 ``createtsv``.
    fasta_path : str
        Path to the FASTA file containing the member sequences.
    train_pct, val_pct, test_pct : float
        Desired fraction of total sequences per split (should sum to 1.0).

    Returns
    -------
    dict
        Number of records actually written to each split file
        (keys ``'train'``, ``'val'``, ``'test'``).
    """
    df = pd.read_csv(tsv_path, sep='\t', names=['rep', 'member'])
    cluster_groups = df.groupby('rep')['member'].apply(list).to_dict()
    # Place the largest clusters first: the big, hard-to-fit clusters are
    # assigned while all deficits are still large, so the greedy fill
    # tracks the target fractions more closely.
    sorted_reps = sorted(cluster_groups, key=lambda r: len(cluster_groups[r]), reverse=True)

    total_seqs = len(df)
    targets = {'train': total_seqs * train_pct,
               'val': total_seqs * val_pct,
               'test': total_seqs * test_pct}
    counts = {k: 0 for k in targets}

    # Map every sequence id straight to its split name: one O(1) dict
    # lookup per FASTA record instead of probing up to three sets.
    id_to_split = {}
    for rep in sorted_reps:
        members = cluster_groups[rep]
        # Give the whole cluster to the split with the largest remaining
        # deficit relative to its target.
        deficit = {k: targets[k] - counts[k] for k in targets}
        best_fit = max(deficit, key=deficit.get)
        for member in members:
            id_to_split[member] = best_fit
        counts[best_fit] += len(members)

    written_counts = {k: 0 for k in targets}
    # ExitStack guarantees every output file is closed even if parsing or
    # writing raises partway through (the original leaked handles on error).
    with ExitStack() as stack:
        files = {k: stack.enter_context(open(f"{k}.fasta", "w")) for k in targets}
        for record in SeqIO.parse(fasta_path, "fasta"):
            split_name = id_to_split.get(record.id)
            if split_name is not None:
                SeqIO.write(record, files[split_name], "fasta")
                written_counts[split_name] += 1
    return written_counts

# Default inputs: mmseqs2 cluster assignments and the matching sequence file.
mmseqs_tsv = "iiab_db_cluster.tsv"
fasta = "iiab_db.fasta"

if __name__ == "__main__":
    # Guard the entry point so importing this module does not trigger the
    # file-writing split workflow as a side effect.
    run_full_split_workflow(mmseqs_tsv, fasta)