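"""Tokenize the hg38 reference genome with pre-trained BPE tokenizers and write
non-overlapping, fixed-length token sequences to train/validation TSV files for
language-model pretraining."""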
import os
import random
from os.path import join

import pysam
from tokenizers import Tokenizer


def writetsv(data, label, savefile):
    """Write parallel lists of sequences and labels to a two-column TSV."""
    with open(savefile, 'w') as f:
        f.write('sequence\tlabels\n')
        for seq, lab in zip(data, label):
            f.write(f'{seq}\t{lab}\n')
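
# Example output layout (tab-separated), assuming whitespace-joined token windows
# labeled by chromosome:
#   sequence              labels
#   AGCT TTAGGC ACGT ...  chr1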


def nonoverlap_split(tokens, maxlen, tolerance=0.5):
    """Split a token stream into non-overlapping windows of `maxlen` tokens.

    Windows in which the fraction of tokens containing an ambiguous base ('N')
    reaches `tolerance` are skipped.
    """
    seqs = []
    skipped = 0

    num_windows = len(tokens) // maxlen

    for i in range(num_windows):
        window = tokens[i * maxlen:(i + 1) * maxlen]

        # Token-aware N detection: count tokens containing at least one 'N'.
        num_N = sum('N' in tok for tok in window)

        if num_N / maxlen < tolerance:
            seqs.append(" ".join(window))
        else:
            skipped += 1

    print(f"In this chromosome, skipped sequences: {skipped}")
    return seqs
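
# Illustrative (hypothetical) example with maxlen=3:
#   nonoverlap_split(["AC", "NN", "NG", "TA", "GC", "AT"], maxlen=3, tolerance=0.5)
#   -> window ["AC", "NN", "NG"] is skipped (2/3 tokens contain 'N', >= 0.5),
#      window ["TA", "GC", "AT"] is kept, so the result is ["TA GC AT"]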


def tokenize_full_sequence_collect(tokenizer, sequence, chunk_size=1_000_000):
    """Tokenize a long sequence in fixed-size chunks and collect the raw tokens.

    Chunking keeps per-call memory bounded when encoding whole chromosomes.
    Each chunk is encoded independently, so tokens never span chunk boundaries.
    """
    raw_tokens = []

    for i in range(0, len(sequence), chunk_size):
        chunk = sequence[i:i + chunk_size]
        encoded = tokenizer.encode(chunk)
        raw_tokens.extend(encoded.tokens)

        # Report progress every 10 chunks.
        if i % (10 * chunk_size) == 0:
            print(f"Processed {i:,} bp")

    return raw_tokens
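
# Hypothetical usage (the tokenizer file name is a placeholder):
#   tok = Tokenizer.from_file("some_bpe_tokenizer.json")
#   raw_tokens = tokenize_full_sequence_collect(tok, "ACGT" * 250, chunk_size=1_000)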


# --- 1. CONFIGURATION ---
maxlen = 512            # tokens per pretraining sequence
tolerance = 0.5         # skip windows where >= this fraction of tokens contain 'N'
CHUNK_SIZE = 1_000_000  # bases per tokenizer.encode() call
fasta_path = '/home/n5huang/dna_token/hg38.fa'
args_token_path = '/home/n5huang/dna_token/output_tokens'
os.makedirs(args_token_path, exist_ok=True)

# Set to a list like ["chr1", "chr2"] to limit processing; None processes all references.
CHROMOSOMES = None
EXCLUDE_CHROMS = set()


# --- 2. LOAD YOUR TOKENIZERS ---
VOCAB_PATHS = {
    "cCRE_region_BPE": "/home/n5huang/dna_token/tokenizer_files/cCRE_region_BPE_tokenizer.json",
    "motif_region_BPE": "/home/n5huang/dna_token/tokenizer_files/motif_region_BPE_tokenizer.json",
}

tokenizers = {}
for name, path in VOCAB_PATHS.items():
    tokenizers[name] = Tokenizer.from_file(path)


with pysam.FastaFile(fasta_path) as genome:
    chroms = genome.references if CHROMOSOMES is None else CHROMOSOMES
    chroms = [c for c in chroms if c not in EXCLUDE_CHROMS]

    for tok_name, tok in tokenizers.items():
        print(f"\n=== Processing tokenizer: {tok_name} ===")

        # Diagnostic: show the tokenizer's pre-tokenizer and model configuration.
        print(tok.pre_tokenizer)
        print(tok.model)

        all_seqs = []
        all_labels = []

        for chrm in chroms:
            full_sequence = genome.fetch(reference=chrm)

            print(f"\nChromosome: {chrm}")
            print(f"Total length: {len(full_sequence):,} bases")
            print(f"First 100 bases:\n{full_sequence[:100]}")

            # 1. Tokenize full chromosome
            raw_tokens = tokenize_full_sequence_collect(
                tok,
                full_sequence,
                chunk_size=CHUNK_SIZE
            )
            print(f"Total raw tokens: {len(raw_tokens):,}")

            # 2. Build sequences
            final_seqs = nonoverlap_split(
                tokens=raw_tokens,
                maxlen=maxlen,
                tolerance=tolerance
            )

            print(f"Total sequences for pretrain: {len(final_seqs):,}")

            all_seqs.extend(final_seqs)
            all_labels.extend([chrm] * len(final_seqs))

        if not all_seqs:
            print(f"No sequences generated for tokenizer: {tok_name}")
            continue

        # 3. Shuffle (fixed seed for a reproducible train/val split)
        combined = list(zip(all_seqs, all_labels))
        random.seed(42)
        random.shuffle(combined)
        shuffle_data, shuffle_labels = zip(*combined)

        # 4. Train / Val split
        train_num = int(0.9 * len(shuffle_data))

        train_data = shuffle_data[:train_num]
        train_labels = shuffle_labels[:train_num]
        val_data = shuffle_data[train_num:]
        val_labels = shuffle_labels[train_num:]

        # 5. Save TSVs
        train_path = join(
            args_token_path,
            f"{tok_name}_allchr_all_tokenized_train.tsv"
        )
        val_path = join(
            args_token_path,
            f"{tok_name}_allchr_all_tokenized_val.tsv"
        )

        writetsv(train_data, train_labels, train_path)
        writetsv(val_data, val_labels, val_path)

        print(f"Saved:\n  {train_path}\n  {val_path}")