jheuschkel committed on
Commit 3385332 · verified · 1 Parent(s): 4fb1ea3

Upload 9 files

__init__.py ADDED
File without changes
clean_split_sequence.py ADDED
@@ -0,0 +1,11 @@
+ def clean_split_sequence(seq):
+     seq = seq.upper()
+     seq = seq.replace('U', 'T')
+ 
+     for base in seq:
+         if base not in {'A', 'T', 'G', 'C'}:
+             raise ValueError(f"Invalid character '{base}' found in sequence. Only A, T, G, C, and U are allowed.")
+ 
+     spaced_seq = " ".join([seq[i:i+3] for i in range(0, len(seq), 3)])
+ 
+     return spaced_seq
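
For reference, a quick usage sketch of the function above (illustrative only, not part of the commit): it uppercases the input, maps RNA U to DNA T, rejects anything outside A/T/G/C, and returns the sequence as space-separated codons, presumably the whitespace-delimited format expected by the tokenizer added later in this commit.

from clean_split_sequence import clean_split_sequence

print(clean_split_sequence("augGCUuaa"))  # -> "ATG GCT TAA"
# clean_split_sequence("ATGAAX") would raise ValueError because of the 'X'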
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "architectures": [
+     "SynCodonLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu_new",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-07,
+   "legacy": true,
+   "max_position_embeddings": 1024,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": true,
+   "relative_attention": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3",
+   "type_vocab_size": 501,
+   "vocab_size": 69
+ }
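
config.json describes a 12-layer DeBERTa-v2 encoder (hidden size 768, 12 attention heads) with a 69-entry vocabulary (64 codons plus 5 special tokens, see tokenizer.json below) and a 501-entry token-type table, presumably one slot per species plus a default. A minimal inspection sketch, assuming the commit's files have been downloaded into the current directory (not part of the commit):

from transformers import AutoConfig

# model_type "deberta-v2" resolves to DebertaV2Config
cfg = AutoConfig.from_pretrained(".")
print(cfg.model_type)       # deberta-v2
print(cfg.vocab_size)       # 69  (64 codons + 5 special tokens)
print(cfg.type_vocab_size)  # 501 (token-type / species embedding table size)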
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f2462757de28e7dc0881e013e2a6d0aa4fe75ce005133f1a9d67fb011904fc2
+ size 410508700
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
species_token_type.py ADDED
The diff for this file is too large to render. See raw diff
 
synonomous_codons.py ADDED
@@ -0,0 +1,23 @@
+ synonymous_codons = {
+     "A": ["GCT", "GCC", "GCA", "GCG"],
+     "C": ["TGT", "TGC"],
+     "D": ["GAT", "GAC"],
+     "E": ["GAA", "GAG"],
+     "F": ["TTT", "TTC"],
+     "G": ["GGT", "GGC", "GGA", "GGG"],
+     "H": ["CAT", "CAC"],
+     "I": ["ATT", "ATC", "ATA"],
+     "K": ["AAA", "AAG"],
+     "L": ["TTA", "TTG", "CTT", "CTC", "CTA", "CTG"],
+     "M": ["ATG"],
+     "N": ["AAT", "AAC"],
+     "P": ["CCT", "CCC", "CCA", "CCG"],
+     "Q": ["CAA", "CAG"],
+     "R": ["CGT", "CGC", "CGA", "CGG", "AGA", "AGG"],
+     "S": ["TCT", "TCC", "TCA", "TCG", "AGT", "AGC"],
+     "T": ["ACT", "ACC", "ACA", "ACG"],
+     "V": ["GTT", "GTC", "GTA", "GTG"],
+     "W": ["TGG"],
+     "Y": ["TAT", "TAC"],
+     "*": ["TAA", "TAG", "TGA"]
+ }
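
synonomous_codons.py maps each amino-acid one-letter code (and "*" for stop) to its synonymous codons under the standard genetic code. A hypothetical back-translation helper built on this table (illustrative only, not part of the commit):

import random

from synonomous_codons import synonymous_codons  # file name keeps the repo's spelling

def random_back_translate(protein, rng=random):
    # pick one synonymous codon per residue and return spaced codons
    return " ".join(rng.choice(synonymous_codons[aa]) for aa in protein)

print(random_back_translate("MKV*"))  # e.g. "ATG AAA GTT TAA"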
tokenizer.json ADDED
@@ -0,0 +1,199 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 500
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 500
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 500
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 500
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 500
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 500
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 500
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 500
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [1],
+         "tokens": ["[CLS]"]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [2],
+         "tokens": ["[SEP]"]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[PAD]": 0,
+       "[CLS]": 1,
+       "[SEP]": 2,
+       "[UNK]": 3,
+       "[MASK]": 4,
+       "GCT": 5,
+       "GCC": 6,
+       "GCA": 7,
+       "GCG": 8,
+       "TGT": 9,
+       "TGC": 10,
+       "GAT": 11,
+       "GAC": 12,
+       "GAA": 13,
+       "GAG": 14,
+       "TTT": 15,
+       "TTC": 16,
+       "GGT": 17,
+       "GGC": 18,
+       "GGA": 19,
+       "GGG": 20,
+       "CAT": 21,
+       "CAC": 22,
+       "ATT": 23,
+       "ATC": 24,
+       "ATA": 25,
+       "AAA": 26,
+       "AAG": 27,
+       "TTA": 28,
+       "TTG": 29,
+       "CTT": 30,
+       "CTC": 31,
+       "CTA": 32,
+       "CTG": 33,
+       "ATG": 34,
+       "AAT": 35,
+       "AAC": 36,
+       "CCT": 37,
+       "CCC": 38,
+       "CCA": 39,
+       "CCG": 40,
+       "CAA": 41,
+       "CAG": 42,
+       "CGT": 43,
+       "CGC": 44,
+       "CGA": 45,
+       "CGG": 46,
+       "AGA": 47,
+       "AGG": 48,
+       "TCT": 49,
+       "TCC": 50,
+       "TCA": 51,
+       "TCG": 52,
+       "AGT": 53,
+       "AGC": 54,
+       "ACT": 55,
+       "ACC": 56,
+       "ACA": 57,
+       "ACG": 58,
+       "GTT": 59,
+       "GTC": 60,
+       "GTA": 61,
+       "GTG": 62,
+       "TGG": 63,
+       "TAT": 64,
+       "TAC": 65,
+       "TAA": 66,
+       "TAG": 67,
+       "TGA": 68
+     },
+     "unk_token": "[UNK]"
+   }
+ }
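
tokenizer.json defines a WordLevel tokenizer over the 64 codons plus five special tokens, a Whitespace pre-tokenizer (hence the spaced output of clean_split_sequence.py), and a TemplateProcessing post-processor that wraps inputs in [CLS]/[SEP] with a hard-coded type_id of 500, which looks like the default slot of the 501-entry token-type table in config.json; species_token_type.py presumably supplies the per-species ids. A minimal sketch with the tokenizers library, assuming tokenizer.json has been downloaded locally (not part of the commit):

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
enc = tok.encode("ATG GCT TAA")  # whitespace-separated codons, e.g. from clean_split_sequence
print(enc.tokens)    # expected: ['[CLS]', 'ATG', 'GCT', 'TAA', '[SEP]']
print(enc.ids)       # expected: [1, 34, 5, 66, 2]
print(enc.type_ids)  # expected: [500, 500, 500, 500, 500]  (default species slot)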
tokenizer_config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[SEP]",
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "max_length": 1024,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_to_multiple_of": null,
+   "pad_token": "[PAD]",
+   "pad_token_type_id": 500,
+   "padding_side": "right",
+   "stride": 0,
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
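
tokenizer_config.json points the transformers side at PreTrainedTokenizerFast and sets pad_token_type_id to 500, matching the default token-type slot above. A hedged end-to-end sketch (the repo/path name below is a placeholder, not given in this diff; the custom SynCodonLM architecture presumably needs the matching model class from the repo rather than a stock AutoModel class):

from transformers import PreTrainedTokenizerFast

# "path/or/repo-id" is a placeholder; the actual model id is not part of this commit page
tokenizer = PreTrainedTokenizerFast.from_pretrained("path/or/repo-id")
batch = tokenizer("ATG GCT TAA", return_tensors="pt")
print(batch["input_ids"])       # expected: tensor([[1, 34, 5, 66, 2]])
print(batch["token_type_ids"])  # expected: tensor([[500, 500, 500, 500, 500]]) by default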