hazemessam commited on
Commit
3442f56
·
verified ·
1 Parent(s): f57c17a

Upload upload.py

Browse files
Files changed (1) hide show
  1. upload.py +273 -0
upload.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from Bio import SeqIO
3
+ from tqdm.auto import tqdm
4
+ import pandas as pd
5
+ from huggingface_hub import HfApi
6
+
7
+
8
# FLIP split names per landscape/task; each entry maps to "<name>.fasta"
# under the corresponding directory on the FLIP download server.

# AAV capsid fitness splits.
aav_files = [
    "des_mut", "low_vs_high", "mut_des", "one_vs_many",
    "sampled", "seven_vs_many", "two_vs_many",
]

# Meltome (protein melting temperature) splits.
meltome_files = ["human", "human_cell", "mixed_split"]

# Single amino-acid variant (SAV) splits.
sav_files = ["human", "only_savs", "mixed"]

# Subcellular localization (SCL) splits.
scl_files = [
    "balanced", "human_hard", "human_soft",
    "mixed_hard", "mixed_soft",
]

# GB1 binding-fitness splits.
gb1_files = [
    "low_vs_high", "one_vs_rest", "sampled",
    "three_vs_rest", "two_vs_rest",
]
49
+
50
+
51
def download_wget(filename, repo):
    """Download ``<filename>.fasta`` for the given FLIP task into the CWD.

    Skips the download when the file already exists locally. Remote layout:
    ``http://data.bioembeddings.com/public/FLIP/fasta/<repo>/<filename>.fasta``

    Args:
        filename: Split name without extension (e.g. ``"des_mut"``).
        repo: FLIP task directory on the server (e.g. ``"aav"``, ``"gb1"``).

    Raises:
        urllib.error.URLError: if the download fails (the previous
            ``os.system("wget ...")`` silently ignored failures).
    """
    import urllib.request  # stdlib; no dependency on an external wget binary

    fasta_path = f"{filename}.fasta"
    if os.path.exists(fasta_path):
        return
    url = f"http://data.bioembeddings.com/public/FLIP/fasta/{repo}/{filename}.fasta"
    urllib.request.urlretrieve(url, fasta_path)
56
+
57
+
58
def upload_aav():
    """Convert the FLIP AAV fasta splits to CSV and push them to the HF Hub.

    For each split in ``aav_files``: download ``<split>.fasta``, parse each
    record header ("<seqid> KEY=<label> KEY=<split> KEY=<validation>"),
    write ``<split>.csv``, upload it to the ``hazemessam/aav`` dataset repo,
    then delete both local files.
    """
    repo = "aav"
    for filename in aav_files:
        download_wget(filename, repo)
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        try:
            for _, record in tqdm(db.items()):
                fields = record.description.split()
                output.append({
                    "seqid": fields[0],
                    # AAV labels are numeric fitness scores.
                    "label": float(fields[1].split("=")[1]),
                    "sequence": str(record.seq),
                    "split": fields[2].split("=")[1].lower(),
                    "validation": fields[3].split("=")[1].lower(),
                })
        finally:
            # SeqIO.index keeps the fasta file handle open until closed.
            db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # os.remove is portable and avoids a shell call (rm -rf) per file.
        os.remove(fasta_file)
        os.remove(csv_file)
99
+
100
+
101
def upload_meltome():
    """Convert the FLIP meltome fasta splits to CSV and push them to the HF Hub.

    For each split in ``meltome_files``: download ``<split>.fasta``, parse
    each record header ("<seqid> KEY=<label> KEY=<split> KEY=<validation>"),
    write ``<split>.csv``, upload it to the ``hazemessam/meltome`` dataset
    repo, then delete both local files.
    """
    repo = "meltome"
    for filename in meltome_files:
        download_wget(filename, repo)
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        try:
            for _, record in tqdm(db.items()):
                fields = record.description.split()
                output.append({
                    "seqid": fields[0],
                    # Meltome labels are numeric melting temperatures.
                    "label": float(fields[1].split("=")[1]),
                    "sequence": str(record.seq),
                    "split": fields[2].split("=")[1].lower(),
                    "validation": fields[3].split("=")[1].lower(),
                })
        finally:
            # SeqIO.index keeps the fasta file handle open until closed.
            db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # os.remove is portable and avoids a shell call (rm -rf) per file.
        os.remove(fasta_file)
        os.remove(csv_file)
141
+
142
+
143
def upload_sav():
    """Convert the FLIP SAV fasta splits to CSV and push them to the HF Hub.

    For each split in ``sav_files``: download ``<split>.fasta``, parse each
    record header ("<seqid> KEY=<label> KEY=<split> KEY=<validation>"),
    write ``<split>.csv``, upload it to the ``hazemessam/sav`` dataset repo,
    then delete both local files.
    """
    repo = "sav"
    for filename in sav_files:
        download_wget(filename, repo)
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        try:
            for _, record in tqdm(db.items()):
                fields = record.description.split()
                output.append({
                    "seqid": fields[0],
                    # The label is a string in the sav and scl datasets.
                    "label": fields[1].split("=")[1],
                    "sequence": str(record.seq),
                    "split": fields[2].split("=")[1].lower(),
                    "validation": fields[3].split("=")[1].lower(),
                })
        finally:
            # SeqIO.index keeps the fasta file handle open until closed.
            db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # os.remove is portable and avoids a shell call (rm -rf) per file.
        os.remove(fasta_file)
        os.remove(csv_file)
183
+
184
+
185
def upload_scl():
    """Convert the FLIP SCL fasta splits to CSV and push them to the HF Hub.

    For each split in ``scl_files``: download ``<split>.fasta``, parse each
    record header ("<seqid> KEY=<label> KEY=<split> KEY=<validation>"),
    write ``<split>.csv``, upload it to the ``hazemessam/scl`` dataset repo,
    then delete both local files.
    """
    repo = "scl"
    for filename in scl_files:
        download_wget(filename, repo)
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        try:
            for _, record in tqdm(db.items()):
                fields = record.description.split()
                output.append({
                    "seqid": fields[0],
                    # The label is a string in the sav and scl datasets.
                    "label": fields[1].split("=")[1],
                    "sequence": str(record.seq),
                    "split": fields[2].split("=")[1].lower(),
                    "validation": fields[3].split("=")[1].lower(),
                })
        finally:
            # SeqIO.index keeps the fasta file handle open until closed.
            db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # os.remove is portable and avoids a shell call (rm -rf) per file.
        os.remove(fasta_file)
        os.remove(csv_file)
225
+
226
+
227
def upload_gb1():
    """Convert the FLIP GB1 fasta splits to CSV and push them to the HF Hub.

    For each split in ``gb1_files``: download ``<split>.fasta``, parse each
    record header ("<seqid> KEY=<label> KEY=<split> KEY=<validation>"),
    write ``<split>.csv``, upload it to the ``hazemessam/gb1`` dataset repo,
    then delete both local files.
    """
    repo = "gb1"
    for filename in gb1_files:
        download_wget(filename, repo)
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        try:
            for _, record in tqdm(db.items()):
                fields = record.description.split()
                output.append({
                    "seqid": fields[0],
                    # GB1 labels are numeric binding-fitness scores.
                    "label": float(fields[1].split("=")[1]),
                    "sequence": str(record.seq),
                    "split": fields[2].split("=")[1].lower(),
                    "validation": fields[3].split("=")[1].lower(),
                })
        finally:
            # SeqIO.index keeps the fasta file handle open until closed.
            db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # os.remove is portable and avoids a shell call (rm -rf) per file.
        os.remove(fasta_file)
        os.remove(csv_file)
266
+
267
+
268
def main():
    """Run every FLIP dataset conversion/upload job, in the original order."""
    upload_gb1()
    upload_meltome()
    upload_sav()
    upload_scl()
    upload_aav()


if __name__ == "__main__":
    main()