Remove urls.txt and update data-loading script
Browse files- multi_species_genomes.py +10 -9
- urls.txt +0 -0
multi_species_genomes.py
CHANGED
|
@@ -19,6 +19,7 @@ from typing import List
|
|
| 19 |
import datasets
|
| 20 |
import pandas as pd
|
| 21 |
from Bio import SeqIO
|
|
|
|
| 22 |
|
| 23 |
|
| 24 |
# Find for instance the citation on arxiv or on the dataset repo/website
|
|
@@ -128,17 +129,17 @@ class MultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
|
|
| 128 |
|
| 129 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
| 130 |
|
| 131 |
-
|
| 132 |
-
with open(
|
| 133 |
-
|
| 134 |
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
|
| 139 |
-
train_downloaded_files = dl_manager.download_and_extract(
|
| 140 |
-
test_downloaded_files = dl_manager.download_and_extract(
|
| 141 |
-
validation_downloaded_files = dl_manager.download_and_extract(
|
| 142 |
|
| 143 |
return [
|
| 144 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
|
|
|
|
| 19 |
import datasets
|
| 20 |
import pandas as pd
|
| 21 |
from Bio import SeqIO
|
| 22 |
+
import os
|
| 23 |
|
| 24 |
|
| 25 |
# Find for instance the citation on arxiv or on the dataset repo/website
|
|
|
|
| 129 |
|
| 130 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
| 131 |
|
| 132 |
+
genome_file_paths = dl_manager.download_and_extract('genomes.txt')
|
| 133 |
+
with open(genome_file_paths) as genome_files:
|
| 134 |
+
genomes = [os.path.join('genomes',line.rstrip()) for line in genome_files]
|
| 135 |
|
| 136 |
+
test_genomes = genomes[-50:] # 50 genomes for test set
|
| 137 |
+
validation_genomes = genomes[-100:-50] # 50 genomes for validation set
|
| 138 |
+
train_genomes = genomes[:-100] # 800 genomes for training
|
| 139 |
|
| 140 |
+
train_downloaded_files = dl_manager.download_and_extract(train_genomes)
|
| 141 |
+
test_downloaded_files = dl_manager.download_and_extract(test_genomes)
|
| 142 |
+
validation_downloaded_files = dl_manager.download_and_extract(validation_genomes)
|
| 143 |
|
| 144 |
return [
|
| 145 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
|
urls.txt
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|