| | import os |
| | import json |
| | import re |
| | import pandas as pd |
| | import datasets |
| | from functools import cached_property |
| |
|
| | logger = datasets.logging.get_logger(__name__) |
| |
|
| |
|
| | _CITATION = """\ |
| | @article{jolma2010multiplexed, |
| | title={Multiplexed massively parallel SELEX for characterization of human transcription factor binding specificities}, |
| | author={Jolma, Arttu and Kivioja, Teemu and Toivonen, Jarkko and Cheng, Lu and Wei, Gonghong and Enge, Martin and \ |
| | Taipale, Mikko and Vaquerizas, Juan M and Yan, Jian and Sillanp{\"a}{\"a}, Mikko J and others}, |
| | journal={Genome research}, |
| | volume={20}, |
| | number={6}, |
| | pages={861--873}, |
| | year={2010}, |
| | publisher={Cold Spring Harbor Lab} |
| | } |
| | """ |
| |
|
| | _DESCRIPTION = """\ |
| | PRJEB3289 |
| | https://www.ebi.ac.uk/ena/browser/view/PRJEB3289 |
| | Data that has been generated by HT-SELEX experiments (see Jolma et al. 2010. PMID: 20378718 for description of method) \ |
| | that has been now used to generate transcription factor binding specificity models for most of the high confidence \ |
| | human transcription factors. Sequence data is composed of reads generated with Illumina Genome Analyzer IIX and \ |
| | HiSeq2000 instruments. Samples are composed of single read sequencing of synthetic DNA fragments with a fixed length \ |
| | randomized region or samples derived from such a initial library by selection with a sequence specific DNA binding \ |
| | protein. Originally multiple samples with different "barcode" tag sequences were run on the same Illumina sequencing \ |
| | lane but the released files have been already de-multiplexed, and the constant regions and "barcodes" of each sequence \ |
| | have been cut out of the sequencing reads to facilitate the use of data. Some of the files are composed of reads from \ |
| | multiple different sequencing lanes and due to this each of the names of the individual reads have been edited to show \ |
| | the flowcell and lane that was used to generate it. Barcodes and oligonucleotide designs are indicated in the names of \ |
| | individual entries. Depending of the selection ligand design, the sequences in each of these fastq-files are either \ |
| | 14, 20, 30 or 40 bases long and had different flanking regions in both sides of the sequence. Each run entry is named \ |
| | in either of the following ways: Example 1) "BCL6B_DBD_AC_TGCGGG20NGA_1", where name is composed of following fields \ |
| | ProteinName_CloneType_Batch_BarcodeDesign_SelectionCycle. This experiment used barcode ligand TGCGGG20NGA, where both \ |
| | of the variable flanking constant regions are indicated as they were on the original sequence-reads. This ligand has \ |
| | been selected for one round of HT-SELEX using recombinant protein that contained the DNA binding domain of \ |
| | human transcription factor BCL6B. It also tells that the experiment was performed on batch of experiments named as "AC".\ |
| | Example 2) 0_TGCGGG20NGA_0 where name is composed of (zero)_BarcodeDesign_(zero) These sequences have been generated \ |
| | from sequencing of the initial non-selected pool. Same initial pools have been used in multiple experiments that were \ |
| | on different batches, thus for example this background sequence pool is the shared background for all of the following \ |
| | samples. BCL6B_DBD_AC_TGCGGG20NGA_1, ZNF784_full_AE_TGCGGG20NGA_3, DLX6_DBD_Y_TGCGGG20NGA_4 and MSX2_DBD_W_TGCGGG20NGA_2 |
| | """ |
| |
|
| | _URL = "ftp://ftp.sra.ebi.ac.uk/vol1/run/" |
| | "ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR173/ERR173154/CTCF_full_AJ_TAGCGA20NGCT_1.fastq.gz" |
| | |
| | |
| | |
| |
|
# Fetch the SELEX run metadata spreadsheet at import time; the class body of
# `Jolma` below reads it to enumerate one builder config per run accession.
# NOTE(review): this is a module-import-time network side effect — it runs on
# every import of this script. (Typo fixed: _DOWNLODE_ -> _DOWNLOAD_; the
# name is non-public and only used on the line below.)
_DOWNLOAD_MANAGER = datasets.DownloadManager()
_RESOURCE_URL = "https://huggingface.co/datasets/thewall/DeepBindWeight/resolve/main"
SELEX_INFO_FILE = _DOWNLOAD_MANAGER.download(f"{_RESOURCE_URL}/ERP001824-deepbind.xlsx")
| |
|
| | pattern = re.compile("(\d+)") |
| |
|
| |
|
class JolmaConfig(datasets.BuilderConfig):
    """BuilderConfig for the Jolma HT-SELEX dataset.

    Args:
        length_match: keep only reads whose length equals the design length
            parsed from the ligand name (see ``Jolma.filter_fn``).
        filter_N: drop reads containing the ambiguous base "N".
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, length_match=True, filter_N=True, **kwargs):
        super().__init__(**kwargs)
        self.length_match = length_match
        self.filter_N = filter_N
| |
|
| |
|
class Jolma(datasets.GeneratorBasedBuilder):
    """FASTQ reads from the Jolma et al. HT-SELEX runs (ENA project PRJEB3289).

    One builder config exists per run accession (e.g. "ERR173157"), enumerated
    from the metadata spreadsheet downloaded at module import time.
    """

    # Metadata sheet indexed by run accession; rows carry at least the
    # "Ligand" design name and the "file" name used in _split_generators.
    # NOTE(review): read at class-creation time — assumes SELEX_INFO_FILE
    # downloaded successfully above.
    SELEX_INFO = pd.read_excel(SELEX_INFO_FILE, index_col=0)

    # One config per run accession in the metadata sheet.
    BUILDER_CONFIGS = [
        JolmaConfig(name=index) for index in SELEX_INFO.index
    ]

    DEFAULT_CONFIG_NAME = "ERR173157"

    def _info(self):
        """Declare the four per-read fields parsed from the FASTQ file."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "seq": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                }
            ),
            homepage="https://www.ebi.ac.uk/ena/browser/view/PRJEB3289",
            citation=_CITATION,
        )

    @cached_property
    def selex_info(self):
        """Metadata row for the selected run (config name == run accession)."""
        return self.SELEX_INFO.loc[self.config.name]

    @cached_property
    def design_length(self):
        """Randomized-region length parsed from the ligand design name.

        E.g. "TGCGGG20NGA" -> 20. Raises AttributeError if the ligand name
        contains no digits (pattern.search returns None).
        """
        selex_info = self.selex_info
        return int(pattern.search(selex_info["Ligand"]).group(0))

    def _split_generators(self, dl_manager):
        """Locate (or download) the run's FASTQ and expose it as one TRAIN split.

        Prefers a local copy under ``data_dir/<accession>/<file>``; falls back
        to the ENA FTP mirror when no usable local copy is found.
        """
        downloaded_files = None
        sra_id = self.config.name
        selex_info = self.selex_info
        file = selex_info["file"]

        # Default added so a config lacking the attribute cannot raise
        # AttributeError here.
        if getattr(self.config, "data_dir", None) is not None:
            path = os.path.join(self.config.data_dir, sra_id, file)
            downloaded_files = dl_manager.extract(path)
            logger.info(f"Load from {downloaded_files}")
        if downloaded_files is None or not os.path.exists(downloaded_files):
            # ENA layout: <base>/<first 6 chars of accession>/<accession>/<file>.
            # _URL already ends with "/", so concatenate rather than join it in
            # (the old "/".join([_URL, ...]) produced a double slash after "run").
            downloaded_url = _URL + "/".join([sra_id[:6], sra_id, file])
            logger.info(f"Download from {downloaded_url}")
            downloaded_files = dl_manager.download_and_extract(downloaded_url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) per FASTQ record that passes ``filter_fn``.

        FASTQ records are 4 lines: "@identifier", sequence, "+", quality.
        ``key`` (and the "id" field) count only reads that pass the filter.
        """
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            ans = {"id": key}
            for i, line in enumerate(f):
                if line.startswith("@") and i % 4 == 0:
                    ans["identifier"] = line[1:].strip()
                elif i % 4 == 1:
                    ans["seq"] = line.strip()
                elif i % 4 == 3:
                    # Record complete once the quality line is seen.
                    ans["quality"] = line.strip()
                    if self.filter_fn(ans):
                        yield key, ans
                        key += 1
                    # Start a fresh record either way; key is reused when the
                    # previous read was filtered out.
                    ans = {"id": key}

    def filter_fn(self, example):
        """Return True when the read passes the config's filters.

        Checks exact design length (config.length_match) and absence of the
        ambiguous base "N" (config.filter_N).
        """
        seq = example["seq"]
        if self.config.length_match and len(seq) != self.design_length:
            return False
        if self.config.filter_N and "N" in seq:
            return False
        return True
| |
|
| |
|
| | if __name__=="__main__": |
| | from datasets import load_dataset |
| | dataset = load_dataset("jolma.py", name="ERR173157", split="all") |
| | |
| |
|