---
datasets:
  - multimolecule/gencode
language: rna
library_name: multimolecule
license: agpl-3.0
mask_token: "<mask>"
pipeline_tag: fill-mask
tags:
  - Biology
  - RNA
  - 3' UTR
widget:
  - example_title: "Human GPI protein p137"
    output:
      - label: "GAC"
        score: 0.998416
      - label: "AAC"
        score: 0.000336
      - label: "GAA"
        score: 0.000305
      - label: "UAC"
        score: 7.9e-05
      - label: "GAG"
        score: 6.6e-05
    text: "UUUUUAAAAGGAAAAGA<mask>ACCAAAUGCCUGCUGCUACCACCCUUUUCAAUUGCUAUGUUU"
  - example_title: "microRNA 21"
    output:
      - label: "AGA"
        score: 0.766467
      - label: "ACA"
        score: 0.217402
      - label: "UGA"
        score: 0.003536
      - label: "AAA"
        score: 0.001181
      - label: "AUA"
        score: 0.000865
    text: "UAGCUUAUCAGAC<mask>GAUGUUGA"
  - example_title: "microRNA 146a"
    output:
      - label: "AUC"
        score: 0.720922
      - label: "AAC"
        score: 0.241986
      - label: "AAA"
        score: 0.00924
      - label: "UUC"
        score: 0.009112
      - label: "ACC"
        score: 0.004808
    text: "UGAGAACUGAA<mask>UCCAUGGGUU"
  - example_title: "microRNA 155"
    output:
      - label: "AAG"
        score: 0.786774
      - label: "ACG"
        score: 0.203443
      - label: "AUG"
        score: 0.00293
      - label: "AAA"
        score: 0.002028
      - label: "AAU"
        score: 0.001918
    text: "UUAAUGCUAA<mask>CGUGAUAGGGGUU"
  - example_title: "metastasis associated lung adenocarcinoma transcript 1"
    output:
      - label: "GCC"
        score: 0.795005
      - label: "GUC"
        score: 0.185118
      - label: "GGC"
        score: 0.011697
      - label: "CUC"
        score: 0.001336
      - label: "GAC"
        score: 0.001316
    text: "AGGCAUUGAGGCAGCCAGCGCAGGGGC<mask>UCUGCUGAGGGGGCAGGCGGAGCUUGAGGAAA"
  - example_title: "Pvt1 oncogene"
    output:
      - label: "CCC"
        score: 0.999723
      - label: "GCC"
        score: 9.9e-05
      - label: "UCC"
        score: 4.5e-05
      - label: "CGC"
        score: 4.0e-05
      - label: "CAC"
        score: 1.7e-05
    text: "CCCGCGCUCC<mask>CCGGGCAGAGCGCGUGUGGCGGCCGAGCACAUGGGCCCGCGGGCCGGGC"
  - example_title: "telomerase RNA component"
    output:
      - label: "GGG"
        score: 0.999875
      - label: "GGU"
        score: 4.8e-05
      - label: "GGA"
        score: 2.5e-05
      - label: "GUG"
        score: 1.7e-05
      - label: "GAG"
        score: 8.0e-06
    text: "GGGUUGCGGAGGG<mask>GGGCCUGGGAGGGGUGGUGGCCAUUUUUUGUCUAACCCUAACUGAG"
  - example_title: "vault RNA 2-1"
    output:
      - label: "AGA"
        score: 0.756469
      - label: "AUA"
        score: 0.132066
      - label: "GUA"
        score: 0.005061
      - label: "AGG"
        score: 0.005042
      - label: "ACA"
        score: 0.004648
    text: "CGGGUCGGAG<mask>UAGCUCAAGCGGUUACCUCCUCAUGCCGGACUUUCUAUCUGUCCAUCUCUGUGCUGGGGUUCGAGACCCGCGGGUGCUUACUGACCCUUUUAUGCAA"
  - example_title: "brain cytoplasmic RNA 1"
    output:
      - label: "GGG"
        score: 0.997855
      - label: "GGU"
        score: 0.000292
      - label: "UGG"
        score: 0.000212
      - label: "AGG"
        score: 8.4e-05
      - label: "CGG"
        score: 7.6e-05
    text: "GGCCGGGCGCGG<mask>GGCUCACGCCUGUAAUCCCAGCUCUCAGGGAGGCUAAGAGGCGGGAGGAUAGCUUGAGCCCAGGAGUUCGAGACCUGCCUGGGCAAUAUAGCGAGACCCCGUUCUCCAGAAAAAGGAAAAAAAAAAACAAAAGACAAAAAAAAAAUAAGCGUAACUUCCCUCAAAGCAACAACCCCCCCCCCCCUUU"
  - example_title: "HIV-1 TAR-WT"
    output:
      - label: "GGA"
        score: 0.898416
      - label: "GUA"
        score: 0.078891
      - label: "GGG"
        score: 0.006394
      - label: "GGU"
        score: 0.004711
      - label: "GGC"
        score: 0.002485
    text: "GGUCUCUCUGG<mask>UAGACCAGAUCUGAGCCUGGGAGCUCUCUGGCUAACUAGGGAACC"
  - example_title: "prion protein (Kanno blood group)"
    output:
      - label: "CCG"
        score: 0.643719
      - label: "CUG"
        score: 0.343724
      - label: "GUG"
        score: 0.002816
      - label: "UUG"
        score: 0.001847
      - label: "AUG"
        score: 0.001343
    text: "AUGGCGAACC<mask>UGGCUGCUGGAUGCUGGUUCUCUUUGUGGCCACAUGGAGUGACCUGGGCCUCUGC"
  - example_title: "interleukin 10"
    output:
      - label: "AGC"
        score: 0.544763
      - label: "ACC"
        score: 0.437037
      - label: "UGC"
        score: 0.003539
      - label: "AUC"
        score: 0.002685
      - label: "GGC"
        score: 0.000976
    text: "AUGCACAGCUCAGCAC<mask>GCUCUGUUGCCUGGUCCUCCUGACUGGGGUGAGGGCC"
  - example_title: "Zaire ebolavirus"
    output:
      - label: "ACU"
        score: 0.49825
      - label: "AUU"
        score: 0.405071
      - label: "AGU"
        score: 0.00522
      - label: "UUU"
        score: 0.004056
      - label: "NGA"
        score: 0.003007
    text: "AAUGUUCAAACAC<mask>UUGUGAAGCUCUGUUAGCUGAUGGUCUUGCUAAAGCAUUUCCUAGCAAUAUGAUGGUAGUCACAGAGCGUGAGCAAAAAGAAAGCUUAUUGCAUCAAGCAUCAUGGCACCACACAAGUGAUGAUUUUGGUGAGCAUGCCACAGUUAGAGGGAGUAGCUUUGUAACUGAUUUAGAGAAAUACAAUCUUGCAUUUAGAUAUGAGUUUACAGCACCUUUUAUAGAAUAUUGUAACCGUUGCUAUGGUGUUAAGAAUGUUUUUAAUUGGAUGCAUUAUACAAUCCCACAGUGUUAU"
  - example_title: "SARS coronavirus"
    output:
      - label: "UUU"
        score: 0.434485
      - label: "UCU"
        score: 0.187827
      - label: "NCG"
        score: 0.017024
      - label: "AGN"
        score: 0.011693
      - label: "UUN"
        score: 0.011642
    text: "AUGUUUAUUU<mask>CUUAUUAUUUCUUACUCUCACUAGUGGUAGUGACCUUGACCGGUGCACCACUUUUGAUGAUGUUCAAGCUCCUAAUUACACUCAACAUACUUCAUCUAUGAGGGGGGUUUACUAUCCUGAUGAAAUUUUUAGAUCAGACACUCUUUAUUUAACUCAGGAUUUAUUUCUUCCAUUUUAUUCUAAUGUUACAGGGUUUCAUACUAUUAAUCAUACGUUUGACAACCCUGUCAUACCUUUUAAGGAUGGUAUUUAUUUUGCUGCCACAGAGAAAUCAAAUGUUGUCCGUGGUUGGGUUUUUGGUUCUACCAUGAACAACAAGUCACAGUCGGUGAUUAUUAUUAACAAUUCUACUAAUGUUGUUAUACGAGCAUGUAACUUUGAAUUGUGUGACAACCCUUUCUUUGCUGUUUCUAAACCCAUGGGUACACAGACACAUACUAUGAUAUUCGAUAAUGCAUUUAAAUGCACUUUCGAGUACAUAUCU"
---

# 3UTRBERT

Pre-trained model on 3’ untranslated region (3’UTR) using a masked language modeling (MLM) objective.

## Disclaimer

This is an UNOFFICIAL implementation of the [Deciphering 3’ UTR mediated gene regulation using interpretable deep representation learning](https://doi.org/10.1101/2023.09.08.556883) by Yuning Yang, Gen Li, et al.

The OFFICIAL repository of 3UTRBERT is at [yangyn533/3UTRBERT](https://github.com/yangyn533/3UTRBERT).

> [!TIP]
> The MultiMolecule team has confirmed that the provided model and checkpoints are producing the same intermediate representations as the original implementation.
**The team releasing 3UTRBERT did not write this model card, so it has been written by the MultiMolecule team.**

## Model Details

3UTRBERT is a [bert](https://huggingface.co/google-bert/bert-base-uncased)-style model pre-trained on a large corpus of 3’ untranslated regions (3’UTRs) in a self-supervised fashion. This means that the model was trained on the raw nucleotides of RNA sequences only, with an automatic process to generate inputs and labels from those sequences. Please refer to the [Training Details](#training-details) section for more information on the training process.

### Variants

- **[multimolecule/utrbert-3mer](https://huggingface.co/multimolecule/utrbert-3mer)**: The 3UTRBERT model pre-trained on 3-mer data.
- **[multimolecule/utrbert-4mer](https://huggingface.co/multimolecule/utrbert-4mer)**: The 3UTRBERT model pre-trained on 4-mer data.
- **[multimolecule/utrbert-5mer](https://huggingface.co/multimolecule/utrbert-5mer)**: The 3UTRBERT model pre-trained on 5-mer data.
- **[multimolecule/utrbert-6mer](https://huggingface.co/multimolecule/utrbert-6mer)**: The 3UTRBERT model pre-trained on 6-mer data.

### Model Specification
| Variants      | Num Layers | Hidden Size | Num Heads | Intermediate Size | Num Parameters (M) | FLOPs (G) | MACs (G) | Max Num Tokens |
| ------------- | ---------- | ----------- | --------- | ----------------- | ------------------ | --------- | -------- | -------------- |
| 3UTRBERT-3mer | 12         | 768         | 12        | 3072              | 86.14              | 22.36     | 11.17    | 512            |
| 3UTRBERT-4mer |            |             |           |                   | 86.53              |           |          |                |
| 3UTRBERT-5mer |            |             |           |                   | 88.45              |           |          |                |
| 3UTRBERT-6mer |            |             |           |                   | 98.05              |           |          |                |
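The architecture hyperparameters in the table are stored in each checkpoint's configuration, so they can be checked programmatically. A minimal sketch, assuming `UtrBertConfig` exposes the standard BERT-style attribute names:

```python
from multimolecule import UtrBertConfig

# Print the architecture hyperparameters of each variant, as listed in the table above.
for kmer in (3, 4, 5, 6):
    config = UtrBertConfig.from_pretrained(f"multimolecule/utrbert-{kmer}mer")
    print(
        f"3UTRBERT-{kmer}mer:",
        config.num_hidden_layers,
        config.hidden_size,
        config.num_attention_heads,
        config.intermediate_size,
    )
```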
### Links

- **Code**: [multimolecule.utrbert](https://github.com/DLS5-Omics/multimolecule/tree/master/multimolecule/models/utrbert)
- **Data**: [multimolecule/gencode-human](https://huggingface.co/datasets/multimolecule/gencode-human)
- **Paper**: [Deciphering 3’ UTR mediated gene regulation using interpretable deep representation learning](https://doi.org/10.1101/2023.09.08.556883)
- **Developed by**: Yuning Yang, Gen Li, Kuan Pang, Wuxinhao Cao, Xiangtao Li, Zhaolei Zhang
- **Model type**: [BERT](https://huggingface.co/google-bert/bert-base-uncased) - [FlashAttention](https://huggingface.co/docs/text-generation-inference/en/conceptual/flash_attention)
- **Original Repository**: [yangyn533/3UTRBERT](https://github.com/yangyn533/3UTRBERT)

## Usage

The model file depends on the [`multimolecule`](https://multimolecule.danling.org) library. You can install it using pip:

```bash
pip install multimolecule
```

### Direct Use

#### Masked Language Modeling

> [!WARNING]
> The default transformers pipeline does not support K-mer tokenization.

You can use this model directly with a pipeline for masked language modeling:

```python
import multimolecule  # you must import multimolecule to register models
from transformers import pipeline

predictor = pipeline("fill-mask", model="multimolecule/utrbert-3mer")
output = predictor("gguc<mask><mask><mask>cugguuagaccagaucugagccu")[1]
```

Masking a single nucleotide in a 3-mer model corrupts the three overlapping 3-mer tokens that contain it, so the input carries three `<mask>` tokens; `[1]` selects the predictions for the middle mask.

### Downstream Use

#### Extract Features

Here is how to use this model to get the features of a given sequence in PyTorch:

```python
from multimolecule import RnaTokenizer, UtrBertModel

tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-3mer")
model = UtrBertModel.from_pretrained("multimolecule/utrbert-3mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")

output = model(**input)
```

#### Sequence Classification / Regression

> [!NOTE]
> This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for sequence classification or regression.

Here is how to use this model as a backbone to fine-tune for a sequence-level task in PyTorch:

```python
import torch
from multimolecule import RnaTokenizer, UtrBertForSequencePrediction

tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-3mer")
model = UtrBertForSequencePrediction.from_pretrained("multimolecule/utrbert-3mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.tensor([1])

output = model(**input, labels=label)
```

#### Token Classification / Regression

> [!NOTE]
> This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for token classification or regression.

Here is how to use this model as a backbone to fine-tune for a nucleotide-level task in PyTorch:

```python
import torch
from multimolecule import RnaTokenizer, UtrBertForTokenPrediction

tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-3mer")
model = UtrBertForTokenPrediction.from_pretrained("multimolecule/utrbert-3mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.randint(2, (len(text), ))

output = model(**input, labels=label)
```
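Because 3UTRBERT tokenizes sequences into overlapping k-mers, the number of tokens does not match the number of nucleotides, which matters when preparing token-level labels. A minimal sketch to inspect the tokenization, assuming the standard `tokenize` method of the Hugging Face tokenizer API returns the k-mer tokens for this checkpoint:

```python
from multimolecule import RnaTokenizer

# With overlapping k-mer tokenization, a sequence of L nucleotides yields
# L - k + 1 tokens; special tokens are added on top when the text is encoded.
tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-3mer")
text = "UAGCUUAUCAGACUGAUGUUG"

tokens = tokenizer.tokenize(text)
print(len(text), len(tokens))  # expected: 21 19
print(tokens[:3])              # expected: ['UAG', 'AGC', 'GCU']
```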
#### Contact Classification / Regression

> [!NOTE]
> This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for contact classification or regression.

Here is how to use this model as a backbone to fine-tune for a contact-level task in PyTorch:

```python
import torch
from multimolecule import RnaTokenizer, UtrBertForContactPrediction

tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-3mer")
model = UtrBertForContactPrediction.from_pretrained("multimolecule/utrbert-3mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.randint(2, (len(text), len(text)))

output = model(**input, labels=label)
```

## Training Details

3UTRBERT used Masked Language Modeling (MLM) as the pre-training objective: taking a sequence, the model randomly masks 15% of the tokens in the input, then runs the entire masked sequence through the model and has to predict the masked tokens. This is comparable to the Cloze task in language modeling.

### Training Data

The 3UTRBERT model was pre-trained on human mRNA transcript sequences from [GENCODE](https://gencodegenes.org). GENCODE aims to identify all gene features in the human genome using a combination of computational analysis, manual annotation, and experimental validation. The GENCODE release 40 used by this work contains 61,544 genes and 246,624 transcripts.

3UTRBERT collected the human mRNA transcript sequences from GENCODE, including 108,573 unique mRNA transcripts. Only the longest transcript of each gene was used in the pre-training process. 3UTRBERT used only the 3’ untranslated regions (3’UTRs) of the mRNA transcripts for pre-training, to avoid codon constraints in the CDS region and to reduce the complexity of modeling entire mRNA transcripts. The average length of the 3’UTRs was 1,227 nucleotides, while the median length was 631 nucleotides. Each 3’UTR sequence was cut into non-overlapping patches of 510 nucleotides. The remaining sequences were padded to the same length.

Note that [`RnaTokenizer`][multimolecule.RnaTokenizer] will convert "T"s to "U"s for you; you may disable this behaviour by passing `replace_T_with_U=False`.

### Training Procedure

#### Preprocessing

3UTRBERT used masked language modeling (MLM) as the pre-training objective. The masking procedure is similar to the one used in BERT:

- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `<mask>`.
- In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace.
- In the 10% remaining cases, the masked tokens are left as is.

Since 3UTRBERT used a k-mer tokenizer, it masks the entire k-mer instead of individual nucleotides to avoid information leakage. For example, with k = 3, the sequence `"UAGCGUAU"` is tokenized as `["UAG", "AGC", "GCG", "CGU", "GUA", "UAU"]`. If the nucleotide `"C"` is masked, the adjacent tokens are also masked, resulting in `["UAG", "<mask>", "<mask>", "<mask>", "GUA", "UAU"]`.
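To make the k-mer masking rule concrete, here is an illustrative sketch; this is not the authors' training code, only a plain-Python rendering of the procedure described above (`kmerize` and `mask_nucleotide` are hypothetical helpers):

```python
def kmerize(sequence: str, k: int = 3) -> list[str]:
    """Tokenize a sequence into overlapping k-mers."""
    return [sequence[i : i + k] for i in range(len(sequence) - k + 1)]


def mask_nucleotide(tokens: list[str], position: int, k: int = 3) -> list[str]:
    """Mask every k-mer token that overlaps the nucleotide at `position`."""
    return [
        "<mask>" if position - k + 1 <= i <= position else token
        for i, token in enumerate(tokens)
    ]


tokens = kmerize("UAGCGUAU")
print(tokens)                      # ['UAG', 'AGC', 'GCG', 'CGU', 'GUA', 'UAU']
print(mask_nucleotide(tokens, 3))  # ['UAG', '<mask>', '<mask>', '<mask>', 'GUA', 'UAU']
```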
#### Pre-training

The model was trained on 4 NVIDIA Quadro RTX 6000 GPUs with 24GiB of memory each.

- Batch size: 128
- Steps: 200,000
- Optimizer: AdamW (β1 = 0.9, β2 = 0.98, ε = 1e-6)
- Learning rate: 3e-4
- Learning rate scheduler: Linear
- Learning rate warm-up: 10,000 steps
- Weight decay: 0.01

## Citation

**BibTeX**:

```bibtex
@article {yang2023deciphering,
	author = {Yang, Yuning and Li, Gen and Pang, Kuan and Cao, Wuxinhao and Li, Xiangtao and Zhang, Zhaolei},
	title = {Deciphering 3{\textquoteright} UTR mediated gene regulation using interpretable deep representation learning},
	elocation-id = {2023.09.08.556883},
	year = {2023},
	doi = {10.1101/2023.09.08.556883},
	publisher = {Cold Spring Harbor Laboratory},
	abstract = {The 3{\textquoteright}untranslated regions (3{\textquoteright}UTRs) of messenger RNAs contain many important cis-regulatory elements that are under functional and evolutionary constraints. We hypothesize that these constraints are similar to grammars and syntaxes in human languages and can be modeled by advanced natural language models such as Transformers, which has been very effective in modeling protein sequence and structures. Here we describe 3UTRBERT, which implements an attention-based language model, i.e., Bidirectional Encoder Representations from Transformers (BERT). 3UTRBERT was pre-trained on aggregated 3{\textquoteright}UTR sequences of human mRNAs in a task-agnostic manner; the pre-trained model was then fine-tuned for specific downstream tasks such as predicting RBP binding sites, m6A RNA modification sites, and predicting RNA sub-cellular localizations. Benchmark results showed that 3UTRBERT generally outperformed other contemporary methods in each of these tasks. We also showed that the self-attention mechanism within 3UTRBERT allows direct visualization of the semantic relationship between sequence elements.Competing Interest StatementThe authors have declared no competing interest.},
	URL = {https://www.biorxiv.org/content/early/2023/09/12/2023.09.08.556883},
	eprint = {https://www.biorxiv.org/content/early/2023/09/12/2023.09.08.556883.full.pdf},
	journal = {bioRxiv}
}
```

## Contact

Please use GitHub issues of [MultiMolecule](https://github.com/DLS5-Omics/multimolecule/issues) for any questions or comments on the model card.

Please contact the authors of the [3UTRBERT paper](https://doi.org/10.1101/2023.09.08.556883) for questions or comments on the paper/model.

## License

This model is licensed under the [AGPL-3.0 License](https://www.gnu.org/licenses/agpl-3.0.html).

```spdx
SPDX-License-Identifier: AGPL-3.0-or-later
```