---
datasets:
- multimolecule/gencode
language: rna
library_name: multimolecule
license: agpl-3.0
mask_token: <mask>
pipeline_tag: fill-mask
tags:
- Biology
- RNA
- 3' UTR
widget:
- example_title: Human GPI protein p137
output:
- label: AACC
score: 0.364436
- label: AGCC
score: 0.346772
- label: AGAC
score: 0.264004
- label: AGAA
score: 0.01208
- label: AGGC
score: 0.002414
text: UUUUUAAAAGGAAAAGA<mask>ACCAAAUGCCUGCUGCUACCACCCUUUUCAAUUGCUAUGUUU
- example_title: microRNA 21
output:
- label: GACU
score: 0.37685
- label: GAAU
score: 0.342165
- label: GGAU
score: 0.210064
- label: GACA
score: 0.035684
- label: GAGU
score: 0.004396
text: UAGCUUAUCAGAC<mask>GAUGUUGA
- example_title: microRNA 146a
output:
- label: GAAC
score: 0.405272
- label: GACC
score: 0.355268
- label: GUCC
score: 0.147301
- label: GAGC
score: 0.006135
- label: GAAG
score: 0.005959
text: UGAGAACUGAA<mask>UCCAUGGGUU
- example_title: microRNA 155
output:
- label: UCGU
score: 0.487097
- label: UAAU
score: 0.332914
- label: UAGU
score: 0.172531
- label: UGGU
score: 0.003671
- label: CCGU
score: 0.000722
text: UUAAUGCUAA<mask>CGUGAUAGGGGUU
- example_title: metastasis associated lung adenocarcinoma transcript 1
output:
- label: GGCU
score: 0.999646
- label: GUCU
score: 0.000209
- label: GGCG
score: 4.9e-05
- label: GGCA
score: 3.7e-05
- label: GGCC
score: 3.4e-05
text: AGGCAUUGAGGCAGCCAGCGCAGGGGC<mask>UCUGCUGAGGGGGCAGGCGGAGCUUGAGGAAA
- example_title: Pvt1 oncogene
output:
- label: UCCG
score: 0.999919
- label: UCCA
score: 5.8e-05
- label: UCCC
score: 6.0e-06
- label: UCCU
score: 6.0e-06
- label: GCCG
score: 6.0e-06
text: CCCGCGCUCC<mask>CCGGGCAGAGCGCGUGUGGCGGCCGAGCACAUGGGCCCGCGGGCCGGGC
- example_title: telomerase RNA component
output:
- label: GGGG
score: 0.999982
- label: UGGG
score: 7.0e-06
- label: AGGG
score: 6.0e-06
- label: CGGG
score: 2.0e-06
- label: GGGA
score: 1.0e-06
text: GGGUUGCGGAGGG<mask>GGGCCUGGGAGGGGUGGUGGCCAUUUUUUGUCUAACCCUAACUGAG
- example_title: vault RNA 2-1
output:
- label: GAGG
score: 0.61774
- label: GAAG
score: 0.235558
- label: GUAG
score: 0.116111
- label: GGAG
score: 0.004167
- label: GAGC
score: 0.003201
text: CGGGUCGGAG<mask>UAGCUCAAGCGGUUACCUCCUCAUGCCGGACUUUCUAUCUGUCCAUCUCUGUGCUGGGGUUCGAGACCCGCGGGUGCUUACUGACCCUUUUAUGCAA
- example_title: brain cytoplasmic RNA 1
output:
- label: CGGC
score: 0.999998
- label: UGGC
score: 1.0e-06
- label: AGGC
score: 0.0
- label: GGGC
score: 0.0
- label: CGGG
score: 0.0
text: GGCCGGGCGCGG<mask>GGCUCACGCCUGUAAUCCCAGCUCUCAGGGAGGCUAAGAGGCGGGAGGAUAGCUUGAGCCCAGGAGUUCGAGACCUGCCUGGGCAAUAUAGCGAGACCCCGUUCUCCAGAAAAAGGAAAAAAAAAAACAAAAGACAAAAAAAAAAUAAGCGUAACUUCCCUCAAAGCAACAACCCCCCCCCCCCUUU
- example_title: HIV-1 TAR-WT
output:
- label: UGGG
score: 0.814569
- label: UGAG
score: 0.141636
- label: UUAG
score: 0.019851
- label: UGGA
score: 0.006125
- label: UGGC
score: 0.005718
text: GGUCUCUCUGG<mask>UAGACCAGAUCUGAGCCUGGGAGCUCUCUGGCUAACUAGGGAACC
- example_title: prion protein (Kanno blood group)
output:
- label: AUGG
score: 0.812915
- label: ACGG
score: 0.080288
- label: ACCG
score: 0.071152
- label: CUGG
score: 0.012691
- label: GUGG
score: 0.007779
text: AUGGCGAACC<mask>UGGCUGCUGGAUGCUGGUUCUCUUUGUGGCCACAUGGAGUGACCUGGGCCUCUGC
- example_title: interleukin 10
output:
- label: CACU
score: 0.998267
- label: CGCU
score: 0.001032
- label: UGCU
score: 0.000106
- label: GGCU
score: 0.000104
- label: AGCU
score: 8.2e-05
text: AUGCACAGCUCAGCAC<mask>GCUCUGUUGCCUGGUCCUCCUGACUGGGGUGAGGGCC
- example_title: Zaire ebolavirus
output:
- label: CAUG
score: 0.565727
- label: CUUG
score: 0.203214
- label: CACG
score: 0.195402
- label: CACU
score: 0.002715
- label: CCUG
score: 0.001785
text: AAUGUUCAAACAC<mask>UUGUGAAGCUCUGUUAGCUGAUGGUCUUGCUAAAGCAUUUCCUAGCAAUAUGAUGGUAGUCACAGAGCGUGAGCAAAAAGAAAGCUUAUUGCAUCAAGCAUCAUGGCACCACACAAGUGAUGAUUUUGGUGAGCAUGCCACAGUUAGAGGGAGUAGCUUUGUAACUGAUUUAGAGAAAUACAAUCUUGCAUUUAGAUAUGAGUUUACAGCACCUUUUAUAGAAUAUUGUAACCGUUGCUAUGGUGUUAAGAAUGUUUUUAAUUGGAUGCAUUAUACAAUCCCACAGUGUUAU
- example_title: SARS coronavirus
output:
- label: UUUU
score: 0.998767
- label: UCUU
score: 0.000411
- label: UUUC
score: 6.6e-05
- label: UGUU
score: 1.2e-05
- label: UAUU
score: 8.0e-06
text: AUGUUUAUUU<mask>CUUAUUAUUUCUUACUCUCACUAGUGGUAGUGACCUUGACCGGUGCACCACUUUUGAUGAUGUUCAAGCUCCUAAUUACACUCAACAUACUUCAUCUAUGAGGGGGGUUUACUAUCCUGAUGAAAUUUUUAGAUCAGACACUCUUUAUUUAACUCAGGAUUUAUUUCUUCCAUUUUAUUCUAAUGUUACAGGGUUUCAUACUAUUAAUCAUACGUUUGACAACCCUGUCAUACCUUUUAAGGAUGGUAUUUAUUUUGCUGCCACAGAGAAAUCAAAUGUUGUCCGUGGUUGGGUUUUUGGUUCUACCAUGAACAACAAGUCACAGUCGGUGAUUAUUAUUAACAAUUCUACUAAUGUUGUUAUACGAGCAUGUAACUUUGAAUUGUGUGACAACCCUUUCUUUGCUGUUUCUAAACCCAUGGGUACACAGACACAUACUAUGAUAUUCGAUAAUGCAUUUAAAUGCACUUUCGAGUACAUAUCU
---
# 3UTRBERT
Pre-trained model on 3’ untranslated regions (3’UTRs) using a masked language modeling (MLM) objective.
## Disclaimer
This is an UNOFFICIAL implementation of the [Deciphering 3’ UTR mediated gene regulation using interpretable deep representation learning](https://doi.org/10.1101/2023.09.08.556883) by Yuning Yang, Gen Li, et al.
The OFFICIAL repository of 3UTRBERT is at [yangyn533/3UTRBERT](https://github.com/yangyn533/3UTRBERT).
> [!TIP]
> The MultiMolecule team has confirmed that the provided model and checkpoints are producing the same intermediate representations as the original implementation.
**The team releasing 3UTRBERT did not write this model card, so it has been written by the MultiMolecule team.**
## Model Details
3UTRBERT is a [bert](https://huggingface.co/google-bert/bert-base-uncased)-style model pre-trained on a large corpus of 3’ untranslated regions (3’UTRs) in a self-supervised fashion. This means that the model was trained on the raw nucleotides of RNA sequences only, with an automatic process to generate inputs and labels from those sequences. Please refer to the [Training Details](#training-details) section for more information on the training process.
### Variants
- **[multimolecule/utrbert-3mer](https://huggingface.co/multimolecule/utrbert-3mer)**: The 3UTRBERT model pre-trained on 3-mer data.
- **[multimolecule/utrbert-4mer](https://huggingface.co/multimolecule/utrbert-4mer)**: The 3UTRBERT model pre-trained on 4-mer data.
- **[multimolecule/utrbert-5mer](https://huggingface.co/multimolecule/utrbert-5mer)**: The 3UTRBERT model pre-trained on 5-mer data.
- **[multimolecule/utrbert-6mer](https://huggingface.co/multimolecule/utrbert-6mer)**: The 3UTRBERT model pre-trained on 6-mer data.
### Model Specification
<table>
<thead>
<tr>
<th>Variants</th>
<th>Num Layers</th>
<th>Hidden Size</th>
<th>Num Heads</th>
<th>Intermediate Size</th>
<th>Num Parameters (M)</th>
<th>FLOPs (G)</th>
<th>MACs (G)</th>
<th>Max Num Tokens</th>
</tr>
</thead>
<tbody>
<tr>
<td>3UTRBERT-3mer</td>
<td rowspan="4">12</td>
<td rowspan="4">768</td>
<td rowspan="4">12</td>
<td rowspan="4">3072</td>
<td>86.14</td>
<td rowspan="4">22.36</td>
<td rowspan="4">11.17</td>
<td rowspan="4">512</td>
</tr>
<tr>
<td>3UTRBERT-4mer</td>
<td>86.53</td>
</tr>
<tr>
<td>3UTRBERT-5mer</td>
<td>88.45</td>
</tr>
<tr>
<td>3UTRBERT-6mer</td>
<td>98.05</td>
</tr>
</tbody>
</table>
### Links
- **Code**: [multimolecule.utrbert](https://github.com/DLS5-Omics/multimolecule/tree/master/multimolecule/models/utrbert)
- **Data**: [multimolecule/gencode-human](https://huggingface.co/datasets/multimolecule/gencode-human)
- **Paper**: [Deciphering 3’ UTR mediated gene regulation using interpretable deep representation learning](https://doi.org/10.1101/2023.09.08.556883)
- **Developed by**: Yuning Yang, Gen Li, Kuan Pang, Wuxinhao Cao, Xiangtao Li, Zhaolei Zhang
- **Model type**: [BERT](https://huggingface.co/google-bert/bert-base-uncased) - [FlashAttention](https://huggingface.co/docs/text-generation-inference/en/conceptual/flash_attention)
- **Original Repository**: [yangyn533/3UTRBERT](https://github.com/yangyn533/3UTRBERT)
## Usage
The model file depends on the [`multimolecule`](https://multimolecule.danling.org) library. You can install it using pip:
```bash
pip install multimolecule
```
### Direct Use
#### Masked Language Modeling
> [!WARNING]
> The default transformers pipeline does not support k-mer tokenization.
You can use this model directly with a pipeline for masked language modeling:
```python
import multimolecule # you must import multimolecule to register models
from transformers import pipeline
predictor = pipeline("fill-mask", model="multimolecule/utrbert-4mer")
# a masked nucleotide is covered by 4 overlapping 4-mers, hence four <mask> tokens;
# the pipeline returns one list of candidates per mask, and [1] selects the second
output = predictor("gguc<mask><mask><mask><mask>cugguuagaccagaucugagccu")[1]
```
### Downstream Use
#### Extract Features
Here is how to use this model to get the features of a given sequence in PyTorch:
```python
from multimolecule import RnaTokenizer, UtrBertModel
tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-4mer")
model = UtrBertModel.from_pretrained("multimolecule/utrbert-4mer")
text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
output = model(**input)
```
#### Sequence Classification / Regression
> [!NOTE]
> This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for sequence classification or regression.
Here is how to use this model as a backbone to fine-tune for a sequence-level task in PyTorch:
```python
import torch
from multimolecule import RnaTokenizer, UtrBertForSequencePrediction
tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-4mer")
model = UtrBertForSequencePrediction.from_pretrained("multimolecule/utrbert-4mer")
text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.tensor([1])
output = model(**input, labels=label)
```
#### Token Classification / Regression
> [!NOTE]
> This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for token classification or regression.
Here is how to use this model as a backbone to fine-tune for a nucleotide-level task in PyTorch:
```python
import torch
from multimolecule import RnaTokenizer, UtrBertForTokenPrediction
tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-4mer")
model = UtrBertForTokenPrediction.from_pretrained("multimolecule/utrbert-4mer")
text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.randint(2, (len(text), ))
output = model(**input, labels=label)
```
#### Contact Classification / Regression
> [!NOTE]
> This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for contact classification or regression.
Here is how to use this model as a backbone to fine-tune for a contact-level task in PyTorch:
```python
import torch
from multimolecule import RnaTokenizer, UtrBertForContactPrediction
tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-4mer")
model = UtrBertForContactPrediction.from_pretrained("multimolecule/utrbert-4mer")
text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.randint(2, (len(text), len(text)))
output = model(**input, labels=label)
```
## Training Details
3UTRBERT used Masked Language Modeling (MLM) as the pre-training objective: taking a sequence, the model randomly masks 15% of the tokens in the input, then runs the entire masked sequence through the model and has to predict the masked tokens. This is comparable to the Cloze task in language modeling.
### Training Data
The 3UTRBERT model was pre-trained on human mRNA transcript sequences from [GENCODE](https://gencodegenes.org).
GENCODE aims to identify all gene features in the human genome using a combination of computational analysis, manual annotation, and experimental validation. The GENCODE release 40 used by this work contains 61,544 genes and 246,624 transcripts.
3UTRBERT collected the human mRNA transcript sequences from GENCODE, covering 108,573 unique mRNA transcripts. Only the longest transcript of each gene was used in the pre-training process. 3UTRBERT used only the 3’ untranslated regions (3’UTRs) of the mRNA transcripts for pre-training, to avoid codon constraints in the CDS region and to reduce the complexity of modeling entire mRNA transcripts. The average length of the 3’UTRs was 1,227 nucleotides, while the median length was 631 nucleotides. Each 3’UTR sequence was cut into non-overlapping patches of 510 nucleotides. The remaining sequence fragments were padded to the same length.
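The chunking step can be sketched as follows (a minimal illustration of the preprocessing described above; the `chunk_sequence` helper and the `N` padding character are assumptions, not code from the original repository):
```python
def chunk_sequence(sequence: str, patch_size: int = 510, pad_char: str = "N") -> list[str]:
    """Cut a sequence into non-overlapping patches, padding the last patch."""
    # hypothetical helper illustrating the preprocessing described above
    patches = [sequence[i : i + patch_size] for i in range(0, len(sequence), patch_size)]
    if patches and len(patches[-1]) < patch_size:
        patches[-1] = patches[-1].ljust(patch_size, pad_char)
    return patches

patches = chunk_sequence("A" * 1227)  # the average 3’UTR length
print([len(p) for p in patches])  # [510, 510, 510]: the last patch is padded
```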
Note that [`RnaTokenizer`][multimolecule.RnaTokenizer] will convert "T"s to "U"s for you; you may disable this behaviour by passing `replace_T_with_U=False`.
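For example (a minimal sketch; the DNA-style input below is purely illustrative):
```python
from multimolecule import RnaTokenizer

tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-4mer")
# "T"s are converted to "U"s before k-mer tokenization
input = tokenizer("TAGCTTATCAGACTGATGTTG", return_tensors="pt")

# keep "T"s as-is instead
tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-4mer", replace_T_with_U=False)
```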
### Training Procedure
#### Preprocessing
3UTRBERT used masked language modeling (MLM) as the pre-training objective. The masking procedure is similar to the one used in BERT:
- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `<mask>`.
- In 10% of the cases, the masked tokens are replaced by a random token (different from the one they replace).
- In the 10% remaining cases, the masked tokens are left as is.
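A minimal sketch of this corruption rule for a token already selected for masking (the `corrupt_token` helper is illustrative, not code from the original repository):
```python
import random

def corrupt_token(token: str, vocab: list[str], mask_token: str = "<mask>") -> str:
    """Apply the BERT-style 80/10/10 corruption to a selected token."""
    r = random.random()
    if r < 0.8:  # 80% of the cases: replace with <mask>
        return mask_token
    if r < 0.9:  # 10% of the cases: replace with a different random token
        return random.choice([t for t in vocab if t != token])
    return token  # remaining 10%: leave the token as is
```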
Since 3UTRBERT uses a k-mer tokenizer, it masks entire k-mers instead of individual nucleotides to avoid information leakage.
For example, with k = 3, the sequence `"UAGCGUAU"` is tokenized as `["UAG", "AGC", "GCG", "CGU", "GUA", "UAU"]`. If the nucleotide `"C"` is masked, the adjacent tokens that cover it are also masked, resulting in `["UAG", "<mask>", "<mask>", "<mask>", "GUA", "UAU"]`.
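The sketch below reproduces this example; `kmer_tokenize` and `mask_nucleotide` are illustrative helpers, not part of the multimolecule API:
```python
def kmer_tokenize(sequence: str, k: int = 3) -> list[str]:
    """Split a sequence into overlapping k-mers with stride 1."""
    return [sequence[i : i + k] for i in range(len(sequence) - k + 1)]

def mask_nucleotide(tokens: list[str], position: int, k: int = 3) -> list[str]:
    """Mask every k-mer that covers the nucleotide at `position` (0-based)."""
    return ["<mask>" if i <= position < i + k else t for i, t in enumerate(tokens)]

tokens = kmer_tokenize("UAGCGUAU")  # ['UAG', 'AGC', 'GCG', 'CGU', 'GUA', 'UAU']
print(mask_nucleotide(tokens, position=3))  # mask the "C" at index 3
# ['UAG', '<mask>', '<mask>', '<mask>', 'GUA', 'UAU']
```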
#### Pre-training
The model was trained on 4 NVIDIA Quadro RTX 6000 GPUs, each with 24 GiB of memory.
- Batch size: 128
- Steps: 200,000
- Optimizer: AdamW(β1=0.9, β2=0.98, ε=1e-6)
- Learning rate: 3e-4
- Learning rate scheduler: Linear
- Learning rate warm-up: 10,000 steps
- Weight decay: 0.01
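For reference, this setup corresponds roughly to the following sketch using standard PyTorch and transformers utilities (not the original training script; `UtrBertForMaskedLM` is assumed to be the masked language modeling head exposed by multimolecule):
```python
import torch
from multimolecule import UtrBertForMaskedLM
from transformers import get_linear_schedule_with_warmup

model = UtrBertForMaskedLM.from_pretrained("multimolecule/utrbert-4mer")
optimizer = torch.optim.AdamW(
    model.parameters(), lr=3e-4, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.01
)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=10_000, num_training_steps=200_000
)
```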
## Citation
**BibTeX**:
```bibtex
@article{yang2023deciphering,
author = {Yang, Yuning and Li, Gen and Pang, Kuan and Cao, Wuxinhao and Li, Xiangtao and Zhang, Zhaolei},
title = {Deciphering 3{\textquoteright} UTR mediated gene regulation using interpretable deep representation learning},
elocation-id = {2023.09.08.556883},
year = {2023},
doi = {10.1101/2023.09.08.556883},
publisher = {Cold Spring Harbor Laboratory},
abstract = {The 3{\textquoteright}untranslated regions (3{\textquoteright}UTRs) of messenger RNAs contain many important cis-regulatory elements that are under functional and evolutionary constraints. We hypothesize that these constraints are similar to grammars and syntaxes in human languages and can be modeled by advanced natural language models such as Transformers, which has been very effective in modeling protein sequence and structures. Here we describe 3UTRBERT, which implements an attention-based language model, i.e., Bidirectional Encoder Representations from Transformers (BERT). 3UTRBERT was pre-trained on aggregated 3{\textquoteright}UTR sequences of human mRNAs in a task-agnostic manner; the pre-trained model was then fine-tuned for specific downstream tasks such as predicting RBP binding sites, m6A RNA modification sites, and predicting RNA sub-cellular localizations. Benchmark results showed that 3UTRBERT generally outperformed other contemporary methods in each of these tasks. We also showed that the self-attention mechanism within 3UTRBERT allows direct visualization of the semantic relationship between sequence elements.},
URL = {https://www.biorxiv.org/content/early/2023/09/12/2023.09.08.556883},
eprint = {https://www.biorxiv.org/content/early/2023/09/12/2023.09.08.556883.full.pdf},
journal = {bioRxiv}
}
```
## Contact
Please use GitHub issues of [MultiMolecule](https://github.com/DLS5-Omics/multimolecule/issues) for any questions or comments on the model card.
Please contact the authors of the [3UTRBERT paper](https://doi.org/10.1101/2023.09.08.556883) for questions or comments on the paper/model.
## License
This model is licensed under the [AGPL-3.0 License](https://www.gnu.org/licenses/agpl-3.0.html).
```spdx
SPDX-License-Identifier: AGPL-3.0-or-later
```