---
datasets:
  - multimolecule/gencode
language: rna
library_name: multimolecule
license: agpl-3.0
mask_token: <mask>
pipeline_tag: fill-mask
tags:
  - Biology
  - RNA
  - 3' UTR
widget:
  - example_title: Human GPI protein p137
    mask_index: 17
    masked_char: U
    output:
      - label: AACCA
        score: 0.923713
      - label: AAGCA
        score: 0.053575
      - label: AAGAA
        score: 0.020241
      - label: AAUCA
        score: 0.000717
      - label: AAACA
        score: 0.000538
    sequence_type: 3UTR
    source_tag: 3' UTR
    text: UUUUUAAAAGGAAAAGA<mask>ACCAAAUGCCUGCUGCUACCACCCUUUUCAAUUGCUAUGUUU
  - example_title: microRNA 21
    mask_index: 13
    masked_char: U
    output:
      - label: AGAUG
        score: 0.99984
      - label: AGACU
        score: 0.000044
      - label: AGACG
        score: 0.000027
      - label: AGACA
        score: 0.000025
      - label: AGACC
        score: 0.00001
    sequence_type: ncRNA
    source_tag: RNA
    text: UAGCUUAUCAGAC<mask>GAUGUUGA
  - example_title: microRNA 146a
    mask_index: 11
    masked_char: U
    output:
      - label: UGACA
        score: 0.754348
      - label: UGCCA
        score: 0.213237
      - label: UGAAA
        score: 0.015207
      - label: UGAGA
        score: 0.00541
      - label: UGGCA
        score: 0.003168
    sequence_type: ncRNA
    source_tag: RNA
    text: UGAGAACUGAA<mask>UCCAUGGGUU
  - example_title: microRNA 155
    mask_index: 10
    masked_char: U
    output:
      - label: CUAUG
        score: 0.607864
      - label: CUGUG
        score: 0.285921
      - label: CUAAG
        score: 0.090377
      - label: CUAAA
        score: 0.002207
      - label: CAGUG
        score: 0.001669
    sequence_type: ncRNA
    source_tag: RNA
    text: UUAAUGCUAA<mask>CGUGAUAGGGGUU
  - example_title: metastasis associated lung adenocarcinoma transcript 1
    mask_index: 27
    masked_char: U
    output:
      - label: GGGUG
        score: 0.770874
      - label: GGCUG
        score: 0.164034
      - label: GGGCG
        score: 0.045813
      - label: GGAUG
        score: 0.003769
      - label: GGGCU
        score: 0.003184
    sequence_type: ncRNA
    source_tag: RNA
    text: AGGCAUUGAGGCAGCCAGCGCAGGGGC<mask>UCUGCUGAGGGGGCAGGCGGAGCUUGAGGAAA
  - example_title: Pvt1 oncogene
    mask_index: 10
    masked_char: U
    output:
      - label: CUCGG
        score: 0.999209
      - label: CCCGG
        score: 0.000742
      - label: GCCGG
        score: 0.000022
      - label: CGCGG
        score: 0.00001
      - label: CUCAG
        score: 0.000007
    sequence_type: ncRNA
    source_tag: RNA
    text: CCCGCGCUCC<mask>CCGGGCAGAGCGCGUGUGGCGGCCGAGCACAUGGGCCCGCGGGCCGGGC
  - example_title: telomerase RNA component
    mask_index: 13
    masked_char: U
    output:
      - label: AGGGC
        score: 0.99999
      - label: GGGGC
        score: 0.000007
      - label: UGGGC
        score: 0.000001
      - label: AGGGG
        score: 0.000001
      - label: AGGGU
        score: 0.000001
    sequence_type: ncRNA
    source_tag: RNA
    text: GGGUUGCGGAGGG<mask>GGGCCUGGGAGGGGUGGUGGCCAUUUUUUGUCUAACCCUAACUGAG
  - example_title: vault RNA 2-1
    mask_index: 10
    masked_char: U
    output:
      - label: GGAGC
        score: 0.999994
      - label: GGAGG
        score: 0.000002
      - label: GGAGA
        score: 0.000001
      - label: GGAGU
        score: 0.000001
      - label: GAAGC
        score: 0
    sequence_type: ncRNA
    source_tag: RNA
    text: >-
      CGGGUCGGAG<mask>UAGCUCAAGCGGUUACCUCCUCAUGCCGGACUUUCUAUCUGUCCAUCUCUGUGCUGGGGUUCGAGACCCGCGGGUGCUUACUGACCCUUUUAUGCAA
  - example_title: brain cytoplasmic RNA 1
    mask_index: 12
    masked_char: U
    output:
      - label: GGGCU
        score: 0.996062
      - label: UGGCU
        score: 0.002014
      - label: CGGCU
        score: 0.001811
      - label: AGGCU
        score: 0.00006
      - label: GCGCU
        score: 0.000027
    sequence_type: ncRNA
    source_tag: RNA
    text: >-
      GGCCGGGCGCGG<mask>GGCUCACGCCUGUAAUCCCAGCUCUCAGGGAGGCUAAGAGGCGGGAGGAUAGCUUGAGCCCAGGAGUUCGAGACCUGCCUGGGCAAUAUAGCGAGACCCCGUUCUCCAGAAAAAGGAAAAAAAAAAACAAAAGACAAAAAAAAAAUAAGCGUAACUUCCCUCAAAGCAACAACCCCCCCCCCCCUUU
  - example_title: HIV-1 TAR-WT
    mask_index: 11
    masked_char: U
    output:
      - label: CUGGA
        score: 0.978694
      - label: CUAGA
        score: 0.020178
      - label: CUGGG
        score: 0.000159
      - label: CUGGC
        score: 0.000119
      - label: UUAGA
        score: 0.000036
    sequence_type: ncRNA
    source_tag: RNA
    text: GGUCUCUCUGG<mask>UAGACCAGAUCUGAGCCUGGGAGCUCUCUGGCUAACUAGGGAACC
  - example_title: prion protein (Kanno blood group)
    mask_index: 10
    masked_char: U
    output:
      - label: AAGGC
        score: 0.662737
      - label: AUGGC
        score: 0.173229
      - label: AACGC
        score: 0.139515
      - label: AGGGC
        score: 0.006084
      - label: UUGGC
        score: 0.004228
    sequence_type: mRNA
    source_tag: RNA
    text: AUGGCGAACC<mask>UGGCUGCUGGAUGCUGGUUCUCUUUGUGGCCACAUGGAGUGACCUGGGCCUCUGC
  - example_title: interleukin 10
    mask_index: 16
    masked_char: U
    output:
      - label: GCCUC
        score: 0.500996
      - label: GCAUC
        score: 0.32872
      - label: GCACC
        score: 0.074776
      - label: GGCUC
        score: 0.066717
      - label: GCAGC
        score: 0.007106
    sequence_type: mRNA
    source_tag: RNA
    text: AUGCACAGCUCAGCAC<mask>GCUCUGUUGCCUGGUCCUCCUGACUGGGGUGAGGGCC
  - example_title: Zaire ebolavirus
    mask_index: 13
    masked_char: U
    output:
      - label: ACAGU
        score: 0.478941
      - label: ACUGU
        score: 0.471011
      - label: AUUGU
        score: 0.030197
      - label: ACAAU
        score: 0.003791
      - label: ACACU
        score: 0.003351
    sequence_type: mRNA
    source_tag: RNA
    text: >-
      AAUGUUCAAACAC<mask>UUGUGAAGCUCUGUUAGCUGAUGGUCUUGCUAAAGCAUUUCCUAGCAAUAUGAUGGUAGUCACAGAGCGUGAGCAAAAAGAAAGCUUAUUGCAUCAAGCAUCAUGGCACCACACAAGUGAUGAUUUUGGUGAGCAUGCCACAGUUAGAGGGAGUAGCUUUGUAACUGAUUUAGAGAAAUACAAUCUUGCAUUUAGAUAUGAGUUUACAGCACCUUUUAUAGAAUAUUGUAACCGUUGCUAUGGUGUUAAGAAUGUUUUUAAUUGGAUGCAUUAUACAAUCCCACAGUGUUAU
  - example_title: SARS coronavirus
    mask_index: 10
    masked_char: U
    output:
      - label: AUUUA
        score: 0.999965
      - label: ACUUA
        score: 0.000028
      - label: AUUUU
        score: 0.000003
      - label: AUUUC
        score: 0.000001
      - label: AUUUG
        score: 0.000001
    sequence_type: mRNA
    source_tag: RNA
    text: >-
      AUGUUUAUUU<mask>CUUAUUAUUUCUUACUCUCACUAGUGGUAGUGACCUUGACCGGUGCACCACUUUUGAUGAUGUUCAAGCUCCUAAUUACACUCAACAUACUUCAUCUAUGAGGGGGGUUUACUAUCCUGAUGAAAUUUUUAGAUCAGACACUCUUUAUUUAACUCAGGAUUUAUUUCUUCCAUUUUAUUCUAAUGUUACAGGGUUUCAUACUAUUAAUCAUACGUUUGACAACCCUGUCAUACCUUUUAAGGAUGGUAUUUAUUUUGCUGCCACAGAGAAAUCAAAUGUUGUCCGUGGUUGGGUUUUUGGUUCUACCAUGAACAACAAGUCACAGUCGGUGAUUAUUAUUAACAAUUCUACUAAUGUUGUUAUACGAGCAUGUAACUUUGAAUUGUGUGACAACCCUUUCUUUGCUGUUUCUAAACCCAUGGGUACACAGACACAUACUAUGAUAUUCGAUAAUGCAUUUAAAUGCACUUUCGAGUACAUAUCU
---

# 3UTRBERT

Pre-trained model on the 3’ untranslated region (3’UTR) using a masked language modeling (MLM) objective.

## Disclaimer

This is an UNOFFICIAL implementation of [Deciphering 3’ UTR mediated gene regulation using interpretable deep representation learning](https://doi.org/10.1101/2023.09.08.556883) by Yuning Yang, Gen Li, et al.

The OFFICIAL repository of 3UTRBERT is at [yangyn533/3UTRBERT](https://github.com/yangyn533/3UTRBERT).

The MultiMolecule team has confirmed that the provided model and checkpoints are producing the same intermediate representations as the original implementation.

The team releasing 3UTRBERT did not write this model card; it has been written by the MultiMolecule team.

## Model Details

3UTRBERT is a BERT-style model pre-trained on a large corpus of 3’ untranslated regions (3’UTRs) in a self-supervised fashion. This means the model was trained on the raw nucleotides of RNA sequences only, with an automatic process generating inputs and labels from those sequences. Please refer to the Training Details section for more information on the training process.

### Variants

3UTRBERT is released in four variants, which differ in the k-mer size of their tokenizers:

- [multimolecule/utrbert-3mer](https://huggingface.co/multimolecule/utrbert-3mer)
- [multimolecule/utrbert-4mer](https://huggingface.co/multimolecule/utrbert-4mer)
- [multimolecule/utrbert-5mer](https://huggingface.co/multimolecule/utrbert-5mer)
- [multimolecule/utrbert-6mer](https://huggingface.co/multimolecule/utrbert-6mer)

### Model Specification

| Variants      | Num Layers | Hidden Size | Num Heads | Intermediate Size | Num Parameters (M) | FLOPs (G) | MACs (G) | Max Num Tokens |
| ------------- | ---------- | ----------- | --------- | ----------------- | ------------------ | --------- | -------- | -------------- |
| 3UTRBERT-3mer | 12         | 768         | 12        | 3072              | 86.14              | 22.36     | 11.17    | 512            |
| 3UTRBERT-4mer | 12         | 768         | 12        | 3072              | 86.53              | 22.36     | 11.17    | 512            |
| 3UTRBERT-5mer | 12         | 768         | 12        | 3072              | 88.45              | 22.36     | 11.17    | 512            |
| 3UTRBERT-6mer | 12         | 768         | 12        | 3072              | 98.05              | 22.36     | 11.17    | 512            |

### Links

## Usage

The model file depends on the `multimolecule` library. You can install it using pip:

```bash
pip install multimolecule
```

### Direct Use

#### Masked Language Modeling

**Note:** The default `transformers` pipeline does not support k-mer tokenization.

You can use this model directly with a pipeline for masked language modeling:

```python
import multimolecule  # you must import multimolecule to register models
from transformers import pipeline

predictor = pipeline("fill-mask", model="multimolecule/utrbert-5mer")
# With 5-mer tokenization, one masked nucleotide spans 5 overlapping k-mer
# tokens, so it is written as 5 consecutive <mask> tokens; [1] selects the
# predictions for the second masked token.
output = predictor("gguc<mask><mask><mask><mask><mask>cugguuagaccagaucugagccu")[1]
```

### Downstream Use

#### Extract Features

Here is how to use this model to get the features of a given sequence in PyTorch:

```python
from multimolecule import RnaTokenizer, UtrBertModel


tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-5mer")
model = UtrBertModel.from_pretrained("multimolecule/utrbert-5mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")

output = model(**input)
```
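
As a quick sanity check (assuming the standard `transformers` output conventions, which `UtrBertModel` follows), the hidden states can be inspected directly:

```python
# Shape: (batch_size, num_tokens, hidden_size), where num_tokens counts the
# overlapping 5-mers plus the special tokens added by the tokenizer.
print(output.last_hidden_state.shape)
```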

#### Sequence Classification / Regression

This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for sequence classification or regression.

Here is how to use this model as a backbone to fine-tune for a sequence-level task in PyTorch:

```python
import torch
from multimolecule import RnaTokenizer, UtrBertForSequencePrediction


tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-5mer")
model = UtrBertForSequencePrediction.from_pretrained("multimolecule/utrbert-5mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.tensor([1])  # dummy sequence-level label

output = model(**input, labels=label)
```

#### Token Classification / Regression

This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for token classification or regression.

Here is how to use this model as a backbone to fine-tune for a nucleotide-level task in PyTorch:

```python
import torch
from multimolecule import RnaTokenizer, UtrBertForTokenPrediction


tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-5mer")
model = UtrBertForTokenPrediction.from_pretrained("multimolecule/utrbert-5mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.randint(2, (len(text), ))  # dummy per-nucleotide labels

output = model(**input, labels=label)
```

#### Contact Classification / Regression

This model is not fine-tuned for any specific task. You will need to fine-tune the model on a downstream task to use it for contact classification or regression.

Here is how to use this model as a backbone to fine-tune for a contact-level task in PyTorch:

```python
import torch
from multimolecule import RnaTokenizer, UtrBertForContactPrediction


tokenizer = RnaTokenizer.from_pretrained("multimolecule/utrbert-5mer")
model = UtrBertForContactPrediction.from_pretrained("multimolecule/utrbert-5mer")

text = "UAGCUUAUCAGACUGAUGUUG"
input = tokenizer(text, return_tensors="pt")
label = torch.randint(2, (len(text), len(text)))  # dummy pairwise (contact) labels

output = model(**input, labels=label)
```

## Training Details

3UTRBERT used masked language modeling (MLM) as the pre-training objective: taking a sequence, the model randomly masks 15% of the tokens in the input, then runs the entire masked sequence through the model and has to predict the masked tokens. This is comparable to the Cloze task in language modeling.

### Training Data

The 3UTRBERT model was pre-trained on human mRNA transcript sequences from [GENCODE](https://www.gencodegenes.org). GENCODE aims to identify all gene features in the human genome using a combination of computational analysis, manual annotation, and experimental validation. The GENCODE release 40 used by this work contains 61,544 genes and 246,624 transcripts.

3UTRBERT collected human mRNA transcript sequences from GENCODE, including 108,573 unique mRNA transcripts. Only the longest transcript of each gene was used in pre-training. 3UTRBERT used only the 3’ untranslated regions (3’UTRs) of the mRNA transcripts for pre-training, both to avoid the codon constraints of the CDS region and to reduce the complexity of modeling entire mRNA transcripts. The average length of the 3’UTRs was 1,227 nucleotides, while the median length was 631 nucleotides. Each 3’UTR sequence was cut into non-overlapping patches of 510 nucleotides, and the remaining shorter patches were padded to the same length.
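
A minimal sketch of this chunking scheme (the 510-nucleotide patch size is from the paper; the function name and pad character are hypothetical illustrations, not the original preprocessing code):

```python
def chunk_3utr(sequence: str, patch_size: int = 510, pad_char: str = "N") -> list[str]:
    """Cut a 3'UTR into non-overlapping patches; pad the last one to full length."""
    patches = [sequence[i : i + patch_size] for i in range(0, len(sequence), patch_size)]
    if patches and len(patches[-1]) < patch_size:
        patches[-1] = patches[-1].ljust(patch_size, pad_char)
    return patches
```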

Note: `RnaTokenizer` will convert "T"s to "U"s for you; you may disable this behaviour by passing `replace_T_with_U=False`.

### Training Procedure

#### Preprocessing

3UTRBERT used masked language modeling (MLM) as the pre-training objective. The masking procedure is similar to the one used in BERT:

- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `<mask>`.
- In 10% of the cases, the masked tokens are replaced by a random token different from the one they replace.
- In the 10% remaining cases, the masked tokens are left as is.

Since 3UTRBERT uses a k-mer tokenizer, it masks every k-mer that overlaps a masked nucleotide, rather than a single token, to avoid information leakage; see the sketch below.

For example, with k = 3 the sequence "UAGCGUAU" is tokenized as ["UAG", "AGC", "GCG", "CGU", "GUA", "UAU"]. If the nucleotide "C" is masked, the adjacent overlapping tokens are masked as well, resulting in ["UAG", "<mask>", "<mask>", "<mask>", "GUA", "UAU"].
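
A minimal sketch of this expansion (the function names are hypothetical; the actual implementation lives in the 3UTRBERT repository):

```python
def kmer_tokenize(seq: str, k: int = 3) -> list[str]:
    """Overlapping k-mer tokenization: 'UAGCGUAU' -> ['UAG', 'AGC', 'GCG', ...]."""
    return [seq[i : i + k] for i in range(len(seq) - k + 1)]


def mask_nucleotide(tokens: list[str], pos: int, k: int = 3) -> list[str]:
    """Replace every k-mer that overlaps the nucleotide at `pos` with <mask>."""
    # Token i covers nucleotides i .. i + k - 1.
    return ["<mask>" if i <= pos < i + k else tok for i, tok in enumerate(tokens)]


tokens = kmer_tokenize("UAGCGUAU")        # ['UAG', 'AGC', 'GCG', 'CGU', 'GUA', 'UAU']
masked = mask_nucleotide(tokens, pos=3)   # 'C' sits at 0-based position 3
# -> ['UAG', '<mask>', '<mask>', '<mask>', 'GUA', 'UAU']
```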

#### Pre-training

The model was trained on 4 NVIDIA Quadro RTX 6000 GPUs with 24 GiB of memory each, using the following hyperparameters:

- Batch size: 128
- Steps: 200,000
- Optimizer: AdamW (β₁ = 0.9, β₂ = 0.98, ε = 1e-6)
- Learning rate: 3e-4
- Learning rate scheduler: Linear
- Learning rate warm-up: 10,000 steps
- Weight decay: 0.01
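
These hyperparameters translate to PyTorch roughly as follows (a sketch only; `model` stands for the 3UTRBERT backbone, and `get_linear_schedule_with_warmup` is the standard `transformers` helper):

```python
import torch
from transformers import get_linear_schedule_with_warmup

optimizer = torch.optim.AdamW(
    model.parameters(), lr=3e-4, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.01
)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=10_000, num_training_steps=200_000
)
```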

## Citation

BibTeX:

```bibtex
@article{yang2023deciphering,
    author = {Yang, Yuning and Li, Gen and Pang, Kuan and Cao, Wuxinhao and Li, Xiangtao and Zhang, Zhaolei},
    title = {Deciphering 3{\textquoteright} UTR mediated gene regulation using interpretable deep representation learning},
    elocation-id = {2023.09.08.556883},
    year = {2023},
    doi = {10.1101/2023.09.08.556883},
    publisher = {Cold Spring Harbor Laboratory},
    abstract = {The 3{\textquoteright}untranslated regions (3{\textquoteright}UTRs) of messenger RNAs contain many important cis-regulatory elements that are under functional and evolutionary constraints. We hypothesize that these constraints are similar to grammars and syntaxes in human languages and can be modeled by advanced natural language models such as Transformers, which has been very effective in modeling protein sequence and structures. Here we describe 3UTRBERT, which implements an attention-based language model, i.e., Bidirectional Encoder Representations from Transformers (BERT). 3UTRBERT was pre-trained on aggregated 3{\textquoteright}UTR sequences of human mRNAs in a task-agnostic manner; the pre-trained model was then fine-tuned for specific downstream tasks such as predicting RBP binding sites, m6A RNA modification sites, and predicting RNA sub-cellular localizations. Benchmark results showed that 3UTRBERT generally outperformed other contemporary methods in each of these tasks. We also showed that the self-attention mechanism within 3UTRBERT allows direct visualization of the semantic relationship between sequence elements.Competing Interest StatementThe authors have declared no competing interest.},
    URL = {https://www.biorxiv.org/content/early/2023/09/12/2023.09.08.556883},
    eprint = {https://www.biorxiv.org/content/early/2023/09/12/2023.09.08.556883.full.pdf},
    journal = {bioRxiv}
}
```

## Contact

Please use the GitHub issues of MultiMolecule for any questions or comments on the model card.

Please contact the authors of the 3UTRBERT paper for questions or comments on the paper/model.

## License

This model is licensed under the AGPL-3.0 License.

```spdx
SPDX-License-Identifier: AGPL-3.0-or-later
```