Dataset schema (each record below lists these eight fields in order, one per line; long code and docstring fields are truncated with "..."):

column      type      range / classes
code        string    66 - 870k chars
docstring   string    19 - 26.7k chars
func_name   string    1 - 138 chars
language    class     1 value
repo        string    7 - 68 chars
path        string    5 - 324 chars
url         string    46 - 389 chars
license     class     7 values
def forward(self, inputs, token_types, valid_length, masked_positions): """Getting the scores of the masked positions. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' ...
Getting the scores of the masked positions. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) token_types The type of the token. For example, if ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/mobilebert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
Apache-2.0
def __init__(self, backbone_cfg, weight_initializer=None, bias_initializer=None): """ Parameters ---------- backbone_cfg The cfg of the backbone model weight_initializer bias_initializer """ super().__init__()...
Parameters ---------- backbone_cfg The cfg of the backbone model weight_initializer bias_initializer
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/mobilebert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
Apache-2.0
def forward(self, inputs, token_types, valid_length, masked_positions): """Generate the representation given the inputs. This is used in training or fine-tuning a mobile bert model. Parameters ---------- inputs - layout = 'NT' ...
Generate the representation given the inputs. This is used in training or fine-tuning a mobile bert model. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/mobilebert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
Apache-2.0
def get_pretrained_mobilebert(model_name: str = 'google_uncased_mobilebert', root: str = get_model_zoo_home_dir(), load_backbone: bool = True, load_mlm: bool = False)\ -> Tuple[CN, HuggingFaceWordPieceTokenizer, str, str]: ...
Get the pretrained mobile bert weights Parameters ---------- model_name The name of the mobile bert model. root The downloading root load_backbone Whether to load the weights of the backbone network load_mlm Whether to load the weights of MLM Returns ---...
get_pretrained_mobilebert
python
dmlc/gluon-nlp
src/gluonnlp/models/mobilebert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
Apache-2.0
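The loader above returns a 4-tuple: the model config, the WordPiece tokenizer, and the local paths of the backbone and MLM parameter files. A minimal usage sketch, based only on the signature and return annotation shown in the record (the comment on the MLM path is an assumption):

```python
# Sketch of calling the loader above; keyword values are the documented defaults.
from gluonnlp.models.mobilebert import get_pretrained_mobilebert

cfg, tokenizer, backbone_params_path, mlm_params_path = get_pretrained_mobilebert(
    model_name='google_uncased_mobilebert',
    load_backbone=True,   # download the backbone weights
    load_mlm=False,       # skip the MLM head weights (path may then be unset)
)
print(cfg)                   # the model configuration (a CfgNode)
print(backbone_params_path)  # local path of the downloaded weights
```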
def __init__(self, vocab_size=50265, units=768, hidden_size=3072, num_layers=12, num_heads=12, max_length=512, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, pos...
Parameters ---------- vocab_size units hidden_size num_layers num_heads max_length hidden_dropout_prob attention_dropout_prob pos_embed_type activation pooler_activation layer_norm_eps embed_initial...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/roberta.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
Apache-2.0
def get_initial_embedding(self, inputs): """Get the initial token embeddings that considers the token type and positional embeddings Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape ...
Get the initial token embeddings that considers the token type and positional embeddings Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) Returns --...
get_initial_embedding
python
dmlc/gluon-nlp
src/gluonnlp/models/roberta.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
Apache-2.0
def apply_pooling(self, sequence): """Generate the representation given the inputs. This is used for pre-training or fine-tuning a RoBERTa model. Get the first token of the whole sequence which is [CLS] Parameters ---------- sequence - layout = 'NT' ...
Generate the representation given the inputs. This is used for pre-training or fine-tuning a RoBERTa model. Get the first token of the whole sequence which is [CLS] Parameters ---------- sequence - layout = 'NT' Shape (batch_size, sequence_length...
apply_pooling
python
dmlc/gluon-nlp
src/gluonnlp/models/roberta.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
Apache-2.0
def forward(self, inputs, valid_length, masked_positions): """Getting the scores of the masked positions. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) ...
Getting the scores of the masked positions. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) valid_length The valid length of each sequence ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/roberta.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
Apache-2.0
def get_pretrained_roberta(model_name: str = 'fairseq_roberta_base', root: str = get_model_zoo_home_dir(), load_backbone: bool = True, load_mlm: bool = False) \ -> Tuple[CN, HuggingFaceByteBPETokenizer, str, str]: """Get the pr...
Get the pretrained RoBERTa weights Parameters ---------- model_name The name of the RoBERTa model. root The downloading root load_backbone Whether to load the weights of the backbone network load_mlm Whether to load the weights of MLM Returns ------- ...
get_pretrained_roberta
python
dmlc/gluon-nlp
src/gluonnlp/models/roberta.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
Apache-2.0
def __init__( self, d_model, d_kv, d_ff, is_decoder, num_heads=12, dropout_prob=0.1, layer_norm_eps=1E-6, activation='relu', init_factor=1.0, layout='NT', dtype='float32' ): """ Parameters ...
Parameters ---------- d_model Equivalent to transformer's `units`. d_kv d_kv * num_heads (see below) = inner_dim. d_ff Equivalent to transformer's `hidden_size`. is_decoder If is_decoder, apply cross-attention. ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def incremental_decode( self, step_hidden_states, step_position_embeddings, past_key_value, mem_states, step_mem_attn_mask ): """Incrementally generate the output given the decoder input. Parameters ---------- step_hidden_states...
Incrementally generate the output given the decoder input. Parameters ---------- step_hidden_states Stepwise hidden states where L_seq = 1 as in `forward` case. - layout = 'NT' Shape (B, 1, d_model) - layout = 'TN' Shape (1,...
incremental_decode
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def forward( self, hidden_states, self_attn_mask, position_embeddings, mem_states=None, mem_attn_mask=None ): """ Parameters ---------- hidden_states - layout = 'NT' Shape (B, L_seq, d_model) ...
Parameters ---------- hidden_states - layout = 'NT' Shape (B, L_seq, d_model) - layout = 'TN' Shape (L_seq, B, d_model) self_attn_mask if is_decoder, it should be a "causal" attention mask. Shape (B, L_se...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def __init__( self, d_model, d_kv, d_ff, num_layers=12, num_heads=12, dropout_prob=0.1, layer_norm_eps=1E-6, activation='relu', init_factor=1.0, layout='NT', dtype='float32' ): """ Parameters...
Parameters ---------- d_model Equivalent to transformer's `units`. d_kv d_kv * num_heads (see below) = inner_dim. d_ff Equivalent to transformer's `hidden_size`. num_layers num_heads dropout_prob We use ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def forward(self, hidden_states, valid_length): """ Parameters ---------- hidden_states - layout = 'NT' Shape (B, L_seq, d_model) - layout = 'TN' Shape (L_seq, B, d_model) valid_length Valid sequence length fo...
Parameters ---------- hidden_states - layout = 'NT' Shape (B, L_seq, d_model) - layout = 'TN' Shape (L_seq, B, d_model) valid_length Valid sequence length for each sample fed into the encoder. Shape (B...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def __init__( self, d_model, d_kv, d_ff, num_layers=12, num_heads=12, dropout_prob=0.1, layer_norm_eps=1E-6, activation='relu', init_factor=1.0, layout='NT', dtype='float32' ): """ Parameters...
Parameters ---------- d_model Equivalent to transformer's `units`. d_kv d_kv * num_heads (see below) = inner_dim. d_ff Equivalent to transformer's `hidden_size`. num_layers num_heads dropout_prob We use ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def incremental_decode( self, step_hidden_states, position, past_key_values, mem_states, mem_valid_length ): """Incrementally generate the output given the decoder input. Parameters ---------- step_hidden_states Step...
Incrementally generate the output given the decoder input. Parameters ---------- step_hidden_states Stepwise hidden states where L_seq = 1 as in `forward` case. - layout = 'NT' Shape (B, 1, d_model) - layout = 'TN' Shape (1,...
incremental_decode
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def forward(self, hidden_states, valid_length, mem_states, mem_valid_length): """ Parameters ---------- hidden_states - layout = 'NT' Shape (B, L_seq, d_model) - layout = 'TN' Shape (L_seq, B, d_model) valid_length ...
Parameters ---------- hidden_states - layout = 'NT' Shape (B, L_seq, d_model) - layout = 'TN' Shape (L_seq, B, d_model) valid_length Valid sequence length for each sample fed into the decoder. Shape (B...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def __init__( self, vocab_size=32128, d_model=768, d_kv=64, d_ff=3072, num_layers=12, num_heads=12, dropout_prob=0.1, layer_norm_eps=1E-6, activation='relu', init_factor=1.0, layout='NT', dtype='float32' ...
Parameters ---------- vocab_size vocab_size should be no smaller than len(tokenizer._sp_model). d_model Equivalent to transformer's `units`. d_kv d_kv * num_heads (see below) = inner_dim. d_ff Equivalent to transformer'...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def encode(self, src_data, src_valid_length): """Encode the source data to memory states. Parameters ---------- src_data Token ids fed into the encoder. - layout = 'NT' Shape (B, L_src_seq) - layout = 'TN' Shape ...
Encode the source data to memory states. Parameters ---------- src_data Token ids fed into the encoder. - layout = 'NT' Shape (B, L_src_seq) - layout = 'TN' Shape (L_src_seq, B) src_valid_length Valid...
encode
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def decode(self, tgt_data, tgt_valid_length, mem_states, mem_valid_length): """Decode based on target data and memory states. Parameters ---------- tgt_data Token ids fed into the decoder. - layout = 'NT' Shape (B, L_seq) - layo...
Decode based on target data and memory states. Parameters ---------- tgt_data Token ids fed into the decoder. - layout = 'NT' Shape (B, L_seq) - layout = 'TN' Shape (L_seq, B) tgt_valid_length Valid s...
decode
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length): """ Parameters ---------- src_data Token ids fed into the encoder. - layout = 'NT' Shape (B, L_src_seq) - layout = 'TN' Shape (L_src_seq, ...
Parameters ---------- src_data Token ids fed into the encoder. - layout = 'NT' Shape (B, L_src_seq) - layout = 'TN' Shape (L_src_seq, B) src_valid_length Valid sequence length for each sample fed i...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def state_batch_axis(self): """The returned 4-tuple corresponds to the batch axes of `init_states()` results. Returns ------- enc_out_batch_axis src_valid_length_batch_axis position_batch_axis dec_layer_batch_axes """ if self.model.layout == 'NT...
The returned 4-tuple corresponds to the batch axes of `init_states()` results. Returns ------- enc_out_batch_axis src_valid_length_batch_axis position_batch_axis dec_layer_batch_axes
state_batch_axis
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def init_states(self, src_data, src_valid_length): """Initialize the states required for incremental decoding. Parameters ---------- src_data Token ids fed into the encoder. - layout = 'NT' Shape (B, L_src_seq) - layout = 'TN' ...
Initialize the states required for incremental decoding. Parameters ---------- src_data Token ids fed into the encoder. - layout = 'NT' Shape (B, L_src_seq) - layout = 'TN' Shape (L_src_seq, B) src_valid_length ...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def forward(self, step_data, past_states): """ Parameters ---------- step_data Stepwise batched token ids for incremental decoding. Shape (B,) past_states A 4-tuple containing states of last incremental decoding step. ...
Parameters ---------- step_data Stepwise batched token ids for incremental decoding. Shape (B,) past_states A 4-tuple containing states of last incremental decoding step. 1. mem_states - layout = 'NT' ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/t5.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
Apache-2.0
def transformer_base(): """Configuration of Transformer WMT EN-DE Base""" cfg = CN() cfg.MODEL = CN() cfg.MODEL.src_vocab_size = -1 cfg.MODEL.tgt_vocab_size = -1 cfg.MODEL.max_src_length = -1 cfg.MODEL.max_tgt_length = -1 cfg.MODEL.scale_embed = True cfg.MODEL.pos_embed_type = "sinus...
Configuration of Transformer WMT EN-DE Base
transformer_base
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
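The snippet above builds the hyperparameter set with yacs-style CfgNode objects (aliased as CN throughout these files). A minimal sketch of the same pattern, assuming yacs is the config backend; the toy field values mirror the record:

```python
from yacs.config import CfgNode as CN

def transformer_toy_cfg():
    # Mirror the structure used in transformer_base(): nested CN nodes,
    # with -1 standing for "fill in from the data/vocab later".
    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.src_vocab_size = -1
    cfg.MODEL.tgt_vocab_size = -1
    cfg.MODEL.scale_embed = True
    cfg.MODEL.pos_embed_type = 'sinusoidal'
    cfg.freeze()   # lock the config against accidental mutation
    return cfg

cfg = transformer_toy_cfg()
cfg.defrost()                     # unfreeze before editing, as set_mem_length does below
cfg.MODEL.src_vocab_size = 32000
cfg.freeze()
```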
def __init__(self, units: int = 512, hidden_size: int = 2048, num_heads: int = 8, attention_dropout_prob: float = 0.1, hidden_dropout_prob: float = 0.1, activation_dropout_prob: float = 0.0, layer_norm...
Parameters ---------- units hidden_size num_heads attention_dropout_prob hidden_dropout_prob activation_dropout_prob layer_norm_eps pre_norm Whether to attach the normalization layer before attention layer If pre_n...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, data, attn_mask): """ Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) attn_mask Shape (batch_size, seq_leng...
Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) attn_mask Shape (batch_size, seq_length, seq_length) Returns ------...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def __init__(self, num_layers=6, recurrent=False, units=512, hidden_size=2048, num_heads=8, activation_dropout=0.0, dropout=0.1, use_qkv_bias=True, attention_dropout=0.1, layer_norm_eps=1E-5, data_norm=False, pre_norm=False, weight_initializer=None, bi...
Parameters ---------- num_layers : The number of layers recurrent : bool Whether the layers share weights or not units hidden_size num_heads dropout layer_norm_eps data_norm Whether to apply LayerNorm t...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, data, valid_length): """ Parameters ---------- data : - layout = 'NT' Shape (batch_size, seq_length, C) - layout = 'TN' Shape (seq_length, batch_size, C) valid_length : Shape (batch_size,) ...
Parameters ---------- data : - layout = 'NT' Shape (batch_size, seq_length, C) - layout = 'TN' Shape (seq_length, batch_size, C) valid_length : Shape (batch_size,) Returns ------- out ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def __init__(self, units: int = 512, mem_units: Optional[int] = None, hidden_size: int = 2048, num_heads: int = 8, activation_dropout: float = 0.0, dropout: float = 0.1, attention_dropout: float = 0.1, ...
Parameters ---------- units mem_units The number of units in the memory. By default, it is initialized to be the same as the units. hidden_size num_heads activation_dropout dropout attention_dropout layer_norm_eps ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, data, mem, self_causal_mask, mem_attn_mask): """ Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) mem - layo...
Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) mem - layout = 'NT' Shape (batch_size, mem_length, C_mem) ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def init_states(self, batch_size, ctx, dtype='float32'): """Initialize the states required for incremental decoding Returns ------- init_key - layout = 'NT' Shape (batch_size, 0, N, C_key) - layout = 'TN' Shape (0, batch_size, N, C...
Initialize the states required for incremental decoding Returns ------- init_key - layout = 'NT' Shape (batch_size, 0, N, C_key) - layout = 'TN' Shape (0, batch_size, N, C_key) init_value - layout = 'NT' ...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def incremental_decode(self, data, states, mem, mem_valid_length, mem_attn_mask=None): """Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contains 1. l...
Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contains 1. layout = 'NT': - prev_multi_key Shape (batch_size, prev_seq_len...
incremental_decode
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, data, valid_length, mem_data, mem_valid_length): """ Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) valid_length ...
Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) valid_length Shape (batch_size,) mem_data - layout = 'NT' ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def init_states(self, batch_size, ctx, dtype='float32'): """Initialize the states required for incremental decoding Returns ------- states A list of states, each includes: - init_key - layout = 'NT' Shape (batch_si...
Initialize the states required for incremental decoding Returns ------- states A list of states, each includes: - init_key - layout = 'NT' Shape (batch_size, 0, N, C_key) - layout = 'TN' ...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def incremental_decode(self, data, states, mem, mem_valid_length): """Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contain a list of 1. layout = 'NT...
Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contain a list of 1. layout = 'NT' - prev_multi_key Shape (batch_size, prev...
incremental_decode
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def __init__(self, src_vocab_size: int, tgt_vocab_size: int, max_src_length: Optional[int] = None, max_tgt_length: Optional[int] = None, scale_embed: bool = True, pos_embed_type="sinusoidal", shared_embed: bool = True,...
Parameters ---------- src_vocab_size The vocabulary size of the source language tgt_vocab_size The vocabulary size of the target language max_src_length The maximal length of the source sequence. If it's negative, we will use trea...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def encode(self, src_data, src_valid_length): """Encode the source data to memory Parameters ---------- src_data - layout = 'NT' Shape (batch_size, src_length) - layout = 'TN' Shape (src_length, batch_size) src_valid_lengt...
Encode the source data to memory Parameters ---------- src_data - layout = 'NT' Shape (batch_size, src_length) - layout = 'TN' Shape (src_length, batch_size) src_valid_length Shape (batch_size,) Returns ...
encode
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def decode_seq(self, tgt_data, tgt_valid_length, mem_data, mem_valid_length): """Decode a sequence of inputs Parameters ---------- tgt_data - layout = 'NT' Shape (batch_size, tgt_length) - layout = 'TN' Shape (tgt_length, batch_siz...
Decode a sequence of inputs Parameters ---------- tgt_data - layout = 'NT' Shape (batch_size, tgt_length) - layout = 'TN' Shape (tgt_length, batch_size) tgt_valid_length Shape (batch_size,) mem_data ...
decode_seq
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length): """ Parameters ---------- src_data - layout = 'NT' Shape (batch_size, src_length) - layout = 'TN' Shape (src_length, batch_size) src_valid_length ...
Parameters ---------- src_data - layout = 'NT' Shape (batch_size, src_length) - layout = 'TN' Shape (src_length, batch_size) src_valid_length Shape (batch_size,) tgt_data - layout = 'NT' ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def state_batch_axis(self) -> Tuple[int, int, int, List]: """Return a data structure that stores the batch axis of the internal states of the inference model. Returns ------- enc_out_batch_axis src_valid_length_batch_axis position_batch_axis dec_layer_ba...
Return a data structure that stores the batch axis of the internal states of the inference model. Returns ------- enc_out_batch_axis src_valid_length_batch_axis position_batch_axis dec_layer_batch_axis
state_batch_axis
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def init_states(self, src_data, src_valid_length): # TODO(sxjscience) Revisit here, support auxiliary states? """Initialize the states required for incremental decoding Parameters ---------- src_data - layout = 'NT' Shape (batch_size, src_length) ...
Initialize the states required for incremental decoding Parameters ---------- src_data - layout = 'NT' Shape (batch_size, src_length) - layout = 'TN' Shape (src_length, batch_size) src_valid_length Shape (batch_size,) ...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, step_data, states): """ Parameters ---------- step_data Shape (batch_size,) states It includes : - layout = 'NT' - mem_data : (batch_size, src_length, C_mem) - mem_valid_length : (b...
Parameters ---------- step_data Shape (batch_size,) states It includes : - layout = 'NT' - mem_data : (batch_size, src_length, C_mem) - mem_valid_length : (batch_size,) - position : (bat...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
Apache-2.0
def forward(self, data, mem, rel_positions, mask, query_r_bias, query_k_bias): """ Parameters ---------- data The input data. - layout = 'NT' Shape (batch_size, query_length, units) - layout = 'TN' Shape (query_length,...
Parameters ---------- data The input data. - layout = 'NT' Shape (batch_size, query_length, units) - layout = 'TN' Shape (query_length, batch_size, units) mem The memory. - layout = 'NT' ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer_xl.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
Apache-2.0
def forward(self, data, mem_l, rel_positions, mask): """ Parameters ---------- data - layout = 'NT' Shape (batch_size, query_length) - layout = 'TN' Shape (query_length, batch_size) mem_l Contains a list of mem...
Parameters ---------- data - layout = 'NT' Shape (batch_size, query_length) - layout = 'TN' Shape (query_length, batch_size) mem_l Contains a list of memory objects, each one will contain: - layout = 'NT'...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer_xl.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
Apache-2.0
def init_states(self, batch_size, ctx): """Initialize the states Parameters ---------- batch_size ctx The context (device) of the initialized states Returns ------- mems A list of memory states - layout = 'NT' Shape (B, T, C)...
Initialize the states Parameters ---------- batch_size ctx The context (device) of the initialized states Returns ------- mems A list of memory states - layout = 'NT' Shape (B, T, C) - layout = 'TN' Shape ...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer_xl.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
Apache-2.0
def set_mem_length(self, mem_length: int): """ Parameters ---------- mem_length The memory length of the model """ self._cfg.defrost() self._cfg.MODEL.mem_length = mem_length self._cfg.freeze()
Parameters ---------- mem_length The memory length of the model
set_mem_length
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer_xl.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
Apache-2.0
def forward(self, data, target, mem_l, rel_positions=None, data_mem_mask=None, causal_only=False, detach_memory=True): """ Parameters ---------- data The input data - layout = 'NT' Shape (B, T) - layout = 'TN' ...
Parameters ---------- data The input data - layout = 'NT' Shape (B, T) - layout = 'TN' Shape (T, B) target The ground truth - layout = 'NT' Shape (B, T) - layout =...
forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer_xl.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
Apache-2.0
def step_forward(self, step_data, mem_l): """Forward for just one step Parameters ---------- step_data Shape (B,) mem_l A list of memory objects - layout = 'NT' Shape (B, T_mem, units) - layout = 'TN' ...
Forward for just one step Parameters ---------- step_data Shape (B,) mem_l A list of memory objects - layout = 'NT' Shape (B, T_mem, units) - layout = 'TN' Shape (T_mem, B, units) Returns -...
step_forward
python
dmlc/gluon-nlp
src/gluonnlp/models/transformer_xl.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
Apache-2.0
def get_pretrained_xlmr(model_name: str = 'fairseq_xlmr_base', root: str = get_model_zoo_home_dir(), load_backbone: bool = True, load_mlm: bool = False) \ -> Tuple[CN, SentencepieceTokenizer, str, str]: """Get the pretrained XLM-R weigh...
Get the pretrained XLM-R weights Parameters ---------- model_name The name of the xlmr model. root The downloading root load_backbone Whether to load the weights of the backbone network load_mlm Whether to load the weights of MLM Returns ------- cfg ...
get_pretrained_xlmr
python
dmlc/gluon-nlp
src/gluonnlp/models/xlmr.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/xlmr.py
Apache-2.0
def gen_self_attn_mask(data, valid_length=None, attn_type: str = 'full', layout: str = 'NT'): """Generate the mask used for the encoder, i.e, self-attention. In our implementation, 1 --> not masked, 0 --> masked Let's consider the data wi...
Generate the mask used for the encoder, i.e, self-attention. In our implementation, 1 --> not masked, 0 --> masked Let's consider the data with two samples: data = [['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ], ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', ...
gen_self_attn_mask
python
dmlc/gluon-nlp
src/gluonnlp/torch/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
Apache-2.0
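Based on the convention quoted above (1 means not masked, 0 means masked), here is a small PyTorch re-implementation sketch of the two mask types; shapes follow the docstring, the int32 output dtype is an assumption:

```python
import torch as th

def self_attn_mask_sketch(valid_length, seq_length, attn_type='full'):
    # Returns (batch_size, seq_length, seq_length); 1 -> attend, 0 -> masked.
    steps = th.arange(seq_length)
    valid_length = valid_length.reshape(-1)
    # Which key positions hold real (non-padding) tokens: (B, 1, L)
    key_mask = steps[None, None, :] < valid_length[:, None, None]
    if attn_type == 'causal':
        # Query i may only look at keys j <= i.
        causal = steps[None, :, None] >= steps[None, None, :]
        return (key_mask & causal).to(th.int32)
    # 'full': every valid query attends to every valid key.
    query_mask = steps[None, :, None] < valid_length[:, None, None]
    return (key_mask & query_mask).to(th.int32)

mask = self_attn_mask_sketch(th.tensor([8, 6]), seq_length=8)  # 2nd row has 2 pads
```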
def gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length=None, layout: str = 'NT'): """Generate the mask used for the decoder. All query slots are attended to the memory slots. In our implementation, 1 --> not masked, 0 --> masked Let's consider the data + mem with a batch...
Generate the mask used for the decoder. All query slots are attended to the memory slots. In our implementation, 1 --> not masked, 0 --> masked Let's consider the data + mem with a batch of two samples: mem = [['I', 'can', 'now', 'use'], ['May', 'the', 'force', '<PAD>']] mem_valid_lengt...
gen_mem_attn_mask
python
dmlc/gluon-nlp
src/gluonnlp/torch/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
Apache-2.0
def masked_softmax(att_score, mask, axis: int = -1): """Ignore the masked elements when calculating the softmax. The mask can be broadcastable. Parameters ---------- att_score : Symbol or NDArray Shape (..., length, ...) mask : Symbol or NDArray or None Shape (..., length, ......
Ignore the masked elements when calculating the softmax. The mask can be broadcastable. Parameters ---------- att_score : Symbol or NDArray Shape (..., length, ...) mask : Symbol or NDArray or None Shape (..., length, ...) 1 --> The element is not masked 0 --> The ...
masked_softmax
python
dmlc/gluon-nlp
src/gluonnlp/torch/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
Apache-2.0
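A minimal sketch of the masked-softmax rule described above (mask broadcastable, 1 keeps an element, 0 drops it); the handling of fully-masked rows is an assumption:

```python
import torch as th

def masked_softmax_sketch(att_score, mask, axis=-1):
    if mask is None:
        return th.softmax(att_score, dim=axis)
    # Push masked logits to the most negative representable value ...
    neg = th.finfo(att_score.dtype).min
    scores = att_score.masked_fill(mask == 0, neg)
    attn = th.softmax(scores, dim=axis)
    # ... and zero the result so fully-masked rows output exact zeros.
    return attn * mask

scores = th.randn(2, 4)
mask = th.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
print(masked_softmax_sketch(scores, mask))
```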
def masked_logsoftmax(att_score, mask, axis: int = -1): """Ignore the masked elements when calculating the softmax. The mask can be broadcastable. Parameters ---------- att_score : Symbol or NDArray Shape (..., length, ...) mask : Symbol or NDArray or None Shape (..., length, ...) ...
Ignore the masked elements when calculating the softmax. The mask can be broadcastable. Parameters ---------- att_score : Symbol or NDArray Shape (..., length, ...) mask : Symbol or NDArray or None Shape (..., length, ...) mask = 1 --> not masked mask = 0 --> masked ...
masked_logsoftmax
python
dmlc/gluon-nlp
src/gluonnlp/torch/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
Apache-2.0
def multi_head_dot_attn(query, key, value, mask=None, edge_scores=None, dropout: float = 0.0, scaled: bool = True, normalized: bool = False, eps: float = 1E-6, layout: str = 'N...
Multihead dot product attention between the query, key, value. scaled is False, normalized is False: D(h_q, h_k) = <h_q, h_k> scaled is True, normalized is False: D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q) scaled is False, normalized is True: D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>...
multi_head_dot_attn
python
dmlc/gluon-nlp
src/gluonnlp/torch/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
Apache-2.0
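The docstring enumerates the score variants driven by `scaled` and `normalized`. A sketch of just the scoring step for layout 'NT' with query (B, num_heads, L_q, C) and key (B, num_heads, L_k, C); the epsilon handling is an assumption:

```python
import math
import torch as th

def dot_attn_scores_sketch(query, key, scaled=True, normalized=False, eps=1e-6):
    if normalized:
        # D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
        query = query / (query.norm(dim=-1, keepdim=True) + eps)
        key = key / (key.norm(dim=-1, keepdim=True) + eps)
    scores = query @ key.transpose(-1, -2)      # (B, N, L_q, L_k)
    if scaled:
        # D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
        scores = scores / math.sqrt(query.shape[-1])
    return scores
```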
def relative_position_bucket(relative_position, bidirectional: bool = True, num_buckets: int = 32, max_distance: int = 128): """Map the relative position to buckets. The major difference between our implementation and that in [mesh_tensorflow](https://github.com/tensorflow/mesh...
Map the relative position to buckets. The major difference between our implementation and that in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637) is that we use 'query_i - mem_j' as the (i, j)-th...
relative_position_bucket
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
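The docstring above says the implementation follows mesh-tensorflow's bucketing except for the sign convention ('query_i - mem_j'). A sketch of the standard log-spaced bucketing under that caveat; rounding details follow the mesh-tensorflow reference and may not match gluon-nlp exactly:

```python
import math
import torch as th

def relative_position_bucket_sketch(relative_position, bidirectional=True,
                                    num_buckets=32, max_distance=128):
    ret = th.zeros_like(relative_position)
    n = -relative_position
    if bidirectional:
        num_buckets //= 2
        ret = ret + (n < 0).long() * num_buckets   # each sign gets its own half
        n = n.abs()
    else:
        n = th.clamp(n, min=0)
    max_exact = num_buckets // 2
    is_small = n < max_exact                        # small offsets: one bucket each
    # Large offsets share logarithmically spaced buckets up to max_distance.
    val_if_large = max_exact + (
        th.log(n.float() / max_exact + 1e-6) / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).long()
    val_if_large = th.clamp(val_if_large, max=num_buckets - 1)
    return ret + th.where(is_small, n, val_if_large)
```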
def get_activation(act, inplace=False): """ Parameters ---------- act Name of the activation inplace Whether to perform inplace activation Returns ------- activation_layer The activation """ if act is None: return lambda x: x if isinstance(ac...
Parameters ---------- act Name of the activation inplace Whether to perform inplace activation Returns ------- activation_layer The activation
get_activation
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
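A condensed sketch of the name-to-module lookup the snippet above performs; the set of names gluon-nlp actually supports is longer than this table:

```python
import torch as th

def get_activation_sketch(act, inplace=False):
    if act is None:
        return lambda x: x                      # identity, as in the snippet
    if isinstance(act, str):
        table = {
            'relu': th.nn.ReLU(inplace=inplace),
            'gelu': th.nn.GELU(),
            'tanh': th.nn.Tanh(),
            'sigmoid': th.nn.Sigmoid(),
        }
        if act not in table:
            raise NotImplementedError(f'act={act!r} not covered by this sketch')
        return table[act]
    return act                                  # assume a ready-made layer
```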
def get_norm_layer(normalization: str = 'layer_norm', axis: int = -1, epsilon: float = 1e-5, in_channels: int = 0, **kwargs): """Get the normalization layer based on the provided type Parameters ---------- normalization The type of the layer normalization from ['layer_norm'] ...
Get the normalization layer based on the provided type Parameters ---------- normalization The type of the layer normalization from ['layer_norm'] axis The axis to normalize over epsilon The epsilon of the normalization layer in_channels Input channel Returns...
get_norm_layer
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
def __init__(self, units: int = 512, hidden_size: int = 2048, activation_dropout: float = 0.0, dropout: float = 0.1, gated_proj: bool = False, activation='relu', normalization: str = 'layer_norm', layer_norm_eps: float = 1E-5, pre_norm: bool = False): """ ...
Parameters ---------- units hidden_size activation_dropout dropout activation normalization layer_norm or no_norm layer_norm_eps pre_norm Pre-layer normalization as proposed in the paper: "[ACL2018] The ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
def forward(self, data): """ Parameters ---------- data : Shape (B, seq_length, C_in) Returns ------- out : Shape (B, seq_length, C_out) """ residual = data if self._pre_norm: data = self.layer_norm(dat...
Parameters ---------- data : Shape (B, seq_length, C_in) Returns ------- out : Shape (B, seq_length, C_out)
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
def __init__(self, units: int, learnable=False): """Use a geometric sequence of timescales. It is calculated as [sin(w_0 x), cos(w_0 x), sin(w_1 x), cos(w_1 x), ...] By default, we initialize w_i to be (1 / 10000) ^ (i / (units//2 - 1)) Parameters ---------- units ...
Use a geometric sequence of timescales. It is calculated as [sin(w_0 x), cos(w_0 x), sin(w_1 x), cos(w_1 x), ...] By default, we initialize w_i to be (1 / 10000) ^ (i / (units//2 - 1)) Parameters ---------- units The number of units for positional embedding ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
def forward(self, positions): """ Parameters ---------- positions : th.Tensor Shape (..., ) Returns ------- ret : Shape (..., units) """ emb = positions.unsqueeze(-1) * self.freq sin_emb = th.sin(emb) cos_e...
Parameters ---------- positions : th.Tensor Shape (..., ) Returns ------- ret : Shape (..., units)
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
Apache-2.0
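Putting the two snippets above together, a self-contained sketch of the interleaved sin/cos embedding; the interleaving order follows the docstring, though the library's actual memory layout may differ:

```python
import torch as th

def sinusoidal_embedding_sketch(positions, units):
    half = units // 2
    # Geometric sequence of frequencies: w_i = (1/10000) ** (i / (half - 1))
    freq = (1.0 / 10000.0) ** (th.arange(half, dtype=th.float32) / (half - 1))
    emb = positions.float().unsqueeze(-1) * freq            # (..., half)
    # Interleave as [sin(w_0 x), cos(w_0 x), sin(w_1 x), cos(w_1 x), ...]
    out = th.stack([th.sin(emb), th.cos(emb)], dim=-1)
    return out.reshape(*emb.shape[:-1], units)

emb = sinusoidal_embedding_sketch(th.arange(10), units=16)  # shape (10, 16)
```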
def to_torch_dtype(dtype): """Convert the dtype to pytorch data type Parameters ---------- dtype The input dtype Returns ------- ret Converted dtype """ if isinstance(dtype, th.dtype) or dtype is None: return dtype dtype = np.dtype(dtype) if dtype in...
Convert the dtype to pytorch data type Parameters ---------- dtype The input dtype Returns ------- ret Converted dtype
to_torch_dtype
python
dmlc/gluon-nlp
src/gluonnlp/torch/utils.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
Apache-2.0
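A small table-driven sketch of the dtype conversion described above; the real function covers more dtypes than listed here:

```python
import numpy as np
import torch as th

_NP_TO_TH = {
    np.dtype(np.float32): th.float32,
    np.dtype(np.float16): th.float16,
    np.dtype(np.int64): th.int64,
    np.dtype(np.int32): th.int32,
    np.dtype(np.bool_): th.bool,
}

def to_torch_dtype_sketch(dtype):
    if dtype is None or isinstance(dtype, th.dtype):
        return dtype                    # already converted (or nothing to do)
    return _NP_TO_TH[np.dtype(dtype)]   # accepts 'float32', np.float32, ...

assert to_torch_dtype_sketch('float32') is th.float32
```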
def to_numpy_dtype(dtype): """Convert the dtype to numpy dtype Parameters ---------- dtype Input dtype Returns ------- ret The converted dtype """ if dtype is None: return None if dtype in torch_dtype_to_numpy_dict: return torch_dtype_to_numpy_di...
Convert the dtype to numpy dtype Parameters ---------- dtype Input dtype Returns ------- ret The converted dtype
to_numpy_dtype
python
dmlc/gluon-nlp
src/gluonnlp/torch/utils.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
Apache-2.0
def share_parameters(source, target): """Share parameters recursively from source model to target model. For example, if you want ``dense1`` to share ``dense0``'s weights, you can do:: dense0 = nn.Linear(20, 20) dense1 = nn.Linear(20, 20) share_parameters(dense0, dense1) which is equivalent to ...
Share parameters recursively from source model to target model. For example, if you want ``dense1`` to share ``dense0``'s weights, you can do:: dense0 = nn.Linear(20, 20) dense1 = nn.Linear(20, 20) share_parameters(dense0, dense1) which is equivalent to dense1.weight = dense0.weight d...
share_parameters
python
dmlc/gluon-nlp
src/gluonnlp/torch/utils.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
Apache-2.0
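A minimal self-contained sketch of the recursive sharing semantics described above (rebinding each of the target's parameters to the same-named parameter of the source); the real implementation may handle more edge cases:

```python
import torch as th

def share_parameters_sketch(source, target):
    src = dict(source.named_parameters())
    for name, _ in list(target.named_parameters()):
        module = target
        *path, leaf = name.split('.')
        for part in path:                 # walk down to the owning submodule
            module = getattr(module, part)
        setattr(module, leaf, src[name])  # alias the source's Parameter

dense0 = th.nn.Linear(20, 20)
dense1 = th.nn.Linear(20, 20)
share_parameters_sketch(dense0, dense1)
assert dense1.weight is dense0.weight and dense1.bias is dense0.bias
```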
def _named_members(module, get_members_fn, prefix='', recurse=True): r"""Helper method for yielding various names + members of modules. Unlike upstream torch implementation, this implementation returns members that are known under multiple names, such as shared parameters. """ ...
Helper method for yielding various names + members of modules. Unlike upstream torch implementation, this implementation returns members that are known under multiple names, such as shared parameters.
_named_members
python
dmlc/gluon-nlp
src/gluonnlp/torch/utils.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
Apache-2.0
def move_to(obj, device=None): """ Parameters ---------- obj Nested torch object device The target device Returns ------- new_obj The objects that have been moved to device. """ if th.is_tensor(obj): return obj.to(device) elif isinstance(obj,...
Parameters ---------- obj Nested torch object device The target device Returns ------- new_obj The objects that have been moved to device.
move_to
python
dmlc/gluon-nlp
src/gluonnlp/torch/utils.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
Apache-2.0
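A sketch of the recursive device move the docstring describes, covering tensors nested in dicts, lists, and tuples; namedtuples and other containers are out of scope here:

```python
import torch as th

def move_to_sketch(obj, device=None):
    if th.is_tensor(obj):
        return obj.to(device)
    if isinstance(obj, dict):
        return {k: move_to_sketch(v, device) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(move_to_sketch(v, device) for v in obj)
    return obj            # non-tensor leaves pass through unchanged

batch = {'tokens': th.zeros(2, 8, dtype=th.long), 'meta': ['a', 'b']}
batch = move_to_sketch(batch, device='cpu')
```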
def _pad_arrs_to_max_length(arrs, pad_val, dtype, batch_dim=0, round_to=None): """Inner Implementation of the Pad batchify Parameters ---------- arrs List of arrays pad_val The padding value dtype The type of the tensor batch_dim The dimension to insert the b...
Inner Implementation of the Pad batchify Parameters ---------- arrs List of arrays pad_val The padding value dtype The type of the tensor batch_dim The dimension to insert the batch dimension. This controls how we should construct the mini-batch. roun...
_pad_arrs_to_max_length
python
dmlc/gluon-nlp
src/gluonnlp/torch/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
Apache-2.0
def __call__(self, data): """Batchify the input data. The input can be list of numpy.ndarray, list of numbers or list of th.Tensor. The arrays will be padded to the largest dimension at `axis` and then stacked to form the final output. Parameters ---------- data...
Batchify the input data. The input can be list of numpy.ndarray, list of numbers or list of th.Tensor. The arrays will be padded to the largest dimension at `axis` and then stacked to form the final output. Parameters ---------- data : List[np.ndarray] or List[List[dtyp...
__call__
python
dmlc/gluon-nlp
src/gluonnlp/torch/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
Apache-2.0
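A sketch of the pad-then-stack behaviour described above for the common case of 1-D inputs; multi-dimensional padding and the `round_to` option are omitted:

```python
import torch as th

def pad_batchify_sketch(arrs, pad_val=0):
    # Pad every 1-D tensor to the longest length, then stack into a batch.
    max_len = max(int(a.shape[0]) for a in arrs)
    padded = [th.cat([a, a.new_full((max_len - a.shape[0],), pad_val)])
              for a in arrs]
    return th.stack(padded, dim=0)

batch = pad_batchify_sketch([th.tensor([1, 2, 3]), th.tensor([4, 5])])
# tensor([[1, 2, 3],
#         [4, 5, 0]])
```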
def _stack_arrs(arrs, batch_dim, dtype): """ Parameters ---------- arrs batch_dim The batch dimension dtype torch dtype Returns ------- stacked_arr The resulting stacked array """ if isinstance(arrs[0], np.ndarray): stacked_arr = np.stack(ar...
Parameters ---------- arrs batch_dim The batch dimension dtype torch dtype Returns ------- stacked_arr The resulting stacked array
_stack_arrs
python
dmlc/gluon-nlp
src/gluonnlp/torch/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
Apache-2.0
def __call__(self, data): """Batchify the input data. Parameters ---------- data : list The samples to batchfy. Each sample should contain N attributes. Returns ------- ret : tuple A tuple of length N. Contains the batchified result of ea...
Batchify the input data. Parameters ---------- data : list The samples to batchfy. Each sample should contain N attributes. Returns ------- ret : tuple A tuple of length N. Contains the batchified result of each attribute in the input.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/torch/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
Apache-2.0
def __call__(self, data: t_List[t_Dict]) -> t_Dict: """ Parameters ---------- data The samples to batchify. Each sample should be a dictionary Returns ------- ret The resulting dictionary that stores the merged samples. """ ...
Parameters ---------- data The samples to batchify. Each sample should be a dictionary Returns ------- ret The resulting dictionary that stores the merged samples.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/torch/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
Apache-2.0
def __call__(self, data: t_List[t_NamedTuple]) -> t_NamedTuple: """Batchify the input data. Parameters ---------- data The samples to batchify. Each sample should be a namedtuple. Returns ------- ret A namedtuple of length N. Contains the ba...
Batchify the input data. Parameters ---------- data The samples to batchify. Each sample should be a namedtuple. Returns ------- ret A namedtuple of length N. Contains the batchified result of each attribute in the input.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/torch/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
Apache-2.0
def forward(self, data, valid_length): """ Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C) ...
Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C) - layout = 'TN' Shape (seq_length,...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def forward(self, inputs, token_types, valid_length): # pylint: disable=arguments-differ """Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- inputs - layout = 'NT' Shape...
Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def get_initial_embedding(self, inputs, token_types=None): """Get the initial token embeddings that considers the token type and positional embeddings Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' ...
Get the initial token embeddings that considers the token type and positional embeddings Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) token_types ...
get_initial_embedding
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def apply_pooling(self, sequence): """Generate the representation given the inputs. This is used for pre-training or fine-tuning a bert model. Get the first token of the whole sequence which is [CLS] sequence - layout = 'NT' Shape (batch_size, sequence_lengt...
Generate the representation given the inputs. This is used for pre-training or fine-tuning a bert model. Get the first token of the whole sequence which is [CLS] sequence - layout = 'NT' Shape (batch_size, sequence_length, units) - layout = 'TN' ...
apply_pooling
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def from_cfg(cls, cfg, use_pooler=True) -> 'BertModel': """ Parameters ---------- cfg Configuration use_pooler Whether to output the pooled feature Returns ------- ret The constructed BertModel """ cfg ...
Parameters ---------- cfg Configuration use_pooler Whether to output the pooled feature Returns ------- ret The constructed BertModel
from_cfg
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def forward(self, inputs, token_types, valid_length, masked_positions): """Getting the scores of the masked positions. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, b...
Getting the scores of the masked positions. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) token_types If the inputs contain two sequences, we...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def __init__(self, backbone_cfg): """ Parameters ---------- backbone_cfg The cfg of the backbone model """ super().__init__() self.backbone_model = BertModel.from_cfg(backbone_cfg) # Construct nsp_classifier for next sentence prediction ...
Parameters ---------- backbone_cfg The cfg of the backbone model
__init__
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def forward(self, inputs, token_types, valid_length, masked_positions): """Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length)...
Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def __init__(self, backbone_cfg): """ Parameters ---------- backbone_cfg The cfg of the backbone model """ super().__init__() self.backbone_model = BertModel.from_cfg(backbone_cfg) self.quickthought = th.nn.Sequential( th.nn.Linea...
Parameters ---------- backbone_cfg The cfg of the backbone model
__init__
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def forward(self, inputs, token_types, valid_length, masked_positions): """Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length)...
Generate the representation given the inputs. This is used in training or fine-tuning a bert model. Parameters ---------- inputs - layout = 'NT' Shape (batch_size, seq_length) - layout = 'TN' Shape (seq_length, batch_size) ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/bert.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
Apache-2.0
def __init__(self, units: int = 512, hidden_size: int = 2048, num_heads: int = 8, attention_dropout_prob: float = 0.1, hidden_dropout_prob: float = 0.1, activation_dropout_prob: float = 0.0, layer_norm_eps: float = 1e-12, pre_norm: bool = False, use_qkv_bias: bool = Tr...
Parameters ---------- units hidden_size num_heads attention_dropout_prob hidden_dropout_prob activation_dropout_prob layer_norm_eps pre_norm Whether to attach the normalization layer before attention layer If pre_no...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def forward(self, data, attn_mask): """ Parameters ---------- data : If layout == 'NT' Shape (batch_size, seq_length, C_in) Else Shape (seq_length, batch_size, C_in) attn_mask : Shape (batch_size, seq_length, seq...
Parameters ---------- data : If layout == 'NT' Shape (batch_size, seq_length, C_in) Else Shape (seq_length, batch_size, C_in) attn_mask : Shape (batch_size, seq_length, seq_length) Returns ------- ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def __init__(self, units: int = 512, mem_units: Optional[int] = None, hidden_size: int = 2048, num_heads: int = 8, activation_dropout: float = 0.0, dropout: float = 0.1, attention_dropout: float = 0.1, layer_norm_eps: float = 1E-5, activation: str = 'relu', gated_proj:...
Parameters ---------- units mem_units The number of units in the memory. By default, it is initialized to be the same as the units. hidden_size num_heads activation_dropout dropout attention_dropout layer_norm_eps ...
__init__
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def forward(self, data, mem, self_causal_mask, mem_attn_mask): """ Parameters ---------- data : - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) mem : - la...
Parameters ---------- data : - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) mem : - layout = 'NT' Shape (batch_size, mem_length, C_mem) ...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def init_states(self, batch_size, device=None, dtype='float32'): """Initialize the states required for incremental decoding Parameters ---------- batch_size device dtype Returns ------- init_key - layout = 'NT' Shape (...
Initialize the states required for incremental decoding Parameters ---------- batch_size device dtype Returns ------- init_key - layout = 'NT' Shape (batch_size, 0, N, C_key) - layout = 'TN' Shape (...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def incremental_decode(self, data, states, mem, mem_valid_length, mem_attn_mask=None): """Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contains 1. la...
Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contains 1. layout = 'NT': - prev_multi_key Shape (batch_size, prev_seq_leng...
incremental_decode
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def forward(self, data, valid_length, mem_data, mem_valid_length): """Run forward Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) valid_le...
Run forward Parameters ---------- data - layout = 'NT' Shape (batch_size, seq_length, C_in) - layout = 'TN' Shape (seq_length, batch_size, C_in) valid_length Shape (batch_size,) mem_data - layout = '...
forward
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def init_states(self, batch_size, device=None, dtype='float32'): """Initialize the states required for incremental decoding Parameters ---------- batch_size The batch size device The device dtype The data type of the states Re...
Initialize the states required for incremental decoding Parameters ---------- batch_size The batch size device The device dtype The data type of the states Returns ------- states A list of states, each incl...
init_states
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def incremental_decode(self, data, states, mem, mem_valid_length): """Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contain a list of 1. layout = 'NT'...
Incrementally generate the output given the decoder input. Parameters ---------- data Shape (batch_size, C_in) states The previous states, contain a list of 1. layout = 'NT' - prev_multi_key Shape (batch_size, prev_...
incremental_decode
python
dmlc/gluon-nlp
src/gluonnlp/torch/models/transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
Apache-2.0
def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for...
Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss.
step
python
dmlc/gluon-nlp
src/gluonnlp/torch/optimizers/fused_lans.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/optimizers/fused_lans.py
Apache-2.0
def get_warmup_linear_const_decay_poly_schedule(optimizer, total_steps, warmup_ratio=0.002, const_ratio=0., degree=1.0, last_epoch=-1): """Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup ...
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period (during which it increases linearly from 0 to the initial lr set in the optimizer) and an optional constant period. Args: optimizer (:class:`~torch.optim.Optimizer`): ...
get_warmup_linear_const_decay_poly_schedule
python
dmlc/gluon-nlp
src/gluonnlp/torch/optimizers/schedules.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/optimizers/schedules.py
Apache-2.0
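A sketch of the schedule shape described above, expressed as a LambdaLR multiplier: linear warmup, optional constant plateau, then polynomial decay to 0. The ratio-to-step rounding here is an assumption:

```python
import torch as th

def warmup_const_poly_lambda(total_steps, warmup_ratio=0.002,
                             const_ratio=0.0, degree=1.0):
    warmup_steps = int(total_steps * warmup_ratio)
    const_steps = int(total_steps * const_ratio)
    def lr_lambda(step):
        if step < warmup_steps:
            return step / max(1, warmup_steps)        # linear warmup
        if step < warmup_steps + const_steps:
            return 1.0                                # constant plateau
        decay_steps = max(1, total_steps - warmup_steps - const_steps)
        remaining = max(0.0, (total_steps - step) / decay_steps)
        return remaining ** degree                    # polynomial decay to 0
    return lr_lambda

params = [th.nn.Parameter(th.zeros(1))]
opt = th.optim.SGD(params, lr=1e-3)
sched = th.optim.lr_scheduler.LambdaLR(opt, warmup_const_poly_lambda(10000))
```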
def clone_merge(self, cfg_filename_or_other_cfg): """Create a new cfg by cloning and merging with the given cfg Parameters ---------- cfg_filename_or_other_cfg Returns ------- """ ret = self.clone() if isinstance(cfg_filename_or_other_cfg, str):...
Create a new cfg by cloning and merging with the given cfg Parameters ---------- cfg_filename_or_other_cfg Returns -------
clone_merge
python
dmlc/gluon-nlp
src/gluonnlp/utils/config.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/config.py
Apache-2.0
def glob(url, separator=','): """Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards. Input may also include multiple patterns, separated by separator. Parameters ---------- url : str The name of the files separator : str, defaul...
Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards. Input may also include multiple patterns, separated by separator. Parameters ---------- url : str The name of the files separator : str, default is ',' The separator in url...
glob
python
dmlc/gluon-nlp
src/gluonnlp/utils/misc.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
Apache-2.0
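A sketch of the multi-pattern expansion described above, splitting on the separator and merging the shell-style matches:

```python
import glob as _glob

def glob_sketch(url, separator=','):
    result = []
    for pattern in url.split(separator):
        result.extend(_glob.glob(pattern.strip()))  # shell-style wildcards
    return result

files = glob_sketch('data/part-*.txt,extra/*.txt')
```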
def file_line_number(path: str) -> int: """ Parameters ---------- path The path to calculate the number of lines in a file. Returns ------- ret The number of lines """ ret = 0 with open(path, 'rb') as f: for _ in f: ret += 1 return re...
Parameters ---------- path The path to calculate the number of lines in a file. Returns ------- ret The number of lines
file_line_number
python
dmlc/gluon-nlp
src/gluonnlp/utils/misc.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
Apache-2.0
def md5sum(filename): """Calculate the md5sum of a file Parameters ---------- filename Name of the file Returns ------- ret The md5sum """ with open(filename, mode='rb') as f: d = hashlib.md5() for buf in iter(functools.partial(f.read, 1024*100), b''...
Calculate the md5sum of a file Parameters ---------- filename Name of the file Returns ------- ret The md5sum
md5sum
python
dmlc/gluon-nlp
src/gluonnlp/utils/misc.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
Apache-2.0
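The truncated snippet above reads the file in 100 KiB chunks; completing it presumably just returns the hex digest. A self-contained sketch (the sha1sum record below follows the same pattern with hashlib.sha1):

```python
import functools
import hashlib

def md5sum_sketch(filename):
    d = hashlib.md5()
    with open(filename, mode='rb') as f:
        # iter(callable, sentinel) keeps reading 100 KiB until EOF (b'')
        for buf in iter(functools.partial(f.read, 1024 * 100), b''):
            d.update(buf)
    return d.hexdigest()
```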
def sha1sum(filename): """Calculate the sha1sum of a file Parameters ---------- filename Name of the file Returns ------- ret The sha1sum """ with open(filename, mode='rb') as f: d = hashlib.sha1() for buf in iter(functools.partial(f.read, 1024*100),...
Calculate the sha1sum of a file Parameters ---------- filename Name of the file Returns ------- ret The sha1sum
sha1sum
python
dmlc/gluon-nlp
src/gluonnlp/utils/misc.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
Apache-2.0
def logging_config(folder: Optional[str] = None, name: Optional[str] = None, logger: logging.Logger = logging.root, level: int = logging.INFO, console_level: int = logging.INFO, console: bool = True, overwr...
Configure the logging module. It will set the logger to save to the specified file path. Parameters ---------- folder The folder to save the log name Name of the saved log file logger The logger to configure level Logging level console_level Logging level of the console log ...
logging_config
python
dmlc/gluon-nlp
src/gluonnlp/utils/misc.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
Apache-2.0