code
stringlengths
17
6.64M
class NormAdd(nn.Module):
    """Pre-normalization residual wrapper (aka PreNorm).

    Layer-normalizes the input *before* the given sublayer, then adds the
    (dropout-regularized) sublayer output back onto the residual stream:
    ``X + dropout(sublayer(LayerNorm(X)))``.
    """

    def __init__(self, input_dim: int, dropout: float):
        super(NormAdd, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, X: Tensor, sublayer: nn.Module) -> Tensor:
        normed = self.ln(X)
        branch = self.dropout(sublayer(normed))
        return X + branch
class AddNorm(nn.Module):
    """Post-normalization residual wrapper (aka PostNorm).

    Adds the (dropout-regularized) sublayer output to the input and
    layer-normalizes the sum: ``LayerNorm(X + dropout(sublayer(X)))``.
    """

    def __init__(self, input_dim: int, dropout: float):
        super(AddNorm, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, X: Tensor, sublayer: nn.Module) -> Tensor:
        branch = self.dropout(sublayer(X))
        return self.ln(X + branch)
class MultiHeadedAttention(nn.Module):
    """Multi-head attention with three interchangeable backends.

    Depending on the constructor flags, attention is computed with:

    - PyTorch's fused ``F.scaled_dot_product_attention`` (flash attention),
    - the linear attention of Katharopoulos et al. (2020), or
    - standard softmax attention (Vaswani et al., 2017).

    Parameters
    ----------
    input_dim: int
        dimension of the key/value input (the model dimension). Must be
        divisible by ``n_heads``
    n_heads: int
        number of attention heads
    use_bias: bool
        whether the q/kv/out projections use a bias term
    dropout: float
        attention dropout probability
    query_dim: int, Optional
        dimension of the query input when it differs from ``input_dim``
        (e.g. for cross attention). Defaults to ``input_dim``
    use_linear_attention: bool
        use the linear-attention backend
    use_flash_attention: bool
        use the fused kernel; attention weights are then not available
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, dropout: float, query_dim: Optional[int]=None, use_linear_attention: bool=False, use_flash_attention: bool=False):
        super(MultiHeadedAttention, self).__init__()
        assert ((input_dim % n_heads) == 0), "'input_dim' must be divisible by 'n_heads'"
        self.use_linear_attention = use_linear_attention
        self.use_flash_attention = use_flash_attention
        self.head_dim = (input_dim // n_heads)
        self.n_heads = n_heads
        # kept separately from the Dropout module because the fused kernel
        # takes the probability as a plain float
        self.dropout_p = dropout
        self.dropout = nn.Dropout(dropout)
        query_dim = (query_dim if (query_dim is not None) else input_dim)
        self.q_proj = nn.Linear(query_dim, input_dim, bias=use_bias)
        # keys and values come from a single fused projection, split in forward
        self.kv_proj = nn.Linear(input_dim, (input_dim * 2), bias=use_bias)
        # with a single head there is nothing to mix, so the output
        # projection is skipped entirely
        self.out_proj = (nn.Linear(input_dim, query_dim, bias=use_bias) if (n_heads > 1) else None)

    def forward(self, X_Q: Tensor, X_KV: Optional[Tensor]=None) -> Tensor:
        # self-attention when X_KV is None, cross-attention otherwise
        q = self.q_proj(X_Q)
        X_KV = (X_KV if (X_KV is not None) else X_Q)
        (k, v) = self.kv_proj(X_KV).chunk(2, dim=(- 1))
        # split the embedding dim into heads: (b, m, h*d) -> (b, h, m, d)
        (q, k, v) = map((lambda t: einops.rearrange(t, 'b m (h d) -> b h m d', h=self.n_heads)), (q, k, v))
        if self.use_flash_attention:
            attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=(self.dropout_p if self.training else 0), is_causal=False)
            # the fused kernel does not expose the attention matrix
            self.attn_weights: Optional[Tensor] = None
        elif self.use_linear_attention:
            attn_output = self._linear_attention(q, k, v)
            self.attn_weights = None
        else:
            (self.attn_weights, attn_output) = self._standard_attention(q, k, v)
        # merge the heads back: (b, h, s, d) -> (b, s, h*d)
        output = einops.rearrange(attn_output, 'b h s d -> b s (h d)', h=self.n_heads)
        if (self.out_proj is not None):
            output = self.out_proj(output)
        return output

    def _standard_attention(self, q: Tensor, k: Tensor, v: Tensor) -> Tuple[(Tensor, Tensor)]:
        """'Standard' multihead attention implementation from [Attention Is
        All You Need](https://arxiv.org/abs/1706.03762)

        Returns the (pre-dropout) attention weights and the attended values.
        """
        # scaled dot-product scores: (b, h, s, l)
        scores = (einsum('b h s d, b h l d -> b h s l', q, k) / math.sqrt(self.head_dim))
        attn_weights = scores.softmax(dim=(- 1))
        attn_output = einsum('b h s l, b h l d -> b h s d', self.dropout(attn_weights), v)
        return (attn_weights, attn_output)

    @staticmethod
    def _linear_attention(q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        """Linear attention implementation from [Transformers are RNNs: Fast
        Autoregressive Transformers with Linear Attention]
        (https://arxiv.org/abs/2006.16236)
        """
        # feature map phi(x) = elu(x) + 1 keeps the kernel positive
        (q, k) = ((nn.functional.elu(q) + 1), (nn.functional.elu(k) + 1))
        # aggregate keys with values first -> linear (not quadratic) cost
        scores = einsum('b h s e, b h l d -> b h e d', k, v)
        # per-query normalizer; 1e-06 guards against division by zero
        z = (1 / (torch.einsum('b h m d, b h d -> b h m', q, k.sum(dim=2)) + 1e-06))
        attn_output = torch.einsum('b h m d, b h e d, b h m -> b h m d', q, scores, z)
        return attn_output
class LinearAttentionLinformer(nn.Module):
    """Linear Attention implementation from [Linformer: Self-Attention with
    Linear Complexity](https://arxiv.org/abs/2006.04768)

    Keys and values are projected along the sequence (feature) axis from
    ``n_feats`` down to ``int(kv_compression_factor * n_feats)``, making the
    attention cost linear in the number of features.
    """

    def __init__(self, input_dim: int, n_feats: int, n_heads: int, use_bias: bool, dropout: float, kv_compression_factor: float, kv_sharing: bool):
        super(LinearAttentionLinformer, self).__init__()
        assert ((input_dim % n_heads) == 0), "'input_dim' must be divisible by 'n_heads'"
        self.n_feats = n_feats
        self.head_dim = (input_dim // n_heads)
        self.n_heads = n_heads
        self.kv_compression_factor = kv_compression_factor
        self.share_kv = kv_sharing
        # compressed sequence length ('k' in the paper)
        dim_k = int((self.kv_compression_factor * self.n_feats))
        self.dropout = nn.Dropout(dropout)
        self.qkv_proj = nn.Linear(input_dim, (input_dim * 3), bias=use_bias)
        # E and F are the low-rank projection matrices of the paper; when
        # kv_sharing is True one matrix compresses both keys and values
        self.E = nn.init.xavier_uniform_(nn.Parameter(torch.zeros(n_feats, dim_k)))
        if (not kv_sharing):
            self.F = nn.init.xavier_uniform_(nn.Parameter(torch.zeros(n_feats, dim_k)))
        else:
            self.F = self.E
        # with a single head there is nothing to mix: skip the projection
        self.out_proj = (nn.Linear(input_dim, input_dim, bias=use_bias) if (n_heads > 1) else None)

    def forward(self, X: Tensor) -> Tensor:
        (q, k, v) = self.qkv_proj(X).chunk(3, dim=(- 1))
        q = einops.rearrange(q, 'b s (h d) -> b h s d', h=self.n_heads)
        # compress keys and values along the sequence axis: s -> k
        k = einsum('b s i, s k -> b k i', k, self.E)
        v = einsum('b s i, s k -> b k i', v, self.F)
        k = einops.rearrange(k, 'b k (h d) -> b h k d', d=self.head_dim)
        v = einops.rearrange(v, 'b k (h d) -> b h k d', d=self.head_dim)
        # scaled dot-product over the compressed axis: (b, h, s, k)
        scores = (einsum('b h s d, b h k d -> b h s k', q, k) / math.sqrt(self.head_dim))
        attn_weights = scores.softmax(dim=(- 1))
        # cached so the owning model can expose them after forward
        self.attn_weights = attn_weights
        output = einsum('b h s k, b h k d -> b h s d', self.dropout(attn_weights), v)
        output = einops.rearrange(output, 'b h s d -> b s (h d)')
        if (self.out_proj is not None):
            output = self.out_proj(output)
        return output
class AdditiveAttention(nn.Module):
    """Additive Attention Implementation from [FastFormer]
    (https://arxiv.org/abs/2108.09084)

    Instead of pairwise query-key interactions, a single global query and a
    single global key are formed as attention-weighted sums over the
    sequence, giving linear complexity in the sequence length.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, dropout: float, share_qv_weights: bool):
        super(AdditiveAttention, self).__init__()
        assert ((input_dim % n_heads) == 0), "'input_dim' must be divisible by 'n_heads'"
        self.head_dim = (input_dim // n_heads)
        self.n_heads = n_heads
        # FastFormer variant: queries and values share one projection
        self.share_qv_weights = share_qv_weights
        self.dropout = nn.Dropout(dropout)
        if share_qv_weights:
            self.qv_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
        else:
            self.q_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
            self.v_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
        self.k_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
        # per-head scoring vectors producing the alphas/betas below
        self.W_q = nn.Linear(input_dim, n_heads)
        self.W_k = nn.Linear(input_dim, n_heads)
        self.r_out = nn.Linear(input_dim, input_dim)

    def forward(self, X: Tensor) -> Tensor:
        q = (self.qv_proj(X) if self.share_qv_weights else self.q_proj(X))
        v = (self.qv_proj(X) if self.share_qv_weights else self.v_proj(X))
        k = self.k_proj(X)
        # softmax over the sequence axis -> one global query per head
        alphas = (self.W_q(q) / math.sqrt(self.head_dim)).softmax(dim=1)
        q_r = einops.rearrange(q, 'b s (h d) -> b s h d', h=self.n_heads)
        global_query = einsum(' b s h, b s h d -> b h d', alphas, q_r)
        global_query = einops.rearrange(global_query, 'b h d -> b () (h d)')
        # keys modulated by the global query, then pooled into a global key
        p = (k * global_query)
        betas = (self.W_k(p) / math.sqrt(self.head_dim)).softmax(dim=1)
        p_r = einops.rearrange(p, 'b s (h d) -> b s h d', h=self.n_heads)
        global_key = einsum(' b s h, b s h d -> b h d', betas, p_r)
        global_key = einops.rearrange(global_key, 'b h d -> b () (h d)')
        u = (v * global_key)
        # cached as (alphas, betas), each reshaped to (batch, heads, seq)
        self.attn_weights = (einops.rearrange(alphas, 'b s h -> b h s'), einops.rearrange(betas, 'b s h -> b h s'))
        # residual connection with the query, as in the paper
        output = (q + self.dropout(self.r_out(u)))
        return output
class TransformerEncoder(nn.Module):
    """Standard post-norm transformer encoder block.

    Multi-head attention followed by a feed-forward network, each wrapped
    in a residual Add&Norm sublayer.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, ff_factor: int, activation: str, use_linear_attention: bool, use_flash_attention: bool):
        super(TransformerEncoder, self).__init__()
        self.attn = MultiHeadedAttention(
            input_dim,
            n_heads,
            use_bias,
            attn_dropout,
            None,
            use_linear_attention,
            use_flash_attention,
        )
        self.ff = FeedForward(input_dim, ff_dropout, ff_factor, activation)
        self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.ff_addnorm = AddNorm(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        attended = self.attn_addnorm(X, self.attn)
        return self.ff_addnorm(attended, self.ff)
class SaintEncoder(nn.Module):
    """SAINT encoder block: column (self) attention followed by row
    (inter-sample) attention.

    For the row attention the whole batch is flattened into a single
    sequence where each sample is one "token" of size ``n_feat * input_dim``,
    letting samples attend to each other.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, ff_factor: int, activation: str, n_feat: int):
        super(SaintEncoder, self).__init__()
        self.n_feat = n_feat
        # attention over the columns/features within each sample
        self.col_attn = MultiHeadedAttention(input_dim, n_heads, use_bias, attn_dropout)
        self.col_attn_ff = FeedForward(input_dim, ff_dropout, ff_factor, activation)
        self.col_attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.col_attn_ff_addnorm = AddNorm(input_dim, ff_dropout)
        # attention over the rows/samples; operates on flattened samples
        self.row_attn = MultiHeadedAttention((n_feat * input_dim), n_heads, use_bias, attn_dropout)
        self.row_attn_ff = FeedForward((n_feat * input_dim), ff_dropout, ff_factor, activation)
        self.row_attn_addnorm = AddNorm((n_feat * input_dim), attn_dropout)
        self.row_attn_ff_addnorm = AddNorm((n_feat * input_dim), ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        x = self.col_attn_addnorm(X, self.col_attn)
        x = self.col_attn_ff_addnorm(x, self.col_attn_ff)
        # (b, n, d) -> (1, b, n*d): the batch axis becomes the sequence so
        # that row attention mixes information across samples
        x = einops.rearrange(x, 'b n d -> 1 b (n d)')
        x = self.row_attn_addnorm(x, self.row_attn)
        x = self.row_attn_ff_addnorm(x, self.row_attn_ff)
        x = einops.rearrange(x, '1 b (n d) -> b n d', n=self.n_feat)
        return x
class FTTransformerEncoder(nn.Module):
    """FT-Transformer encoder block.

    Linformer-style linear attention plus a feed-forward network, each
    wrapped in a pre-norm ('NormAdd') residual. The first block applies a
    plain residual around the attention (no pre-normalization of the raw
    embeddings).
    """

    def __init__(self, input_dim: int, n_feats: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, ff_factor: float, kv_compression_factor: float, kv_sharing: bool, activation: str, first_block: bool):
        super(FTTransformerEncoder, self).__init__()
        self.first_block = first_block
        self.attn = LinearAttentionLinformer(
            input_dim,
            n_feats,
            n_heads,
            use_bias,
            attn_dropout,
            kv_compression_factor,
            kv_sharing,
        )
        self.ff = FeedForward(input_dim, ff_dropout, ff_factor, activation)
        self.attn_normadd = NormAdd(input_dim, attn_dropout)
        self.ff_normadd = NormAdd(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        if self.first_block:
            # raw embeddings: plain residual, no pre-norm
            hidden = X + self.attn(X)
        else:
            hidden = self.attn_normadd(X, self.attn)
        return self.ff_normadd(hidden, self.ff)
class PerceiverEncoder(nn.Module):
    """Perceiver-style pre-norm encoder block.

    Queries (the latents) and keys/values (the input array) are
    layer-normalized separately before attention; a pre-norm feed-forward
    sublayer follows. When ``X_KV`` is None the block self-attends.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, ff_factor: int, activation: str, query_dim: Optional[int]=None):
        super(PerceiverEncoder, self).__init__()
        self.attn = MultiHeadedAttention(input_dim, n_heads, use_bias, attn_dropout, query_dim)
        attn_dim_out = query_dim if query_dim is not None else input_dim
        self.ff = FeedForward(attn_dim_out, ff_dropout, ff_factor, activation)
        self.ln_q = nn.LayerNorm(attn_dim_out)
        self.ln_kv = nn.LayerNorm(input_dim)
        self.norm_attn_dropout = nn.Dropout(attn_dropout)
        self.ff_norm = nn.LayerNorm(attn_dim_out)
        self.norm_ff_dropout = nn.Dropout(ff_dropout)

    def forward(self, X_Q: Tensor, X_KV: Optional[Tensor]=None) -> Tensor:
        normed_q = self.ln_q(X_Q)
        normed_kv = self.ln_kv(X_KV) if X_KV is not None else None
        # NOTE: the attention residual is added to the *normalized* query
        attended = normed_q + self.norm_attn_dropout(self.attn(normed_q, normed_kv))
        ff_branch = self.norm_ff_dropout(self.ff(self.ff_norm(attended)))
        return attended + ff_branch
class FastFormerEncoder(nn.Module):
    """FastFormer encoder block.

    Additive attention plus a feed-forward network, each wrapped in a
    post-norm ('AddNorm') residual sublayer.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, ff_factor: int, share_qv_weights: bool, activation: str):
        super(FastFormerEncoder, self).__init__()
        self.attn = AdditiveAttention(
            input_dim,
            n_heads,
            use_bias,
            attn_dropout,
            share_qv_weights,
        )
        self.ff = FeedForward(input_dim, ff_dropout, ff_factor, activation)
        self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.ff_addnorm = AddNorm(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        attended = self.attn_addnorm(X, self.attn)
        return self.ff_addnorm(attended, self.ff)
class TabPerceiver(BaseTabularModelWithAttention):
    """Adaptation of a [Perceiver](https://arxiv.org/abs/2103.03206) for
    tabular data, usable as the `deeptabular` component of a Wide & Deep
    model or independently by itself.

    A set of ``n_latents`` learnable latents of size ``latent_dim``
    repeatedly (per perceiver block) cross-attends to the embedded columns
    and is then refined by ``n_latent_blocks`` latent transformer encoders.
    The latents are mean-pooled and optionally passed through an MLP head.

    Embedding-related parameters (``column_idx``, ``cat_embed_*``,
    ``continuous_cols``, ``cont_*``, ``shared_embed``-family and
    ``input_dim``) are forwarded to ``BaseTabularModelWithAttention``.

    Perceiver-specific parameters
    -----------------------------
    n_cross_attns: int
        number of cross-attention components per perceiver block (normally 1)
    n_cross_attn_heads: int
        attention heads for the cross attention
    n_latents: int, latent_dim: int
        number and dimension of the latents ($N$ in the paper; should be
        significantly lower than the number of columns $M$)
    n_latent_heads: int, n_latent_blocks: int
        heads and encoder blocks per latent transformer
    n_perceiver_blocks: int
        number of [cross attention + latent transformer] blocks
    share_weights: bool
        share weights between perceiver blocks
    attn_dropout, ff_dropout, ff_factor, transformer_activation
        encoder hyperparameters
    mlp_*:
        configuration of the optional MLP head

    Attributes
    ----------
    encoder: nn.ModuleDict
        ModuleDict with the Perceiver blocks
    latents: nn.Parameter
        Latents that will be used for prediction
    mlp: nn.Module
        MLP component in the model (None if ``mlp_hidden_dims`` is None)
    """

    def __init__(self, column_idx: Dict[(str, int)], cat_embed_input: Optional[List[Tuple[(str, int)]]]=None, cat_embed_dropout: float=0.1, use_cat_bias: bool=False, cat_embed_activation: Optional[str]=None, full_embed_dropout: bool=False, shared_embed: bool=False, add_shared_embed: bool=False, frac_shared_embed: float=0.25, continuous_cols: Optional[List[str]]=None, cont_norm_layer: Optional[str]=None, cont_embed_dropout: float=0.1, use_cont_bias: bool=True, cont_embed_activation: Optional[str]=None, input_dim: int=32, n_cross_attns: int=1, n_cross_attn_heads: int=4, n_latents: int=16, latent_dim: int=128, n_latent_heads: int=4, n_latent_blocks: int=4, n_perceiver_blocks: int=4, share_weights: bool=False, attn_dropout: float=0.1, ff_dropout: float=0.1, ff_factor: int=4, transformer_activation: str='geglu', mlp_hidden_dims: Optional[List[int]]=None, mlp_activation: str='relu', mlp_dropout: float=0.1, mlp_batchnorm: bool=False, mlp_batchnorm_last: bool=False, mlp_linear_first: bool=True):
        # embedding machinery lives in the base class
        super(TabPerceiver, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, full_embed_dropout=full_embed_dropout, shared_embed=shared_embed, add_shared_embed=add_shared_embed, frac_shared_embed=frac_shared_embed, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=True, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation, input_dim=input_dim)
        self.n_cross_attns = n_cross_attns
        self.n_cross_attn_heads = n_cross_attn_heads
        self.n_latents = n_latents
        self.latent_dim = latent_dim
        self.n_latent_heads = n_latent_heads
        self.n_latent_blocks = n_latent_blocks
        self.n_perceiver_blocks = n_perceiver_blocks
        self.share_weights = share_weights
        self.attn_dropout = attn_dropout
        self.ff_dropout = ff_dropout
        self.ff_factor = ff_factor
        self.transformer_activation = transformer_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # learnable latent array ($N \times$ latent_dim in the paper)
        self.latents = nn.init.trunc_normal_(nn.Parameter(torch.empty(n_latents, latent_dim)))
        self.encoder = nn.ModuleDict()
        first_perceiver_block = self._build_perceiver_block()
        self.encoder['perceiver_block0'] = first_perceiver_block
        if share_weights:
            # every block is the same module instance -> weights are shared
            for n in range(1, n_perceiver_blocks):
                self.encoder[('perceiver_block' + str(n))] = first_perceiver_block
        else:
            for n in range(1, n_perceiver_blocks):
                self.encoder[('perceiver_block' + str(n))] = self._build_perceiver_block()
        self.mlp_first_hidden_dim = self.latent_dim
        if (mlp_hidden_dims is not None):
            self.mlp = MLP(([self.mlp_first_hidden_dim] + mlp_hidden_dims), mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
        else:
            self.mlp = None

    def forward(self, X: Tensor) -> Tensor:
        x_emb = self._get_embeddings(X)
        # broadcast the latents over the batch
        x = einops.repeat(self.latents, 'n d -> b n d', b=X.shape[0])
        for n in range(self.n_perceiver_blocks):
            cross_attns = self.encoder[('perceiver_block' + str(n))]['cross_attns']
            latent_transformer = self.encoder[('perceiver_block' + str(n))]['latent_transformer']
            # latents attend to the embedded columns, then self-attend
            for cross_attn in cross_attns:
                x = cross_attn(x, x_emb)
            x = latent_transformer(x)
        # mean-pool over the latents before the (optional) MLP head
        x = x.mean(dim=1)
        if (self.mlp is not None):
            x = self.mlp(x)
        return x

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        return (self.mlp_hidden_dims[(- 1)] if (self.mlp_hidden_dims is not None) else self.mlp_first_hidden_dim)

    @property
    def attention_weights(self) -> List:
        """List with the attention weights. If the weights are not shared
        between perceiver blocks each element of the list will be a list
        itself containing the Cross Attention and Latent Transformer
        attention weights respectively

        The shape of the attention weights is:

        - Cross Attention: $(N, C, L, F)$
        - Latent Attention: $(N, T, L, L)$

        where $N$ is the batch size, $C$ is the number of Cross Attention
        heads, $L$ is the number of Latents, $F$ is the number of
        features/columns in the dataset and $T$ is the number of Latent
        Attention heads
        """
        if self.share_weights:
            # one block's weights represent all blocks
            cross_attns = self.encoder['perceiver_block0']['cross_attns']
            latent_transformer = self.encoder['perceiver_block0']['latent_transformer']
            attention_weights = self._extract_attn_weights(cross_attns, latent_transformer)
        else:
            attention_weights = []
            for n in range(self.n_perceiver_blocks):
                cross_attns = self.encoder[('perceiver_block' + str(n))]['cross_attns']
                latent_transformer = self.encoder[('perceiver_block' + str(n))]['latent_transformer']
                attention_weights.append(self._extract_attn_weights(cross_attns, latent_transformer))
        return attention_weights

    def _build_perceiver_block(self) -> nn.ModuleDict:
        # one perceiver block = [n_cross_attns x cross attention] followed
        # by [n_latent_blocks x latent transformer encoder]
        perceiver_block = nn.ModuleDict()
        cross_attns = nn.ModuleList()
        for _ in range(self.n_cross_attns):
            cross_attns.append(PerceiverEncoder(self.input_dim, self.n_cross_attn_heads, False, self.attn_dropout, self.ff_dropout, self.ff_factor, self.transformer_activation, self.latent_dim))
        perceiver_block['cross_attns'] = cross_attns
        latent_transformer = nn.Sequential()
        for i in range(self.n_latent_blocks):
            latent_transformer.add_module(('latent_block' + str(i)), PerceiverEncoder(self.latent_dim, self.n_latent_heads, False, self.attn_dropout, self.ff_dropout, self.ff_factor, self.transformer_activation))
        perceiver_block['latent_transformer'] = latent_transformer
        return perceiver_block

    @staticmethod
    def _extract_attn_weights(cross_attns, latent_transformer) -> List:
        # gathers the attn_weights cached by each attention module during
        # the last forward pass
        attention_weights = []
        for cross_attn in cross_attns:
            attention_weights.append(cross_attn.attn.attn_weights)
        for latent_block in latent_transformer:
            attention_weights.append(latent_block.attn.attn_weights)
        return attention_weights
class ContextAttentionEncoder(nn.Module):
    """Wraps an RNN (LSTM or GRU) with a context-attention layer.

    The RNN output (optionally concatenated with the last hidden state) is
    fed to a `ContextAttention` module, with an optional Add&Norm residual
    around it.
    """

    def __init__(self, rnn: nn.Module, input_dim: int, attn_dropout: float, attn_concatenate: bool, with_addnorm: bool, sum_along_seq: bool):
        super(ContextAttentionEncoder, self).__init__()
        self.rnn = rnn
        self.bidirectional = self.rnn.bidirectional
        self.attn_concatenate = attn_concatenate
        self.with_addnorm = with_addnorm
        if with_addnorm:
            self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.attn = ContextAttention(input_dim, attn_dropout, sum_along_seq)

    def forward(self, X: Tensor, h: Tensor, c: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        if isinstance(self.rnn, nn.LSTM):
            (o, (h, c)) = self.rnn(X, (h, c))
        elif isinstance(self.rnn, nn.GRU):
            # GRUs have no cell state; 'c' is passed through unchanged
            (o, h) = self.rnn(X, h)
        attn_inp = self._process_rnn_outputs(o, h)
        if self.with_addnorm:
            out = self.attn_addnorm(attn_inp, self.attn)
        else:
            out = self.attn(attn_inp)
        # NOTE(review): inputs are ordered (X, h, c) but the return is
        # (out, c, h) -- confirm with callers that the swapped h/c order is
        # intentional
        return (out, c, h)

    def _process_rnn_outputs(self, output: Tensor, hidden: Tensor) -> Tensor:
        # optionally concatenate the last hidden state (both directions when
        # bidirectional) to every step of the RNN output
        if self.attn_concatenate:
            if self.bidirectional:
                bi_hidden = torch.cat((hidden[(- 2)], hidden[(- 1)]), dim=1)
                attn_inp = torch.cat([output, bi_hidden.unsqueeze(1).expand_as(output)], dim=2)
            else:
                attn_inp = torch.cat([output, hidden[(- 1)].unsqueeze(1).expand_as(output)], dim=2)
        else:
            attn_inp = output
        return attn_inp
class AttentiveRNN(BasicRNN):
    """Text classifier/regressor comprised by a stack of RNNs (LSTMs or
    GRUs) plus an attention layer. This model can be used as the `deeptext`
    component of a Wide & Deep model or independently by itself.

    In addition, there is the option to add a Fully Connected (FC) set of
    dense layers (the `rnn_mlp`) on top of the attention layer.

    All embedding/RNN/head parameters are those of `BasicRNN`. On top of
    them:

    attn_concatenate: bool, default = True
        if True the attention input is the RNN output concatenated with the
        last hidden state; otherwise just the RNN output
    attn_dropout: float, default = 0.1
        internal dropout for the attention mechanism

    Attributes
    ----------
    attn: nn.Module
        the `ContextAttention` layer
    rnn_mlp: nn.Module
        dense layers on top of the attention; only exists if
        `head_hidden_dims` is not None
    """

    def __init__(self, vocab_size: int, embed_dim: Optional[int]=None, embed_matrix: Optional[np.ndarray]=None, embed_trainable: bool=True, rnn_type: str='lstm', hidden_dim: int=64, n_layers: int=3, rnn_dropout: float=0.1, bidirectional: bool=False, use_hidden_state: bool=True, padding_idx: int=1, attn_concatenate: bool=True, attn_dropout: float=0.1, head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: Optional[float]=None, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False):
        # embedding, RNN stack and head configuration live in BasicRNN
        super(AttentiveRNN, self).__init__(vocab_size=vocab_size, embed_dim=embed_dim, embed_matrix=embed_matrix, embed_trainable=embed_trainable, rnn_type=rnn_type, hidden_dim=hidden_dim, n_layers=n_layers, rnn_dropout=rnn_dropout, bidirectional=bidirectional, use_hidden_state=use_hidden_state, padding_idx=padding_idx, head_hidden_dims=head_hidden_dims, head_activation=head_activation, head_dropout=head_dropout, head_batchnorm=head_batchnorm, head_batchnorm_last=head_batchnorm_last, head_linear_first=head_linear_first)
        self.attn_concatenate = attn_concatenate
        self.attn_dropout = attn_dropout
        # dimension seen by the attention layer: doubled per direction and
        # doubled again when the hidden state is concatenated
        if (bidirectional and attn_concatenate):
            self.rnn_output_dim = (hidden_dim * 4)
        elif (bidirectional or attn_concatenate):
            self.rnn_output_dim = (hidden_dim * 2)
        else:
            self.rnn_output_dim = hidden_dim
        self.attn = ContextAttention(self.rnn_output_dim, attn_dropout, sum_along_seq=True)
        if (self.head_hidden_dims is not None):
            # rebuild the head MLP so its input matches the attention output
            head_hidden_dims = ([self.rnn_output_dim] + head_hidden_dims)
            self.rnn_mlp = MLP(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)

    def _process_rnn_outputs(self, output: Tensor, hidden: Tensor) -> Tensor:
        # optionally concatenate the last hidden state (both directions when
        # bidirectional) to every step of the RNN output, then attend
        if self.attn_concatenate:
            if self.bidirectional:
                bi_hidden = torch.cat((hidden[(- 2)], hidden[(- 1)]), dim=1)
                attn_inp = torch.cat([output, bi_hidden.unsqueeze(1).expand_as(output)], dim=2)
            else:
                attn_inp = torch.cat([output, hidden[(- 1)].unsqueeze(1).expand_as(output)], dim=2)
        else:
            attn_inp = output
        return self.attn(attn_inp)

    @property
    def attention_weights(self) -> List:
        """List with the attention weights

        The shape of the attention weights is $(N, S)$, where $N$ is the
        batch size and $S$ is the length of the sequence
        """
        return self.attn.attn_weights
class BasicRNN(BaseWDModelComponent):
    """Standard text classifier/regressor comprised by a stack of RNNs
    (LSTMs or GRUs) that can be used as the `deeptext` component of a Wide &
    Deep model or independently by itself.

    In addition, there is the option to add a Fully Connected (FC) set of
    dense layers on top of the stack of RNNs

    Parameters
    ----------
    vocab_size: int
        Number of words in the vocabulary
    embed_dim: int, Optional, default = None
        Dimension of the word embeddings if non-pretained word vectors are
        used
    embed_matrix: np.ndarray, Optional, default = None
        Pretrained word embeddings
    embed_trainable: bool, default = True
        Boolean indicating if the pretrained embeddings are trainable
    rnn_type: str, default = 'lstm'
        String indicating the type of RNN to use. One of _'lstm'_ or _'gru'_
    hidden_dim: int, default = 64
        Hidden dim of the RNN
    n_layers: int, default = 3
        Number of recurrent layers
    rnn_dropout: float, default = 0.1
        Dropout for each RNN layer except the last layer
    bidirectional: bool, default = False
        Boolean indicating whether the staked RNNs are bidirectional
    use_hidden_state: bool, default = True
        Boolean indicating whether to use the final hidden state or the RNN's
        output as predicting features. Typically the former is used.
    padding_idx: int, default = 1
        index of the padding token in the padded-tokenised sequences. The
        `TextPreprocessor` class within this library uses fastai's tokenizer
        where the token index 0 is reserved for the _'unknown'_ word token.
        Therefore, the default value is set to 1.
    head_hidden_dims: List, Optional, default = None
        List with the sizes of the dense layers in the head e.g: _[128, 64]_
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    head_dropout: float, Optional, default = None
        Dropout of the dense layers in the head
    head_batchnorm: bool, default = False
        Boolean indicating whether or not to include batch normalization in
        the dense layers that form the _'rnn_mlp'_
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers in the head
    head_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    word_embed: nn.Module
        word embedding matrix
    rnn: nn.Module
        Stack of RNNs
    rnn_mlp: nn.Module
        Stack of dense layers on top of the RNN. This will only exists if
        `head_layers_dim` is not None

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import BasicRNN
    >>> X_text = torch.cat((torch.zeros([5,1]), torch.empty(5, 4).random_(1,4)), axis=1)
    >>> model = BasicRNN(vocab_size=4, hidden_dim=4, n_layers=2, padding_idx=0, embed_dim=4)
    >>> out = model(X_text)
    """

    def __init__(
        self,
        vocab_size: int,
        embed_dim: Optional[int] = None,
        embed_matrix: Optional[np.ndarray] = None,
        embed_trainable: bool = True,
        rnn_type: str = "lstm",
        hidden_dim: int = 64,
        n_layers: int = 3,
        rnn_dropout: float = 0.1,
        bidirectional: bool = False,
        use_hidden_state: bool = True,
        padding_idx: int = 1,
        head_hidden_dims: Optional[List[int]] = None,
        head_activation: str = "relu",
        head_dropout: Optional[float] = None,
        head_batchnorm: bool = False,
        head_batchnorm_last: bool = False,
        head_linear_first: bool = False,
    ):
        super(BasicRNN, self).__init__()

        if embed_dim is None and embed_matrix is None:
            raise ValueError(
                "If no 'embed_matrix' is passed, the embedding dimension must "
                "be specified with 'embed_dim'"
            )

        if rnn_type.lower() not in ["lstm", "gru"]:
            raise ValueError(
                f"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead"
            )

        if (
            embed_dim is not None
            and embed_matrix is not None
            and not embed_dim == embed_matrix.shape[1]
        ):
            warnings.warn(
                "the input embedding dimension {} and the dimension of the "
                "pretrained embeddings {} do not match. The pretrained embeddings "
                "dimension ({}) will be used".format(
                    embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]
                ),
                UserWarning,
            )

        self.vocab_size = vocab_size
        self.embed_trainable = embed_trainable
        self.embed_dim = embed_dim
        self.rnn_type = rnn_type
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.rnn_dropout = rnn_dropout
        self.bidirectional = bidirectional
        self.use_hidden_state = use_hidden_state
        self.padding_idx = padding_idx
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first

        # Word embeddings: pretrained matrix takes precedence over 'embed_dim'
        if embed_matrix is not None:
            self.word_embed, self.embed_dim = self._set_embeddings(embed_matrix)
        else:
            self.word_embed = nn.Embedding(
                self.vocab_size, self.embed_dim, padding_idx=self.padding_idx
            )

        # Stack of RNNs
        rnn_params = {
            "input_size": self.embed_dim,
            "hidden_size": hidden_dim,
            "num_layers": n_layers,
            "bidirectional": bidirectional,
            "dropout": rnn_dropout,
            "batch_first": True,
        }
        if self.rnn_type.lower() == "lstm":
            self.rnn: Union[nn.LSTM, nn.GRU] = nn.LSTM(**rnn_params)
        elif self.rnn_type.lower() == "gru":
            self.rnn = nn.GRU(**rnn_params)

        # a bidirectional RNN concatenates the hidden states of both directions
        self.rnn_output_dim = hidden_dim * 2 if bidirectional else hidden_dim

        # Optional dense head on top of the RNN output
        if self.head_hidden_dims is not None:
            head_hidden_dims = [self.rnn_output_dim] + head_hidden_dims
            self.rnn_mlp: Union[MLP, nn.Identity] = MLP(
                head_hidden_dims,
                head_activation,
                head_dropout,
                head_batchnorm,
                head_batchnorm_last,
                head_linear_first,
            )
        else:
            # simple hack to add readability in the forward pass
            self.rnn_mlp = nn.Identity()

    def forward(self, X: Tensor) -> Tensor:
        embed = self.word_embed(X.long())
        if self.rnn_type.lower() == "lstm":
            # the cell state is not used downstream
            o, (h, _) = self.rnn(embed)
        elif self.rnn_type.lower() == "gru":
            o, h = self.rnn(embed)
        processed_outputs = self._process_rnn_outputs(o, h)
        return self.rnn_mlp(processed_outputs)

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        neccesary to build the `WideDeep` class
        """
        return (
            self.head_hidden_dims[-1]
            if self.head_hidden_dims is not None
            else self.rnn_output_dim
        )

    def _set_embeddings(
        self, embed_matrix: Optional[np.ndarray] = None
    ) -> Tuple[nn.Module, int]:
        # Build the embedding layer either from a pretrained matrix or from
        # scratch with 'embed_dim' columns
        if embed_matrix is not None:
            assert (
                embed_matrix.dtype == "float32"
            ), "'embed_matrix' must be of dtype 'float32', got dtype '{}'".format(
                str(embed_matrix.dtype)
            )
            word_embed = nn.Embedding(
                self.vocab_size, embed_matrix.shape[1], padding_idx=self.padding_idx
            )
            word_embed.weight = nn.Parameter(
                torch.tensor(embed_matrix), requires_grad=self.embed_trainable
            )
            embed_dim = embed_matrix.shape[1]
        else:
            word_embed = nn.Embedding(
                self.vocab_size, self.embed_dim, padding_idx=self.padding_idx
            )
            embed_dim = self.embed_dim
        return word_embed, embed_dim

    def _process_rnn_outputs(self, output: Tensor, hidden: Tensor) -> Tensor:
        # since 'batch_first=True', move the sequence dim first so that
        # output[-1] is the last time step for every element in the batch
        output = output.permute(1, 0, 2)
        if self.bidirectional:
            # concatenate the final hidden state of the forward and backward
            # directions (hidden[-2] and hidden[-1] respectively)
            processed_outputs = (
                torch.cat((hidden[-2], hidden[-1]), dim=1)
                if self.use_hidden_state
                else output[-1]
            )
        else:
            processed_outputs = hidden[-1] if self.use_hidden_state else output[-1]
        return processed_outputs
class StackedAttentiveRNN(BaseWDModelComponent):
    """Text classifier/regressor comprised by a stack of blocks:
    `[RNN + Attention]`. This can be used as the `deeptext` component of a
    Wide & Deep model or independently by itself.

    In addition, there is the option to add a Fully Connected (FC) set of
    dense layers on top of the attentiob blocks

    Parameters
    ----------
    vocab_size: int
        Number of words in the vocabulary
    embed_dim: int, Optional, default = None
        Dimension of the word embeddings if non-pretained word vectors are
        used
    embed_matrix: np.ndarray, Optional, default = None
        Pretrained word embeddings
    embed_trainable: bool, default = True
        Boolean indicating if the pretrained embeddings are trainable
    rnn_type: str, default = 'lstm'
        String indicating the type of RNN to use. One of 'lstm' or 'gru'
    hidden_dim: int, default = 64
        Hidden dim of the RNN
    bidirectional: bool, default = False
        Boolean indicating whether the staked RNNs are bidirectional
    padding_idx: int, default = 1
        index of the padding token in the padded-tokenised sequences. The
        `TextPreprocessor` class within this library uses fastai's
        tokenizer where the token index 0 is reserved for the _'unknown'_
        word token. Therefore, the default value is set to 1.
    n_blocks: int, default = 3
        Number of attention blocks. Each block is comprised by an RNN and a
        Context Attention Encoder
    attn_concatenate: bool, default = False
        Boolean indicating if the input to the attention mechanism will be the
        output of the RNN or the output of the RNN concatenated with the last
        hidden state or simply
    attn_dropout: float, default = 0.1
        Internal dropout for the attention mechanism
    with_addnorm: bool, default = False
        Boolean indicating if the output of each block will be added to the
        input and normalised
    head_hidden_dims: List, Optional, default = None
        List with the sizes of the dense layers in the head e.g: [128, 64]
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    head_dropout: float, Optional, default = None
        Dropout of the dense layers in the head
    head_batchnorm: bool, default = False
        Boolean indicating whether or not to include batch normalization in
        the dense layers that form the _'rnn_mlp'_
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers in the head
    head_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    word_embed: nn.Module
        word embedding matrix
    rnn: nn.Module
        Stack of RNNs
    rnn_mlp: nn.Module
        Stack of dense layers on top of the RNN. This will only exists if
        `head_layers_dim` is not `None`

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import StackedAttentiveRNN
    >>> X_text = torch.cat((torch.zeros([5,1]), torch.empty(5, 4).random_(1,4)), axis=1)
    >>> model = StackedAttentiveRNN(vocab_size=4, hidden_dim=4, padding_idx=0, embed_dim=4)
    >>> out = model(X_text)
    """

    def __init__(
        self,
        vocab_size: int,
        embed_dim: Optional[int] = None,
        embed_matrix: Optional[np.ndarray] = None,
        embed_trainable: bool = True,
        rnn_type: str = "lstm",
        hidden_dim: int = 64,
        bidirectional: bool = False,
        padding_idx: int = 1,
        n_blocks: int = 3,
        attn_concatenate: bool = False,
        attn_dropout: float = 0.1,
        with_addnorm: bool = False,
        head_hidden_dims: Optional[List[int]] = None,
        head_activation: str = "relu",
        head_dropout: Optional[float] = None,
        head_batchnorm: bool = False,
        head_batchnorm_last: bool = False,
        head_linear_first: bool = False,
    ):
        super(StackedAttentiveRNN, self).__init__()

        # same guard as 'BasicRNN': without it a None 'embed_dim' would crash
        # with a cryptic TypeError inside nn.Embedding
        if embed_dim is None and embed_matrix is None:
            raise ValueError(
                "If no 'embed_matrix' is passed, the embedding dimension must "
                "be specified with 'embed_dim'"
            )

        if (
            embed_dim is not None
            and embed_matrix is not None
            and not embed_dim == embed_matrix.shape[1]
        ):
            warnings.warn(
                "the input embedding dimension {} and the dimension of the "
                "pretrained embeddings {} do not match. The pretrained embeddings "
                "dimension ({}) will be used".format(
                    embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]
                ),
                UserWarning,
            )

        if rnn_type.lower() not in ["lstm", "gru"]:
            raise ValueError(
                f"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead"
            )

        self.vocab_size = vocab_size
        self.embed_trainable = embed_trainable
        self.embed_dim = embed_dim
        self.rnn_type = rnn_type
        self.hidden_dim = hidden_dim
        self.bidirectional = bidirectional
        self.padding_idx = padding_idx
        self.n_blocks = n_blocks
        self.attn_concatenate = attn_concatenate
        self.attn_dropout = attn_dropout
        self.with_addnorm = with_addnorm
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first

        # Embeddings
        self.word_embed, self.embed_dim = self._set_embeddings(embed_matrix)

        # The dim of the input to the attention mechanism depends on whether
        # the RNN is bidirectional and/or the hidden state is concatenated
        if bidirectional and attn_concatenate:
            self.rnn_output_dim = hidden_dim * 4
        elif bidirectional or attn_concatenate:
            self.rnn_output_dim = hidden_dim * 2
        else:
            self.rnn_output_dim = hidden_dim

        # project the embeddings so that all blocks share the same input dim
        if self.rnn_output_dim != self.embed_dim:
            self.embed_proj: Union[nn.Linear, nn.Identity] = nn.Linear(
                self.embed_dim, self.rnn_output_dim
            )
        else:
            self.embed_proj = nn.Identity()

        # RNN (shared by all attention blocks)
        rnn_params = {
            "input_size": self.rnn_output_dim,
            "hidden_size": hidden_dim,
            "bidirectional": bidirectional,
            "batch_first": True,
        }
        if self.rnn_type.lower() == "lstm":
            self.rnn: Union[nn.LSTM, nn.GRU] = nn.LSTM(**rnn_params)
        elif self.rnn_type.lower() == "gru":
            self.rnn = nn.GRU(**rnn_params)

        # FC-Head (Mlp). Only the last block sums along the sequence dim and
        # addnorm (if used) is applied to all blocks but the last
        self.attention_blks = nn.ModuleList()
        for i in range(n_blocks):
            self.attention_blks.append(
                ContextAttentionEncoder(
                    self.rnn,
                    self.rnn_output_dim,
                    attn_dropout,
                    attn_concatenate,
                    with_addnorm=(with_addnorm if i != n_blocks - 1 else False),
                    sum_along_seq=(i == n_blocks - 1),
                )
            )

        if self.head_hidden_dims is not None:
            head_hidden_dims = [self.rnn_output_dim] + head_hidden_dims
            self.rnn_mlp: Union[MLP, nn.Identity] = MLP(
                head_hidden_dims,
                head_activation,
                head_dropout,
                head_batchnorm,
                head_batchnorm_last,
                head_linear_first,
            )
        else:
            # simple hack to add readability in the forward pass
            self.rnn_mlp = nn.Identity()

    def forward(self, X: Tensor) -> Tensor:
        x = self.embed_proj(self.word_embed(X.long()))

        # initial (zeroed) hidden -and cell- state, one per direction.
        # NOTE: use '.lower()' to stay consistent with the validation in
        # '__init__' (otherwise e.g. rnn_type='LSTM' would get c=None)
        n_directions = 2 if self.bidirectional else 1
        h = torch.zeros(n_directions, X.shape[0], self.hidden_dim).to(x.device)
        if self.rnn_type.lower() == "lstm":
            c: Optional[Tensor] = torch.zeros(
                n_directions, X.shape[0], self.hidden_dim
            ).to(x.device)
        else:
            c = None

        for blk in self.attention_blks:
            x, h, c = blk(x, h, c)

        return self.rnn_mlp(x)

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        neccesary to build the `WideDeep` class
        """
        return (
            self.head_hidden_dims[-1]
            if self.head_hidden_dims is not None
            else self.rnn_output_dim
        )

    @property
    def attention_weights(self) -> List:
        """List with the attention weights per block

        The shape of the attention weights is $(N, S)$ Where $N$ is the batch
        size and $S$ is the length of the sequence
        """
        return [blk.attn.attn_weights for blk in self.attention_blks]

    def _set_embeddings(
        self, embed_matrix: Union[Any, np.ndarray]
    ) -> Tuple[nn.Module, int]:
        # Build the embedding layer either from a pretrained matrix or from
        # scratch with 'embed_dim' columns
        if isinstance(embed_matrix, np.ndarray):
            assert (
                embed_matrix.dtype == "float32"
            ), "'embed_matrix' must be of dtype 'float32', got dtype '{}'".format(
                str(embed_matrix.dtype)
            )
            word_embed = nn.Embedding(
                self.vocab_size, embed_matrix.shape[1], padding_idx=self.padding_idx
            )
            word_embed.weight = nn.Parameter(
                torch.tensor(embed_matrix), requires_grad=self.embed_trainable
            )
            embed_dim = embed_matrix.shape[1]
        else:
            word_embed = nn.Embedding(
                self.vocab_size, self.embed_dim, padding_idx=self.padding_idx
            )
            embed_dim = self.embed_dim
        return word_embed, embed_dim
class WideDeep(nn.Module):
    """Main collector class that combines all `wide`, `deeptabular`
    `deeptext` and `deepimage` models.

    Note that all models described so far in this library must be passed to
    the `WideDeep` class once constructed. This is because the models output
    the last layer before the prediction layer. Such prediction layer is
    added by the `WideDeep` class as it collects the components for every
    data mode.

    There are two options to combine these models that correspond to the
    two main architectures that `pytorch-widedeep` can build.

    - Directly connecting the output of the model components to an ouput neuron(s).

    - Adding a `Fully-Connected Head` (FC-Head) on top of the deep models.
      This FC-Head will combine the output form the `deeptabular`, `deeptext` and
      `deepimage` and will be then connected to the output neuron(s).

    Parameters
    ----------
    wide: nn.Module, Optional, default = None
        `Wide` model. This is a linear model where the non-linearities are
        captured via crossed-columns.
    deeptabular: BaseWDModelComponent, Optional, default = None
        Currently this library implements a number of possible architectures
        for the `deeptabular` component. See the documenation of the package.
    deeptext: BaseWDModelComponent, Optional, default = None
        Currently this library implements a number of possible architectures
        for the `deeptext` component. See the documenation of the package.
    deepimage: BaseWDModelComponent, Optional, default = None
        Currently this library uses `torchvision` and implements a number of
        possible architectures for the `deepimage` component. See the
        documenation of the package.
    deephead: BaseWDModelComponent, Optional, default = None
        Alternatively, the user can pass a custom model that will receive the
        output of the deep component. If `deephead` is not None all the
        previous fc-head parameters will be ignored
    head_hidden_dims: List, Optional, default = None
        List with the sizes of the dense layers in the head e.g: [128, 64]
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported
    head_dropout: float, Optional, default = None
        Dropout of the dense layers in the head
    head_batchnorm: bool, default = False
        Boolean indicating whether or not to include batch normalization in
        the dense layers that form the `'rnn_mlp'`
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers in the head
    head_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`
    enforce_positive: bool, default = False
        Boolean indicating if the output from the final layer must be
        positive. This is important if you are using loss functions with
        non-negative input restrictions, e.g. RMSLE, or if you know your
        predictions are bounded in between 0 and inf
    enforce_positive_activation: str, default = "softplus"
        Activation function to enforce that the final layer has a positive
        output. `'softplus'` or `'relu'` are supported.
    pred_dim: int, default = 1
        Size of the final wide and deep output layer containing the
        predictions. `1` for regression and binary classification or number
        of classes for multiclass classification.
    with_fds: bool, default = False
        Boolean indicating if Feature Distribution Smoothing (FDS) will be
        applied before the final prediction layer. Only available for
        regression problems.
        See [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554) for details.

    Other Parameters
    ----------------
    **fds_config: dict, default = None
        Dictionary with the parameters to be used when using Feature
        Distribution Smoothing. Please, see the docs for the `FDSLayer`.
        NOTE: Feature Distribution Smoothing is available when using **ONLY**
        a `deeptabular` component. We consider this feature absolutely
        experimental and we recommend the user to not use it unless the
        corresponding [publication](https://arxiv.org/abs/2102.09554) is
        well understood

    Examples
    --------

    >>> from pytorch_widedeep.models import TabResnet, Vision, BasicRNN, Wide, WideDeep
    >>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
    >>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
    >>> wide = Wide(10, 1)
    >>> deeptabular = TabResnet(blocks_dims=[8, 4], column_idx=column_idx, cat_embed_input=embed_input)
    >>> deeptext = BasicRNN(vocab_size=10, embed_dim=4, padding_idx=0)
    >>> deepimage = Vision()
    >>> model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)

    NOTE: It is possible to use custom components to build Wide & Deep
    models. Simply, build them and pass them as the corresponding parameters.
    Note that the custom models MUST return a last layer of activations
    (i.e. not the final prediction) so that these activations are collected
    by `WideDeep` and combined accordingly. In addition, the models MUST also
    contain an attribute `output_dim` with the size of these last layers of
    activations. See for example `pytorch_widedeep.models.tab_mlp.TabMlp`
    """

    @Alias("pred_dim", ["num_class", "pred_size"])
    def __init__(
        self,
        wide: Optional[nn.Module] = None,
        deeptabular: Optional[BaseWDModelComponent] = None,
        deeptext: Optional[BaseWDModelComponent] = None,
        deepimage: Optional[BaseWDModelComponent] = None,
        deephead: Optional[BaseWDModelComponent] = None,
        head_hidden_dims: Optional[List[int]] = None,
        head_activation: str = "relu",
        head_dropout: float = 0.1,
        head_batchnorm: bool = False,
        head_batchnorm_last: bool = False,
        head_linear_first: bool = True,
        enforce_positive: bool = False,
        enforce_positive_activation: str = "softplus",
        pred_dim: int = 1,
        with_fds: bool = False,
        **fds_config,
    ):
        super(WideDeep, self).__init__()

        self._check_inputs(
            wide,
            deeptabular,
            deeptext,
            deepimage,
            deephead,
            head_hidden_dims,
            pred_dim,
            with_fds,
        )

        # set later by the Trainer; None means "whatever device the inputs are on"
        self.wd_device: Optional[str] = None

        # required as attribute just in case we pass a deephead
        self.pred_dim = pred_dim

        self.with_fds = with_fds
        self.enforce_positive = enforce_positive

        # The main 5 components of the wide and deep assemble: wide,
        # deeptabular, deeptext, deepimage and deephead
        self.with_deephead = deephead is not None or head_hidden_dims is not None
        if deephead is None and head_hidden_dims is not None:
            self.deephead = self._build_deephead(
                deeptabular,
                deeptext,
                deepimage,
                head_hidden_dims,
                head_activation,
                head_dropout,
                head_batchnorm,
                head_batchnorm_last,
                head_linear_first,
            )
        elif deephead is not None:
            # a custom deephead still needs the final prediction layer on top
            self.deephead = nn.Sequential(
                deephead, nn.Linear(deephead.output_dim, self.pred_dim)
            )
        else:
            # if there is no deephead, the output layer is applied to each
            # deep component individually (see '_set_model_components')
            self.deephead = None

        self.wide = wide
        self.deeptabular, self.deeptext, self.deepimage = self._set_model_components(
            deeptabular, deeptext, deepimage, self.with_deephead
        )

        if self.with_fds:
            self.fds_layer = FDSLayer(
                feature_dim=self.deeptabular.output_dim, **fds_config
            )

        if self.enforce_positive:
            self.enf_pos = get_activation_fn(enforce_positive_activation)

    def forward(
        self,
        X: Dict[str, Tensor],
        y: Optional[Tensor] = None,
        epoch: Optional[int] = None,
    ) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        if self.with_fds:
            # FDS is deeptabular-only; 'y' and 'epoch' are used for smoothing
            return self._forward_deep_with_fds(X, y, epoch)

        wide_out = self._forward_wide(X)
        if self.with_deephead:
            deep = self._forward_deephead(X, wide_out)
        else:
            deep = self._forward_deep(X, wide_out)

        if self.enforce_positive:
            return self.enf_pos(deep)
        else:
            return deep

    def _build_deephead(
        self,
        deeptabular: Optional[BaseWDModelComponent],
        deeptext: Optional[BaseWDModelComponent],
        deepimage: Optional[BaseWDModelComponent],
        head_hidden_dims: Optional[List[int]],
        head_activation: str,
        head_dropout: float,
        head_batchnorm: bool,
        head_batchnorm_last: bool,
        head_linear_first: bool,
    ) -> nn.Sequential:
        # the head receives the concatenation of all deep components
        deep_dim = 0
        if deeptabular is not None:
            deep_dim += deeptabular.output_dim
        if deeptext is not None:
            deep_dim += deeptext.output_dim
        if deepimage is not None:
            deep_dim += deepimage.output_dim

        head_hidden_dims = [deep_dim] + head_hidden_dims
        deephead = nn.Sequential(
            MLP(
                head_hidden_dims,
                head_activation,
                head_dropout,
                head_batchnorm,
                head_batchnorm_last,
                head_linear_first,
            ),
            nn.Linear(head_hidden_dims[-1], self.pred_dim),
        )
        return deephead

    def _set_model_components(
        self,
        deeptabular: Optional[BaseWDModelComponent],
        deeptext: Optional[BaseWDModelComponent],
        deepimage: Optional[BaseWDModelComponent],
        with_deephead: bool,
    ) -> Tuple[Optional[WDModel], Optional[WDModel], Optional[WDModel]]:
        # When there is no deephead, each deep component gets its own
        # prediction layer so its output can be added to the wide output
        if deeptabular is not None:
            self.is_tabnet = deeptabular.__class__.__name__ == "TabNet"
        else:
            self.is_tabnet = False

        if deeptabular is not None:
            if not self.with_fds:
                if self.is_tabnet:
                    # TabNet also returns a sparsity loss, so it needs its
                    # own prediction layer implementation
                    deeptabular_ = (
                        nn.Sequential(
                            deeptabular,
                            TabNetPredLayer(deeptabular.output_dim, self.pred_dim),
                        )
                        if not with_deephead
                        else deeptabular
                    )
                else:
                    deeptabular_ = (
                        nn.Sequential(
                            deeptabular,
                            nn.Linear(deeptabular.output_dim, self.pred_dim),
                        )
                        if not with_deephead
                        else deeptabular
                    )
            else:
                # with FDS the prediction layer lives inside the FDSLayer
                deeptabular_ = deeptabular
        else:
            deeptabular_ = None

        if deeptext is not None:
            deeptext_ = (
                nn.Sequential(deeptext, nn.Linear(deeptext.output_dim, self.pred_dim))
                if not with_deephead
                else deeptext
            )
        else:
            deeptext_ = None

        if deepimage is not None:
            deepimage_ = (
                nn.Sequential(deepimage, nn.Linear(deepimage.output_dim, self.pred_dim))
                if not with_deephead
                else deepimage
            )
        else:
            deepimage_ = None

        return deeptabular_, deeptext_, deepimage_

    def _forward_wide(self, X: Dict[str, Tensor]) -> Tensor:
        # without a wide component the 'wide output' is a zero tensor that
        # simply acts as the accumulator for the deep components
        if self.wide is not None:
            out = self.wide(X["wide"])
        else:
            batch_size = next(iter(X.values())).size(0)
            out = torch.zeros(batch_size, self.pred_dim).to(self.wd_device)
        return out

    def _forward_deephead(
        self, X: Dict[str, Tensor], wide_out: Tensor
    ) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        if self.deeptabular is not None:
            if self.is_tabnet:
                tab_out = self.deeptabular(X["deeptabular"])
                deepside, M_loss = tab_out[0], tab_out[1]
            else:
                deepside = self.deeptabular(X["deeptabular"])
        else:
            # empty tensor so the concatenations below work uniformly
            deepside = torch.FloatTensor().to(self.wd_device)

        if self.deeptext is not None:
            deepside = torch.cat([deepside, self.deeptext(X["deeptext"])], dim=1)
        if self.deepimage is not None:
            deepside = torch.cat([deepside, self.deepimage(X["deepimage"])], dim=1)

        deepside_out = self.deephead(deepside)

        # in-place add: wide_out accumulates the head's output
        if self.is_tabnet:
            res: Union[Tensor, Tuple[Tensor, Tensor]] = (
                wide_out.add_(deepside_out),
                M_loss,
            )
        else:
            res = wide_out.add_(deepside_out)

        return res

    def _forward_deep(
        self, X: Dict[str, Tensor], wide_out: Tensor
    ) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        # each component already ends in its own prediction layer, so their
        # outputs are accumulated (in-place) into 'wide_out'
        if self.deeptabular is not None:
            if self.is_tabnet:
                tab_out, M_loss = self.deeptabular(X["deeptabular"])
                wide_out.add_(tab_out)
            else:
                wide_out.add_(self.deeptabular(X["deeptabular"]))
        if self.deeptext is not None:
            wide_out.add_(self.deeptext(X["deeptext"]))
        if self.deepimage is not None:
            wide_out.add_(self.deepimage(X["deepimage"]))

        if self.is_tabnet:
            res: Union[Tensor, Tuple[Tensor, Tensor]] = (wide_out, M_loss)
        else:
            res = wide_out

        return res

    def _forward_deep_with_fds(
        self,
        X: Dict[str, Tensor],
        y: Optional[Tensor] = None,
        epoch: Optional[int] = None,
    ) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        # FDS implies only a 'deeptabular' component ('_check_inputs' enforces it)
        res = self.fds_layer(self.deeptabular(X["deeptabular"]), y, epoch)
        if self.enforce_positive:
            # during training the FDSLayer may return (features, preds)
            if isinstance(res, tuple):
                out: Union[Tensor, Tuple[Tensor, Tensor]] = (
                    res[0],
                    self.enf_pos(res[1]),
                )
            else:
                out = self.enf_pos(res)
        else:
            out = res
        return out

    @staticmethod
    def _check_inputs(
        wide,
        deeptabular,
        deeptext,
        deepimage,
        deephead,
        head_hidden_dims,
        pred_dim,
        with_fds,
    ):
        # Validate the combination of components before building anything
        if wide is not None:
            assert wide.wide_linear.weight.size(1) == pred_dim, (
                "the 'pred_dim' of the wide component ({}) must be equal to the "
                "'pred_dim' of the deep component and the overall model itself "
                "({})".format(wide.wide_linear.weight.size(1), pred_dim)
            )
        if deeptabular is not None and not hasattr(deeptabular, "output_dim"):
            raise AttributeError(
                "deeptabular model must have an 'output_dim' attribute or "
                "property. See pytorch-widedeep.models.deep_text.DeepText"
            )
        if deeptabular is not None:
            is_tabnet = deeptabular.__class__.__name__ == "TabNet"
            has_wide_text_or_image = (
                wide is not None or deeptext is not None or deepimage is not None
            )
            if is_tabnet and has_wide_text_or_image:
                warnings.warn(
                    "'WideDeep' is a model comprised by multiple components and the"
                    " 'deeptabular' component is 'TabNet'. We recommend using"
                    " 'TabNet' in isolation. The reasons are: i)'TabNet' uses sparse"
                    " regularization which partially losses its purpose when used"
                    " in combination with other components. If you still want to"
                    " use a multiple component model with 'TabNet', consider"
                    " setting 'lambda_sparse' to 0 during training. ii) The"
                    " feature importances will be computed only for TabNet but the"
                    " model will comprise multiple components. Therefore, such"
                    " importances will partially lose their 'meaning'.",
                    UserWarning,
                )
        if deeptext is not None and not hasattr(deeptext, "output_dim"):
            raise AttributeError(
                "deeptext model must have an 'output_dim' attribute or "
                "property. See pytorch-widedeep.models.deep_text.DeepText"
            )
        if deepimage is not None and not hasattr(deepimage, "output_dim"):
            raise AttributeError(
                "deepimage model must have an 'output_dim' attribute or "
                "property. See pytorch-widedeep.models.deep_text.DeepText"
            )
        if deephead is not None and head_hidden_dims is not None:
            raise ValueError(
                "both 'deephead' and 'head_hidden_dims' are not None. Use one "
                "of the other, but not both"
            )
        if (
            head_hidden_dims is not None
            and not deeptabular
            and not deeptext
            and not deepimage
        ):
            raise ValueError(
                "if 'head_hidden_dims' is not None, at least one deep component "
                "must be used"
            )
        if deephead is not None:
            if not hasattr(deephead, "output_dim"):
                raise AttributeError(
                    "As any other custom model passed to 'WideDeep', 'deephead' "
                    "must have an 'output_dim' attribute or property. "
                )
            deephead_inp_feat = next(deephead.parameters()).size(1)
            output_dim = 0
            if deeptabular is not None:
                output_dim += deeptabular.output_dim
            if deeptext is not None:
                output_dim += deeptext.output_dim
            if deepimage is not None:
                output_dim += deepimage.output_dim
            assert deephead_inp_feat == output_dim, (
                "if a custom 'deephead' is used its input features ({}) must be "
                "equal to the output features of the deep component ({})".format(
                    deephead_inp_feat, output_dim
                )
            )
        if with_fds and (
            (
                wide is not None
                or deeptext is not None
                or deepimage is not None
                or deephead is not None
            )
            or pred_dim != 1
        ):
            raise ValueError(
                "Feature Distribution Smoothing (FDS) is supported when using "
                "only a deeptabular component and for regression problems."
            )
class BasePreprocessor(): 'Base Class of All Preprocessors.' def __init__(self, *args): pass def fit(self, df: pd.DataFrame): raise NotImplementedError('Preprocessor must implement this method') def transform(self, df: pd.DataFrame): raise NotImplementedError('Preprocessor must implement this method') def fit_transform(self, df: pd.DataFrame): raise NotImplementedError('Preprocessor must implement this method')
def check_is_fitted(estimator: Union[(BasePreprocessor, Any)], attributes: List[str]=None, all_or_any: str='all', condition: bool=True): 'Checks if an estimator is fitted\n\n Parameters\n ----------\n estimator: ``BasePreprocessor``,\n An object of type ``BasePreprocessor``\n attributes: List, default = None\n List of strings with the attributes to check for\n all_or_any: str, default = "all"\n whether all or any of the attributes in the list must be present\n condition: bool, default = True,\n If not attribute list is passed, this condition that must be True for\n the estimator to be considered as fitted\n ' estimator_name: str = estimator.__class__.__name__ error_msg = "This {} instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.".format(estimator_name) if ((attributes is not None) and (all_or_any == 'all')): if (not all([hasattr(estimator, attr) for attr in attributes])): raise NotFittedError(error_msg) elif ((attributes is not None) and (all_or_any == 'any')): if (not any([hasattr(estimator, attr) for attr in attributes])): raise NotFittedError(error_msg) elif (not condition): raise NotFittedError(error_msg)
class ImagePreprocessor(BasePreprocessor):
    """Preprocessor to prepare the ``deepimage`` input dataset.

    Images are resized to ``width`` x ``height``. Images whose aspect ratio
    is not 1.0 are resized respecting their aspect ratio (via
    ``AspectAwarePreprocessor``); square images are resized via
    ``SimplePreprocessor``.

    Parameters
    ----------
    img_col: str
        name of the column with the images filenames
    img_path: str
        path to the directory where the images are stored
    width: int, default=224
        width of the resulting processed image
    height: int, default=224
        height of the resulting processed image
    verbose: int, default=1
        Enable verbose output

    Attributes
    ----------
    aap: AspectAwarePreprocessor
        aspect-aware resizer used for non-square images
    spp: SimplePreprocessor
        plain resizer used for square images
    normalise_metrics: Dict
        normalisation metrics of the image dataset, i.e. mean and std for
        the R, G and B channels, scaled to [0, 1]

    Examples
    --------
    >>> import pandas as pd
    >>>
    >>> from pytorch_widedeep.preprocessing import ImagePreprocessor
    >>>
    >>> path_to_image1 = 'tests/test_data_utils/images/galaxy1.png'
    >>> path_to_image2 = 'tests/test_data_utils/images/galaxy2.png'
    >>>
    >>> df_train = pd.DataFrame({'images_column': [path_to_image1]})
    >>> df_test = pd.DataFrame({'images_column': [path_to_image2]})
    >>> img_preprocessor = ImagePreprocessor(img_col='images_column', img_path='.', verbose=0)
    >>> resized_images = img_preprocessor.fit_transform(df_train)
    >>> new_resized_images = img_preprocessor.transform(df_train)

    :information_source: **NOTE**:
        Normalising metrics are only computed the first time ``transform``
        runs (typically via ``fit_transform``). Subsequent ``transform``
        calls reuse them, and ``fit`` on its own only instantiates the
        resizing functions.
    """

    def __init__(self, img_col: str, img_path: str, width: int = 224, height: int = 224, verbose: int = 1):
        super(ImagePreprocessor, self).__init__()
        self.img_col = img_col
        self.img_path = img_path
        self.width = width
        self.height = height
        self.verbose = verbose
        self.aap = AspectAwarePreprocessor(self.width, self.height)
        self.spp = SimplePreprocessor(self.width, self.height)
        # flag so normalisation metrics are only computed once
        self.compute_normalising_computed = False

    def fit(self, df: pd.DataFrame) -> BasePreprocessor:
        """No-op: the resizers are built in ``__init__``."""
        return self

    def transform_sample(self, img: np.ndarray) -> np.ndarray:
        """Resize a single image, aspect-aware when it is not square."""
        ratio = img.shape[0] / img.shape[1]
        resizer = self.spp if ratio == 1.0 else self.aap
        return resizer.preprocess(img)

    def transform(self, df: pd.DataFrame) -> np.ndarray:
        """Resizes the images to the input height and width.

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe with the `img_col`

        Returns
        -------
        np.ndarray
            Resized images to the input height and width
        """
        filenames = df[self.img_col].tolist()
        if self.verbose:
            print('Reading Images from {}'.format(self.img_path))
        raw_imgs = [cv2.imread('/'.join([self.img_path, fname])) for fname in filenames]
        if self.verbose:
            print('Resizing')
        resized_imgs = []
        for img in tqdm(raw_imgs, total=len(raw_imgs), disable=(self.verbose != 1)):
            # non-square images get the aspect-aware resizer
            if (img.shape[0] / img.shape[1]) != 1.0:
                resized_imgs.append(self.aap.preprocess(img))
            else:
                resized_imgs.append(self.spp.preprocess(img))
        if not self.compute_normalising_computed:
            if self.verbose:
                print('Computing normalisation metrics')
            # accumulate per-image channel stats; cv2 returns them in BGR order
            means = {'R': [], 'G': [], 'B': []}
            stds = {'R': [], 'G': [], 'B': []}
            for rsz_img in resized_imgs:
                (mean_b, mean_g, mean_r), (std_b, std_g, std_r) = cv2.meanStdDev(rsz_img)
                means['R'].append(mean_r)
                means['G'].append(mean_g)
                means['B'].append(mean_b)
                stds['R'].append(std_r)
                stds['G'].append(std_g)
                stds['B'].append(std_b)
            self.normalise_metrics = dict(
                mean={ch: (np.mean(vals) / 255.0) for ch, vals in means.items()},
                std={ch: (np.mean(vals) / 255.0) for ch, vals in stds.items()},
            )
            self.compute_normalising_computed = True
        return np.asarray(resized_imgs)

    def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
        """Combines `fit` and `transform`

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        np.ndarray
            Resized images to the input height and width
        """
        return self.fit(df).transform(df)

    def inverse_transform(self, transformed_image):
        raise NotImplementedError("'inverse_transform' method is not implemented for 'ImagePreprocessor'")

    def __repr__(self) -> str:
        param_templates = [
            'img_col={img_col}',
            'img_path={img_path}',
            'width={width}',
            'height={height}',
            'verbose={verbose}',
        ]
        all_params = ', '.join(param_templates)
        return f'ImagePreprocessor({all_params.format(**self.__dict__)})'
def embed_sz_rule(n_cat: int, embedding_rule: Literal['google', 'fastai_old', 'fastai_new'] = 'fastai_new') -> int:
    """Rule of thumb to pick embedding size corresponding to ``n_cat``.

    The default rule is taken from recent fastai's Tabular API. The function
    also includes the rule previously used by fastai and the rule included
    in the Google's Tensorflow documentation.

    Parameters
    ----------
    n_cat: int
        number of unique categorical values in a feature
    embedding_rule: str, default = 'fastai_new'
        rule of thumb to be used for the embedding vector size. One of
        'google', 'fastai_old' or 'fastai_new'
        (the original docstring claimed the default was 'fastai_old', which
        contradicted the actual signature default)

    Returns
    -------
    int
        suggested embedding dimension
    """
    if embedding_rule == 'google':
        # google: round(n_cat ** 0.25)
        return int(round(n_cat ** 0.25))
    elif embedding_rule == 'fastai_old':
        # fastai (old): min(50, n_cat // 2 + 1)
        return int(min(50, (n_cat // 2) + 1))
    else:
        # fastai (new): min(600, round(1.6 * n_cat ** 0.56))
        return int(min(600, round(1.6 * (n_cat ** 0.56))))
class Quantizer:
    """Helper class to perform the quantization of continuous columns.

    It is included in the docs for completion, since depending on the value
    of the `'quantization_setup'` parameter of the `TabPreprocessor` class,
    that class might hold an attribute of type `Quantizer`. However, this
    class is designed to always run internally within `TabPreprocessor`.

    Parameters
    ----------
    quantization_setup: Dict, default = None
        Dictionary where the keys are the column names to quantize and the
        values are either integers indicating the number of bins or a list
        of scalars indicating the bin edges.
    """

    def __init__(self, quantization_setup: Dict[str, Union[int, List[float]]], **kwargs):
        self.quantization_setup = quantization_setup
        # extra kwargs are forwarded verbatim to pd.cut
        self.quant_args = kwargs
        self.is_fitted = False

    def fit(self, df: pd.DataFrame) -> 'Quantizer':
        """Compute and store the bin edges (and their mid points) per column."""
        self.bins: Dict[str, List[float]] = {}
        for col, setup in self.quantization_setup.items():
            _, edges = pd.cut(df[col], setup, retbins=True, labels=False, **self.quant_args)
            self.bins[col] = edges
        # map each bin index to the mid point of its edges, so quantized
        # values can later be mapped back to an approximate original value
        self.inversed_bins: Dict[str, Dict[int, float]] = {
            col: dict(enumerate((lo + hi) / 2.0 for lo, hi in zip(edges, edges[1:])))
            for col, edges in self.bins.items()
        }
        self.is_fitted = True
        return self

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Replace each quantized column by its bin index (via pd.cut)."""
        check_is_fitted(self, condition=self.is_fitted)
        dfc = df.copy()
        for col, edges in self.bins.items():
            dfc[col] = pd.cut(dfc[col], edges, labels=False, **self.quant_args)
        return dfc

    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Convenience: `fit` followed by `transform` on the same frame."""
        return self.fit(df).transform(df)

    def __repr__(self) -> str:
        return f'Quantizer(quantization_setup={self.quantization_setup})'
class TabPreprocessor(BasePreprocessor):
    """Preprocessor to prepare the `deeptabular` component input dataset.

    Parameters
    ----------
    cat_embed_cols: List, default = None
        Categorical columns that will be represented by embeddings: either a
        list of column names (e.g. _['education', 'relationship', ...]_) or a
        list of Tuples with the name and the embedding dimension (e.g.
        _[('education', 32), ('relationship', 16), ...]_)
    continuous_cols: List, default = None
        List with the name of the continuous cols
    quantization_setup: int or Dict, default = None
        Continuous columns can be turned into categorical via `pd.cut`. An
        int (number of bins used for all continuous cols) or a dictionary
        mapping column names to either a number of bins or a list of bin
        edges. Param alias: `cols_and_bins`
    cols_to_scale: List or str, default = None
        Columns that will be standarised via sklearn's `StandardScaler`. The
        string `'all'` scales all continuous cols.
    scale: bool, default = False
        Deprecated; use `cols_to_scale` instead. Param alias:
        `scale_cont_cols`
    already_standard: List, default = None
        Deprecated; use `cols_to_scale` instead. Continuous cols that do not
        need to be scaled.
    auto_embed_dim: bool, default = True
        Whether embedding dims are defined via rule of thumb (see
        `embedding_rule`)
    embedding_rule: str, default = 'fastai_new'
        One of 'google', 'fastai_old' or 'fastai_new'
    default_embed_dim: int, default = 16
        Embedding dim used when no dim is provided in `cat_embed_cols` and
        `auto_embed_dim` is False
    with_attention: bool, default = False
        Whether the preprocessed data will be passed to an attention-based
        model (all embeddings share one dim, so `cat_embed_cols` must be a
        plain list of names). Param alias: `for_transformer`
    with_cls_token: bool, default = False
        Whether a `'[CLS]'` token is prepended to each row when using
        attention-based models
    shared_embed: bool, default = False
        Whether embeddings are "shared" when using attention-based models
        (see the TabTransformer paper, Appendix A)
    verbose: int, default = 1

    Other Parameters
    ----------------
    **kwargs: dict
        `pd.cut` and `StandardScaler` related args

    Attributes
    ----------
    embed_dim: Dict
        embedding dim per embed col (not generated when `with_attention`)
    label_encoder: LabelEncoder
        see `pytorch_widedeep.utils.dense_utils.LabelEncoder`
    cat_embed_input: List
        Tuples of (col name, n unique values[, embedding dim])
    standardize_cols: List
        columns that will be standarized
    scaler: StandardScaler
        an instance of `sklearn.preprocessing.StandardScaler`
    column_idx: Dict
        column name -> column index; needed to slice tensors
    quantizer: Quantizer
        an instance of `Quantizer`
    """

    @Alias('with_attention', 'for_transformer')
    @Alias('cat_embed_cols', 'embed_cols')
    @Alias('scale', 'scale_cont_cols')
    @Alias('quantization_setup', 'cols_and_bins')
    def __init__(
        self,
        cat_embed_cols: Optional[Union[List[str], List[Tuple[str, int]]]] = None,
        continuous_cols: Optional[List[str]] = None,
        quantization_setup: Optional[Union[int, Dict[str, Union[int, List[float]]]]] = None,
        cols_to_scale: Optional[Union[List[str], str]] = None,
        auto_embed_dim: bool = True,
        embedding_rule: Literal['google', 'fastai_old', 'fastai_new'] = 'fastai_new',
        default_embed_dim: int = 16,
        with_attention: bool = False,
        with_cls_token: bool = False,
        shared_embed: bool = False,
        verbose: int = 1,
        *,
        scale: bool = False,
        already_standard: Optional[List[str]] = None,
        **kwargs,
    ):
        super(TabPreprocessor, self).__init__()
        self.continuous_cols = continuous_cols
        self.quantization_setup = quantization_setup
        self.cols_to_scale = cols_to_scale
        self.scale = scale
        self.already_standard = already_standard
        self.auto_embed_dim = auto_embed_dim
        self.embedding_rule = embedding_rule
        self.default_embed_dim = default_embed_dim
        self.with_attention = with_attention
        self.with_cls_token = with_cls_token
        self.shared_embed = shared_embed
        self.verbose = verbose
        # split **kwargs between args accepted by pd.cut and args accepted
        # by StandardScaler
        self.quant_args = {k: v for k, v in kwargs.items() if k in pd.cut.__code__.co_varnames}
        self.scale_args = {k: v for k, v in kwargs.items() if k in StandardScaler().get_params()}
        self._check_inputs(cat_embed_cols)
        if with_cls_token:
            self.cat_embed_cols = (['cls_token'] + cat_embed_cols) if cat_embed_cols is not None else ['cls_token']
        else:
            self.cat_embed_cols = cat_embed_cols
        self.is_fitted = False

    def fit(self, df: pd.DataFrame) -> BasePreprocessor:
        """Fits the Preprocessor and creates required attributes.

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        TabPreprocessor
            `TabPreprocessor` fitted object
        """
        df_adj = self._insert_cls_token(df) if self.with_cls_token else df.copy()
        self.column_idx: Dict[str, int] = {}
        if self.cat_embed_cols is not None:
            df_emb = self._prepare_embed(df_adj)
            self.label_encoder = LabelEncoder(
                columns_to_encode=df_emb.columns.tolist(),
                shared_embed=self.shared_embed,
                with_attention=self.with_attention,
            )
            self.label_encoder.fit(df_emb)
            self.cat_embed_input: List = []
            for k, v in self.label_encoder.encoding_dict.items():
                if self.with_attention:
                    # attention models share one embed dim, set at model time
                    self.cat_embed_input.append((k, len(v)))
                else:
                    self.cat_embed_input.append((k, len(v), self.embed_dim[k]))
            self.column_idx.update({k: v for v, k in enumerate(df_emb.columns)})
        if self.continuous_cols is not None:
            df_cont = self._prepare_continuous(df_adj)
            if self.standardize_cols is not None:
                self.scaler = StandardScaler(**self.scale_args).fit(df_cont[self.standardize_cols].values)
            elif self.verbose:
                warnings.warn('Continuous columns will not be normalised')
            # continuous cols come after the categorical ones
            self.column_idx.update({k: v + len(self.column_idx) for v, k in enumerate(df_cont)})
            if self.cols_and_bins is not None:
                self.quantizer = Quantizer(self.cols_and_bins, **self.quant_args)
        self.is_fitted = True
        return self

    def transform(self, df: pd.DataFrame) -> np.ndarray:
        """Returns the processed `dataframe` as a np.ndarray.

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        np.ndarray
            transformed input dataframe
        """
        check_is_fitted(self, condition=self.is_fitted)
        df_adj = self._insert_cls_token(df) if self.with_cls_token else df.copy()
        if self.cat_embed_cols is not None:
            df_emb = self._prepare_embed(df_adj)
            df_emb = self.label_encoder.transform(df_emb)
        if self.continuous_cols is not None:
            df_cont = self._prepare_continuous(df_adj)
            if self.standardize_cols:
                df_cont[self.standardize_cols] = self.scaler.transform(df_cont[self.standardize_cols].values)
            if self.cols_and_bins is not None:
                # the quantizer is fitted lazily, on the first transform call
                if self.quantizer.is_fitted:
                    df_cont = self.quantizer.transform(df_cont)
                else:
                    df_cont = self.quantizer.fit_transform(df_cont)
        # df_emb and/or df_cont may not exist depending on the setup
        try:
            df_deep = pd.concat([df_emb, df_cont], axis=1)
        except NameError:
            try:
                df_deep = df_emb.copy()
            except NameError:
                df_deep = df_cont.copy()
        _column_idx = {k: v for v, k in enumerate(df_deep.columns)}
        assert _column_idx == self.column_idx
        return df_deep.values

    def transform_sample(self, df: pd.DataFrame) -> np.ndarray:
        # fixed annotation: this returns the first row of the transformed
        # array (a 1-D np.ndarray), not a DataFrame
        return self.transform(df).astype('float')[0]

    def inverse_transform(self, encoded: np.ndarray) -> pd.DataFrame:
        """Takes as input the output from the `transform` method and it will
        return the original values.

        Parameters
        ----------
        encoded: np.ndarray
            array with the output of the `transform` method

        Returns
        -------
        pd.DataFrame
            Pandas dataframe with the original values
        """
        decoded = pd.DataFrame(encoded, columns=self.column_idx.keys())
        if self.cat_embed_cols is not None:
            decoded = self.label_encoder.inverse_transform(decoded)
        if self.continuous_cols is not None:
            if self.cols_and_bins is not None:
                if self.verbose:
                    print('Note that quantized cols will be turned into the mid point of the corresponding bin')
                for k, v in self.quantizer.inversed_bins.items():
                    decoded[k] = decoded[k].map(v)
            # best-effort: un-scaling fails (and is skipped) e.g. when the
            # cols were quantized after scaling
            try:
                decoded[self.standardize_cols] = self.scaler.inverse_transform(decoded[self.standardize_cols])
            except Exception:
                pass
        if 'cls_token' in decoded.columns:
            decoded.drop('cls_token', axis=1, inplace=True)
        return decoded

    def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
        """Combines `fit` and `transform`

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        np.ndarray
            transformed input dataframe
        """
        return self.fit(df).transform(df)

    def _insert_cls_token(self, df: pd.DataFrame) -> pd.DataFrame:
        # prepend a constant '[CLS]' column used by attention-based models
        df_cls = df.copy()
        df_cls.insert(loc=0, column='cls_token', value='[CLS]')
        return df_cls

    def _prepare_embed(self, df: pd.DataFrame) -> pd.DataFrame:
        # selects the embed cols and, when not using attention, resolves the
        # per-column embedding dims
        if self.with_attention:
            return df[self.cat_embed_cols]
        else:
            if isinstance(self.cat_embed_cols[0], tuple):
                self.embed_dim: Dict = dict(self.cat_embed_cols)
                embed_colname = [emb[0] for emb in self.cat_embed_cols]
            elif self.auto_embed_dim:
                n_cats = {col: df[col].nunique() for col in self.cat_embed_cols}
                self.embed_dim = {col: embed_sz_rule(n_cat, self.embedding_rule) for col, n_cat in n_cats.items()}
                embed_colname = self.cat_embed_cols
            else:
                self.embed_dim = {e: self.default_embed_dim for e in self.cat_embed_cols}
                embed_colname = self.cat_embed_cols
            return df[embed_colname]

    def _prepare_continuous(self, df: pd.DataFrame) -> pd.DataFrame:
        # on the first (fitting) call resolves 'standardize_cols' and
        # 'cols_and_bins'; afterwards simply selects the continuous cols
        if self.is_fitted:
            return df[self.continuous_cols]
        else:
            if self.cols_to_scale is not None:
                self.standardize_cols = self.cols_to_scale if self.cols_to_scale != 'all' else self.continuous_cols
            elif self.scale:
                if self.already_standard is not None:
                    self.standardize_cols = [c for c in self.continuous_cols if c not in self.already_standard]
                else:
                    self.standardize_cols = self.continuous_cols
            else:
                self.standardize_cols = None
            if self.quantization_setup is not None:
                if isinstance(self.quantization_setup, int):
                    # one int -> same number of bins for every continuous col
                    self.cols_and_bins: Dict[str, Union[int, List[float]]] = {}
                    for col in self.continuous_cols:
                        self.cols_and_bins[col] = self.quantization_setup
                else:
                    self.cols_and_bins = self.quantization_setup.copy()
            else:
                self.cols_and_bins = None
            return df[self.continuous_cols]

    def _check_inputs(self, cat_embed_cols):
        # validate parameter combinations and emit deprecation warnings
        if self.scale or (self.already_standard is not None):
            warnings.warn("'scale' and 'already_standard' will be deprecated in the next release. Please use 'cols_to_scale' instead", DeprecationWarning, stacklevel=2)
        if self.scale:
            if self.already_standard is not None:
                standardize_cols = [c for c in self.continuous_cols if c not in self.already_standard]
            else:
                standardize_cols = self.continuous_cols
        elif self.cols_to_scale is not None:
            standardize_cols = self.cols_to_scale
        else:
            standardize_cols = None
        if standardize_cols is not None:
            if isinstance(self.quantization_setup, int):
                cols_to_quantize_and_standardize = [c for c in standardize_cols if c in self.continuous_cols]
            elif isinstance(self.quantization_setup, dict):
                cols_to_quantize_and_standardize = [c for c in standardize_cols if c in self.quantization_setup]
            else:
                cols_to_quantize_and_standardize = None
            if cols_to_quantize_and_standardize is not None:
                warnings.warn(f'the following columns: {cols_to_quantize_and_standardize} will be first scaled using a StandardScaler and then quantized. Make sure this is what you really want')
        if self.with_cls_token and not self.with_attention:
            # BUG FIX: the original passed a second *string* positional arg to
            # warnings.warn, which is the 'category' parameter and must be a
            # Warning subclass -> it raised TypeError at runtime
            warnings.warn("If 'with_cls_token' is set to 'True', 'with_attention' will be automatically set to 'True'")
            self.with_attention = True
        if (cat_embed_cols is None) and (self.continuous_cols is None):
            raise ValueError("'cat_embed_cols' and 'continuous_cols' are 'None'. Please, define at least one of the two.")
        if (cat_embed_cols is not None) and (self.continuous_cols is not None) and (len(np.intersect1d(cat_embed_cols, self.continuous_cols)) > 0):
            overlapping_cols = list(np.intersect1d(cat_embed_cols, self.continuous_cols))
            raise ValueError('Currently passing columns as both categorical and continuum is not supported. Please, choose one or the other for the following columns: {}'.format(', '.join(overlapping_cols)))
        transformer_error_message = "If with_attention is 'True' cat_embed_cols must be a list of strings with the columns to be encoded as embeddings."
        if self.with_attention and (cat_embed_cols is not None) and isinstance(cat_embed_cols[0], tuple):
            raise ValueError(transformer_error_message)

    def __repr__(self) -> str:
        # only non-default params are rendered
        list_of_params: List[str] = []
        if self.cat_embed_cols is not None:
            list_of_params.append('cat_embed_cols={cat_embed_cols}')
        if self.continuous_cols is not None:
            list_of_params.append('continuous_cols={continuous_cols}')
        if self.quantization_setup is not None:
            list_of_params.append('quantization_setup={quantization_setup}')
        if self.cols_to_scale is not None:
            list_of_params.append('cols_to_scale={cols_to_scale}')
        if not self.auto_embed_dim:
            list_of_params.append('auto_embed_dim={auto_embed_dim}')
        if self.embedding_rule != 'fastai_new':
            list_of_params.append("embedding_rule='{embedding_rule}'")
        if self.default_embed_dim != 16:
            list_of_params.append('default_embed_dim={default_embed_dim}')
        if self.with_attention:
            list_of_params.append('with_attention={with_attention}')
        if self.with_cls_token:
            list_of_params.append('with_cls_token={with_cls_token}')
        if self.shared_embed:
            list_of_params.append('shared_embed={shared_embed}')
        if self.verbose != 1:
            list_of_params.append('verbose={verbose}')
        if self.scale:
            list_of_params.append('scale={scale}')
        if self.already_standard is not None:
            list_of_params.append('already_standard={already_standard}')
        if len(self.quant_args) > 0:
            list_of_params.append(', '.join([f'{k}={v}' for k, v in self.quant_args.items()]))
        if len(self.scale_args) > 0:
            list_of_params.append(', '.join([f'{k}={v}' for k, v in self.scale_args.items()]))
        all_params = ', '.join(list_of_params)
        return f'TabPreprocessor({all_params.format(**self.__dict__)})'
class ChunkTabPreprocessor(TabPreprocessor):
    """Preprocessor to prepare the `deeptabular` component input dataset when
    the data is processed in chunks.

    Parameters
    ----------
    n_chunks: int
        Number of chunks that the tabular dataset is divided by.
    cat_embed_cols: List, default = None
        Categorical columns that will be represented by embeddings: either a
        list of column names or a list of Tuples with the name and the
        embedding dimension (e.g. _[('education', 32), ...]_)
    continuous_cols: List, default = None
        List with the name of the continuous cols
    cols_and_bins: Dict, default = None
        Dictionary where the keys are the column names to quantize (via
        `pd.cut`) and the values are a list of scalars indicating the bin
        edges. Param alias: `quantization_setup`
    cols_to_scale: List, default = None
        Columns that will be standarised via sklearn's `StandardScaler`
    default_embed_dim: int, default = 16
        Embedding dim used when no dim is provided in `cat_embed_cols` and
        `auto_embed_dim` is False
    with_attention: bool, default = False
        Whether the preprocessed data will be passed to an attention-based
        model. Param alias: `for_transformer`
    with_cls_token: bool, default = False
        Whether a `'[CLS]'` token is prepended to each row when using
        attention-based models
    shared_embed: bool, default = False
        Whether embeddings are "shared" when using attention-based models
    verbose: int, default = 1

    Other Parameters
    ----------------
    **kwargs: dict
        `pd.cut` and `StandardScaler` related args

    Attributes
    ----------
    embed_dim: Dict
        embedding dim per embed col (not generated when `with_attention`)
    label_encoder: LabelEncoder
        see `pytorch_widedeep.utils.dense_utils.LabelEncoder`
    cat_embed_input: List
        Tuples of (col name, n unique values[, embedding dim])
    standardize_cols: List
        columns that will be standarized
    scaler: StandardScaler
        an instance of `sklearn.preprocessing.StandardScaler` if
        'cols_to_scale' is not None or 'scale' is 'True'
    column_idx: Dict
        column name -> column index; needed to slice tensors
    quantizer: Quantizer
        an instance of `Quantizer`

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> from pytorch_widedeep.preprocessing import ChunkTabPreprocessor
    >>> np.random.seed(42)
    >>> chunk_df = pd.DataFrame({'cat_col': np.random.choice(['A', 'B', 'C'], size=8),
    ...                          'cont_col': np.random.uniform(1, 100, size=8)})
    >>> cat_embed_cols = [('cat_col', 4)]
    >>> cont_cols = ['cont_col']
    >>> tab_preprocessor = ChunkTabPreprocessor(
    ...     n_chunks=1, cat_embed_cols=cat_embed_cols, continuous_cols=cont_cols
    ... )
    >>> X_tab = tab_preprocessor.fit_transform(chunk_df)
    >>> tab_preprocessor.embed_dim
    {'cat_col': 4}
    >>> tab_preprocessor.column_idx
    {'cat_col': 0, 'cont_col': 1}
    """

    @Alias('with_attention', 'for_transformer')
    @Alias('cat_embed_cols', 'embed_cols')
    @Alias('scale', 'scale_cont_cols')
    @Alias('cols_and_bins', 'quantization_setup')
    def __init__(
        self,
        n_chunks: int,
        cat_embed_cols: Optional[Union[List[str], List[Tuple[str, int]]]] = None,
        continuous_cols: Optional[List[str]] = None,
        cols_and_bins: Optional[Dict[str, List[float]]] = None,
        cols_to_scale: Optional[Union[List[str], str]] = None,
        default_embed_dim: int = 16,
        with_attention: bool = False,
        with_cls_token: bool = False,
        shared_embed: bool = False,
        verbose: int = 1,
        *,
        scale: bool = False,
        already_standard: Optional[List[str]] = None,
        **kwargs,
    ):
        super(ChunkTabPreprocessor, self).__init__(
            cat_embed_cols=cat_embed_cols,
            continuous_cols=continuous_cols,
            quantization_setup=None,
            cols_to_scale=cols_to_scale,
            auto_embed_dim=False,
            embedding_rule='google',
            default_embed_dim=default_embed_dim,
            with_attention=with_attention,
            with_cls_token=with_cls_token,
            shared_embed=shared_embed,
            verbose=verbose,
            scale=scale,
            already_standard=already_standard,
            **kwargs,
        )
        self.n_chunks = n_chunks
        self.chunk_counter = 0
        self.cols_and_bins = cols_and_bins
        if self.cols_and_bins is not None:
            self.quantizer = Quantizer(self.cols_and_bins, **self.quant_args)
        # per-aspect flags: the first chunk does the one-off preparation work
        self.embed_prepared = False
        self.continuous_prepared = False

    def partial_fit(self, chunk: pd.DataFrame) -> 'ChunkTabPreprocessor':
        """Incrementally fit the preprocessor with one chunk; once `n_chunks`
        chunks have been seen, the final attributes are built and the
        preprocessor is flagged as fitted.
        """
        self.chunk_counter += 1
        chunk_adj = self._insert_cls_token(chunk) if self.with_cls_token else chunk.copy()
        self.column_idx: Dict[str, int] = {}
        if self.cat_embed_cols is not None:
            if not self.embed_prepared:
                chunk_emb = self._prepare_embed(chunk_adj)
                self.label_encoder = LabelEncoder(
                    columns_to_encode=chunk_emb.columns.tolist(),
                    shared_embed=self.shared_embed,
                    with_attention=self.with_attention,
                )
                self.label_encoder.partial_fit(chunk_emb)
            else:
                chunk_emb = chunk_adj[self.cat_embed_cols]
                self.label_encoder.partial_fit(chunk_emb)
            self.column_idx.update({k: v for v, k in enumerate(chunk_emb.columns)})
        if self.continuous_cols is not None:
            if not self.continuous_prepared:
                chunk_cont = self._prepare_continuous(chunk_adj)
            else:
                chunk_cont = chunk[self.continuous_cols]
            if self.standardize_cols is not None:
                self.scaler.partial_fit(chunk_cont[self.standardize_cols].values)
            self.column_idx.update({k: v + len(self.column_idx) for v, k in enumerate(chunk_cont.columns)})
        if self.chunk_counter == self.n_chunks:
            # BUG FIX: only build 'cat_embed_input' when there are categorical
            # cols; the original accessed 'self.label_encoder' unconditionally
            # here and crashed with AttributeError for continuous-only setups
            if self.cat_embed_cols is not None:
                self.cat_embed_input: List[Union[Tuple[str, int], Tuple[str, int, int]]] = []
                for k, v in self.label_encoder.encoding_dict.items():
                    if self.with_attention:
                        self.cat_embed_input.append((k, len(v)))
                    else:
                        self.cat_embed_input.append((k, len(v), self.embed_dim[k]))
            self.is_fitted = True
        return self

    def fit(self, chunk: pd.DataFrame) -> 'ChunkTabPreprocessor':
        """Alias for `partial_fit`: fitting always happens chunk by chunk."""
        return self.partial_fit(chunk)

    def _prepare_embed(self, chunk: pd.DataFrame) -> pd.DataFrame:
        # one-off resolution of the embed cols / dims; dims are not needed
        # for attention models (they share one dim, set at model time)
        if self.with_attention:
            embed_colname = self.cat_embed_cols
        elif isinstance(self.cat_embed_cols[0], tuple):
            self.embed_dim: Dict = dict(self.cat_embed_cols)
            embed_colname = [emb[0] for emb in self.cat_embed_cols]
        else:
            self.embed_dim = {e: self.default_embed_dim for e in self.cat_embed_cols}
            embed_colname = self.cat_embed_cols
        self.embed_prepared = True
        return chunk[embed_colname]

    def _prepare_continuous(self, chunk: pd.DataFrame) -> pd.DataFrame:
        # one-off resolution of 'standardize_cols' and the scaler
        if not hasattr(self, 'standardize_cols'):
            if self.cols_to_scale is not None:
                self.standardize_cols = self.cols_to_scale if self.cols_to_scale != 'all' else self.continuous_cols
            elif self.scale:
                if self.already_standard is not None:
                    self.standardize_cols = [c for c in self.continuous_cols if c not in self.already_standard]
                else:
                    self.standardize_cols = self.continuous_cols
            else:
                self.standardize_cols = None
        # NOTE(review): the scaler is created even when 'standardize_cols' is
        # None, and the warning fires whenever a scaler already exists (i.e.
        # on every chunk after the first) — confirm this matches the intended
        # semantics before changing it
        if not hasattr(self, 'scaler'):
            self.scaler = StandardScaler(**self.scale_args)
        elif self.verbose:
            warnings.warn('Continuous columns will not be normalised')
        self.continuous_prepared = True
        return chunk[self.continuous_cols]

    def __repr__(self) -> str:
        # only non-default params are rendered
        list_of_params: List[str] = []
        if self.n_chunks is not None:
            list_of_params.append('n_chunks={n_chunks}')
        if self.cat_embed_cols is not None:
            list_of_params.append('cat_embed_cols={cat_embed_cols}')
        if self.continuous_cols is not None:
            list_of_params.append('continuous_cols={continuous_cols}')
        if self.cols_and_bins is not None:
            list_of_params.append('cols_and_bins={cols_and_bins}')
        if self.cols_to_scale is not None:
            list_of_params.append('cols_to_scale={cols_to_scale}')
        if self.default_embed_dim != 16:
            list_of_params.append('default_embed_dim={default_embed_dim}')
        if self.with_attention:
            list_of_params.append('with_attention={with_attention}')
        if self.with_cls_token:
            list_of_params.append('with_cls_token={with_cls_token}')
        if self.shared_embed:
            list_of_params.append('shared_embed={shared_embed}')
        if self.verbose != 1:
            list_of_params.append('verbose={verbose}')
        if self.scale:
            list_of_params.append('scale={scale}')
        if self.already_standard is not None:
            list_of_params.append('already_standard={already_standard}')
        if len(self.quant_args) > 0:
            list_of_params.append(', '.join([f'{k}={v}' for k, v in self.quant_args.items()]))
        if len(self.scale_args) > 0:
            list_of_params.append(', '.join([f'{k}={v}' for k, v in self.scale_args.items()]))
        all_params = ', '.join(list_of_params)
        return f'ChunkTabPreprocessor({all_params.format(**self.__dict__)})'
class TextPreprocessor(BasePreprocessor):
    """Preprocessor that prepares the ``deeptext`` input dataset.

    Tokenises the documents found in ``text_col``, builds a vocabulary and
    turns every document into a fixed-length sequence of token indices.

    Parameters
    ----------
    text_col: str
        column in the input dataframe containing the texts
    max_vocab: int, default=30000
        maximum number of tokens in the vocabulary
    min_freq: int, default=5
        minimum frequency for a token to be part of the vocabulary
    maxlen: int, default=80
        maximum length of the tokenized sequences
    pad_first: bool, default=True
        if True the padding index is added at the beginning of the sequence,
        otherwise at the end
    pad_idx: int, default=1
        padding index. Fastai's Tokenizer reserves 0 for the 'unknown' token
    already_processed: bool, Optional, default=False
        if True the input is assumed to be already processed (or to be
        non-text, e.g. IDs) and is only tokenized and padded.
        Param alias: ``not_text``
    word_vectors_path: str, Optional
        path to the pretrained word vectors
    n_cpus: int, Optional, default=None
        number of CPUs used during tokenization; defaults to all available
    verbose: int, default=1
        enable verbose output

    Attributes
    ----------
    vocab: Vocab
        an instance of `pytorch_widedeep.utils.fastai_transforms.Vocab`
    embedding_matrix: np.ndarray
        array with the pretrained embeddings, set only when
        ``word_vectors_path`` is given
    """

    @Alias('already_processed', 'not_text')
    def __init__(
        self,
        text_col: str,
        max_vocab: int = 30000,
        min_freq: int = 5,
        maxlen: int = 80,
        pad_first: bool = True,
        pad_idx: int = 1,
        already_processed: Optional[bool] = False,
        word_vectors_path: Optional[str] = None,
        n_cpus: Optional[int] = None,
        verbose: int = 1,
    ):
        super(TextPreprocessor, self).__init__()
        self.text_col = text_col
        self.max_vocab = max_vocab
        self.min_freq = min_freq
        self.maxlen = maxlen
        self.pad_first = pad_first
        self.pad_idx = pad_idx
        self.already_processed = already_processed
        self.word_vectors_path = word_vectors_path
        self.verbose = verbose
        # use every available core unless the caller chose otherwise
        self.n_cpus = os.cpu_count() if n_cpus is None else n_cpus
        self.is_fitted = False

    def fit(self, df: pd.DataFrame) -> BasePreprocessor:
        """Build the vocabulary from the texts in ``df``.

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        TextPreprocessor
            `TextPreprocessor` fitted object
        """
        raw_texts = self._read_texts(df)
        tokens = get_texts(raw_texts, self.already_processed, self.n_cpus)
        self.vocab: TVocab = Vocab(
            max_vocab=self.max_vocab,
            min_freq=self.min_freq,
            pad_idx=self.pad_idx,
        ).fit(tokens)
        if self.verbose:
            print('The vocabulary contains {} tokens'.format(len(self.vocab.stoi)))
        if self.word_vectors_path is not None:
            self.embedding_matrix = build_embeddings_matrix(
                self.vocab, self.word_vectors_path, self.min_freq
            )
        self.is_fitted = True
        return self

    def transform(self, df: pd.DataFrame) -> np.ndarray:
        """Return the padded, 'numericalised' sequences for ``df``.

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        np.ndarray
            Padded, 'numericalised' sequences
        """
        check_is_fitted(self, attributes=['vocab'])
        raw_texts = self._read_texts(df)
        tokens = get_texts(raw_texts, self.already_processed, self.n_cpus)
        return self._pad_sequences(tokens)

    def transform_sample(self, text: str) -> np.ndarray:
        """Return the padded, 'numericalised' sequence for a single text.

        Parameters
        ----------
        text: str
            text to be tokenized and padded

        Returns
        -------
        np.ndarray
            Padded, 'numericalised' sequence
        """
        check_is_fitted(self, attributes=['vocab'])
        tokens = get_texts([text], self.already_processed, self.n_cpus)
        return self._pad_sequences(tokens)[0]

    def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
        """Combine `fit` and `transform` in one call.

        Parameters
        ----------
        df: pd.DataFrame
            Input pandas dataframe

        Returns
        -------
        np.ndarray
            Padded, 'numericalised' sequences
        """
        return self.fit(df).transform(df)

    def inverse_transform(self, padded_seq: np.ndarray) -> pd.DataFrame:
        """Return the original text plus the added 'special' tokens.

        Parameters
        ----------
        padded_seq: np.ndarray
            array with the output of the `transform` method

        Returns
        -------
        pd.DataFrame
            Pandas dataframe with the original text plus the added
            'special' tokens
        """
        decoded = [self.vocab.inverse_transform(seq) for seq in padded_seq]
        return pd.DataFrame({self.text_col: decoded})

    def _pad_sequences(self, tokens: List[List[str]]) -> np.ndarray:
        # numericalise lazily, pad each sequence to ``maxlen``
        numericalised = (self.vocab.transform(t) for t in tokens)
        return np.array(
            [
                pad_sequences(
                    seq,
                    maxlen=self.maxlen,
                    pad_first=self.pad_first,
                    pad_idx=self.pad_idx,
                )
                for seq in numericalised
            ]
        )

    def _read_texts(
        self, df: pd.DataFrame, root_dir: Optional[str] = None
    ) -> List[str]:
        # when ``root_dir`` is given, ``text_col`` holds filenames of text
        # documents stored under that directory rather than the texts
        if root_dir is None:
            return df[self.text_col].tolist()
        if not os.path.exists(root_dir):
            raise ValueError('root_dir does not exist. Please create it before fitting the preprocessor')
        texts: List[str] = []
        for fname in df[self.text_col].tolist():
            with open(os.path.join(root_dir, fname), 'r') as f:
                texts.append(f.read().replace('\n', ''))
        return texts

    def __repr__(self) -> str:
        params = [
            'text_col={text_col}',
            'max_vocab={max_vocab}',
            'min_freq={min_freq}',
            'maxlen={maxlen}',
            'pad_first={pad_first}',
            'pad_idx={pad_idx}',
            'already_processed={already_processed}',
        ]
        # optional params are shown only when set
        for opt in ('word_vectors_path', 'n_cpus', 'verbose'):
            if getattr(self, opt) is not None:
                params.append(opt + '={' + opt + '}')
        all_params = ', '.join(params)
        return f'TextPreprocessor({all_params.format(**self.__dict__)})'
class ChunkTextPreprocessor(TextPreprocessor):
    """Preprocessor that prepares the ``deeptext`` input dataset in chunks.

    Same contract as ``TextPreprocessor`` but the vocabulary is built
    incrementally, one chunk at a time, via ``partial_fit``.

    Parameters
    ----------
    text_col: str
        column in the input dataframe containing either the texts or the
        filenames where the text documents are stored
    n_chunks: int
        number of chunks the text dataset is divided into
    root_dir: str, Optional, default=None
        if ``text_col`` contains filenames, path to the directory where
        those documents are stored
    max_vocab: int, default=30000
        maximum number of tokens in the vocabulary
    min_freq: int, default=5
        minimum frequency for a token to be part of the vocabulary
    maxlen: int, default=80
        maximum length of the tokenized sequences
    pad_first: bool, default=True
        whether the padding index is added at the beginning or the end of
        the sequences
    pad_idx: int, default=1
        padding index. Fastai's Tokenizer reserves 0 for the 'unknown' token
    already_processed: bool, Optional, default=False
        if True the input is assumed already processed and is only
        tokenized and padded
    word_vectors_path: str, Optional
        path to the pretrained word vectors
    n_cpus: int, Optional, default=None
        number of CPUs used during tokenization
    verbose: int, default=1
        enable verbose output

    Attributes
    ----------
    vocab: ChunkVocab
        an instance of
        `pytorch_widedeep.utils.fastai_transforms.ChunkVocab`
    embedding_matrix: np.ndarray
        array with the pretrained embeddings if `word_vectors_path` is
        not None
    """

    def __init__(
        self,
        text_col: str,
        n_chunks: int,
        root_dir: Optional[str] = None,
        max_vocab: int = 30000,
        min_freq: int = 5,
        maxlen: int = 80,
        pad_first: bool = True,
        pad_idx: int = 1,
        already_processed: Optional[bool] = False,
        word_vectors_path: Optional[str] = None,
        n_cpus: Optional[int] = None,
        verbose: int = 1,
    ):
        super(ChunkTextPreprocessor, self).__init__(
            text_col=text_col,
            max_vocab=max_vocab,
            min_freq=min_freq,
            maxlen=maxlen,
            pad_first=pad_first,
            pad_idx=pad_idx,
            already_processed=already_processed,
            word_vectors_path=word_vectors_path,
            n_cpus=n_cpus,
            verbose=verbose,
        )
        self.n_chunks = n_chunks
        self.root_dir = root_dir
        # counts how many chunks have been seen so far
        self.chunk_counter = 0
        self.is_fitted = False

    def partial_fit(self, chunk: pd.DataFrame) -> 'ChunkTextPreprocessor':
        """Incrementally fit the vocabulary with one chunk of data."""
        self.chunk_counter += 1
        tokens = get_texts(
            self._read_texts(chunk, self.root_dir),
            self.already_processed,
            self.n_cpus,
        )
        # lazily create the chunk-aware vocab on the first chunk
        if not hasattr(self, 'vocab'):
            self.vocab = ChunkVocab(
                max_vocab=self.max_vocab,
                min_freq=self.min_freq,
                pad_idx=self.pad_idx,
                n_chunks=self.n_chunks,
            )
        self.vocab.fit(tokens)
        # once every chunk has been seen the preprocessor is fully fitted
        if self.chunk_counter == self.n_chunks:
            if self.verbose:
                print('The vocabulary contains {} tokens'.format(len(self.vocab.stoi)))
            if self.word_vectors_path is not None:
                self.embedding_matrix = build_embeddings_matrix(
                    self.vocab, self.word_vectors_path, self.min_freq
                )
            self.is_fitted = True
        return self

    def fit(self, chunk: pd.DataFrame) -> 'ChunkTextPreprocessor':
        """Alias for ``partial_fit`` so chunked and non-chunked code match."""
        return self.partial_fit(chunk)

    def __repr__(self) -> str:
        params: List[str] = ["text_col='{text_col}'"]
        if self.n_chunks is not None:
            params.append('n_chunks={n_chunks}')
        if self.root_dir is not None:
            params.append('root_dir={root_dir}')
        params.extend(
            [
                'max_vocab={max_vocab}',
                'min_freq={min_freq}',
                'maxlen={maxlen}',
                'pad_first={pad_first}',
                'pad_idx={pad_idx}',
            ]
        )
        if self.word_vectors_path is not None:
            params.append('word_vectors_path={word_vectors_path}')
        if self.n_cpus is not None:
            params.append('n_cpus={n_cpus}')
        params.append('verbose={verbose}')
        all_params = ', '.join(params)
        return f'ChunkTextPreprocessor({all_params.format(**self.__dict__)})'
class BaseContrastiveDenoisingTrainer(ABC):
    """Abstract base for the contrastive/denoising self-supervised trainer.

    Wraps an attention-based tabular model in a ``ContrastiveDenoisingModel``,
    moves it to the selected device and sets up the loss(es), optimizer, lr
    scheduler and callbacks. Subclasses must implement ``pretrain`` and
    ``save``.
    """

    def __init__(
        self,
        model: ModelWithAttention,
        preprocessor: TabPreprocessor,
        optimizer: Optional[Optimizer],
        lr_scheduler: Optional[LRScheduler],
        callbacks: Optional[List[Callback]],
        loss_type: Literal[('contrastive', 'denoising', 'both')],
        projection_head1_dims: Optional[List[int]],
        projection_head2_dims: Optional[List[int]],
        projection_heads_activation: str,
        cat_mlp_type: Literal[('single', 'multiple')],
        cont_mlp_type: Literal[('single', 'multiple')],
        denoise_mlps_activation: str,
        verbose: int,
        seed: int,
        **kwargs,
    ):
        # fail fast on unsupported models / inconsistent projection heads
        self._check_projection_head_dims(model, projection_head1_dims, projection_head2_dims)
        self._check_model_is_supported(model)
        (self.device, self.num_workers) = self._set_device_and_num_workers(**kwargs)

        self.early_stop = False
        self.verbose = verbose
        self.seed = seed

        self.cd_model = ContrastiveDenoisingModel(
            model,
            preprocessor,
            loss_type,
            projection_head1_dims,
            projection_head2_dims,
            projection_heads_activation,
            cat_mlp_type,
            cont_mlp_type,
            denoise_mlps_activation,
        )
        self.cd_model.to(self.device)

        self.loss_type = loss_type
        self._set_loss_fn(**kwargs)
        self.optimizer = (
            optimizer
            if optimizer is not None
            else torch.optim.AdamW(self.cd_model.parameters())
        )
        self.lr_scheduler = lr_scheduler
        self._set_lr_scheduler_running_params(lr_scheduler, **kwargs)
        self._set_callbacks(callbacks)

    @abstractmethod
    def pretrain(
        self,
        X_tab: np.ndarray,
        X_val: Optional[np.ndarray],
        val_split: Optional[float],
        validation_freq: int,
        n_epochs: int,
        batch_size: int,
    ):
        raise NotImplementedError('Trainer.pretrain method not implemented')

    @abstractmethod
    def save(self, path: str, save_state_dict: bool, model_filename: str):
        raise NotImplementedError('Trainer.save method not implemented')

    def _set_loss_fn(self, **kwargs):
        # instantiate the loss(es) required by ``self.loss_type``.
        # 'reduction' is the canonical kwarg name; the historical misspelling
        # 'reductiom' is still honoured for backwards compatibility
        reduction = kwargs.get('reduction', kwargs.get('reductiom', 'mean'))
        if self.loss_type in ['contrastive', 'both']:
            temperature = kwargs.get('temperature', 0.1)
            self.contrastive_loss = InfoNCELoss(temperature, reduction)
        if self.loss_type in ['denoising', 'both']:
            lambda_cat = kwargs.get('lambda_cat', 1.0)
            lambda_cont = kwargs.get('lambda_cont', 1.0)
            self.denoising_loss = DenoisingLoss(lambda_cat, lambda_cont, reduction)

    def _compute_loss(
        self,
        g_projs: Optional[Tuple[(Tensor, Tensor)]],
        x_cat_and_cat_: Optional[Tuple[(Tensor, Tensor)]],
        x_cont_and_cont_: Optional[Tuple[(Tensor, Tensor)]],
    ) -> Tensor:
        # each term contributes 0 when its loss type is not active
        contrastive_loss = (
            self.contrastive_loss(g_projs)
            if self.loss_type in ['contrastive', 'both']
            else torch.tensor(0.0)
        )
        denoising_loss = (
            self.denoising_loss(x_cat_and_cat_, x_cont_and_cont_)
            if self.loss_type in ['denoising', 'both']
            else torch.tensor(0.0)
        )
        return contrastive_loss + denoising_loss

    def _set_reduce_on_plateau_criterion(self, lr_scheduler, reducelronplateau_criterion):
        # ReduceLROnPlateau needs a 'metrics' argument on step(); record
        # which quantity ('loss' or 'metric') will be tracked
        self.reducelronplateau = isinstance(lr_scheduler, ReduceLROnPlateau)
        if self.reducelronplateau and not reducelronplateau_criterion:
            # BUG FIX: the original code only *instantiated* a UserWarning
            # object, which is a no-op; warnings.warn actually emits it
            warnings.warn(
                "The learning rate scheduler is of type ReduceLROnPlateau. The step method in this scheduler requires a 'metrics' param that can be either the validation loss or the validation metric. Please, when instantiating the Trainer, specify which quantity will be tracked using reducelronplateau_criterion = 'loss' (default) or reducelronplateau_criterion = 'metric'",
                UserWarning,
            )
            self.reducelronplateau_criterion = 'loss'
        else:
            self.reducelronplateau_criterion = reducelronplateau_criterion

    def _set_lr_scheduler_running_params(self, lr_scheduler, **kwargs):
        # configure ReduceLROnPlateau bookkeeping and detect cyclic schedulers
        reducelronplateau_criterion = kwargs.get('reducelronplateau_criterion', None)
        self._set_reduce_on_plateau_criterion(lr_scheduler, reducelronplateau_criterion)
        if lr_scheduler is not None:
            self.cyclic_lr = 'cycl' in lr_scheduler.__class__.__name__.lower()
        else:
            self.cyclic_lr = False

    def _set_callbacks(self, callbacks: Any):
        # History and LRShedulerCallback are always present; user callbacks
        # passed as classes are instantiated here
        self.callbacks: List = [History(), LRShedulerCallback()]
        if callbacks is not None:
            for callback in callbacks:
                if isinstance(callback, type):
                    callback = callback()
                self.callbacks.append(callback)
        self.callback_container = CallbackContainer(self.callbacks)
        self.callback_container.set_model(self.cd_model)
        self.callback_container.set_trainer(self)

    def _restore_best_weights(self):
        # EarlyStopping(restore_best_weights=True) already restored weights;
        # otherwise defer to ModelCheckpoint(save_best_only=True)
        early_stopping_min_delta = None
        model_checkpoint_min_delta = None
        already_restored = False
        for callback in self.callback_container.callbacks:
            if (callback.__class__.__name__ == 'EarlyStopping') and callback.restore_best_weights:
                early_stopping_min_delta = callback.min_delta
                already_restored = True
            if callback.__class__.__name__ == 'ModelCheckpoint':
                model_checkpoint_min_delta = callback.min_delta
        if (
            (early_stopping_min_delta is not None)
            and (model_checkpoint_min_delta is not None)
            and (early_stopping_min_delta != model_checkpoint_min_delta)
        ):
            warnings.warn(
                "'min_delta' is different in the 'EarlyStopping' and 'ModelCheckpoint' callbacks. This implies a different definition of 'improvement' for these two callbacks",
                UserWarning,
            )
        if already_restored:
            pass
        else:
            for callback in self.callback_container.callbacks:
                if callback.__class__.__name__ == 'ModelCheckpoint':
                    if callback.save_best_only:
                        if self.verbose:
                            print(f'Model weights restored to best epoch: {(callback.best_epoch + 1)}')
                        self.cd_model.load_state_dict(callback.best_state_dict)
                    elif self.verbose:
                        print("Model weights after training corresponds to the those of the final epoch which might not be the best performing weights. Use the 'ModelCheckpoint' Callback to restore the best epoch weights.")

    @staticmethod
    def _set_device_and_num_workers(**kwargs):
        # on macOS with py>=3.8 multiprocessing defaults are problematic,
        # hence num_workers defaults to 0 there
        default_num_workers = (
            0
            if (sys.platform == 'darwin') and (sys.version_info.minor > 7)
            else os.cpu_count()
        )
        default_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        device = kwargs.get('device', default_device)
        num_workers = kwargs.get('num_workers', default_num_workers)
        return (device, num_workers)

    @staticmethod
    def _check_model_is_supported(model: ModelWithAttention):
        if model.__class__.__name__ == 'TabPerceiver':
            raise ValueError("Self-Supervised pretraining is not supported for the 'TabPerceiver'")
        if (model.__class__.__name__ == 'TabTransformer') and (not model.embed_continuous):
            raise ValueError("Self-Supervised pretraining is only supported if both categorical and continuum columns are embedded. Please set 'embed_continuous = True'")

    @staticmethod
    def _check_projection_head_dims(
        model: ModelWithAttention,
        projection_head1_dims: Optional[List[int]],
        projection_head2_dims: Optional[List[int]],
    ):
        # the first layer of each projection head must match the model's
        # embedding/input dimension
        error_msg = f'The first dimension of the projection heads must be the same as the embeddings dimension or input dimension of the model: {model.input_dim}. '
        if (projection_head1_dims is not None) and (model.input_dim != projection_head1_dims[0]):
            raise ValueError(error_msg)
        if (projection_head2_dims is not None) and (model.input_dim != projection_head2_dims[0]):
            raise ValueError(error_msg)

    def __repr__(self) -> str:
        list_of_params: List[str] = []
        list_of_params.append(f'model={self.cd_model.__class__.__name__}')
        if self.optimizer is not None:
            list_of_params.append(f'optimizer={self.optimizer.__class__.__name__}')
        if self.lr_scheduler is not None:
            list_of_params.append(f'lr_scheduler={self.lr_scheduler.__class__.__name__}')
        if self.callbacks is not None:
            callbacks = [c.__class__.__name__ for c in self.callbacks]
            list_of_params.append(f'callbacks={callbacks}')
        list_of_params.append('loss_type={loss_type}')
        if self.cd_model.projection_head1_dims is not None:
            list_of_params.append(f'projection_head1_dims={self.cd_model.projection_head1_dims}')
        if self.cd_model.projection_head2_dims is not None:
            list_of_params.append(f'projection_head2_dims={self.cd_model.projection_head2_dims}')
        list_of_params.append(f'projection_heads_activation={self.cd_model.projection_heads_activation}')
        list_of_params.append(f'cat_mlp_type={self.cd_model.cat_mlp_type}')
        list_of_params.append(f'cont_mlp_type={self.cd_model.cont_mlp_type}')
        list_of_params.append(f'denoise_mlps_activation={self.cd_model.denoise_mlps_activation}')
        list_of_params.append('verbose={verbose}')
        list_of_params.append('seed={seed}')
        all_params = ', '.join(list_of_params)
        return f'ContrastiveDenoisingTrainer({all_params.format(**self.__dict__)})'
class BaseEncoderDecoderTrainer(ABC):
    """Abstract base for the encoder-decoder self-supervised trainer.

    Wraps an encoder/decoder pair in an ``EncoderDecoderModel`` (with random
    masking of the embedded input), moves it to the selected device and sets
    up the loss, optimizer, lr scheduler and callbacks. Subclasses must
    implement ``pretrain`` and ``save``.
    """

    def __init__(
        self,
        encoder: ModelWithoutAttention,
        decoder: DecoderWithoutAttention,
        masked_prob: float,
        optimizer: Optional[Optimizer],
        lr_scheduler: Optional[LRScheduler],
        callbacks: Optional[List[Callback]],
        verbose: int,
        seed: int,
        **kwargs,
    ):
        (self.device, self.num_workers) = self._set_device_and_num_workers(**kwargs)

        self.early_stop = False
        self.verbose = verbose
        self.seed = seed

        self.ed_model = EncoderDecoderModel(encoder, decoder, masked_prob)
        self.ed_model.to(self.device)

        self.loss_fn = EncoderDecoderLoss()
        self.optimizer = (
            optimizer
            if optimizer is not None
            else torch.optim.AdamW(self.ed_model.parameters())
        )
        self.lr_scheduler = lr_scheduler
        self._set_lr_scheduler_running_params(lr_scheduler, **kwargs)
        self._set_callbacks(callbacks)

    @abstractmethod
    def pretrain(
        self,
        X_tab: np.ndarray,
        X_val: Optional[np.ndarray],
        val_split: Optional[float],
        validation_freq: int,
        n_epochs: int,
        batch_size: int,
    ):
        raise NotImplementedError('Trainer.pretrain method not implemented')

    @abstractmethod
    def save(self, path: str, save_state_dict: bool, model_filename: str):
        raise NotImplementedError('Trainer.save method not implemented')

    def _set_reduce_on_plateau_criterion(self, lr_scheduler, reducelronplateau_criterion):
        # ReduceLROnPlateau needs a 'metrics' argument on step(); record
        # which quantity ('loss' or 'metric') will be tracked
        self.reducelronplateau = isinstance(lr_scheduler, ReduceLROnPlateau)
        if self.reducelronplateau and not reducelronplateau_criterion:
            # BUG FIX: the original code only *instantiated* a UserWarning
            # object, which is a no-op; warnings.warn actually emits it
            warnings.warn(
                "The learning rate scheduler is of type ReduceLROnPlateau. The step method in this scheduler requires a 'metrics' param that can be either the validation loss or the validation metric. Please, when instantiating the Trainer, specify which quantity will be tracked using reducelronplateau_criterion = 'loss' (default) or reducelronplateau_criterion = 'metric'",
                UserWarning,
            )
            self.reducelronplateau_criterion = 'loss'
        else:
            self.reducelronplateau_criterion = reducelronplateau_criterion

    def _set_lr_scheduler_running_params(self, lr_scheduler, **kwargs):
        # configure ReduceLROnPlateau bookkeeping and detect cyclic schedulers
        reducelronplateau_criterion = kwargs.get('reducelronplateau_criterion', None)
        self._set_reduce_on_plateau_criterion(lr_scheduler, reducelronplateau_criterion)
        if lr_scheduler is not None:
            self.cyclic_lr = 'cycl' in lr_scheduler.__class__.__name__.lower()
        else:
            self.cyclic_lr = False

    def _set_callbacks(self, callbacks: Any):
        # History and LRShedulerCallback are always present; user callbacks
        # passed as classes are instantiated here
        self.callbacks: List = [History(), LRShedulerCallback()]
        if callbacks is not None:
            for callback in callbacks:
                if isinstance(callback, type):
                    callback = callback()
                self.callbacks.append(callback)
        self.callback_container = CallbackContainer(self.callbacks)
        self.callback_container.set_model(self.ed_model)
        self.callback_container.set_trainer(self)

    def _restore_best_weights(self):
        # EarlyStopping(restore_best_weights=True) already restored weights;
        # otherwise defer to ModelCheckpoint(save_best_only=True)
        early_stopping_min_delta = None
        model_checkpoint_min_delta = None
        already_restored = False
        for callback in self.callback_container.callbacks:
            if (callback.__class__.__name__ == 'EarlyStopping') and callback.restore_best_weights:
                early_stopping_min_delta = callback.min_delta
                already_restored = True
            if callback.__class__.__name__ == 'ModelCheckpoint':
                model_checkpoint_min_delta = callback.min_delta
        if (
            (early_stopping_min_delta is not None)
            and (model_checkpoint_min_delta is not None)
            and (early_stopping_min_delta != model_checkpoint_min_delta)
        ):
            warnings.warn(
                "'min_delta' is different in the 'EarlyStopping' and 'ModelCheckpoint' callbacks. This implies a different definition of 'improvement' for these two callbacks",
                UserWarning,
            )
        if already_restored:
            pass
        else:
            for callback in self.callback_container.callbacks:
                if callback.__class__.__name__ == 'ModelCheckpoint':
                    if callback.save_best_only:
                        if self.verbose:
                            print(f'Model weights restored to best epoch: {(callback.best_epoch + 1)}')
                        self.ed_model.load_state_dict(callback.best_state_dict)
                    elif self.verbose:
                        print("Model weights after training corresponds to the those of the final epoch which might not be the best performing weights. Use the 'ModelCheckpoint' Callback to restore the best epoch weights.")

    @staticmethod
    def _set_device_and_num_workers(**kwargs):
        # on macOS with py>=3.8 multiprocessing defaults are problematic,
        # hence num_workers defaults to 0 there
        default_num_workers = (
            0
            if (sys.platform == 'darwin') and (sys.version_info.minor > 7)
            else os.cpu_count()
        )
        default_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        device = kwargs.get('device', default_device)
        num_workers = kwargs.get('num_workers', default_num_workers)
        return (device, num_workers)

    def __repr__(self) -> str:
        list_of_params: List[str] = []
        list_of_params.append(f'encoder={self.ed_model.encoder.__class__.__name__}')
        list_of_params.append(f'decoder={self.ed_model.decoder.__class__.__name__}')
        list_of_params.append(f'masked_prob={self.ed_model.masker.p}')
        if self.optimizer is not None:
            list_of_params.append(f'optimizer={self.optimizer.__class__.__name__}')
        if self.lr_scheduler is not None:
            list_of_params.append(f'lr_scheduler={self.lr_scheduler.__class__.__name__}')
        if self.callbacks is not None:
            callbacks = [c.__class__.__name__ for c in self.callbacks]
            list_of_params.append(f'callbacks={callbacks}')
        list_of_params.append('verbose={verbose}')
        list_of_params.append('seed={seed}')
        all_params = ', '.join(list_of_params)
        return f'EncoderDecoderTrainer({all_params.format(**self.__dict__)})'
class ContrastiveDenoisingTrainer(BaseContrastiveDenoisingTrainer):
    """Contrastive/Denoising self-supervised trainer based on the routine
    described in [SAINT: Improved Neural Networks for Tabular Data via Row
    Attention and Contrastive Pre-Training](https://arxiv.org/abs/2106.01342),
    their Figure 1.

    Parameters
    ----------
    model: ModelWithAttention
        An instance of a `TabTransformer`, `SAINT`, `FTTransformer`,
        `TabFastFormer`, `ContextAttentionMLP` or `SelfAttentionMLP`
        (`TabPerceiver` is not supported)
    preprocessor: TabPreprocessor
        A fitted `TabPreprocessor` object
    optimizer: Optimizer, Optional, default=None
        Pytorch optimizer; defaults to `AdamW` when None
    lr_scheduler: LRScheduler, Optional, default=None
        Pytorch learning rate scheduler
    callbacks: List[Callback], Optional, default=None
        `LRHistory`, `ModelCheckpoint`, `EarlyStopping` or custom callbacks
    loss_type: str, default="both"
        One of '_contrastive_', '_denoising_' or '_both_'. See the SAINT
        paper, their figure (1) and their equation (5)
    projection_head1_dims: list, Optional, default=None
        Hidden-layer dims of the first projection-head MLP. Its first
        dimension must equal the model's embedding (input) dim
    projection_head2_dims: list, Optional, default=None
        Same as `projection_head1_dims` for the second head
    projection_heads_activation: str, default="relu"
        Activation function for the projection heads
    cat_mlp_type: str, default="multiple"
        '_single_' (one MLP for all categorical features) or '_multiple_'
        (one MLP per categorical feature) for the denoising MLPs
    cont_mlp_type: str, default="multiple"
        Same as `cat_mlp_type` but for the continuous features
    denoise_mlps_activation: str, default="relu"
        Activation function for the denoising MLPs
    verbose: int, default=1
        0 prints nothing during training
    seed: int, default=1
        Random seed used internally for train_test_split

    Other Parameters
    ----------------
    **kwargs: dict
        - **device**: `str` — '_cpu_' or '_gpu_'
        - **num_workers**: `int` — data loader workers
        - **reducelronplateau_criterion**: `str` — '_loss_' or '_metric_'
    """

    def __init__(
        self,
        model: ModelWithAttention,
        preprocessor: TabPreprocessor,
        optimizer: Optional[Optimizer] = None,
        lr_scheduler: Optional[LRScheduler] = None,
        callbacks: Optional[List[Callback]] = None,
        loss_type: Literal[('contrastive', 'denoising', 'both')] = 'both',
        projection_head1_dims: Optional[List[int]] = None,
        projection_head2_dims: Optional[List[int]] = None,
        projection_heads_activation: str = 'relu',
        cat_mlp_type: Literal[('single', 'multiple')] = 'multiple',
        cont_mlp_type: Literal[('single', 'multiple')] = 'multiple',
        denoise_mlps_activation: str = 'relu',
        verbose: int = 1,
        seed: int = 1,
        **kwargs,
    ):
        super().__init__(
            model=model,
            preprocessor=preprocessor,
            loss_type=loss_type,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            callbacks=callbacks,
            projection_head1_dims=projection_head1_dims,
            projection_head2_dims=projection_head2_dims,
            projection_heads_activation=projection_heads_activation,
            cat_mlp_type=cat_mlp_type,
            cont_mlp_type=cont_mlp_type,
            denoise_mlps_activation=denoise_mlps_activation,
            verbose=verbose,
            seed=seed,
            **kwargs,
        )

    def pretrain(
        self,
        X_tab: np.ndarray,
        X_tab_val: Optional[np.ndarray] = None,
        val_split: Optional[float] = None,
        validation_freq: int = 1,
        n_epochs: int = 1,
        batch_size: int = 32,
    ):
        """Pretrain method. Can also be called using `.fit(<same_args>)`

        Parameters
        ----------
        X_tab: np.ndarray
            tabular dataset
        X_tab_val: np.ndarray, Optional, default=None
            validation data. Note that all categorical values appearing in
            the validation set must also appear in the training set, since
            the column values themselves are the targets of the loss
        val_split: float, Optional, default=None
            alternative to `X_tab_val`: a train/val split fraction
        validation_freq: int, default=1
            epochs validation frequency
        n_epochs: int, default=1
            number of epochs
        batch_size: int, default=32
            batch size
        """
        self.batch_size = batch_size

        (train_set, eval_set) = self._train_eval_split(X_tab, X_tab_val, val_split)
        train_loader = DataLoader(
            dataset=train_set, batch_size=batch_size, num_workers=self.num_workers
        )
        train_steps = len(train_loader)
        if eval_set is not None:
            eval_loader = DataLoader(
                dataset=eval_set,
                batch_size=batch_size,
                num_workers=self.num_workers,
                shuffle=False,
            )
            eval_steps = len(eval_loader)

        self.callback_container.on_train_begin(
            {'batch_size': batch_size, 'train_steps': train_steps, 'n_epochs': n_epochs}
        )
        for epoch in range(n_epochs):
            epoch_logs: Dict[(str, float)] = {}
            self.callback_container.on_epoch_begin(epoch, logs=epoch_logs)

            self.train_running_loss = 0.0
            with trange(train_steps, disable=(self.verbose != 1)) as t:
                for (batch_idx, X) in zip(t, train_loader):
                    t.set_description('epoch %i' % (epoch + 1))
                    train_loss = self._train_step(X[0], batch_idx)
                    self.callback_container.on_batch_end(batch=batch_idx)
                    print_loss_and_metric(t, train_loss)
            epoch_logs = save_epoch_logs(epoch_logs, train_loss, None, 'train')

            on_epoch_end_metric = None
            if (eval_set is not None) and ((epoch % validation_freq) == (validation_freq - 1)):
                self.callback_container.on_eval_begin()
                self.valid_running_loss = 0.0
                with trange(eval_steps, disable=(self.verbose != 1)) as v:
                    for (batch_idx, X) in zip(v, eval_loader):
                        v.set_description('valid')
                        val_loss = self._eval_step(X[0], batch_idx)
                        print_loss_and_metric(v, val_loss)
                epoch_logs = save_epoch_logs(epoch_logs, val_loss, None, 'val')
                on_epoch_end_metric = val_loss
            elif self.reducelronplateau:
                raise NotImplementedError(
                    'ReduceLROnPlateau scheduler can be used only with validation data.'
                )

            self.callback_container.on_epoch_end(epoch, epoch_logs, on_epoch_end_metric)
            if self.early_stop:
                # BUG FIX: on_train_end used to be called here AND after the
                # loop, firing every end-of-train callback twice on early
                # stop; the single call below covers both exit paths
                break

        self.callback_container.on_train_end(epoch_logs)
        self._restore_best_weights()
        self.cd_model.train()

    def fit(
        self,
        X_tab: np.ndarray,
        X_tab_val: Optional[np.ndarray] = None,
        val_split: Optional[float] = None,
        validation_freq: int = 1,
        n_epochs: int = 1,
        batch_size: int = 32,
    ):
        """Alias for ``pretrain``."""
        return self.pretrain(X_tab, X_tab_val, val_split, validation_freq, n_epochs, batch_size)

    def save(
        self,
        path: str,
        save_state_dict: bool = False,
        model_filename: str = 'cd_model.pt',
    ):
        """Saves the model, training and evaluation history (if any) to disk

        Parameters
        ----------
        path: str
            path to the directory where the model and the feature importance
            attribute will be saved.
        save_state_dict: bool, default=False
            Boolean indicating whether to save directly the model or the
            model's state dictionary
        model_filename: str, Optional, default="cd_model.pt"
            filename where the model weights will be store
        """
        save_dir = Path(path)
        history_dir = save_dir / 'history'
        history_dir.mkdir(exist_ok=True, parents=True)

        with open(history_dir / 'train_eval_history.json', 'w') as teh:
            json.dump(self.history, teh)

        # lr history is only meaningful when an LRHistory callback tracked it
        has_lr_history = any(
            clbk.__class__.__name__ == 'LRHistory' for clbk in self.callbacks
        )
        if (self.lr_scheduler is not None) and has_lr_history:
            with open(history_dir / 'lr_history.json', 'w') as lrh:
                json.dump(self.lr_history, lrh)

        model_path = save_dir / model_filename
        if save_state_dict:
            torch.save(self.cd_model.state_dict(), model_path)
        else:
            torch.save(self.cd_model, model_path)

    def _train_step(self, X_tab: Tensor, batch_idx: int) -> float:
        # BUG FIX: _eval_step switches the model to eval mode and nothing
        # switched it back, so after the first validation pass all training
        # ran with dropout/eval-mode layers disabled. Re-enable train mode
        # before every optimisation step.
        self.cd_model.train()
        X = X_tab.to(self.device)

        self.optimizer.zero_grad()
        (g_projs, cat_x_and_x_, cont_x_and_x_) = self.cd_model(X)
        loss = self._compute_loss(g_projs, cat_x_and_x_, cont_x_and_x_)
        loss.backward()
        self.optimizer.step()

        self.train_running_loss += loss.item()
        avg_loss = self.train_running_loss / (batch_idx + 1)
        return avg_loss

    def _eval_step(self, X_tab: Tensor, batch_idx: int) -> float:
        self.cd_model.eval()
        with torch.no_grad():
            X = X_tab.to(self.device)
            (g_projs, cat_x_and_x_, cont_x_and_x_) = self.cd_model(X)
            loss = self._compute_loss(g_projs, cat_x_and_x_, cont_x_and_x_)
            self.valid_running_loss += loss.item()
            avg_loss = self.valid_running_loss / (batch_idx + 1)
        return avg_loss

    def _train_eval_split(
        self,
        X: np.ndarray,
        X_tab_val: Optional[np.ndarray] = None,
        val_split: Optional[float] = None,
    ) -> Tuple[(TensorDataset, Optional[TensorDataset])]:
        # explicit validation data takes precedence over a split fraction;
        # with neither, no eval set is created
        if X_tab_val is not None:
            train_set = TensorDataset(torch.from_numpy(X))
            eval_set = TensorDataset(torch.from_numpy(X_tab_val))
        elif val_split is not None:
            (X_tr, X_tab_val) = train_test_split(
                X, test_size=val_split, random_state=self.seed
            )
            train_set = TensorDataset(torch.from_numpy(X_tr))
            eval_set = TensorDataset(torch.from_numpy(X_tab_val))
        else:
            train_set = TensorDataset(torch.from_numpy(X))
            eval_set = None
        return (train_set, eval_set)
class EncoderDecoderTrainer(BaseEncoderDecoderTrainer):
    r"""Encoder-Decoder self-supervised 'routine' inspired by
    `TabNet: Attentive Interpretable Tabular Learning
    <https://arxiv.org/abs/1908.07442>`_.

    Parameters
    ----------
    encoder: ModelWithoutAttention
        An instance of a ``TabMlp``, ``TabResNet`` or ``TabNet`` model
    decoder: DecoderWithoutAttention, Optional, default = None
        An instance of a ``TabMlpDecoder``, ``TabResNetDecoder`` or
        ``TabNetDecoder`` model. If ``None`` the decoder is built
        automatically as a symmetric counterpart of the encoder
    masked_prob: float, default = 0.2
        Fraction of the elements in the embedding tensor that will be masked
        and hence used for reconstruction
    optimizer: Optimizer, Optional, default = None
        Pytorch ``Optimizer`` instance (e.g. ``torch.optim.Adam()``).
        Defaults to ``AdamW`` when ``None``
    lr_scheduler: LRScheduler, Optional, default = None
        Pytorch ``LRScheduler`` instance
        (e.g. ``torch.optim.lr_scheduler.StepLR(opt, step_size=5)``)
    callbacks: List[Callback], Optional, default = None
        ``Callback`` objects: ``LRHistory``, ``ModelCheckpoint``,
        ``EarlyStopping`` or a custom callback
    verbose: int, default = 1
        Setting it to 0 will print nothing during training
    seed: int, default = 1
        Random seed used internally for the train/test split

    Other Parameters
    ----------------
    **kwargs: dict
        Infrequently used arguments:

        - **device**: str. One of 'cpu' or 'gpu'
        - **num_workers**: int. Workers used internally by the data loaders
        - **reducelronplateau_criterion**: str. One of 'loss' or 'metric';
          quantity tracked by a ``ReduceLROnPlateau`` scheduler
    """

    def __init__(
        self,
        encoder: ModelWithoutAttention,
        decoder: Optional[DecoderWithoutAttention] = None,
        masked_prob: float = 0.2,
        optimizer: Optional[Optimizer] = None,
        lr_scheduler: Optional[LRScheduler] = None,
        callbacks: Optional[List[Callback]] = None,
        verbose: int = 1,
        seed: int = 1,
        **kwargs,
    ):
        super().__init__(
            encoder=encoder,
            decoder=decoder,
            masked_prob=masked_prob,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            callbacks=callbacks,
            verbose=verbose,
            seed=seed,
            **kwargs,
        )

    def pretrain(
        self,
        X_tab: np.ndarray,
        X_tab_val: Optional[np.ndarray] = None,
        val_split: Optional[float] = None,
        validation_freq: int = 1,
        n_epochs: int = 1,
        batch_size: int = 32,
    ):
        r"""Pretrain method. Can also be called using ``.fit(<same_args>)``

        Parameters
        ----------
        X_tab: np.ndarray
            tabular dataset
        X_tab_val: np.ndarray, Optional, default = None
            validation data
        val_split: float, Optional, default = None
            an alternative to passing the validation set explicitly: a
            train/val split fraction
        validation_freq: int, default = 1
            epochs validation frequency
        n_epochs: int, default = 1
            number of epochs
        batch_size: int, default = 32
            batch size
        """
        self.batch_size = batch_size

        train_set, eval_set = self._train_eval_split(X_tab, X_tab_val, val_split)
        train_loader = DataLoader(
            dataset=train_set, batch_size=batch_size, num_workers=self.num_workers
        )
        train_steps = len(train_loader)
        if eval_set is not None:
            eval_loader = DataLoader(
                dataset=eval_set,
                batch_size=batch_size,
                num_workers=self.num_workers,
                shuffle=False,
            )
            eval_steps = len(eval_loader)

        self.callback_container.on_train_begin(
            {
                "batch_size": batch_size,
                "train_steps": train_steps,
                "n_epochs": n_epochs,
            }
        )
        for epoch in range(n_epochs):
            epoch_logs: Dict[str, float] = {}
            self.callback_container.on_epoch_begin(epoch, logs=epoch_logs)

            self.train_running_loss = 0.0
            with trange(train_steps, disable=self.verbose != 1) as t:
                for batch_idx, X in zip(t, train_loader):
                    t.set_description("epoch %i" % (epoch + 1))
                    train_loss = self._train_step(X[0], batch_idx)
                    self.callback_container.on_batch_end(batch=batch_idx)
                    print_loss_and_metric(t, train_loss)
            epoch_logs = save_epoch_logs(epoch_logs, train_loss, None, "train")

            on_epoch_end_metric = None
            if eval_set is not None and epoch % validation_freq == (
                validation_freq - 1
            ):
                self.callback_container.on_eval_begin()
                self.valid_running_loss = 0.0
                with trange(eval_steps, disable=self.verbose != 1) as v:
                    for batch_idx, X in zip(v, eval_loader):
                        v.set_description("valid")
                        val_loss = self._eval_step(X[0], batch_idx)
                        print_loss_and_metric(v, val_loss)
                epoch_logs = save_epoch_logs(epoch_logs, val_loss, None, "val")
                on_epoch_end_metric = val_loss
            elif self.reducelronplateau:
                raise NotImplementedError(
                    "ReduceLROnPlateau scheduler can be used only with validation data."
                )
            self.callback_container.on_epoch_end(epoch, epoch_logs, on_epoch_end_metric)

            if self.early_stop:
                self.callback_container.on_train_end(epoch_logs)
                break

        self.callback_container.on_train_end(epoch_logs)
        self._restore_best_weights()
        self.ed_model.train()

    def fit(
        self,
        X_tab: np.ndarray,
        X_tab_val: Optional[np.ndarray] = None,
        val_split: Optional[float] = None,
        validation_freq: int = 1,
        n_epochs: int = 1,
        batch_size: int = 32,
    ):
        # thin alias over 'pretrain' kept for API consistency with Trainer
        return self.pretrain(
            X_tab, X_tab_val, val_split, validation_freq, n_epochs, batch_size
        )

    def save(
        self,
        path: str,
        save_state_dict: bool = False,
        model_filename: str = "ed_model.pt",
    ):
        r"""Saves the model, training and evaluation history (if any) to disk

        Parameters
        ----------
        path: str
            path to the directory where the model and the feature importance
            attribute will be saved.
        save_state_dict: bool, default = False
            Boolean indicating whether to save directly the model or the
            model's state dictionary
        model_filename: str, Optional, default = "ed_model.pt"
            filename where the model weights will be store
        """
        save_dir = Path(path)
        history_dir = save_dir / "history"
        history_dir.mkdir(exist_ok=True, parents=True)

        with open(history_dir / "train_eval_history.json", "w") as teh:
            json.dump(self.history, teh)

        has_lr_history = any(
            clbk.__class__.__name__ == "LRHistory" for clbk in self.callbacks
        )
        if self.lr_scheduler is not None and has_lr_history:
            with open(history_dir / "lr_history.json", "w") as lrh:
                json.dump(self.lr_history, lrh)

        model_path = save_dir / model_filename
        if save_state_dict:
            torch.save(self.ed_model.state_dict(), model_path)
        else:
            torch.save(self.ed_model, model_path)

    def explain(self, X_tab: np.ndarray, save_step_masks: bool = False):
        raise NotImplementedError(
            "The 'explain' is currently not implemented for Self Supervised Pretraining"
        )

    def _train_step(self, X_tab: Tensor, batch_idx: int) -> float:
        # one optimisation step on a single batch; returns the running
        # average train loss up to and including this batch
        X = X_tab.to(self.device)

        self.optimizer.zero_grad()
        x_embed, x_embed_rec, mask = self.ed_model(X)
        loss = self.loss_fn(x_embed, x_embed_rec, mask)
        loss.backward()
        self.optimizer.step()

        self.train_running_loss += loss.item()
        return self.train_running_loss / (batch_idx + 1)

    def _eval_step(self, X_tab: Tensor, batch_idx: int) -> float:
        # evaluation step (no gradients); returns the running average
        # validation loss up to and including this batch
        self.ed_model.eval()
        with torch.no_grad():
            X = X_tab.to(self.device)
            x_embed, x_embed_rec, mask = self.ed_model(X)
            loss = self.loss_fn(x_embed, x_embed_rec, mask)
            self.valid_running_loss += loss.item()
        return self.valid_running_loss / (batch_idx + 1)

    def _train_eval_split(
        self,
        X: np.ndarray,
        X_tab_val: Optional[np.ndarray] = None,
        val_split: Optional[float] = None,
    ) -> Tuple[TensorDataset, Optional[TensorDataset]]:
        # explicit validation array wins over a split fraction; with neither,
        # no eval set is built
        if X_tab_val is not None:
            train_set = TensorDataset(torch.from_numpy(X))
            eval_set = TensorDataset(torch.from_numpy(X_tab_val))
        elif val_split is not None:
            X_tr, X_tab_val = train_test_split(
                X, test_size=val_split, random_state=self.seed
            )
            train_set = TensorDataset(torch.from_numpy(X_tr))
            eval_set = TensorDataset(torch.from_numpy(X_tab_val))
        else:
            train_set = TensorDataset(torch.from_numpy(X))
            eval_set = None
        return train_set, eval_set
class BaseBayesianTrainer(ABC):
    r"""Abstract base class with the shared machinery (device, loss,
    optimizer, lr scheduler, callbacks and metrics) for trainers of
    ``BaseBayesianModel`` models.

    Parameters
    ----------
    model: BaseBayesianModel
        model to train
    objective: str
        one of 'binary', 'multiclass' or 'regression'
    custom_loss_function: Module, Optional
        custom loss; when provided it is used verbatim
    optimizer: Optimizer
        Pytorch optimizer; defaults to ``AdamW`` when ``None``
    lr_scheduler: LRScheduler
        Pytorch learning rate scheduler
    callbacks: List[Callback], Optional
        list of ``Callback`` objects
    metrics: List[Metric] or List[TorchMetric], Optional
        metrics tracked during training
    verbose: int
        0 prints nothing during training
    seed: int
        random seed used internally
    """

    def __init__(
        self,
        model: BaseBayesianModel,
        objective: str,
        custom_loss_function: Optional[Module],
        optimizer: Optimizer,
        lr_scheduler: LRScheduler,
        callbacks: Optional[List[Callback]],
        metrics: Optional[Union[List[Metric], List[TorchMetric]]],
        verbose: int,
        seed: int,
        **kwargs,
    ):
        # NOTE(review): this check runs regardless of 'custom_loss_function'
        # although the message mentions it — kept as-is to preserve behaviour;
        # confirm intent upstream.
        if objective not in ["binary", "multiclass", "regression"]:
            raise ValueError(
                "If 'custom_loss_function' is not None, 'objective' must be "
                "'binary' 'multiclass' or 'regression', consistent with the loss function"
            )

        self.device, self.num_workers = self._set_device_and_num_workers(**kwargs)

        self.early_stop = False
        self.model = model
        self.model.to(self.device)
        self.verbose = verbose
        self.seed = seed
        self.objective = objective

        self.loss_fn = self._set_loss_fn(objective, custom_loss_function, **kwargs)
        self.optimizer = (
            optimizer
            if optimizer is not None
            else torch.optim.AdamW(self.model.parameters())
        )
        self.lr_scheduler = lr_scheduler
        self._set_lr_scheduler_running_params(lr_scheduler, **kwargs)
        self._set_callbacks_and_metrics(callbacks, metrics)

    @abstractmethod
    def fit(
        self,
        X_tab: np.ndarray,
        target: np.ndarray,
        X_tab_val: Optional[np.ndarray],
        target_val: Optional[np.ndarray],
        val_split: Optional[float],
        n_epochs: int,
        val_freq: int,
        batch_size: int,
        n_train_samples: int,
        n_val_samples: int,
    ):
        raise NotImplementedError("Trainer.fit method not implemented")

    @abstractmethod
    def predict(
        self, X_tab: np.ndarray, n_samples: int, return_samples: bool, batch_size: int
    ) -> np.ndarray:
        raise NotImplementedError("Trainer.predict method not implemented")

    @abstractmethod
    def predict_proba(
        self, X_tab: np.ndarray, n_samples: int, return_samples: bool, batch_size: int
    ) -> np.ndarray:
        raise NotImplementedError("Trainer.predict_proba method not implemented")

    @abstractmethod
    def save(self, path: str, save_state_dict: bool, model_filename: str):
        raise NotImplementedError("Trainer.save method not implemented")

    def _restore_best_weights(self):
        # If EarlyStopping already restored the weights there is nothing to
        # do; otherwise ModelCheckpoint (save_best_only) restores them here.
        early_stopping_min_delta = None
        model_checkpoint_min_delta = None
        already_restored = False
        for callback in self.callback_container.callbacks:
            if (
                callback.__class__.__name__ == "EarlyStopping"
                and callback.restore_best_weights
            ):
                early_stopping_min_delta = callback.min_delta
                already_restored = True
            if callback.__class__.__name__ == "ModelCheckpoint":
                model_checkpoint_min_delta = callback.min_delta

        if (
            early_stopping_min_delta is not None
            and model_checkpoint_min_delta is not None
        ) and (early_stopping_min_delta != model_checkpoint_min_delta):
            warnings.warn(
                "'min_delta' is different in the 'EarlyStopping' and 'ModelCheckpoint' "
                "callbacks. This implies a different definition of 'improvement' for "
                "these two callbacks",
                UserWarning,
            )

        if already_restored:
            pass
        else:
            for callback in self.callback_container.callbacks:
                if callback.__class__.__name__ == "ModelCheckpoint":
                    if callback.save_best_only:
                        if self.verbose:
                            print(
                                f"Model weights restored to best epoch: {callback.best_epoch + 1}"
                            )
                        self.model.load_state_dict(callback.best_state_dict)
                    elif self.verbose:
                        print(
                            "Model weights after training corresponds to the those of the "
                            "final epoch which might not be the best performing weights. Use "
                            "the 'ModelCheckpoint' Callback to restore the best epoch weights."
                        )

    def _set_loss_fn(self, objective, custom_loss_function, **kwargs):
        if custom_loss_function is not None:
            return custom_loss_function
        class_weight = (
            torch.tensor(kwargs["class_weight"]).to(self.device)
            if "class_weight" in kwargs
            else None
        )
        if self.objective != "regression":
            return bayesian_alias_to_loss(objective, weight=class_weight)
        else:
            return bayesian_alias_to_loss(objective)

    def _set_reduce_on_plateau_criterion(
        self, lr_scheduler, reducelronplateau_criterion
    ):
        self.reducelronplateau = False
        if isinstance(lr_scheduler, ReduceLROnPlateau):
            self.reducelronplateau = True

        if self.reducelronplateau and not reducelronplateau_criterion:
            # BUG FIX: the original code built a UserWarning instance without
            # ever raising it, and never set the criterion on this branch
            # (leaving the attribute undefined). Warn properly and default to
            # 'loss'.
            warnings.warn(
                "The learning rate scheduler is of type ReduceLROnPlateau. The step "
                "method in this scheduler requires a 'metrics' param that can be "
                "either the validation loss or the validation metric. Please, when "
                "instantiating the Trainer, specify which quantity will be tracked "
                "using reducelronplateau_criterion = 'loss' (default) or "
                "reducelronplateau_criterion = 'metric'",
                UserWarning,
            )
            self.reducelronplateau_criterion = "loss"
        else:
            # BUG FIX: the original discarded a user-supplied criterion and
            # always forced 'loss'. Honour the supplied value, keeping 'loss'
            # as the default (consistent with BaseTrainer).
            self.reducelronplateau_criterion = (
                reducelronplateau_criterion if reducelronplateau_criterion else "loss"
            )

    def _set_lr_scheduler_running_params(self, lr_scheduler, **kwargs):
        reducelronplateau_criterion = kwargs.get("reducelronplateau_criterion", None)
        self._set_reduce_on_plateau_criterion(
            lr_scheduler, reducelronplateau_criterion
        )
        if lr_scheduler is not None:
            # cyclic schedulers need a scheduler.step() per batch, not per epoch
            self.cyclic_lr = "cycl" in lr_scheduler.__class__.__name__.lower()
        else:
            self.cyclic_lr = False

    def _set_callbacks_and_metrics(self, callbacks: Any, metrics: Any):
        self.callbacks: List = [History(), LRShedulerCallback()]
        if callbacks is not None:
            for callback in callbacks:
                if isinstance(callback, type):
                    callback = callback()
                self.callbacks.append(callback)
        if metrics is not None:
            self.metric = MultipleMetrics(metrics)
            self.callbacks += [MetricCallback(self.metric)]
        else:
            self.metric = None
        self.callback_container = CallbackContainer(self.callbacks)
        self.callback_container.set_model(self.model)
        self.callback_container.set_trainer(self)

    @staticmethod
    def _set_device_and_num_workers(**kwargs):
        # on macOS with python >= 3.8, multiprocessing data loading is
        # problematic, hence num_workers defaults to 0 there
        default_num_workers = (
            0
            if sys.platform == "darwin" and sys.version_info.minor > 7
            else os.cpu_count()
        )
        default_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        device = kwargs.get("device", default_device)
        num_workers = kwargs.get("num_workers", default_num_workers)
        return device, num_workers

    def __repr__(self) -> str:
        list_of_params: List[str] = []
        list_of_params.append(f"fname={self.model.__class__.__name__}")
        list_of_params.append("objective={objective}")
        list_of_params.append(
            f"custom_loss_function={self.loss_fn.__class__.__name__}"
        )
        list_of_params.append(f"optimizer={self.optimizer.__class__.__name__}")
        if self.lr_scheduler is not None:
            list_of_params.append(
                f"lr_scheduler={self.lr_scheduler.__class__.__name__}"
            )
        if self.callbacks is not None:
            callbacks = (
                "[" + ", ".join([c.__class__.__name__ for c in self.callbacks]) + "]"
            )
            list_of_params.append(f"callbacks={callbacks}")
        if self.verbose is not None:
            list_of_params.append("verbose={verbose}")
        if self.seed is not None:
            list_of_params.append("seed={seed}")
        if self.device is not None:
            list_of_params.append("device={device}")
        if self.num_workers is not None:
            list_of_params.append("num_workers={num_workers}")
        all_params = ", ".join(list_of_params)
        return f"BayesianTrainer({all_params.format(**self.__dict__)})"
class BaseTrainer(ABC):
    r"""Abstract base class with the shared machinery (device, loss,
    optimizers, lr schedulers, initializers, transforms, callbacks and
    metrics) for trainers of ``WideDeep`` models.
    """

    def __init__(
        self,
        model: WideDeep,
        objective: str,
        custom_loss_function: Optional[Module],
        optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]],
        lr_schedulers: Optional[Union[LRScheduler, Dict[str, LRScheduler]]],
        initializers: Optional[Union[Initializer, Dict[str, Initializer]]],
        transforms: Optional[List[Transforms]],
        callbacks: Optional[List[Callback]],
        metrics: Optional[Union[List[Metric], List[TorchMetric]]],
        verbose: int,
        seed: int,
        **kwargs,
    ):
        self._check_inputs(
            model, objective, optimizers, lr_schedulers, custom_loss_function
        )
        self.device, self.num_workers = self._set_device_and_num_workers(**kwargs)

        self.early_stop = False
        self.verbose = verbose
        self.seed = seed

        self.model = model
        if self.model.is_tabnet:
            # sparsity regularisation coefficient used by the TabNet loss
            self.lambda_sparse = kwargs.get("lambda_sparse", 1e-3)
        self.model.to(self.device)
        self.model.wd_device = self.device

        self.objective = objective
        self.method = _ObjectiveToMethod.get(objective)

        self._initialize(initializers)
        self.loss_fn = self._set_loss_fn(objective, custom_loss_function, **kwargs)
        self.optimizer = self._set_optimizer(optimizers)
        self.lr_scheduler = self._set_lr_scheduler(lr_schedulers, **kwargs)
        self.transforms = self._set_transforms(transforms)
        self._set_callbacks_and_metrics(callbacks, metrics)

    @abstractmethod
    def fit(self, *args, **kwargs):
        pass

    @abstractmethod
    def predict(self, *args, **kwargs) -> np.ndarray:
        pass

    @abstractmethod
    def predict_proba(self, *args, **kwargs) -> np.ndarray:
        pass

    @abstractmethod
    def save(self, path: str, save_state_dict: bool, model_filename: str):
        raise NotImplementedError("Trainer.save method not implemented")

    def _restore_best_weights(self):
        # If EarlyStopping already restored the weights there is nothing to
        # do; otherwise ModelCheckpoint (save_best_only) restores them here.
        early_stopping_min_delta = None
        model_checkpoint_min_delta = None
        already_restored = False
        for callback in self.callback_container.callbacks:
            if (
                callback.__class__.__name__ == "EarlyStopping"
                and callback.restore_best_weights
            ):
                early_stopping_min_delta = callback.min_delta
                already_restored = True
            if callback.__class__.__name__ == "ModelCheckpoint":
                model_checkpoint_min_delta = callback.min_delta

        if (
            early_stopping_min_delta is not None
            and model_checkpoint_min_delta is not None
        ) and (early_stopping_min_delta != model_checkpoint_min_delta):
            warnings.warn(
                "'min_delta' is different in the 'EarlyStopping' and 'ModelCheckpoint' "
                "callbacks. This implies a different definition of 'improvement' for "
                "these two callbacks",
                UserWarning,
            )

        if already_restored:
            pass
        else:
            for callback in self.callback_container.callbacks:
                if callback.__class__.__name__ == "ModelCheckpoint":
                    if callback.save_best_only:
                        if self.verbose:
                            print(
                                f"Model weights restored to best epoch: {callback.best_epoch + 1}"
                            )
                        self.model.load_state_dict(callback.best_state_dict)
                    elif self.verbose:
                        print(
                            "Model weights after training corresponds to the those of the "
                            "final epoch which might not be the best performing weights. Use "
                            "the 'ModelCheckpoint' Callback to restore the best epoch weights."
                        )

    def _initialize(self, initializers):
        # initializers can come as a dict (one per model component), a class
        # or an instance
        if initializers is not None:
            if isinstance(initializers, Dict):
                self.initializer = MultipleInitializer(
                    initializers, verbose=self.verbose
                )
                self.initializer.apply(self.model)
            elif isinstance(initializers, type):
                self.initializer = initializers()
                self.initializer(self.model)
            elif isinstance(initializers, Initializer):
                self.initializer = initializers
                self.initializer(self.model)

    def _set_loss_fn(self, objective, custom_loss_function, **kwargs):
        class_weight = (
            torch.tensor(kwargs["class_weight"]).to(self.device)
            if "class_weight" in kwargs
            else None
        )
        if custom_loss_function is not None:
            return custom_loss_function
        elif (
            self.method not in ["regression", "qregression"]
            and "focal_loss" not in objective
        ):
            return alias_to_loss(objective, weight=class_weight)
        elif "focal_loss" in objective:
            alpha = kwargs.get("alpha", 0.25)
            gamma = kwargs.get("gamma", 2.0)
            return alias_to_loss(objective, alpha=alpha, gamma=gamma)
        else:
            return alias_to_loss(objective)

    def _set_optimizer(self, optimizers: Union[Optimizer, Dict[str, Optimizer]]):
        if optimizers is not None:
            if isinstance(optimizers, Optimizer):
                optimizer: Union[Optimizer, MultipleOptimizer] = optimizers
            elif isinstance(optimizers, Dict):
                opt_names = list(optimizers.keys())
                mod_names = [n for n, c in self.model.named_children()]
                # when the model uses Feature Distribution Smoothing the
                # prediction layer lives inside fds_layer, so its params are
                # attached to the 'deeptabular' optimizer instead
                if self.model.with_fds:
                    if "enf_pos" in mod_names:
                        mod_names.remove("enf_pos")
                    mod_names.remove("fds_layer")
                    optimizers["deeptabular"].add_param_group(
                        {"params": self.model.fds_layer.pred_layer.parameters()}
                    )
                for mn in mod_names:
                    assert mn in opt_names, "No optimizer found for {}".format(mn)
                optimizer = MultipleOptimizer(optimizers)
        else:
            optimizer = torch.optim.Adam(self.model.parameters())
        return optimizer

    def _set_lr_scheduler(self, lr_schedulers, **kwargs):
        reducelronplateau_criterion = kwargs.get("reducelronplateau_criterion", None)
        self._set_reduce_on_plateau_criterion(
            lr_schedulers, reducelronplateau_criterion
        )
        if lr_schedulers is not None:
            if isinstance(lr_schedulers, LRScheduler) or isinstance(
                lr_schedulers, ReduceLROnPlateau
            ):
                lr_scheduler = lr_schedulers
                cyclic_lr = "cycl" in lr_scheduler.__class__.__name__.lower()
            else:
                lr_scheduler = MultipleLRScheduler(lr_schedulers)
                scheduler_names = [
                    sc.__class__.__name__.lower()
                    for _, sc in lr_scheduler._schedulers.items()
                ]
                cyclic_lr = any(["cycl" in sn for sn in scheduler_names])
        else:
            lr_scheduler, cyclic_lr = None, False
        self.cyclic_lr = cyclic_lr
        return lr_scheduler

    def _set_reduce_on_plateau_criterion(
        self, lr_schedulers, reducelronplateau_criterion
    ):
        self.reducelronplateau = False
        if isinstance(lr_schedulers, Dict):
            for _, scheduler in lr_schedulers.items():
                if isinstance(scheduler, ReduceLROnPlateau):
                    self.reducelronplateau = True
        elif isinstance(lr_schedulers, ReduceLROnPlateau):
            self.reducelronplateau = True

        if self.reducelronplateau and not reducelronplateau_criterion:
            # BUG FIX: the original code built a UserWarning instance without
            # ever raising it, so the user was never warned. Emit it properly.
            warnings.warn(
                "The learning rate scheduler of at least one of the model components "
                "is of type ReduceLROnPlateau. The step method in this scheduler "
                "requires a 'metrics' param that can be either the validation loss or "
                "the validation metric. Please, when instantiating the Trainer, "
                "specify which quantity will be tracked using "
                "reducelronplateau_criterion = 'loss' (default) or "
                "reducelronplateau_criterion = 'metric'",
                UserWarning,
            )
            self.reducelronplateau_criterion = "loss"
        else:
            self.reducelronplateau_criterion = reducelronplateau_criterion

    @staticmethod
    def _set_transforms(transforms):
        if transforms is not None:
            return MultipleTransforms(transforms)()
        else:
            return None

    def _set_callbacks_and_metrics(self, callbacks: Any, metrics: Any):
        self.callbacks: List = [History(), LRShedulerCallback()]
        if callbacks is not None:
            for callback in callbacks:
                if isinstance(callback, type):
                    callback = callback()
                self.callbacks.append(callback)
        if metrics is not None:
            self.metric = MultipleMetrics(metrics)
            self.callbacks += [MetricCallback(self.metric)]
        else:
            self.metric = None
        self.callback_container = CallbackContainer(self.callbacks)
        self.callback_container.set_model(self.model)
        self.callback_container.set_trainer(self)

    @staticmethod
    def _check_inputs(
        model, objective, optimizers, lr_schedulers, custom_loss_function
    ):
        if model.with_fds and _ObjectiveToMethod.get(objective) != "regression":
            raise ValueError(
                "Feature Distribution Smooting can be used only for regression"
            )

        if _ObjectiveToMethod.get(objective) == "multiclass" and model.pred_dim == 1:
            raise ValueError(
                "This is a multiclass classification problem but the size of the "
                "output layer is set to 1. Please, set the 'pred_dim' param equal to "
                "the number of classes when instantiating the 'WideDeep' class"
            )

        if isinstance(optimizers, Dict):
            if lr_schedulers is not None and not isinstance(lr_schedulers, Dict):
                # FIX: removed a stray doubled apostrophe at the start of the
                # original message ("''optimizers'")
                raise ValueError(
                    "'optimizers' and 'lr_schedulers' must have consistent type: "
                    "(Optimizer and LRScheduler) or (Dict[str, Optimizer] and "
                    "Dict[str, LRScheduler]) Please, read the documentation or see "
                    "the examples for more details"
                )

        if custom_loss_function is not None and objective not in [
            "binary",
            "multiclass",
            "regression",
        ]:
            raise ValueError(
                "If 'custom_loss_function' is not None, 'objective' must be "
                "'binary' 'multiclass' or 'regression', consistent with the loss function"
            )

    @staticmethod
    def _set_device_and_num_workers(**kwargs):
        # on macOS with python >= 3.8, multiprocessing data loading is
        # problematic, hence num_workers defaults to 0 there
        default_num_workers = (
            0
            if sys.platform == "darwin" and sys.version_info.minor > 7
            else os.cpu_count()
        )
        default_device = "cuda" if torch.cuda.is_available() else "cpu"
        device = kwargs.get("device", default_device)
        num_workers = kwargs.get("num_workers", default_num_workers)
        return device, num_workers

    def __repr__(self) -> str:
        list_of_params: List[str] = []
        list_of_params.append(f"model={self.model.__class__.__name__}")
        list_of_params.append("objective={objective}")
        list_of_params.append(f"loss_function={self.loss_fn.__class__.__name__}")
        list_of_params.append(f"optimizers={self.optimizer.__class__.__name__}")
        list_of_params.append(
            f"lr_schedulers={self.lr_scheduler.__class__.__name__}"
        )
        if self.callbacks is not None:
            callbacks_str = (
                "[" + ", ".join([c.__class__.__name__ for c in self.callbacks]) + "]"
            )
            list_of_params.append(f"callbacks={callbacks_str}")
        if self.verbose is not None:
            list_of_params.append("verbose={verbose}")
        if self.seed is not None:
            list_of_params.append("seed={seed}")
        if self.device is not None:
            list_of_params.append("device={device}")
        if self.num_workers is not None:
            list_of_params.append("num_workers={num_workers}")
        all_params = ", ".join(list_of_params)
        return f"Trainer({all_params.format(**self.__dict__)})"
class FineTune:
    r"""Fine-tune methods applied to the individual model components.

    They can also be used to "warm-up" those components before the joined
    training. Three routines are available:

    1) Fine-tune all trainable layers at once

    2) Gradual fine-tuning inspired by the work of Felbo et al., 2017

    3) Gradual fine-tuning inspired by the work of Howard & Ruder 2018

    The structure of the code in this class is designed to be instantiated
    within the class WideDeep. This is not ideal, but represents a
    compromise towards implementing a fine-tuning functionality for the
    current overall structure of the package without having to re-structure
    most of the existing code. This will change in future releases.

    Parameters
    ----------
    loss_fn: Any
        any function with the same structure as 'loss_fn' in the class
        ``Trainer``
    metric: ``Metric`` or ``MultipleMetrics``
        object of class Metric (see Metric in pytorch_widedeep.metrics)
    method: str
        one of 'binary', 'regression' or 'multiclass'
    verbose: Boolean
    """

    def __init__(
        self,
        loss_fn: Any,
        metric: Union[Metric, MultipleMetrics],
        method: Literal["binary", "regression", "multiclass"],
        verbose: int,
    ):
        self.loss_fn = loss_fn
        self.metric = metric
        self.method = method
        self.verbose = verbose

    def finetune_all(
        self,
        model: WDModel,
        model_name: str,
        loader: DataLoader,
        n_epochs: int,
        max_lr: float,
    ):
        r"""Fine-tune/warm-up all trainable layers in a model with a one-cycle
        triangular learning rate (Slanted Triangular learning rate, Howard &
        Ruder 2018, https://arxiv.org/abs/1801.06146):

        1) the lr grows from max_lr/10 to max_lr over the first 10% of steps;

        2) it then decays back to max_lr/10 over the remaining 90%.

        The optimizer used is AdamW.

        Parameters
        ----------
        model: ``Module``
            one of the WideDeep model components (wide, deeptabular, deeptext
            or deepimage)
        model_name: str
            one of 'wide', 'deeptabular', 'deeptext' or 'deepimage'
        loader: ``DataLoader``
            Pytorch DataLoader with the data used to fine-tune
        n_epochs: int
            number of fine-tune epochs
        max_lr: float
            maximum learning rate value during the triangular cycle
        """
        if self.verbose:
            print("Training {} for {} epochs".format(model_name, n_epochs))
        model.train()

        optimizer = torch.optim.AdamW(model.parameters(), lr=max_lr / 10.0)
        step_size_up, step_size_down = self._steps_up_down(len(loader), n_epochs)
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=max_lr / 10.0,
            max_lr=max_lr,
            step_size_up=step_size_up,
            step_size_down=step_size_down,
            cycle_momentum=False,
        )

        self._finetune(model, model_name, loader, optimizer, scheduler, n_epochs=n_epochs)

    def finetune_gradual(
        self,
        model: WDModel,
        model_name: str,
        loader: DataLoader,
        last_layer_max_lr: float,
        layers: List[nn.Module],
        routine: str,
    ):
        r"""Fine-tune/warm-up certain layers within the model following a
        gradual routine, based on Felbo et al., 2017 (DeepEmoji,
        https://arxiv.org/abs/1708.00524) and Howard & Ruder 2018 (ULMFit,
        https://arxiv.org/abs/1801.06146).

        A one-cycle triangular learning rate is used. In both routines the lr
        decreases as we go deeper into the network; the layer closest to the
        output uses 'last_layer_max_lr'.

        1) 'Felbo': train each layer in 'layers' for one epoch with every
        other layer frozen; finish with one epoch where all trained layers
        are trainable.

        2) 'Howard': train each layer in 'layers' for one epoch keeping the
        previously unfrozen layers trainable.

        Parameters
        ----------
        model: ``Module``
            one of the WideDeep model components (wide, deeptabular, deeptext
            or deepimage)
        model_name: str
            one of 'wide', 'deeptabular', 'deeptext' or 'deepimage'
        loader: ``DataLoader``
            Pytorch DataLoader with the data to fine-tune with
        last_layer_max_lr: float
            maximum cycle lr for the layer closest to the output neuron(s).
            Deeper layers get a gradually smaller max lr
            (``last_layer_max_lr / (2.5 * position)``)
        layers: list
            ``Module`` objects with the layers that will be fine-tuned, in
            *'FINE-TUNE ORDER'*
        routine: str
            one of 'howard' or 'felbo'
        """
        model.train()

        step_size_up, step_size_down = self._steps_up_down(len(loader))

        # remember the original trainability of every parameter so it can be
        # restored once the routine finishes
        original_setup = {}
        for n, p in model.named_parameters():
            original_setup[n] = p.requires_grad

        layers_max_lr = [last_layer_max_lr] + [
            last_layer_max_lr / (2.5 * n) for n in range(1, len(layers))
        ]

        for layer in layers:
            for p in layer.parameters():
                p.requires_grad = False

        if routine == "howard":
            params: List = []
            max_lr: List = []
            base_lr: List = []

        for i, (lr, layer) in enumerate(zip(layers_max_lr, layers)):
            if self.verbose:
                print(
                    "Training {}, layer {} of {}".format(model_name, i + 1, len(layers))
                )
            for p in layer.parameters():
                p.requires_grad = True
            if routine == "felbo":
                # felbo: only the current layer is optimised
                params, max_lr, base_lr = layer.parameters(), lr, lr / 10.0
            elif routine == "howard":
                # howard: accumulate param groups, so earlier layers keep
                # training with their own (smaller) learning rates
                params += [{"params": layer.parameters(), "lr": lr / 10.0}]
                max_lr += [lr]
                base_lr += [lr / 10.0]
            optimizer = torch.optim.AdamW(params)
            scheduler = torch.optim.lr_scheduler.CyclicLR(
                optimizer,
                base_lr=base_lr,
                max_lr=max_lr,
                step_size_up=step_size_up,
                step_size_down=step_size_down,
                cycle_momentum=False,
            )
            self._finetune(model, model_name, loader, optimizer, scheduler)
            if routine == "felbo":
                for p in layer.parameters():
                    p.requires_grad = False

        if routine == "felbo":
            # one final epoch with all fine-tuned layers trainable
            if self.verbose:
                print("Training one last epoch...")
            for layer in layers:
                for p in layer.parameters():
                    p.requires_grad = True
            params, max_lr, base_lr = [], [], []
            for lr, layer in zip(layers_max_lr, layers):
                params += [{"params": layer.parameters(), "lr": lr / 10.0}]
                max_lr += [lr]
                base_lr += [lr / 10.0]
            optimizer = torch.optim.AdamW(params)
            scheduler = torch.optim.lr_scheduler.CyclicLR(
                optimizer,
                base_lr=base_lr,
                max_lr=max_lr,
                step_size_up=step_size_up,
                step_size_down=step_size_down,
                cycle_momentum=False,
            )
            self._finetune(model, model_name, loader, optimizer, scheduler)

        for n, p in model.named_parameters():
            p.requires_grad = original_setup[n]

    def _finetune(
        self,
        model: WDModel,
        model_name: str,
        loader: DataLoader,
        optimizer: Optimizer,
        scheduler: LRScheduler,
        n_epochs: int = 1,
    ):
        r"""Standard Pytorch training loop"""
        steps = len(loader)
        for epoch in range(n_epochs):
            running_loss = 0.0
            with trange(steps, disable=self.verbose != 1) as t:
                for batch_idx, packed_data in zip(t, loader):
                    t.set_description("epoch %i" % (epoch + 1))
                    # loaders may or may not yield LDS weights
                    try:
                        data, target, lds_weight = packed_data
                    except ValueError:
                        data, target = packed_data
                    X = data[model_name].cuda() if use_cuda else data[model_name]
                    y = (
                        target.view(-1, 1).float()
                        if self.method not in ["multiclass", "qregression"]
                        else target
                    )
                    y = y.cuda() if use_cuda else y

                    optimizer.zero_grad()
                    y_pred = model(X)
                    loss = self.loss_fn(y_pred, y)
                    loss.backward()
                    optimizer.step()
                    scheduler.step()

                    running_loss += loss.item()
                    avg_loss = running_loss / (batch_idx + 1)

                    if self.metric is not None:
                        if self.method == "regression":
                            score = self.metric(y_pred, y)
                        if self.method == "binary":
                            score = self.metric(torch.sigmoid(y_pred), y)
                        if self.method == "qregression":
                            score = self.metric(y_pred, y)
                        if self.method == "multiclass":
                            score = self.metric(F.softmax(y_pred, dim=1), y)
                        t.set_postfix(
                            metrics={k: np.round(v, 4) for k, v in score.items()},
                            loss=avg_loss,
                        )
                    else:
                        t.set_postfix(loss=avg_loss)

    def _steps_up_down(self, steps: int, n_epochs: int = 1) -> Tuple[int, int]:
        r"""Number of steps increasing/decreasing the lr during the one cycle
        fine-tune: 10% of the total steps up (at least 1), the rest down.

        Parameters
        ----------
        steps: int
            steps per epoch
        n_epochs: int, default = 1
            number of fine-tune epochs
        """
        up = max([round(steps * n_epochs * 0.1), 1])
        down = (steps * n_epochs) - up
        return up, down
class classproperty:
    """Descriptor that exposes a method as a read-only property of the class.

    Equivalent to stacking ``@classmethod`` on top of ``@property``, which is
    only possible from python 3.9 onwards. Kept as a hand-rolled descriptor
    because python 3.7 and 3.8 are still supported.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, decorated_self, decorated_cls):
        # the instance (if any) is deliberately ignored: the wrapped function
        # is always evaluated against the owning class
        return self.func(decorated_cls)
class _LossAliases:
    """Registry mapping each canonical loss name to its accepted aliases."""

    loss_aliases = {
        'binary': ['binary', 'logistic', 'binary_logloss', 'binary_cross_entropy'],
        'multiclass': ['multiclass', 'multi_logloss', 'cross_entropy', 'categorical_cross_entropy'],
        'regression': ['regression', 'mse', 'l2', 'mean_squared_error'],
        'mean_absolute_error': ['mean_absolute_error', 'mae', 'l1'],
        'mean_squared_log_error': ['mean_squared_log_error', 'msle'],
        'root_mean_squared_error': ['root_mean_squared_error', 'rmse'],
        'root_mean_squared_log_error': ['root_mean_squared_log_error', 'rmsle'],
        'zero_inflated_lognormal': ['zero_inflated_lognormal', 'ziln'],
        'focalr_mse': ['focalr_mse'],
        'focalr_rmse': ['focalr_rmse'],
        'focalr_l1': ['focalr_l1'],
        'huber': ['huber'],
        'quantile': ['quantile'],
        'tweedie': ['tweedie'],
    }

    @classproperty
    def alias_to_loss(cls):
        # invert the registry: every alias points back at its canonical name
        inverted = {}
        for canonical, aliases in cls.loss_aliases.items():
            for alias in aliases:
                inverted[alias] = canonical
        return inverted

    @classmethod
    def get(cls, loss):
        """Return the alias list for canonical name ``loss`` (None if unknown)."""
        return cls.loss_aliases.get(loss)
class _ObjectiveToMethod:
    """Maps every supported objective/loss alias to its prediction 'method'
    ('binary', 'multiclass', 'regression' or 'qregression')."""

    objective_to_method = {
        'binary': 'binary',
        'logistic': 'binary',
        'binary_logloss': 'binary',
        'binary_cross_entropy': 'binary',
        'binary_focal_loss': 'binary',
        'multiclass': 'multiclass',
        'multi_logloss': 'multiclass',
        'cross_entropy': 'multiclass',
        'categorical_cross_entropy': 'multiclass',
        'multiclass_focal_loss': 'multiclass',
        'regression': 'regression',
        'mse': 'regression',
        'l2': 'regression',
        'mean_squared_error': 'regression',
        'mean_absolute_error': 'regression',
        'mae': 'regression',
        'l1': 'regression',
        'mean_squared_log_error': 'regression',
        'msle': 'regression',
        'root_mean_squared_error': 'regression',
        'rmse': 'regression',
        'root_mean_squared_log_error': 'regression',
        'rmsle': 'regression',
        'zero_inflated_lognormal': 'regression',
        'ziln': 'regression',
        'tweedie': 'regression',
        'focalr_mse': 'regression',
        'focalr_rmse': 'regression',
        'focalr_l1': 'regression',
        'huber': 'regression',
        'quantile': 'qregression',
    }

    @classproperty
    def method_to_objecive(cls):
        # NOTE: the attribute name keeps its historical misspelling
        # ('objecive') because it is part of the public surface of this class
        grouped = defaultdict(list)
        for objective, method in cls.objective_to_method.items():
            grouped[method].append(objective)
        return grouped

    @classmethod
    def keys(cls):
        """Dict-like view over the supported objective names."""
        return cls.objective_to_method.keys()

    @classmethod
    def get(cls, obj):
        """Return the method for objective ``obj`` (None if unsupported)."""
        return cls.objective_to_method.get(obj)
class MultipleLRScheduler(object):
    """Thin container that steps several LR schedulers as if they were one."""

    def __init__(self, scheds: Dict[str, LRScheduler]):
        # keyed by model-component name (e.g. 'wide', 'deeptabular')
        self._schedulers = scheds

    def step(self):
        """Advance every wrapped scheduler by one step."""
        for sched in self._schedulers.values():
            sched.step()
class MultipleOptimizer(object):
    """Thin container that drives several optimizers as if they were one."""

    def __init__(self, opts: Dict[str, Optimizer]):
        # keyed by model-component name (e.g. 'wide', 'deeptabular')
        self._optimizers = opts

    def zero_grad(self):
        """Clear the gradients on every wrapped optimizer."""
        for opt in self._optimizers.values():
            opt.zero_grad()

    def step(self):
        """Run an update step on every wrapped optimizer."""
        for opt in self._optimizers.values():
            opt.step()
class MultipleTransforms(object):
    """Bundle several image transforms into a single ``Compose``.

    Accepts both transform classes and already-instantiated transform
    objects; bare classes are instantiated with no arguments.
    """

    def __init__(self, transforms: List[Transforms]):
        self._transforms = [
            t() if isinstance(t, type) else t for t in transforms
        ]

    def __call__(self):
        return Compose(self._transforms)
def tabular_train_val_split(
    seed: int,
    method: str,
    X: np.ndarray,
    y: np.ndarray,
    X_val: Optional[np.ndarray] = None,
    y_val: Optional[np.ndarray] = None,
    val_split: Optional[float] = None,
):
    """Create the train/val split for the ``BayesianTrainer`` (tabular-only data).

    Parameters
    ----------
    seed: int
        random seed used for the train/val split
    method: str
        'regression', 'binary' or 'multiclass'. Non-regression targets are
        used to stratify the split
    X: np.ndarray
        tabular dataset (categorical and continuous features)
    y: np.ndarray
        target array
    X_val: np.ndarray, Optional, default = None
        explicit validation features; takes precedence over ``val_split``
    y_val: np.ndarray, Optional, default = None
        explicit validation target; required when ``X_val`` is passed
    val_split: Optional[float], default = None
        fraction of the data used for validation when no explicit validation
        set is given

    Returns
    -------
    train_set: ``TensorDataset``
    eval_set: ``TensorDataset`` or None
        None when neither ``X_val`` nor ``val_split`` is provided
    """
    if X_val is not None:
        assert y_val is not None, "if X_val is not None the validation target 'y_val' must also be specified"
        train_set = TensorDataset(torch.from_numpy(X), torch.from_numpy(y))
        eval_set = TensorDataset(torch.from_numpy(X_val), torch.from_numpy(y_val))
        return train_set, eval_set

    if val_split is not None:
        stratify = y if method != 'regression' else None
        y_tr, y_val, idx_tr, idx_val = train_test_split(
            y,
            np.arange(len(y)),
            test_size=val_split,
            random_state=seed,
            stratify=stratify,
        )
        train_set = TensorDataset(torch.from_numpy(X[idx_tr]), torch.from_numpy(y_tr))
        eval_set = TensorDataset(torch.from_numpy(X[idx_val]), torch.from_numpy(y_val))
        return train_set, eval_set

    # no validation data requested at all
    return TensorDataset(torch.from_numpy(X), torch.from_numpy(y)), None
def wd_train_val_split(
    seed: int,
    method: str,
    X_wide: Optional[np.ndarray] = None,
    X_tab: Optional[np.ndarray] = None,
    X_text: Optional[np.ndarray] = None,
    X_img: Optional[np.ndarray] = None,
    X_train: Optional[Dict[(str, np.ndarray)]] = None,
    X_val: Optional[Dict[(str, np.ndarray)]] = None,
    val_split: Optional[float] = None,
    target: Optional[np.ndarray] = None,
    transforms: Optional[List[Transforms]] = None,
    **lds_args
):
    """Create the train/val split for a wide and deep model.

    If a validation set (``X_val``) is passed, or ``val_split`` is specified,
    the train/val split happens internally. A number of options are allowed
    in terms of data inputs. For parameter information, please see the
    ``Trainer``'s ``.fit()`` method documentation.

    Parameters
    ----------
    seed: int
        random seed to be used during train/val split
    method: str
        'regression', 'binary' or 'multiclass'. Non-regression targets are
        used to stratify the split
    X_wide: np.ndarray, Optional, default = None
        wide dataset
    X_tab: np.ndarray, Optional, default = None
        tabular dataset (categorical and continuous features)
    X_img: np.ndarray, Optional, default = None
        image dataset
    X_text: np.ndarray, Optional, default = None
        text dataset
    X_train: Dict, Optional, default = None
        Dict with the training set, where the keys are the component names
        (e.g: 'X_wide') plus 'target' and the values the corresponding arrays
    X_val: Dict, Optional, default = None
        Dict with the validation set, where the keys are the component names
        (e.g: 'X_wide') and the values the corresponding arrays
    val_split: float, Optional, default = None
        Alternatively, the validation split can be specified via a float
    target: np.ndarray, Optional, default = None
    transforms: List, Optional, default = None
        List of Transforms to be applied to the image dataset
    **lds_args
        extra keyword args forwarded to ``WideDeepDataset`` (label
        distribution smoothing options); only applied to the training set

    Returns
    -------
    train_set: ``WideDeepDataset``
    eval_set: ``WideDeepDataset`` or None
        None when neither ``X_val`` nor ``val_split`` is provided
    """
    if (X_val is not None):
        # explicit validation set: both train and val must come as dicts of
        # component arrays
        assert (X_train is not None), 'if the validation set is passed as a dictionary, the training set must also be a dictionary'
        train_set = WideDeepDataset(**X_train, transforms=transforms, **lds_args)
        # lds_args intentionally not forwarded: smoothing only affects training
        eval_set = WideDeepDataset(**X_val, transforms=transforms, is_training=False)
    elif (val_split is not None):
        # internal split: assemble the training dict first if the data came in
        # as individual component arrays
        if (not X_train):
            X_train = _build_train_dict(X_wide, X_tab, X_text, X_img, target)
        (y_tr, y_val, idx_tr, idx_val) = train_test_split(X_train['target'], np.arange(len(X_train['target'])), test_size=val_split, random_state=seed, stratify=(X_train['target'] if (method != 'regression') else None))
        # split each present component with the same row indices so all
        # components stay aligned with the target
        (X_tr, X_val) = ({'target': y_tr}, {'target': y_val})
        if ('X_wide' in X_train.keys()):
            (X_tr['X_wide'], X_val['X_wide']) = (X_train['X_wide'][idx_tr], X_train['X_wide'][idx_val])
        if ('X_tab' in X_train.keys()):
            (X_tr['X_tab'], X_val['X_tab']) = (X_train['X_tab'][idx_tr], X_train['X_tab'][idx_val])
        if ('X_text' in X_train.keys()):
            (X_tr['X_text'], X_val['X_text']) = (X_train['X_text'][idx_tr], X_train['X_text'][idx_val])
        if ('X_img' in X_train.keys()):
            (X_tr['X_img'], X_val['X_img']) = (X_train['X_img'][idx_tr], X_train['X_img'][idx_val])
        train_set = WideDeepDataset(**X_tr, transforms=transforms, **lds_args)
        eval_set = WideDeepDataset(**X_val, transforms=transforms, is_training=False)
    else:
        # no validation requested: the whole dataset becomes the training set
        if (not X_train):
            X_train = _build_train_dict(X_wide, X_tab, X_text, X_img, target)
        train_set = WideDeepDataset(**X_train, transforms=transforms, **lds_args)
        eval_set = None
    return (train_set, eval_set)
def _build_train_dict(X_wide, X_tab, X_text, X_img, target): X_train = {'target': target} if (X_wide is not None): X_train['X_wide'] = X_wide if (X_tab is not None): X_train['X_tab'] = X_tab if (X_text is not None): X_train['X_text'] = X_text if (X_img is not None): X_train['X_img'] = X_img return X_train
def print_loss_and_metric(pb: tqdm, loss: float, score: Optional[Dict] = None):
    """Update the progress-bar postfix with the running loss and, when
    available, the rounded metric scores.

    Helps readability and avoids code repetition in the training/validation
    loop within the Trainer's fit method.

    Parameters
    ----------
    pb: tqdm
        tqdm object defined as trange(...)
    loss: float
        Loss value
    score: Dict, Optional
        Dictionary where the keys are the metric names and the values are the
        corresponding (numpy) values
    """
    if score is None:
        pb.set_postfix(loss=loss)
    else:
        rounded = {
            name: np.round(value.astype(float), 4).tolist()
            for name, value in score.items()
        }
        pb.set_postfix(metrics=rounded, loss=loss)
def save_epoch_logs(epoch_logs: Dict, loss: float, score: Optional[Dict], stage: str) -> Dict:
    """Store the stage-prefixed loss and metric values in ``epoch_logs``.

    Helps readability and avoids code repetition in the training/validation
    loop within the Trainer's fit method.

    Parameters
    ----------
    epoch_logs: Dict
        Dict containing the epoch logs; mutated in place and also returned
    loss: float
        loss value
    score: Dict, Optional
        Dictionary where the keys are the metric names and the values are the
        corresponding values. May be None when no metrics are tracked
        (the original annotation said plain ``Dict`` but None was accepted)
    stage: str
        one of 'train' or 'val'; used to prefix the log keys

    Returns
    -------
    Dict
        the updated ``epoch_logs`` dict (e.g. keys 'train_loss', 'train_acc')
    """
    epoch_logs['_'.join([stage, 'loss'])] = loss
    if score is not None:
        for k, v in score.items():
            epoch_logs['_'.join([stage, k])] = v
    return epoch_logs
def bayesian_alias_to_loss(loss_fn: str, **kwargs):
    """Function that returns the corresponding loss function given an alias.
    To be used with the ``BayesianTrainer``

    Parameters
    ----------
    loss_fn: str
        Loss name. One of 'binary', 'multiclass' or 'regression'
    **kwargs
        only 'weight' is used: class weights (``pos_weight`` for the binary
        case). Defaults to None when not passed (previously a missing
        'weight' raised an obscure ``KeyError``)

    Returns
    -------
    Object
        loss function

    Raises
    ------
    ValueError
        if ``loss_fn`` is not one of the supported names (previously the
        function silently returned None)

    Examples
    --------
    >>> from pytorch_widedeep.training._trainer_utils import bayesian_alias_to_loss
    >>> loss_fn = bayesian_alias_to_loss(loss_fn="binary", weight=None)
    """
    weight = kwargs.get('weight')
    if loss_fn == 'binary':
        return nn.BCEWithLogitsLoss(pos_weight=weight, reduction='sum')
    if loss_fn == 'multiclass':
        return nn.CrossEntropyLoss(weight=weight, reduction='sum')
    if loss_fn == 'regression':
        return BayesianSELoss()
    raise ValueError(
        "loss_fn must be one of 'binary', 'multiclass' or 'regression'"
    )
def alias_to_loss(loss_fn: str, **kwargs):
    """Function that returns the corresponding loss function given an alias

    Parameters
    ----------
    loss_fn: str
        Loss name or alias
    **kwargs
        'weight' is used for the binary/multiclass losses; all kwargs are
        forwarded to ``FocalLoss`` for the focal-loss variants

    Returns
    -------
    Object
        loss function

    Examples
    --------
    >>> from pytorch_widedeep.training._trainer_utils import alias_to_loss
    >>> loss_fn = alias_to_loss(loss_fn="binary_logloss", weight=None)
    """
    # validate first so any unrecognised alias fails with a helpful message
    if loss_fn not in _ObjectiveToMethod.keys():
        raise ValueError(
            'objective or loss function is not supported. Please consider passing a callable directly to the compile method (see docs) or use one of the supported objectives or loss functions: {}'.format(', '.join(_ObjectiveToMethod.keys()))
        )
    # the two weighted losses are handled explicitly
    if loss_fn in _LossAliases.get('binary'):
        return nn.BCEWithLogitsLoss(pos_weight=kwargs['weight'])
    if loss_fn in _LossAliases.get('multiclass'):
        return nn.CrossEntropyLoss(weight=kwargs['weight'])
    # the remaining losses take no constructor arguments, keyed by canonical name
    no_arg_losses = {
        'regression': MSELoss,
        'mean_absolute_error': L1Loss,
        'mean_squared_log_error': MSLELoss,
        'root_mean_squared_error': RMSELoss,
        'root_mean_squared_log_error': RMSLELoss,
        'zero_inflated_lognormal': ZILNLoss,
        'quantile': QuantileLoss,
        'tweedie': TweedieLoss,
        'huber': HuberLoss,
        'focalr_l1': FocalR_L1Loss,
        'focalr_mse': FocalR_MSELoss,
        'focalr_rmse': FocalR_RMSELoss,
    }
    for canonical, loss_class in no_arg_losses.items():
        if loss_fn in _LossAliases.get(canonical):
            return loss_class()
    # 'binary_focal_loss' / 'multiclass_focal_loss'
    if 'focal_loss' in loss_fn:
        return FocalLoss(**kwargs)
class LabelEncoder():
    """Label Encode categorical values for multiple columns at once

    :information_source: **NOTE**:
    LabelEncoder reserves 0 for `unseen` new categories. This is convenient
    when defining the embedding layers, since we can just set padding idx to 0.

    Parameters
    ----------
    columns_to_encode: list, Optional, default = None
        List of strings containing the names of the columns to encode. If
        `None` all columns of type `object` in the dataframe will be label
        encoded.
    with_attention: bool, default = False
        Boolean indicating whether the preprocessed data will be passed to an
        attention-based model. Aliased as `for_transformer`.
    shared_embed: bool, default = False
        Boolean indicating if the embeddings will be "_shared_" when using
        attention-based models. The idea behind `shared_embed` is described
        in the Appendix A in the [TabTransformer paper](https://arxiv.org/abs/2012.06678):
        '_The goal of having column embedding is to enable the model to
        distinguish the classes in one column from those in the other
        columns_'. In other words, the idea is to let the model learn which
        column is embedded at the time. See:
        `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`.

    Attributes
    ----------
    encoding_dict : Dict
        Dictionary containing the encoding mappings in the format, e.g. :
        `{'colname1': {'cat1': 1, 'cat2': 2, ...}, 'colname2': {'cat1': 1, 'cat2': 2, ...}, ...}`
    inverse_encoding_dict : Dict
        Dictionary containing the inverse encoding mappings in the format, e.g. :
        `{'colname1': {1: 'cat1', 2: 'cat2', ...}, 'colname2': {1: 'cat1', 2: 'cat2', ...}, ...}`
    """

    @Alias('with_attention', 'for_transformer')
    def __init__(self, columns_to_encode: Optional[List[str]]=None, with_attention: bool=False, shared_embed: bool=False):
        self.columns_to_encode = columns_to_encode
        self.shared_embed = shared_embed
        self.with_attention = with_attention
        # when True, every column restarts its codes at 1; when False
        # (attention without shared embeddings) codes are cumulative across
        # columns so every category gets a globally unique index
        self.reset_embed_idx = ((not self.with_attention) or self.shared_embed)

    def partial_fit(self, chunk: pd.DataFrame) -> 'LabelEncoder':
        """Main method. Creates encoding attributes.

        Can be called repeatedly with successive chunks of a dataframe;
        categories unseen in previous chunks are appended to the existing
        mappings.

        Returns
        -------
        LabelEncoder
            `LabelEncoder` fitted object
        """
        # if no columns were specified, encode every object-dtype column
        if (self.columns_to_encode is None):
            self.columns_to_encode = list(chunk.select_dtypes(include=['object']).columns)
        else:
            # ensure the requested columns are object dtype before encoding
            for col in self.columns_to_encode:
                chunk[col] = chunk[col].astype('O')
        unique_column_vals: Dict[(str, List[str])] = {}
        for c in self.columns_to_encode:
            unique_column_vals[c] = chunk[c].unique().tolist()
        if (not hasattr(self, 'encoding_dict')):
            # first chunk: build the mappings from scratch
            self.encoding_dict: Dict[(str, Dict[(str, int)])] = {}
            if (('cls_token' in unique_column_vals) and self.shared_embed):
                # with shared embeddings the [CLS] token is pinned to index 0
                self.encoding_dict['cls_token'] = {'[CLS]': 0}
                del unique_column_vals['cls_token']
            # codes start at 1: 0 is reserved for unseen categories
            self.cum_idx: int = 1
            for (k, v) in unique_column_vals.items():
                self.encoding_dict[k] = {o: (i + self.cum_idx) for (i, o) in enumerate(unique_column_vals[k])}
                # either restart at 1 per column or keep counting globally
                self.cum_idx = (1 if self.reset_embed_idx else (self.cum_idx + len(unique_column_vals[k])))
        else:
            # subsequent chunks: only append categories not seen so far
            if (('cls_token' in unique_column_vals) and self.shared_embed):
                del unique_column_vals['cls_token']
            unseen_classes: Dict[(str, List[str])] = {}
            for c in self.columns_to_encode:
                unseen_classes[c] = list(np.setdiff1d(unique_column_vals[c], list(self.encoding_dict[c].keys())))
            for (k, v) in unique_column_vals.items():
                # next free code: per-column max + 1 when codes reset per
                # column, otherwise the global running index
                _idx = ((max(self.encoding_dict[k].values()) + 1) if self.reset_embed_idx else self.cum_idx)
                if (len(unseen_classes[k]) != 0):
                    for (i, o) in enumerate(unseen_classes[k]):
                        if (o not in self.encoding_dict[k]):
                            self.encoding_dict[k][o] = (i + _idx)
                    self.cum_idx = (1 if self.reset_embed_idx else (self.cum_idx + len(unseen_classes[k])))
        return self

    def fit(self, df: pd.DataFrame) -> 'LabelEncoder':
        """Runs update under the hood (single-shot fit on a copy of ``df``)

        Returns
        -------
        LabelEncoder
            `LabelEncoder` fitted object
        """
        self.partial_fit(df.copy())
        self.inverse_encoding_dict = self.create_inverse_encoding_dict()
        return self

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Label Encoded the categories in `columns_to_encode`

        Unseen categories are mapped to 0.

        Returns
        -------
        pd.DataFrame
            label-encoded dataframe
        """
        try:
            self.encoding_dict
        except AttributeError:
            raise NotFittedError("This LabelEncoder instance is not fitted yet. Call 'fit' with appropriate arguments before using this LabelEncoder.")
        df_inp = df.copy()
        for col in self.columns_to_encode:
            df_inp[col] = df_inp[col].astype('O')
        for (k, v) in self.encoding_dict.items():
            # unseen categories fall back to the reserved index 0
            df_inp[k] = df_inp[k].apply((lambda x: (v[x] if (x in v.keys()) else 0)))
        return df_inp

    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Combines `fit` and `transform`

        Examples
        --------

        >>> import pandas as pd
        >>> from pytorch_widedeep.utils import LabelEncoder
        >>> df = pd.DataFrame({'col1': [1,2,3], 'col2': ['me', 'you', 'him']})
        >>> columns_to_encode = ['col2']
        >>> encoder = LabelEncoder(columns_to_encode)
        >>> encoder.fit_transform(df)
           col1  col2
        0     1     1
        1     2     2
        2     3     3
        >>> encoder.encoding_dict
        {'col2': {'me': 1, 'you': 2, 'him': 3}}

        Returns
        -------
        pd.DataFrame
            label-encoded dataframe
        """
        return self.fit(df).transform(df)

    def create_inverse_encoding_dict(self) -> Dict[(str, Dict[(int, str)])]:
        # invert each per-column mapping; index 0 always decodes as 'unseen'
        inverse_encoding_dict = dict()
        for c in self.encoding_dict:
            inverse_encoding_dict[c] = {v: k for (k, v) in self.encoding_dict[c].items()}
            inverse_encoding_dict[c][0] = 'unseen'
        return inverse_encoding_dict

    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Returns the original categories

        NOTE(review): mutates and returns the input ``df`` in place (no copy),
        unlike ``transform`` which works on a copy.

        Examples
        --------

        >>> import pandas as pd
        >>> from pytorch_widedeep.utils import LabelEncoder
        >>> df = pd.DataFrame({'col1': [1,2,3], 'col2': ['me', 'you', 'him']})
        >>> columns_to_encode = ['col2']
        >>> encoder = LabelEncoder(columns_to_encode)
        >>> df_enc = encoder.fit_transform(df)
        >>> encoder.inverse_transform(df_enc)
           col1 col2
        0     1   me
        1     2  you
        2     3  him

        Returns
        -------
        pd.DataFrame
            DataFrame with original categories
        """
        if (not hasattr(self, 'inverse_encoding_dict')):
            self.inverse_encoding_dict = self.create_inverse_encoding_dict()
        for (k, v) in self.inverse_encoding_dict.items():
            df[k] = df[k].apply((lambda x: v[x]))
        return df

    def __repr__(self) -> str:
        # only show the parameters that deviate from their defaults
        list_of_params: List[str] = []
        if (self.columns_to_encode is not None):
            list_of_params.append('columns_to_encode={columns_to_encode}')
        if self.with_attention:
            list_of_params.append('with_attention={with_attention}')
        if self.shared_embed:
            list_of_params.append('shared_embed={shared_embed}')
        all_params = ', '.join(list_of_params)
        return f'LabelEncoder({all_params.format(**self.__dict__)})'
def find_bin(bin_edges: Union[(np.ndarray, Tensor)], values: Union[(np.ndarray, Tensor)], ret_value: bool=True) -> Union[(np.ndarray, Tensor)]:
    """Return, for each value, the bin it falls into.

    Applies the 'searchsorted' algorithm to ``bin_edges`` and ``values`` and
    returns either the resulting indices or the left edge of the bins
    (i.e. ``bin_edges[indices]``). Values beyond the last edge are clamped
    into the last bin.

    Parameters
    ----------
    bin_edges: Union[np.ndarray, Tensor]
        monotonically increasing array of bin edges
    values: Union[np.ndarray, Tensor]
        values for which we want corresponding bins. Must be the same array
        type as ``bin_edges``
    ret_value: bool
        if True, return bin (left-edge) values, else indices

    Returns
    -------
    left_bin_edges: Union[np.ndarray, Tensor]
        left bin edges (or their indices when ``ret_value=False``)

    Raises
    ------
    TypeError
        when the two inputs are not both np.ndarray or both Tensor
    """
    # isinstance (rather than the original `type(x) ==` checks) also accepts
    # subclasses of ndarray/Tensor, which behave identically here
    if isinstance(bin_edges, np.ndarray) and isinstance(values, np.ndarray):
        indices: Union[(np.ndarray, Tensor)] = np.searchsorted(bin_edges, values, side='left')
        # shift interior hits one bin to the left so each value maps to the
        # bin whose left edge is <= value
        indices = np.where((indices == 0) | (indices == len(bin_edges)), indices, indices - 1)
        # clamp values beyond the last edge into the last bin
        indices = np.where(indices != len(bin_edges), indices, indices - 2)
    elif isinstance(bin_edges, Tensor) and isinstance(values, Tensor):
        bin_edges = bin_edges.to(values.device)
        indices = torch.searchsorted(bin_edges, values, right=False)
        indices = torch.where((indices == 0) | (indices == len(bin_edges)), indices, indices - 1)
        indices = torch.where(indices != len(bin_edges), indices, indices - 2)
    else:
        # original message read "of teh same type ... np.ndarray of Tensor"
        raise TypeError('Both input arrays must be of the same type, either np.ndarray or Tensor')
    return indices if not ret_value else bin_edges[indices]
def _laplace(x, sigma: Union[(int, float)]=2): return (np.exp(((- abs(x)) / sigma)) / (2.0 * sigma))
def get_kernel_window(kernel: Literal[('gaussian', 'triang', 'laplace')]='gaussian', ks: int=5, sigma: Union[(int, float)]=2) -> List[float]:
    """Prepare the window of values from a symmetrical kernel function for
    smoothing of the distribution in Label and Feature Distribution
    Smoothing (LDS & FDS).

    Parameters
    ----------
    kernel: Literal['gaussian', 'triang', 'laplace'] = 'gaussian'
        choice of kernel for label distribution smoothing
    ks: int = 5
        kernel size, i.e. count of samples in the symmetric window
    sigma: Union[int, float] = 2
        standard deviation of the 'gaussian'/'laplace' kernel. Bug fix: the
        'laplace' branch previously ignored this argument and always used
        the ``_laplace`` default of 2

    Returns
    -------
    kernel_window: list
        values of the chosen kernel function (ndarray for 'gaussian' and
        'triang', list for 'laplace', as before)

    Raises
    ------
    ValueError
        for an unsupported kernel name
    """
    half_ks = (ks - 1) // 2
    if kernel == 'gaussian':
        # smoothing a unit impulse yields a (truncated) gaussian window
        base_kernel = ([0.0] * half_ks) + [1.0] + ([0.0] * half_ks)
        kernel_window = gaussian_filter1d(base_kernel, sigma=sigma)
    elif kernel == 'triang':
        window = triang(ks)
        kernel_window = window / window.sum()
    elif kernel == 'laplace':
        # pass sigma through explicitly (was dropped by the original
        # ``map(_laplace, ...)`` call)
        kernel_window = [_laplace(offset, sigma) for offset in np.arange(-half_ks, half_ks + 1)]
    else:
        raise ValueError("Kernel can be only ['gaussian', 'triang', 'laplace'].")
    return kernel_window
def partition(a: Collection, sz: int) -> List[Collection]:
    """Split the sliceable collection ``a`` into chunks of size ``sz``
    (the last chunk is shorter when ``len(a)`` is not a multiple of ``sz``)."""
    starts = range(0, len(a), sz)
    return [a[start:start + sz] for start in starts]
def partition_by_cores(a: Collection, n_cpus: int) -> List[Collection]:
    """Split ``a`` into at most ``n_cpus`` roughly equal chunks, one per core."""
    chunk_size = (len(a) // n_cpus) + 1
    return partition(a, chunk_size)
def ifnone(a: Any, b: Any) -> Any:
    """Return ``a`` unless it is None, in which case return the fallback ``b``."""
    if a is None:
        return b
    return a
def num_cpus() -> Optional[int]:
    """Number of CPUs available to this process, falling back to the total
    CPU count on platforms without ``sched_getaffinity`` (e.g. macOS/Windows)."""
    if hasattr(os, 'sched_getaffinity'):
        return len(os.sched_getaffinity(0))
    return os.cpu_count()
class BaseTokenizer():
    """Minimal tokenizer interface: plain single-space splitting and no
    special cases. Subclasses override both methods."""

    def __init__(self, lang: str):
        self.lang = lang

    def tokenizer(self, t: str) -> List[str]:
        """Split ``t`` on single spaces."""
        return t.split(' ')

    def add_special_cases(self, toks: Collection[str]):
        """Hook for subclasses; the base tokenizer registers nothing."""
        pass
class SpacyTokenizer(BaseTokenizer):
    def __init__(self, lang: str):
        """Adapter exposing a spacy tokenizer through the :obj:`BaseTokenizer`
        interface.

        Parameters
        ----------
        lang: str
            Language of the text to be tokenized
        """
        self.tok = spacy.blank(lang)

    def tokenizer(self, t: str):
        """Tokenize ``t`` with ``Spacy`` and return the token strings.

        Parameters
        ----------
        t: str
            text to be tokenized
        """
        return [token.text for token in self.tok.tokenizer(t)]

    def add_special_cases(self, toks: Collection[str]):
        """Register every string in ``toks`` as a ``Spacy`` special case so it
        is kept as a single token.

        Parameters
        ----------
        toks: Collection
            `List`, `Tuple`, `Set` or `Dictionary` where the values are
            strings that are the special cases to add to the tokenizer
        """
        for w in toks:
            self.tok.tokenizer.add_special_case(w, [{ORTH: w}])
def spec_add_spaces(t: str) -> str:
    """Surround every '/', '#' and newline character in ``t`` with spaces."""
    return re.sub(r'([/#\n])', r' \1 ', t)
def rm_useless_spaces(t: str) -> str:
    """Collapse any run of two or more spaces in ``t`` into a single space."""
    multi_space = re.compile(' {2,}')
    return multi_space.sub(' ', t)
def replace_rep(t: str) -> str:
    """Collapse character-level repetitions (4+ of the same non-space char)
    into the ``TK_REP`` marker followed by the count and the character."""
    pattern = re.compile(r'(\S)(\1{3,})')

    def _expand(match: Match[str]) -> str:
        char, extra = match.groups()
        return f' {TK_REP} {len(extra) + 1} {char} '

    return pattern.sub(_expand, t)
def replace_wrep(t: str) -> str:
    """Collapse word-level repetitions (4+ of the same word) into the
    ``TK_WREP`` marker followed by the count and the word."""
    pattern = re.compile(r'(\b\w+\W+)(\1{3,})')

    def _expand(match: Match[str]) -> str:
        word, extra = match.groups()
        return f' {TK_WREP} {len(extra.split()) + 1} {word} '

    return pattern.sub(_expand, t)
def fix_html(x: str) -> str:
    """Undo common html artefacts in ``x``: entity fragments, escaped
    characters, ``<br />`` tags and tokenizer '@' separators; finally
    unescape remaining entities and collapse repeated spaces."""
    # order matters: e.g. '\\n' must be handled before the bare '\\' rule
    replacements = [
        ('#39;', "'"),
        ('amp;', '&'),
        ('#146;', "'"),
        ('nbsp;', ' '),
        ('#36;', '$'),
        ('\\n', '\n'),
        ('quot;', "'"),
        ('<br />', '\n'),
        ('\\"', '"'),
        ('<unk>', UNK),
        (' @.@ ', '.'),
        (' @-@ ', '-'),
        (' @,@ ', ','),
        ('\\', ' \\ '),
    ]
    for old, new in replacements:
        x = x.replace(old, new)
    collapse_spaces = re.compile(' +')
    return collapse_spaces.sub(' ', html.unescape(x))
def replace_all_caps(x: Collection[str]) -> Collection[str]:
    """Lower-case every ALL-CAPS token (length > 1) in ``x``, inserting the
    ``TK_UP`` marker in front of it; other tokens pass through unchanged."""
    res = []
    for tok in x:
        if len(tok) > 1 and tok.isupper():
            res += [TK_UP, tok.lower()]
        else:
            res.append(tok)
    return res
def deal_caps(x: Collection[str]) -> Collection[str]:
    'Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before.'
    res = []
    for t in x:
        # empty tokens are dropped entirely
        if (t == ''):
            continue
        # a 'Capitalized' token: leading uppercase, the rest lowercase
        if (t[0].isupper() and (len(t) > 1) and t[1:].islower()):
            res.append(TK_MAJ)
        # NOTE(review): every surviving token is lower-cased, i.e. this append
        # sits outside the conditional above (mirrors the fastai
        # implementation this is derived from) — confirm intentional
        res.append(t.lower())
    return res
class Tokenizer():
    """Class to combine a series of rules and a tokenizer function to tokenize
    text with multiprocessing.

    Setting some of the parameters of this class require perhaps some
    familiarity with the source code.

    Parameters
    ----------
    tok_func: Callable, default = ``SpacyTokenizer``
        Tokenizer Object. See `pytorch_widedeep.utils.fastai_transforms.SpacyTokenizer`
    lang: str, default = "en"
        Text's Language
    pre_rules: ListRules, Optional, default = None
        Custom type: ``Collection[Callable[[str], str]]``. These are
        `Callable` objects that will be applied to the text (str) directly as
        `rule(tok)` before being tokenized.
    post_rules: ListRules, Optional, default = None
        Custom type: ``Collection[Callable[[str], str]]``. These are
        `Callable` objects that will be applied to the tokens as
        `rule(tokens)` after the text has been tokenized.
    special_cases: Collection, Optional, default = None
        special cases to be added to the tokenizer via ``Spacy``'s
        ``add_special_case`` method
    n_cpus: int, Optional, default = None
        number of CPUs to used during the tokenization process
    """

    def __init__(self, tok_func: Callable=SpacyTokenizer, lang: str='en', pre_rules: Optional[ListRules]=None, post_rules: Optional[ListRules]=None, special_cases: Optional[Collection[str]]=None, n_cpus: Optional[int]=None):
        (self.tok_func, self.lang, self.special_cases) = (tok_func, lang, special_cases)
        # None falls back to the library-wide default rule sets
        self.pre_rules = ifnone(pre_rules, defaults.text_pre_rules)
        self.post_rules = ifnone(post_rules, defaults.text_post_rules)
        # NOTE(review): re-assigns self.special_cases (already set in the
        # tuple unpack above) — redundant but harmless
        self.special_cases = (special_cases if (special_cases is not None) else defaults.text_spec_tok)
        self.n_cpus = ifnone(n_cpus, defaults.cpus)

    def __repr__(self) -> str:
        res = f'''Tokenizer {self.tok_func.__name__} in {self.lang} with the following rules:
'''
        for rule in self.pre_rules:
            res += f''' - {rule.__name__}
'''
        for rule in self.post_rules:
            res += f''' - {rule.__name__}
'''
        return res

    def process_text(self, t: str, tok: BaseTokenizer) -> List[str]:
        """Process and tokenize one text ``t`` with tokenizer ``tok``.

        Pre-rules run on the raw string, post-rules on the token list.

        Parameters
        ----------
        t: str
            text to be processed and tokenized
        tok: ``BaseTokenizer``
            Instance of `BaseTokenizer`. See
            `pytorch_widedeep.utils.fastai_transforms.BaseTokenizer`

        Returns
        -------
        List[str]
            List of tokens
        """
        for rule in self.pre_rules:
            t = rule(t)
        toks = tok.tokenizer(t)
        for rule in self.post_rules:
            toks = rule(toks)
        return toks

    def _process_all_1(self, texts: Collection[str]) -> List[List[str]]:
        'Process a list of ``texts`` in one process.'
        # each worker builds its own tokenizer instance (not picklable/shared)
        tok = self.tok_func(self.lang)
        if self.special_cases:
            tok.add_special_cases(self.special_cases)
        return [self.process_text(str(t), tok) for t in texts]

    def process_all(self, texts: Collection[str]) -> List[List[str]]:
        """Process a list of texts. Parallel execution of ``process_text``.

        Examples
        --------
        >>> from pytorch_widedeep.utils import Tokenizer
        >>> texts = ['Machine learning is great', 'but building stuff is even better']
        >>> tok = Tokenizer()
        >>> tok.process_all(texts)
        [['xxmaj', 'machine', 'learning', 'is', 'great'], ['but', 'building', 'stuff', 'is', 'even', 'better']]

        :information_source: **NOTE**:
        Note the token ``TK_MAJ`` (`xxmaj`), used to indicate the
        next word begins with a capital in the original text. For more
        details of special tokens please see the [``fastai`` docs](https://docs.fast.ai/text.core.html#Tokenizing).

        Returns
        -------
        List[List[str]]
            List containing lists of tokens. One list per "_document_"
        """
        if (self.n_cpus <= 1):
            return self._process_all_1(texts)
        # fan the documents out over the worker pool and flatten the results
        with ProcessPoolExecutor(self.n_cpus) as e:
            return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
class Vocab():
    """Contains the correspondence between numbers and tokens.

    Parameters
    ----------
    max_vocab: int
        maximum vocabulary size
    min_freq: int
        minimum frequency for a token to be considered
    pad_idx: int, Optional, default = None
        padding index. If `None`, Fastai's Tokenizer leaves the 0 index
        for the unknown token (_'xxunk'_) and defaults to 1 for the padding
        token (_'xxpad'_).

    Attributes
    ----------
    itos: Collection
        `index to str`. Collection of strings that are the tokens of the
        vocabulary
    stoi: defaultdict
        `str to index`. Dictionary containing the tokens of the vocabulary and
        their corresponding index
    """

    def __init__(self, max_vocab: int, min_freq: int, pad_idx: Optional[int]=None):
        self.max_vocab = max_vocab
        self.min_freq = min_freq
        self.pad_idx = pad_idx

    def create(self, tokens: Tokens) -> 'Vocab':
        """Create a vocabulary object from a set of tokens.

        Parameters
        ----------
        tokens: Tokens
            Custom type: ``Collection[Collection[str]]`` see
            `pytorch_widedeep.wdtypes`. Collection of collection of
            strings (e.g. list of tokenized sentences)

        Examples
        --------
        >>> from pytorch_widedeep.utils import Tokenizer, Vocab
        >>> texts = ['Machine learning is great', 'but building stuff is even better']
        >>> tokens = Tokenizer().process_all(texts)
        >>> vocab = Vocab(max_vocab=18, min_freq=1).create(tokens)
        >>> vocab.numericalize(['machine', 'learning', 'is', 'great'])
        [10, 11, 9, 12]
        >>> vocab.textify([10, 11, 9, 12])
        'machine learning is great'

        :information_source: **NOTE**:
        Note the many special tokens that ``fastai``'s' tokenizer adds. These
        are particularly useful when building Language models and/or in
        classification/Regression tasks. Please see the
        [``fastai`` docs](https://docs.fast.ai/text.core.html#Tokenizing).

        Returns
        -------
        Vocab
            An instance of a `Vocab` object
        """
        # keep the most frequent tokens that clear the min_freq threshold
        freq = Counter((p for o in tokens for p in o))
        itos = [o for (o, c) in freq.most_common(self.max_vocab) if (c >= self.min_freq)]
        # force every special token to the front of the vocabulary, in the
        # order defined by defaults.text_spec_tok
        for o in reversed(defaults.text_spec_tok):
            if (o in itos):
                itos.remove(o)
            itos.insert(0, o)
        if ((self.pad_idx is not None) and (self.pad_idx != 1)):
            # move PAD to the requested position and locate 'xxunk' afterwards
            itos.remove(PAD)
            itos.insert(self.pad_idx, PAD)
            xxunk_idx = np.where([(el == 'xxunk') for el in itos])[0][0]
        else:
            # default layout: 'xxunk' sits at index 0
            xxunk_idx = 0
        itos = itos[:self.max_vocab]
        if (len(itos) < self.max_vocab):
            # pad vocab size to a multiple of 8 (friendly for embedding sizes)
            while ((len(itos) % 8) != 0):
                itos.append('xxfake')
        self.itos = itos
        # unknown tokens map to the 'xxunk' index by default
        self.stoi = defaultdict((lambda : xxunk_idx), {v: k for (k, v) in enumerate(self.itos)})
        return self

    def fit(self, tokens: Tokens) -> 'Vocab':
        # sklearn-style alias for ``create``
        return self.create(tokens)

    def numericalize(self, t: Collection[str]) -> List[int]:
        """Convert a list of tokens ``t`` to their ids.

        Returns
        -------
        List[int]
            List of '_numericalised_' tokens
        """
        return [self.stoi[w] for w in t]

    def transform(self, t: Collection[str]) -> List[int]:
        # sklearn-style alias for ``numericalize``
        return self.numericalize(t)

    def textify(self, nums: Collection[int], sep=' ') -> List[str]:
        """Convert a list of ``nums`` (or indexes) to their tokens.

        Returns
        -------
        List[str]
            List of tokens (joined into one string when ``sep`` is not None)
        """
        return (sep.join([self.itos[i] for i in nums]) if (sep is not None) else [self.itos[i] for i in nums])

    def inverse_transform(self, nums: Collection[int], sep=' ') -> List[str]:
        # sklearn-style alias for ``textify``
        return self.textify(nums, sep)

    def __getstate__(self):
        # only itos is pickled; stoi is rebuilt on unpickling
        return {'itos': self.itos}

    def __setstate__(self, state: dict):
        self.itos = state['itos']
        # NOTE(review): rebuilt with default 0 (not the original xxunk index);
        # equivalent only when 'xxunk' sits at index 0
        self.stoi = defaultdict(int, {v: k for (k, v) in enumerate(self.itos)})
class ChunkVocab():
    """Vocabulary built incrementally from a fixed number of data chunks.

    Token frequencies are accumulated chunk by chunk via `fit`; once
    `n_chunks` chunks have been seen, the final `itos`/`stoi` mappings are
    built, mirroring `Vocab.create`.

    Parameters
    ----------
    max_vocab: int
        maximum vocabulary size
    min_freq: int
        minimum frequency for a token to be considered
    n_chunks: int
        total number of chunks that will be passed to `fit`
    pad_idx: int, Optional, default = None
        padding index. If `None`, fastai's default token layout is kept
        ('xxunk' at index 0, 'xxpad' at index 1)
    """

    def __init__(self, max_vocab: int, min_freq: int, n_chunks: int, pad_idx: Optional[int]=None):
        self.max_vocab = max_vocab
        self.min_freq = min_freq
        self.n_chunks = n_chunks
        self.pad_idx = pad_idx
        self.chunk_counter = 0
        self.is_fitted = False

    def fit(self, tokens: Tokens) -> 'ChunkVocab':
        """Accumulate token frequencies for one chunk; build the vocabulary
        when the last expected chunk arrives.

        Returns
        -------
        ChunkVocab
            self, with `itos`/`stoi` set once all chunks have been seen
        """
        if (self.chunk_counter == 0):
            self.freq = Counter((tok for sent in tokens for tok in sent))
        else:
            self.freq.update((tok for sent in tokens for tok in sent))
        self.chunk_counter += 1
        if (self.chunk_counter == self.n_chunks):
            itos = [o for (o, c) in self.freq.most_common(self.max_vocab) if (c >= self.min_freq)]
            # prepend fastai's special tokens so they occupy the lowest indices
            for o in reversed(defaults.text_spec_tok):
                if (o in itos):
                    itos.remove(o)
                itos.insert(0, o)
            if (self.pad_idx is not None):
                # move the padding token to the requested position and
                # re-locate 'xxunk' for the stoi fallback
                itos.remove(PAD)
                itos.insert(self.pad_idx, PAD)
                xxunk_idx = np.where([(el == 'xxunk') for el in itos])[0][0]
            else:
                # BUG FIX: xxunk_idx was previously undefined when pad_idx is
                # None, raising NameError on the final chunk. The default
                # layout keeps 'xxunk' at index 0 (as in Vocab.create).
                xxunk_idx = 0
            itos = itos[:self.max_vocab]
            # pad with fake tokens to a multiple of 8 when below max_vocab
            if (len(itos) < self.max_vocab):
                while ((len(itos) % 8) != 0):
                    itos.append('xxfake')
            self.itos = itos
            self.stoi = defaultdict((lambda : xxunk_idx), {v: k for (k, v) in enumerate(self.itos)})
            self.is_fitted = True
        return self

    def transform(self, t: Collection[str]) -> List[int]:
        """Convert a list of tokens ``t`` to their ids.

        Returns
        -------
        List[int]
            List of '_numericalised_' tokens
        """
        return [self.stoi[w] for w in t]

    def inverse_transform(self, nums: Collection[int], sep=' ') -> List[str]:
        """Convert a list of ``nums`` (or indexes) to their tokens.

        Returns
        -------
        List[str]
            List of tokens (or a single joined string when ``sep`` is not None)
        """
        return (sep.join([self.itos[i] for i in nums]) if (sep is not None) else [self.itos[i] for i in nums])

    def __getstate__(self):
        # only itos is pickled; stoi is rebuilt on unpickling
        return {'itos': self.itos}

    def __setstate__(self, state: dict):
        self.itos = state['itos']
        self.stoi = defaultdict(int, {v: k for (k, v) in enumerate(self.itos)})
class Alias():
    def __init__(self, primary_name: str, aliases: Union[(str, List[str])]):
        """Convert uses of `aliases` to `primary_name` upon calling the decorated
        function/method

        Parameters
        ----------
        primary_name: String
            Preferred name for the parameter, the value of which will be set
            to the value of the used alias. If `primary_name` is already
            explicitly used on call in addition to any aliases, the value of
            `primary_name` will remain unchanged. It only assumes the value of
            an alias if the `primary_name` is not used
        aliases: List, string
            One or multiple string aliases for `primary_name`. If
            `primary_name` is not used on call, its value will be set to that
            of a random alias in `aliases`. Before calling the decorated
            callable, all `aliases` are removed from its kwargs

        Examples
        --------
        >>> class Foo():
        ...     @Alias("a", ["a2"])
        ...     def __init__(self, a, b=None):
        ...         print(a, b)
        >>> @Alias("a", ["a2"])
        ... @Alias("b", ["b2"])
        ... def bar(a, b=None):
        ...     print(a, b)
        >>> foo = Foo(a2="x", b="y")
        x y
        >>> bar(a2="x", b2="y")
        x y"""
        self.primary_name = primary_name
        # normalise to a list so a single string alias is also accepted
        self.aliases = (aliases if isinstance(aliases, list) else [aliases])

    @wrapt.decorator
    def __call__(self, wrapped, instance, args, kwargs):
        # wrapt passes the decorated callable, the bound instance (None for
        # plain functions) and the call's args/kwargs
        for alias in set(self.aliases).intersection(kwargs):
            # setdefault: an explicitly passed primary_name wins over any alias
            kwargs.setdefault(self.primary_name, kwargs.pop(alias))
            if instance:
                # record which alias was used on the instance for later inspection
                set_default_attr(instance, '__wd_aliases_used', {})[self.primary_name] = alias
            else:
                # no instance (plain function): record on the callable itself
                set_default_attr(wrapped, '__wd_aliases_used', {})[self.primary_name] = alias
        return wrapped(*args, **kwargs)
def set_default_attr(obj: Any, name: str, value: Any):
    """Return ``obj.name``, first setting it to ``value`` if the attribute
    does not already exist.

    Parameters
    ----------
    obj: Object
        Object whose `name` attribute will be returned (after setting it to
        `value`, if necessary)
    name: String
        Name of the attribute to set to `value`, or to return
    value: Object
        Default value to give to `obj.name` if the attribute does not already
        exist

    Returns
    -------
    Object
        `obj.name` if it exists. Else, `value`

    Examples
    --------
    >>> foo = type("Foo", tuple(), {"my_attr": 32})
    >>> set_default_attr(foo, "my_attr", 99)
    32
    >>> set_default_attr(foo, "other_attr", 9000)
    9000
    >>> assert foo.my_attr == 32
    >>> assert foo.other_attr == 9000
    """
    # sentinel distinguishes "attribute missing" from "attribute is None"
    missing = object()
    current = getattr(obj, name, missing)
    if current is missing:
        setattr(obj, name, value)
        return value
    return current
def simple_preprocess(doc: str, lower: bool=False, deacc: bool=False, min_len: int=2, max_len: int=15) -> List[str]:
    """
    This is `Gensim`'s `simple_preprocess` with a `lower` param to
    indicate whether or not to lower case all the tokens in the doc

    For more information see: `Gensim` [utils module](https://radimrehurek.com/gensim/utils.html)

    Parameters
    ----------
    doc: str
        Input document.
    lower: bool, default = False
        Lower case tokens in the input doc
    deacc: bool, default = False
        Remove accent marks from tokens using `Gensim`'s `deaccent`
    min_len: int, default = 2
        Minimum length of token (inclusive). Shorter tokens are discarded.
    max_len: int, default = 15
        Maximum length of token in result (inclusive). Longer tokens are discarded.

    Examples
    --------
    >>> from pytorch_widedeep.utils import simple_preprocess
    >>> simple_preprocess('Machine learning is great')
    ['Machine', 'learning', 'is', 'great']

    Returns
    -------
    List[str]
        List with the processed tokens
    """

    def _keep(tok: str) -> bool:
        # drop tokens outside the length bounds or starting with '_'
        return min_len <= len(tok) <= max_len and not tok.startswith('_')

    return [tok for tok in tokenize(doc, lower=lower, deacc=deacc, errors='ignore') if _keep(tok)]
def get_texts(texts: List[str], already_processed: Optional[bool]=False, n_cpus: Optional[int]=None) -> List[List[str]]:
    """Tokenization using `Fastai`'s `Tokenizer` because it does a
    series of very convenient things during the tokenization process

    See `pytorch_widedeep.utils.fastai_utils.Tokenizer`

    Parameters
    ----------
    texts: List
        List of str with the texts (or documents). One str per document
    already_processed: bool, Optional, default = False
        Boolean indicating if the text is already processed and we simply want
        to tokenize it. This parameter is thought for those cases where the
        input sequences might not be text (but IDs, or anything else) and we
        just want to tokenize it
    n_cpus: int, Optional, default = None
        number of CPUs to used during the tokenization process

    Examples
    --------
    >>> from pytorch_widedeep.utils import get_texts
    >>> texts = ['Machine learning is great', 'but building stuff is even better']
    >>> get_texts(texts)
    [['xxmaj', 'machine', 'learning', 'is', 'great'], ['but', 'building', 'stuff', 'is', 'even', 'better']]

    Returns
    -------
    List[List[str]]
        List of lists, one list per '_document_' containing its corresponding tokens

    :information_source: **NOTE**:
    `get_texts` uses `pytorch_widedeep.utils.fastai_transforms.Tokenizer`.
    Such tokenizer uses a series of convenient processing steps, including
    the addition of some special tokens, such as `TK_MAJ` (`xxmaj`), used to
    indicate the next word begins with a capital in the original text. For more
    details of special tokens please see the [`fastai` `docs](https://docs.fast.ai/text.core.html#Tokenizing)
    """
    cpu_count = os.cpu_count() if n_cpus is None else n_cpus
    if already_processed:
        # inputs are not raw text (IDs etc.): tokenize as-is
        docs = texts
    else:
        docs = [' '.join(simple_preprocess(t)) for t in texts]
    return Tokenizer(n_cpus=cpu_count).process_all(docs)
def pad_sequences(seq: List[int], maxlen: int, pad_first: bool=True, pad_idx: int=1) -> np.ndarray:
    """
    Given a List of tokenized and `numericalised` sequences it will return
    padded sequences according to the input parameters.

    Parameters
    ----------
    seq: List
        List of int with the `numericalised` tokens
    maxlen: int
        Maximum length of the padded sequences
    pad_first: bool, default = True
        Indicates whether the padding index will be added at the beginning or the
        end of the sequences
    pad_idx: int, default = 1
        padding index. Fastai's Tokenizer leaves 0 for the 'unknown' token.

    Examples
    --------
    >>> from pytorch_widedeep.utils import pad_sequences
    >>> seq = [1,2,3]
    >>> pad_sequences(seq, maxlen=5, pad_idx=0)
    array([0, 0, 1, 2, 3], dtype=int32)

    Returns
    -------
    np.ndarray
        numpy array with the padded sequences
    """
    n_tokens = len(seq)
    if n_tokens == 0:
        # nothing to keep: the whole output is padding
        return np.full(maxlen, pad_idx, dtype='int32')
    if n_tokens >= maxlen:
        # too long: truncate from the front, keeping the last `maxlen` tokens
        return np.asarray(seq[-maxlen:], dtype='int32')
    padded = np.full(maxlen, pad_idx, dtype='int32')
    if pad_first:
        padded[maxlen - n_tokens:] = seq
    else:
        padded[:n_tokens] = seq
    return padded
def build_embeddings_matrix(vocab: Union[(Vocab, ChunkVocab)], word_vectors_path: str, min_freq: int, verbose: int=1) -> np.ndarray:
    """Build the embedding matrix using pretrained word vectors.

    Returns pretrained word embeddings. If a word in our vocabulary is not
    among the pretrained embeddings it will be assigned the mean pretrained
    word-embeddings vector

    Parameters
    ----------
    vocab: Vocab
        see `pytorch_widedeep.utils.fastai_utils.Vocab`. Only ``vocab.itos``
        is read here
    word_vectors_path: str
        path to the pretrained word embeddings (GloVe-style text file: one
        token followed by its vector components per line)
    min_freq: int
        minimum frequency required for a word to be in the vocabulary
        (only used in the final verbose message)
    verbose: int, default=1
        level of verbosity. Set to 0 for no verbosity

    Returns
    -------
    np.ndarray
        Pretrained word embeddings, float32, shape
        (len(vocab.itos), embedding_dim)

    Raises
    ------
    FileNotFoundError
        if ``word_vectors_path`` does not exist
    """
    if (not os.path.isfile(word_vectors_path)):
        raise FileNotFoundError('{} not found'.format(word_vectors_path))
    if verbose:
        print('Indexing word vectors...')
    embeddings_index = {}
    # FIX: use a context manager so the file handle is always closed, even
    # if parsing a line raises (was: f = open(...); ...; f.close())
    with open(word_vectors_path) as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    if verbose:
        print('Loaded {} word vectors'.format(len(embeddings_index)))
        print('Preparing embeddings matrix...')
    # OOV fallback: the mean of all pretrained vectors
    mean_word_vector = np.mean(list(embeddings_index.values()), axis=0)
    embedding_dim = len(list(embeddings_index.values())[0])
    num_words = len(vocab.itos)
    embedding_matrix = np.zeros((num_words, embedding_dim))
    found_words = 0
    for (i, word) in enumerate(vocab.itos):
        embedding_vector = embeddings_index.get(word)
        if (embedding_vector is not None):
            embedding_matrix[i] = embedding_vector
            found_words += 1
        else:
            # word not in the pretrained file: use the mean vector
            embedding_matrix[i] = mean_word_vector
    if verbose:
        print('{} words in the vocabulary had {} vectors and appear more than {} times'.format(found_words, word_vectors_path, min_freq))
    return embedding_matrix.astype('float32')
def requirements(fname):
    """Read a requirements file located next to this module and return its
    lines, each stripped of surrounding whitespace.

    Parameters
    ----------
    fname: str
        file name, resolved relative to this module's directory (an absolute
        path is used as-is, per `os.path.join` semantics)

    Returns
    -------
    List[str]
        one stripped entry per line of the file (blank lines yield '')
    """
    # FIX: the file handle was previously opened inline and never closed;
    # the context manager guarantees it is released
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return [line.strip() for line in f]
def test_mse_based_losses():
    # BayesianSELoss scaled by 0.5 should match sklearn's mean_squared_error
    y_true = np.array([3, 5, 2.5, 7]).reshape(-1, 1)
    y_pred = np.array([2.5, 5, 4, 8]).reshape(-1, 1)
    t_true, t_pred = torch.from_numpy(y_true), torch.from_numpy(y_pred)
    sklearn_mse = mean_squared_error(y_true, y_pred)
    bayesian_mse = BayesianSELoss()(t_pred, t_true).item() * 0.5
    assert np.isclose(sklearn_mse, bayesian_mse)
def test_wide():
    # the wide model should map the 10-row input batch to one output column
    prediction = model(inp)
    n_rows, n_cols = prediction.size(0), prediction.size(1)
    assert n_rows == 10 and n_cols == 1
@pytest.mark.parametrize('model', [bwide, btabmlp])
@pytest.mark.parametrize('scheduler_name', ['step', 'cyclic'])
def test_history_callback(model, scheduler_name):
    # LRHistory should record one learning rate per scheduler step
    init_lr = 0.001
    optimizer = torch.optim.Adam(model.parameters(), lr=init_lr)
    if scheduler_name == 'cyclic':
        scheduler = CyclicLR(optimizer, base_lr=init_lr, max_lr=0.01, step_size_up=5, cycle_momentum=False)
        len_lr_output = 11
    elif scheduler_name == 'step':
        scheduler = StepLR(optimizer, step_size=4)
        len_lr_output = 5
    btrainer = BayesianTrainer(model=model, objective='binary', optimizer=optimizer, lr_scheduler=scheduler, callbacks=[LRHistory(n_epochs=5)], verbose=0)
    btrainer.fit(X_tab=X_tab, target=target, n_epochs=5, batch_size=16)
    lr_list = btrainer.lr_history['lr_0']
    checks = [
        len(btrainer.history['train_loss']) == 5,
        len(lr_list) == len_lr_output,
    ]
    if scheduler_name == 'step':
        # StepLR decays by gamma=0.1 once after epoch 4
        checks.append(lr_list[-1] == init_lr / 10)
    elif scheduler_name == 'cyclic':
        # a full cycle returns to the base lr
        checks.append(lr_list[-1] == init_lr)
    assert all(checks)
@pytest.mark.parametrize('model', [bwide, btabmlp])
def test_early_stop(model):
    # with a huge min_delta no epoch ever "improves", so training should stop
    # after `patience` epochs plus the initial one
    early_stopping = EarlyStopping(min_delta=10000.0, patience=3, restore_best_weights=True, verbose=1)
    btrainer = BayesianTrainer(model=model, objective='binary', callbacks=[early_stopping], verbose=0)
    btrainer.fit(X_tab=X_tab, target=target, val_split=0.2, n_epochs=5, batch_size=16)
    assert len(btrainer.history['train_loss']) == 3 + 1
@pytest.mark.parametrize('fpath, save_best_only, max_save, n_files', [('tests/test_bayesian_models/test_model_functioning/weights/test_weights', True, 2, 2), ('tests/test_bayesian_models/test_model_functioning/weights/test_weights', False, 2, 2), ('tests/test_bayesian_models/test_model_functioning/weights/test_weights', False, 0, 5), (None, False, 0, 0)])
def test_model_checkpoint(fpath, save_best_only, max_save, n_files):
    # ModelCheckpoint must never write more files than expected
    checkpoint = ModelCheckpoint(filepath=fpath, save_best_only=save_best_only, max_save=max_save)
    trainer = BayesianTrainer(model=btabmlp, objective='binary', callbacks=[checkpoint], verbose=0)
    trainer.fit(X_tab=X_tab, target=target, n_epochs=5, val_split=0.2)
    weights_dir = 'tests/test_bayesian_models/test_model_functioning/weights/'
    if fpath:
        n_saved = len(os.listdir(weights_dir))
        # clean up the checkpoint directory created during fit
        shutil.rmtree(weights_dir)
    else:
        n_saved = 0
    assert n_saved <= n_files
def test_filepath_error():
    """ModelCheckpoint with a filepath lacking a directory component must
    make the trainer constructor raise a ValueError."""
    btabmlp = BayesianTabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 4])
    with pytest.raises(ValueError):
        # FIX: the result was previously bound to an unused local `trainer`;
        # the constructor call alone is what raises
        BayesianTrainer(model=btabmlp, objective='binary', callbacks=[ModelCheckpoint(filepath='wrong_file_path')], verbose=0)
@pytest.mark.parametrize('model, X_tab, target, X_tab_val, target_val , val_split', [(wide, X_wide, target, None, None, 0.2), (wide, X_wide_tr, y_train, X_wide_val, y_val, None), (tabmlp, X_tabmlp, target, None, None, 0.2), (tabmlp, X_tabmlp_tr, y_train, X_tabmlp_val, y_val, None)])
def test_data_input_options(model, X_tab, target, X_tab_val, target_val, val_split):
    # training should run both with an explicit validation set and a split
    trainer = BayesianTrainer(model, objective='binary', verbose=0)
    fit_kwargs = dict(X_tab=X_tab, target=target, X_tab_val=X_tab_val, target_val=target_val, val_split=val_split, batch_size=16)
    trainer.fit(**fit_kwargs)
    train_loss_history = trainer.history['train_loss']
    assert train_loss_history is not None
@pytest.mark.parametrize('model_name', ['wide', 'tabmlp'])
@pytest.mark.parametrize('objective', ['binary', 'multiclass'])
@pytest.mark.parametrize('return_samples', [True, False])
@pytest.mark.parametrize('embed_continuous', [True, False])
def test_classification(model_name, objective, return_samples, embed_continuous):
    """Check predict/predict_proba output shapes for binary and multiclass
    objectives, with and without posterior samples."""
    bsz = 32
    n_samples = 5
    pred_dim = (1 if (objective == 'binary') else 3)
    target = (target_binary if (objective == 'binary') else target_multic)
    if (model_name == 'wide'):
        X_tab = X_wide
        model = BayesianWide(np.unique(X_wide).shape[0], pred_dim)
    elif (model_name == 'tabmlp'):
        X_tab = X_tabmlp
        model = BayesianTabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], embed_continuous=embed_continuous, mlp_hidden_dims=[32, 16], pred_dim=pred_dim)
    trainer = BayesianTrainer(model, objective=objective, verbose=0)
    trainer.fit(X_tab=X_tab, target=target, batch_size=16)
    preds = trainer.predict(X_tab=X_tab, return_samples=return_samples, batch_size=16)
    probs = trainer.predict_proba(X_tab=X_tab, return_samples=return_samples, batch_size=16)
    # binary classification still yields two probability columns
    n_classes = 2 if (objective == 'binary') else 3
    out = []
    if return_samples:
        out.append((preds.shape[0] == n_samples))
        out.append((preds.shape[1] == bsz))
        out.append((probs.shape[0] == n_samples))
        out.append((probs.shape[1] == bsz))
        # BUG FIX: was `(probs.shape[2] == 2) if binary else 3`, which
        # appended the truthy literal 3 for multiclass and never checked
        # the class dimension
        out.append((probs.shape[2] == n_classes))
    else:
        out.append((preds.shape[0] == bsz))
        out.append((probs.shape[0] == bsz))
        # BUG FIX: same precedence bug as above for the 2-D case
        out.append((probs.shape[1] == n_classes))
    assert all(out)
@pytest.mark.parametrize('model_name', ['wide', 'tabmlp'])
@pytest.mark.parametrize('return_samples', [True, False])
def test_regression(model_name, return_samples):
    # regression predictions: (n_samples, batch) when sampling, (batch,) otherwise
    bsz = 32
    n_samples = 5
    if model_name == 'wide':
        X_tab = X_wide
        model = BayesianWide(np.unique(X_wide).shape[0], 1)
    elif model_name == 'tabmlp':
        X_tab = X_tabmlp
        model = BayesianTabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[32, 16], pred_dim=1)
    trainer = BayesianTrainer(model, objective='regression', verbose=0)
    trainer.fit(X_tab=X_tab, target=target_regres, batch_size=16)
    preds = trainer.predict(X_tab=X_tab, return_samples=return_samples, batch_size=16)
    expected_leading = (n_samples, bsz) if return_samples else (bsz,)
    assert tuple(preds.shape[:len(expected_leading)]) == expected_leading
@pytest.mark.parametrize('model', [bwide, btabmlp])
def test_save_and_load(model):
    # trained weights must survive a save / torch.load round trip

    def _mu_rho(m):
        # pull the (mu, rho) tensors identifying the model's first layer
        if m.__class__.__name__ == 'BayesianWide':
            layer = m.bayesian_wide_linear
        elif m.__class__.__name__ == 'BayesianTabMlp':
            layer = m.cat_and_cont_embed.cat_embed.embed_layers['emb_layer_a']
        return layer.weight_mu.data, layer.weight_rho.data

    btrainer = BayesianTrainer(model=model, objective='binary', verbose=0)
    X = X_wide if model.__class__.__name__ == 'BayesianWide' else X_tab
    btrainer.fit(X_tab=X, target=target, n_epochs=5, batch_size=16)
    weight_mu, weight_rho = _mu_rho(model)
    model_dir = 'tests/test_bayesian_models/test_bayes_model_functioning/model_dir/'
    btrainer.save(model_dir, model_filename='bayesian_model.pt')
    reloaded = torch.load(model_dir + 'bayesian_model.pt')
    new_weight_mu, new_weight_rho = _mu_rho(reloaded)
    shutil.rmtree(model_dir)
    assert torch.allclose(weight_mu, new_weight_mu) and torch.allclose(weight_rho, new_weight_rho)
@pytest.mark.parametrize('model_name', ['wide', 'tabmlp'])
def test_save_and_load_dict(model_name):
    """Saving a state_dict and loading it into a freshly built model must
    reproduce the trained weights, and the training history must be saved."""
    (model1, btrainer1) = _build_model_and_trainer(model_name)
    X = (X_wide if (model_name == 'wide') else X_tab)
    btrainer1.fit(X_tab=X, target=target, n_epochs=5, batch_size=16)
    btrainer1.save('tests/test_bayesian_models/test_bayes_model_functioning/model_dir/', model_filename='bayesian_model.pt', save_state_dict=True)
    if (model_name == 'wide'):
        weight_mu = model1.bayesian_wide_linear.weight_mu.data
        weight_rho = model1.bayesian_wide_linear.weight_rho.data
    elif (model_name == 'tabmlp'):
        weight_mu = model1.cat_and_cont_embed.cat_embed.embed_layers['emb_layer_a'].weight_mu.data
        weight_rho = model1.cat_and_cont_embed.cat_embed.embed_layers['emb_layer_a'].weight_rho.data
    (model2, btrainer2) = _build_model_and_trainer(model_name)
    # load_state_dict mutates btrainer2.model, which is model2
    btrainer2.model.load_state_dict(torch.load('tests/test_bayesian_models/test_bayes_model_functioning/model_dir/bayesian_model.pt'))
    # BUG FIX: these previously read from model1, so the test compared the
    # trained model with itself and could never fail
    if (model_name == 'wide'):
        new_weight_mu = model2.bayesian_wide_linear.weight_mu.data
        new_weight_rho = model2.bayesian_wide_linear.weight_rho.data
    elif (model_name == 'tabmlp'):
        new_weight_mu = model2.cat_and_cont_embed.cat_embed.embed_layers['emb_layer_a'].weight_mu.data
        new_weight_rho = model2.cat_and_cont_embed.cat_embed.embed_layers['emb_layer_a'].weight_rho.data
    same_weights = (torch.allclose(weight_mu, new_weight_mu) and torch.allclose(weight_rho, new_weight_rho))
    if os.path.isfile('tests/test_bayesian_models/test_bayes_model_functioning/model_dir/history/train_eval_history.json'):
        history_saved = True
    else:
        history_saved = False
    shutil.rmtree('tests/test_bayesian_models/test_bayes_model_functioning/model_dir/')
    assert (same_weights and history_saved)
def _build_model_and_trainer(model_name):
    """Return a fresh (model, trainer) pair for the given model name."""
    builders = {
        'wide': lambda: BayesianWide(np.unique(X_wide).shape[0], 1),
        'tabmlp': lambda: BayesianTabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[32, 16]),
    }
    model = builders[model_name]()
    trainer = BayesianTrainer(model=model, objective='binary', verbose=0)
    return (model, trainer)
def create_df():
    """Build a small random dataframe: two categorical columns, two rounded
    continuous columns and a binary target, named per the module-level
    `colnames`."""
    columns = []
    for vals in (cat_col1_vals, cat_col2_vals):
        columns.append(np.array(choices(vals, k=5)))
    for _ in range(2):
        columns.append(np.round(np.random.rand(5), 2))
    # imbalanced binary target (80/20)
    columns.append(np.random.choice(2, 5, p=[0.8, 0.2]))
    return pd.DataFrame(np.vstack(columns).transpose(), columns=colnames)
@pytest.mark.parametrize('return_dataframe', [True, False])
@pytest.mark.parametrize('embed_continuous', [True, False])
def test_bayesian_mlp_models(return_dataframe, embed_continuous):
    # Tab2Vec output width = sum of cat embedding dims + continuous width
    tab_preprocessor = TabPreprocessor(cat_embed_cols=embed_cols, continuous_cols=cont_cols)
    X_tab = tab_preprocessor.fit_transform(df_init)
    model = BayesianTabMlp(column_idx=tab_preprocessor.column_idx, cat_embed_input=tab_preprocessor.cat_embed_input, continuous_cols=tab_preprocessor.continuous_cols, embed_continuous=embed_continuous, mlp_hidden_dims=[8, 4])
    t2v = Tab2Vec(model, tab_preprocessor, return_dataframe=return_dataframe)
    t2v_out, _ = t2v.fit_transform(df_t2v, target_col='target')
    cat_dim = sum(entry[2] for entry in tab_preprocessor.cat_embed_input)
    n_cont = len(tab_preprocessor.continuous_cols)
    cont_dim = n_cont * model.cont_embed_dim if embed_continuous else n_cont
    assert t2v_out.shape[1] == cat_dim + cont_dim
class DummyPreprocessor(BasePreprocessor):
    """Minimal preprocessor whose `fit` records two attributes, so that
    `check_is_fitted` has something to verify in `transform`."""

    def __init__(self):
        super().__init__()

    def fit(self, df):
        # record state that marks this preprocessor as fitted
        self.att1 = 1
        self.att2 = 2
        return df

    def transform(self, df):
        # either attribute being present counts as fitted ('any')
        check_is_fitted(self, attributes=['att1', 'att2'], all_or_any='any')
        return df

    def fit_transform(self, df):
        # NOTE(review): `fit` returns the dataframe (not self), so this
        # chains `.transform` on the dataframe itself
        fitted = self.fit(df)
        return fitted.transform(df)
class IncompletePreprocessor(BasePreprocessor):
    # Preprocessor that deliberately does NOT override `fit_transform`, so
    # the base class's NotImplementedError path can be exercised in tests.
    def __init__(self):
        super().__init__()

    def fit(self, df):
        # no-op fit: returns the input unchanged
        return df

    def transform(self, df):
        # no-op transform: returns the input unchanged
        return df
def test_check_is_fitted():
    # calling transform before fit must raise NotFittedError
    preprocessor = DummyPreprocessor()
    with pytest.raises(NotFittedError):
        preprocessor.transform(df)
def test_base_non_implemented_error():
    # the base class's fit_transform is abstract-by-exception
    with pytest.raises(NotImplementedError):
        incomplete = IncompletePreprocessor()
        incomplete.fit_transform(df)
def test_aap_ssp():
    # both preprocessors must resize to 128 regardless of image orientation
    img = cv2.imread('/'.join([imd_dir, 'galaxy1.png']))
    aap = AspectAwarePreprocessor(128, 128)
    spp = SimplePreprocessor(128, 128)
    resized = [
        aap.preprocess(img),
        aap.preprocess(img.transpose(1, 0, 2)),  # transposed orientation
        spp.preprocess(img),
    ]
    assert all(r.shape[0] == 128 for r in resized)
def test_sizes():
    # processed images must match the processor's configured width/height
    img_width, img_height = X_imgs.shape[1], X_imgs.shape[2]
    width_ok = img_width == processor.width
    height_ok = img_height == processor.height
    assert width_ok and height_ok
def test_notimplementederror():
    """inverse_transform is not implemented for this image processor."""
    with pytest.raises(NotImplementedError):
        # FIX: the return value was previously bound to an unused local
        processor.inverse_transform(X_imgs)
def test_pad_sequences():
    # padding goes at the front by default, at the back with pad_first=False
    seq = [1, 2, 3]
    front_padded = text_utils.pad_sequences(seq, maxlen=5, pad_idx=0)
    back_padded = text_utils.pad_sequences(seq, maxlen=5, pad_idx=1, pad_first=False)
    front_ok = all(el == 0 for el in front_padded[:2])
    back_ok = all(el == 1 for el in back_padded[-2:])
    assert front_ok and back_ok
def test_inverse_transform():
    # tokenising then inverse transforming should recover the cleaned text
    df = pd.DataFrame({'text_column': ['life is like a box of chocolates', "You never know what you're going to get"]})
    text_preprocessor = TextPreprocessor(text_col='text_column', max_vocab=25, min_freq=1, maxlen=10, verbose=False)
    recovered = text_preprocessor.inverse_transform(text_preprocessor.fit_transform(df))
    texts = recovered.text_column.values
    first_ok = 'life is like box of chocolates' in texts[0]
    second_ok = 'you never know what you re going to get' in texts[1]
    assert first_ok and second_ok
def test_notfittederror():
    # transform before fit must raise NotFittedError
    unfitted = TextPreprocessor(min_freq=0, text_col='texts')
    with pytest.raises(NotFittedError):
        unfitted.transform(df)
def create_test_dataset(input_type, with_crossed=True):
    """Build a tiny two-column dataframe of random draws from `input_type`
    and the expected number of unique encoded values.

    Parameters
    ----------
    input_type: sequence
        pool of values to sample the two columns from
    with_crossed: bool, default = True
        if True, also count the unique values of the crossed 'col1_col2'
        combinations

    Returns
    -------
    tuple
        (dataframe, expected number of unique values)
    """
    col1 = list(np.random.choice(input_type, 3))
    col2 = list(np.random.choice(input_type, 3))
    df = pd.DataFrame({'col1': col1, 'col2': col2})
    nuniques = df.col1.nunique() + df.col2.nunique()
    if with_crossed:
        crossed = ['_'.join([str(a), str(b)]) for (a, b) in zip(col1, col2)]
        nuniques += len(np.unique(crossed))
    return (df, nuniques)
@pytest.mark.parametrize('input_df, expected_shape', [(df_letters, unique_letters), (df_numbers, unique_numbers)])
def test_preprocessor1(input_df, expected_shape):
    # the wide matrix must contain one id per unique (crossed) category
    encoded = preprocessor1.fit_transform(input_df)
    n_unique_ids = np.unique(encoded).shape[0]
    assert n_unique_ids == expected_shape
@pytest.mark.parametrize('input_df, expected_shape', [(df_letters_wo_crossed, unique_letters_wo_crossed), (df_numbers_wo_crossed, unique_numbers_wo_crossed)])
def test_prepare_wide_wo_crossed(input_df, expected_shape):
    # without crossed columns only the raw categories get encoded
    encoded = preprocessor2.fit_transform(input_df)
    n_unique_ids = np.unique(encoded).shape[0]
    assert n_unique_ids == expected_shape
@pytest.mark.parametrize('input_df', [df_letters, df_numbers])
def test_inverse_transform(input_df):
    # fit_transform followed by inverse_transform must round-trip the input
    encoded = preprocessor1.fit_transform(input_df)
    decoded = preprocessor1.inverse_transform(encoded)
    decoded = decoded[input_df.columns.tolist()]
    for col in decoded.columns:
        # restore original dtypes before comparing
        decoded[col] = decoded[col].astype(input_df[col].dtype)
    assert input_df.equals(decoded)