code
stringlengths
17
6.64M
class Accuracy(Metric):
    """Running accuracy, accumulated over every call since the last ``reset``.

    Parameters
    ----------
    top_k: int, default = 1
        A prediction counts as correct when the true label is among the
        ``top_k`` highest-scoring classes.
    """

    def __init__(self, top_k: int = 1):
        super(Accuracy, self).__init__()
        self.top_k = top_k
        self.correct_count = 0
        self.total_count = 0
        self._name = "acc"

    def reset(self):
        """Zero the running counters."""
        self.correct_count = 0
        self.total_count = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        num_classes = y_pred.size(1)
        if num_classes == 1:
            # Binary case: scores are probabilities; round to hard labels.
            # NOTE(review): assumes y_true already matches y_pred's shape here,
            # otherwise eq() below would broadcast — TODO confirm with caller.
            y_pred = y_pred.round()
        elif num_classes > 1:
            # Multiclass: indices of the top-k scores, with the targets tiled
            # so they line up column-wise with those indices.
            y_pred = y_pred.topk(self.top_k, 1)[1]
            y_true = y_true.view(-1, 1).expand_as(y_pred)
        self.correct_count += y_pred.eq(y_true).sum().item()
        self.total_count += len(y_pred)
        running_acc = float(self.correct_count) / float(self.total_count)
        return np.array(running_acc)
class SillyCallback(Callback):
    """Toy callback that records 1-based epoch numbers on the trainer."""

    def on_train_begin(self, logs=None):
        # Fresh containers on every training run.
        self.trainer.silly_callback = {"beginning": [], "end": []}

    def on_epoch_begin(self, epoch, logs=None):
        self.trainer.silly_callback["beginning"].append(epoch + 1)

    def on_epoch_end(self, epoch, logs=None, metric=None):
        self.trainer.silly_callback["end"].append(epoch + 1)
class RayTuneReporter(Callback):
    """Callback that allows reporting history and lr_history values to RayTune
    during Hyperparameter tuning

    Callbacks are passed as input parameters to the ``Trainer`` class. See
    :class:`pytorch_widedeep.trainer.Trainer`

    For examples see the examples folder at:

    .. code-block:: bash

        /examples/12_HyperParameter_tuning_w_RayTune.ipynb
    """

    def on_epoch_end(
        self,
        epoch: int,
        logs: Optional[Dict] = None,
        metric: Optional[float] = None,
    ):
        # Report only the latest value of every tracked series.
        report_dict = {k: v[-1] for k, v in self.trainer.history.items()}
        if hasattr(self.trainer, "lr_history"):
            for k, v in self.trainer.lr_history.items():
                report_dict[k] = v[-1]
        tune.report(report_dict)
class WnBReportBest(Callback):
    """Callback that allows reporting best performance of a run to WnB
    during Hyperparameter tuning. It is an adjusted
    pytorch_widedeep.callbacks.ModelCheckpoint with added WnB and removed
    checkpoint saving.

    Callbacks are passed as input parameters to the ``Trainer`` class.

    Parameters
    ----------
    wb: obj
        Weights&Biases API interface to report single best result usable for
        comparison of multiple parameter combinations by, for example,
        `parallel coordinates
        <https://docs.wandb.ai/ref/app/features/panels/parallel-coordinates>`_.
        E.g W&B summary report ``wandb.run.summary["best"]``.
    monitor: str, default="val_loss"
        quantity to monitor. Typically `'val_loss'` or metric name
        (e.g. `'val_acc'`)
    mode: str, default="auto"
        one of `'min'`, `'max'` or `'auto'`: whether an improvement means the
        monitored quantity decreasing or increasing. For `'acc'` this should
        be `'max'`, for `'loss'` this should be `'min'`, etc. In `'auto'`
        mode the direction is inferred from the name of the monitored
        quantity.
    """

    def __init__(self, wb: object, monitor: str = "val_loss", mode: str = "auto"):
        super(WnBReportBest, self).__init__()
        self.monitor = monitor
        self.mode = mode
        self.wb = wb

        if self.mode not in ["auto", "min", "max"]:
            warnings.warn(
                "WnBReportBest mode %s is unknown, fallback to auto mode." % self.mode,
                RuntimeWarning,
            )
            self.mode = "auto"
        # FIX: np.Inf was removed in NumPy 2.0; np.inf is the portable spelling.
        if self.mode == "min":
            self.monitor_op = np.less
            self.best = np.inf
        elif self.mode == "max":
            self.monitor_op = np.greater
            self.best = -np.inf
        elif self._is_metric(self.monitor):
            # 'auto' with a metric-like name: higher is better.
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # 'auto' with a loss-like name: lower is better.
            self.monitor_op = np.less
            self.best = np.inf

    def on_epoch_end(
        self,
        epoch: int,
        logs: Optional[Dict] = None,
        metric: Optional[float] = None,
    ):
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is not None:
            if self.monitor_op(current, self.best):
                # New best: push it to the W&B run summary.
                self.wb.run.summary["best"] = current
                self.best = current
                self.best_epoch = epoch

    @staticmethod
    def _is_metric(monitor: str):
        """copied from pytorch_widedeep.callbacks"""
        return any(s in monitor for s in ["acc", "prec", "rec", "fscore", "f1", "f2"])
@wandb_mixin
def training_function(config, X_train, X_val):
    """Single RayTune trial: build a ``Trainer`` for the shared model and fit
    for 5 epochs with the trial's batch size, reporting results to RayTune
    and W&B via callbacks.
    """
    early_stopping = EarlyStopping()
    model_checkpoint = ModelCheckpoint(save_best_only=True)
    batch_size = config["batch_size"]
    trainer = Trainer(
        model,
        objective="binary_focal_loss",
        callbacks=[
            RayTuneReporter,
            WnBReportBest(wb=wandb),
            early_stopping,
            model_checkpoint,
        ],
        lr_schedulers={"deeptabular": deep_sch},
        initializers={"deeptabular": XavierNormal},
        optimizers={"deeptabular": deep_opt},
        metrics=[accuracy, precision, recall, f1],
        verbose=0,
    )
    trainer.fit(X_train=X_train, X_val=X_val, n_epochs=5, batch_size=batch_size)
def get_coo_indexes(lil):
    """Flatten a list-of-lists into parallel COO row/column index lists.

    Parameters
    ----------
    lil: sequence
        One entry per row; each entry is either a single column index or a
        list of column indexes.

    Returns
    -------
    tuple of (list, list)
        ``(rows, cols)`` suitable for building a ``scipy.sparse.coo_matrix``.
    """
    rows, cols = [], []
    for i, el in enumerate(lil):
        # isinstance instead of `type(el) != list`: idiomatic, and treats
        # list subclasses as lists too.
        if not isinstance(el, list):
            el = [el]
        for j in el:
            rows.append(i)
            cols.append(j)
    return rows, cols
def get_sparse_features(series, shape):
    """Build a sparse indicator matrix from a pandas Series of index lists.

    Each row of the result has ones at the column positions listed in the
    corresponding Series entry.
    """
    rows, cols = get_coo_indexes(series.tolist())
    values = np.ones(len(rows))
    return coo_matrix((values, (rows, cols)), shape=shape)
def sparse_to_idx(data, pad_idx=-1):
    """Convert a sparse matrix into a dense 2-D array of column indexes.

    One output row per matrix row, holding that row's nonzero column
    indexes, right-padded with ``pad_idx`` up to the longest row.
    """
    row_idx, col_idx = data.nonzero()
    indexes_df = pd.DataFrame({"rows": row_idx, "cols": col_idx})
    cols_per_row = indexes_df.groupby("rows").apply(lambda g: g["cols"].tolist())
    max_len = cols_per_row.apply(len).max()
    padded = cols_per_row.apply(
        lambda lst: pd.Series(lst + [pad_idx] * (max_len - len(lst)))
    )
    return padded.values
class Wide(nn.Module):
    """Plain linear layer over the wide input (cast to float32 first)."""

    def __init__(self, input_dim: int, pred_dim: int):
        super().__init__()
        self.input_dim = input_dim
        self.pred_dim = pred_dim
        self.wide_linear = nn.Linear(input_dim, pred_dim)

    def forward(self, X):
        # Cast so integer / one-hot inputs can be fed straight in.
        return self.wide_linear(X.type(torch.float32))
class SimpleEmbed(nn.Module):
    """Mean-pooled embedding lookup: embeds token ids and averages over dim 1."""

    def __init__(self, vocab_size: int, embed_dim: int, pad_idx: int):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.pad_idx = pad_idx
        self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_idx)

    def forward(self, X):
        looked_up = self.embed(X)
        return torch.mean(looked_up, dim=1)

    @property
    def output_dim(self) -> int:
        # The pooled output has the embedding dimensionality.
        return self.embed_dim
class BayesianModule(nn.Module):
    """Simply a 'hack' to facilitate the computation of the KL divergence:
    marker base class used to identify Bayesian layers so their KL terms
    can be accumulated over all Bayesian models.
    """

    def __init__(self):
        # FIX: the original defined ``init`` (missing dunder underscores), a
        # typo that was never called — ``nn.Module.__init__`` ran implicitly,
        # so defining the real constructor is behavior-compatible.
        super().__init__()
class BaseBayesianModel(nn.Module):
    """Base model containing the two methods common to all Bayesian models."""

    def __init__(self):
        # FIX: the original defined ``init`` (missing dunder underscores), a
        # typo that was never called — ``nn.Module.__init__`` ran implicitly,
        # so defining the real constructor is behavior-compatible.
        super().__init__()

    def _kl_divergence(self):
        """Sum of (log q - log p) over every ``BayesianModule`` submodule."""
        kld = 0
        for module in self.modules():
            if isinstance(module, BayesianModule):
                kld += module.log_variational_posterior - module.log_prior
        return kld

    def sample_elbo(
        self,
        input: Tensor,
        target: Tensor,
        loss_fn: nn.Module,
        n_samples: int,
        n_batches: int,
    ) -> Tuple[Tensor, Tensor]:
        """Monte-Carlo estimate of the (negative) ELBO loss.

        Runs ``n_samples`` stochastic forward passes, accumulating the KL
        (complexity) term per pass, then computes the data-fit term on the
        averaged outputs.

        Returns
        -------
        Tuple[Tensor, Tensor]
            The stacked sampled outputs and the total loss
            (kl / n_batches + likelihood cost).
        """
        outputs_l = []
        kld = 0.0
        for _ in range(n_samples):
            outputs_l.append(self(input))
            kld += self._kl_divergence()
        outputs = torch.stack(outputs_l)
        complexity_cost = kld / n_batches
        likelihood_cost = loss_fn(outputs.mean(0), target)
        return outputs, complexity_cost + likelihood_cost
class ScaleMixtureGaussianPrior(object):
    """Defines the Scale Mixture Prior as proposed in Weight Uncertainty in
    Neural Networks (Eq 7 in the original publication): a mixture of two
    zero-mean Gaussians with weights ``pi`` and ``1 - pi``.
    """

    def __init__(self, pi: float, sigma1: float, sigma2: float):
        super().__init__()
        self.pi = pi
        self.sigma1 = sigma1
        self.sigma2 = sigma2
        self.gaussian1 = torch.distributions.Normal(0, sigma1)
        self.gaussian2 = torch.distributions.Normal(0, sigma2)

    def log_prior(self, input: Tensor) -> Tensor:
        """Summed log-density of ``input`` under the two-Gaussian mixture."""
        density1 = torch.exp(self.gaussian1.log_prob(input))
        density2 = torch.exp(self.gaussian2.log_prob(input))
        mixture = self.pi * density1 + (1 - self.pi) * density2
        return torch.log(mixture).sum()
class GaussianPosterior(object):
    """Defines the Gaussian variational posterior as proposed in Weight
    Uncertainty in Neural Networks, parameterised by ``mu`` and ``rho``
    with ``sigma = log(1 + exp(rho))`` (softplus keeps sigma positive).
    """

    def __init__(self, param_mu: Tensor, param_rho: Tensor):
        super().__init__()
        self.param_mu = param_mu
        self.param_rho = param_rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        return torch.log1p(torch.exp(self.param_rho))

    def sample(self) -> Tensor:
        """Reparameterised sample: mu + sigma * eps, with eps ~ N(0, 1)."""
        eps = self.normal.sample(self.param_rho.size()).to(self.param_rho.device)
        return self.param_mu + self.sigma * eps

    def log_posterior(self, input: Tensor) -> Tensor:
        """Summed Gaussian log-density of ``input`` under N(mu, sigma)."""
        log_sqrt_2pi = math.log(math.sqrt(2 * math.pi))
        squared_term = ((input - self.param_mu) ** 2) / (2 * (self.sigma ** 2))
        return (-log_sqrt_2pi - torch.log(self.sigma) - squared_term).sum()
class BayesianEmbedding(BayesianModule):
    """A simple lookup table that looks up embeddings in a fixed dictionary
    and size, with weights sampled from a Gaussian variational posterior
    under a scale-mixture Gaussian prior ("Weight Uncertainty in Neural
    Networks").

    Parameters
    ----------
    n_embed: int
        number of embeddings. Typically referred as size of the vocabulary
    embed_dim: int
        Dimension of the embeddings
    padding_idx: int, optional, default = None
        If specified, the entries at ``padding_idx`` do not contribute to
        the gradient, so the vector at ``padding_idx`` stays a fixed "pad".
    max_norm: float, optional, default = None
        If given, embedding vectors with norm larger than ``max_norm`` are
        renormalized to have norm ``max_norm``
    norm_type: float, optional, default = 2.
        The p of the p-norm to compute for the ``max_norm`` option
    scale_grad_by_freq: bool, optional, default = False
        If given, scale gradients by the inverse of the word frequency in
        the mini-batch
    sparse: bool, optional, default = False
        If True, the gradient w.r.t. the weight matrix is a sparse tensor
    prior_sigma_1: float, default = 1.0
        sigma of the first Gaussian in the scale-mixture prior
    prior_sigma_2: float, default = 0.002
        sigma of the second Gaussian in the scale-mixture prior
    prior_pi: float, default = 0.8
        mixing factor between the two prior Gaussians
    posterior_mu_init: float = 0.0
        mean used to initialise the posterior mu (std 0.1)
    posterior_rho_init: float = -7.0
        mean used to initialise the posterior rho (std 0.1)

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import bayesian_nn as bnn
    >>> embedding = bnn.BayesianEmbedding(10, 3)
    >>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]])
    >>> out = embedding(input)
    """

    def __init__(
        self,
        n_embed: int,
        embed_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: Optional[float] = 2.0,
        scale_grad_by_freq: Optional[bool] = False,
        sparse: Optional[bool] = False,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianEmbedding, self).__init__()
        self.n_embed = n_embed
        self.embed_dim = embed_dim
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init

        # Variational posterior parameters (mu, rho), initialised N(init, 0.1).
        self.weight_mu = nn.Parameter(
            torch.Tensor(n_embed, embed_dim).normal_(posterior_mu_init, 0.1)
        )
        self.weight_rho = nn.Parameter(
            torch.Tensor(n_embed, embed_dim).normal_(posterior_rho_init, 0.1)
        )
        self.weight_sampler = GaussianPosterior(self.weight_mu, self.weight_rho)
        self.weight_prior_dist = ScaleMixtureGaussianPrior(
            self.prior_pi, self.prior_sigma_1, self.prior_sigma_2
        )

        # Updated on every training forward pass; consumed by the model's
        # KL-divergence accumulation.
        self.log_prior: Union[Tensor, float] = 0.0
        self.log_variational_posterior: Union[Tensor, float] = 0.0

    def forward(self, X: Tensor) -> Tensor:
        if not self.training:
            # Deterministic at inference: use the posterior means.
            return F.embedding(
                X,
                self.weight_mu,
                self.padding_idx,
                self.max_norm,
                self.norm_type,
                self.scale_grad_by_freq,
                self.sparse,
            )
        weight = self.weight_sampler.sample()
        self.log_variational_posterior = self.weight_sampler.log_posterior(weight)
        self.log_prior = self.weight_prior_dist.log_prior(weight)
        return F.embedding(
            X,
            weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

    def extra_repr(self) -> str:
        # Only show options that differ from their defaults.
        s = "{n_embed}, {embed_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        if self.max_norm is not None:
            s += ", max_norm={max_norm}"
        if self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        if self.sparse is not False:
            s += ", sparse=True"
        if self.prior_sigma_1 != 1.0:
            s += ", prior_sigma_1={prior_sigma_1}"
        if self.prior_sigma_2 != 0.002:
            s += ", prior_sigma_2={prior_sigma_2}"
        if self.prior_pi != 0.8:
            s += ", prior_pi={prior_pi}"
        if self.posterior_mu_init != 0.0:
            s += ", posterior_mu_init={posterior_mu_init}"
        if self.posterior_rho_init != -7.0:
            s += ", posterior_rho_init={posterior_rho_init}"
        return s.format(**self.__dict__)
class BayesianLinear(BayesianModule):
    """Applies a linear transformation to the incoming data as proposed in
    Weight Uncertainty on Neural Networks: weights (and optionally bias)
    are sampled from a Gaussian variational posterior under a
    scale-mixture Gaussian prior.

    Parameters
    ----------
    in_features: int
        size of each input sample
    out_features: int
        size of each output sample
    use_bias: bool, default = True
        Boolean indicating if an additive bias will be learnt
    prior_sigma_1: float, default = 1.0
        sigma of the first Gaussian in the scale-mixture prior
    prior_sigma_2: float, default = 0.002
        sigma of the second Gaussian in the scale-mixture prior
    prior_pi: float, default = 0.8
        mixing factor between the two prior Gaussians
    posterior_mu_init: float = 0.0
        mean used to initialise the posterior mu (std 0.1)
    posterior_rho_init: float = -7.0
        mean used to initialise the posterior rho (std 0.1)

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import bayesian_nn as bnn
    >>> linear = bnn.BayesianLinear(10, 6)
    >>> input = torch.rand(6, 10)
    >>> out = linear(input)
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        use_bias: bool = True,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.use_bias = use_bias
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi

        # Variational posterior parameters (mu, rho), initialised N(init, 0.1).
        self.weight_mu = nn.Parameter(
            torch.Tensor(out_features, in_features).normal_(posterior_mu_init, 0.1)
        )
        self.weight_rho = nn.Parameter(
            torch.Tensor(out_features, in_features).normal_(posterior_rho_init, 0.1)
        )
        self.weight_sampler = GaussianPosterior(self.weight_mu, self.weight_rho)

        if self.use_bias:
            self.bias_mu = nn.Parameter(
                torch.Tensor(out_features).normal_(posterior_mu_init, 0.1)
            )
            self.bias_rho = nn.Parameter(
                torch.Tensor(out_features).normal_(posterior_rho_init, 0.1)
            )
            self.bias_sampler = GaussianPosterior(self.bias_mu, self.bias_rho)
        else:
            self.bias_mu, self.bias_rho = None, None

        self.weight_prior_dist = ScaleMixtureGaussianPrior(
            self.prior_pi, self.prior_sigma_1, self.prior_sigma_2
        )
        if self.use_bias:
            self.bias_prior_dist = ScaleMixtureGaussianPrior(
                self.prior_pi, self.prior_sigma_1, self.prior_sigma_2
            )

        # Updated on every training forward pass; consumed by the model's
        # KL-divergence accumulation.
        self.log_prior: Union[Tensor, float] = 0.0
        self.log_variational_posterior: Union[Tensor, float] = 0.0

    def forward(self, X: Tensor) -> Tensor:
        if not self.training:
            # Deterministic at inference: use the posterior means.
            return F.linear(X, self.weight_mu, self.bias_mu)
        weight = self.weight_sampler.sample()
        if self.use_bias:
            bias = self.bias_sampler.sample()
            bias_log_posterior: Union[Tensor, float] = self.bias_sampler.log_posterior(bias)
            bias_log_prior: Union[Tensor, float] = self.bias_prior_dist.log_prior(bias)
        else:
            bias = None
            bias_log_posterior = 0.0
            bias_log_prior = 0.0
        self.log_variational_posterior = (
            self.weight_sampler.log_posterior(weight) + bias_log_posterior
        )
        self.log_prior = self.weight_prior_dist.log_prior(weight) + bias_log_prior
        return F.linear(X, weight, bias)

    def extra_repr(self) -> str:
        # Only show options that differ from their defaults.
        s = "{in_features}, {out_features}"
        if self.use_bias is not False:
            s += ", use_bias=True"
        if self.prior_sigma_1 != 1.0:
            s += ", prior_sigma_1={prior_sigma_1}"
        if self.prior_sigma_2 != 0.002:
            s += ", prior_sigma_2={prior_sigma_2}"
        if self.prior_pi != 0.8:
            s += ", prior_pi={prior_pi}"
        if self.posterior_mu_init != 0.0:
            s += ", posterior_mu_init={posterior_mu_init}"
        if self.posterior_rho_init != -7.0:
            s += ", posterior_rho_init={posterior_rho_init}"
        return s.format(**self.__dict__)
class BayesianWide(BaseBayesianModel):
    """Defines a `Wide` model. This is a linear model where the
    non-linearities are captured via crossed-columns; weights are Bayesian
    (sampled from a Gaussian variational posterior under a scale-mixture
    Gaussian prior, as in "Weight Uncertainty in Neural Networks").

    Parameters
    ----------
    input_dim: int
        size of the Embedding layer: the summation of all the individual
        values for all the features that go through the wide component.
        E.g. 2 features with 5 individual values each -> `input_dim = 10`
    pred_dim: int
        size of the output tensor containing the predictions
    prior_sigma_1: float, default = 1.0
        sigma of the first Gaussian in the scale-mixture prior
    prior_sigma_2: float, default = 0.002
        sigma of the second Gaussian in the scale-mixture prior
    prior_pi: float, default = 0.8
        mixing factor between the two prior Gaussians
    posterior_mu_init: float = 0.0
        mean used to initialise the posterior mu (std 0.1)
    posterior_rho_init: float = -7.0
        mean used to initialise the posterior rho (std 0.1)

    Attributes
    -----------
    bayesian_wide_linear: nn.Module
        the linear layer that comprises the wide branch of the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import BayesianWide
    >>> X = torch.empty(4, 4).random_(6)
    >>> wide = BayesianWide(input_dim=X.unique().size(0), pred_dim=1)
    >>> out = wide(X)
    """

    def __init__(
        self,
        input_dim: int,
        pred_dim: int = 1,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianWide, self).__init__()
        # An embedding of dim pred_dim acts as the linear layer: the
        # looked-up "weights" are summed per row and a bias added.
        # Index 0 is reserved for padding / unseen values.
        self.bayesian_wide_linear = bnn.BayesianEmbedding(
            n_embed=input_dim + 1,
            embed_dim=pred_dim,
            padding_idx=0,
            prior_sigma_1=prior_sigma_1,
            prior_sigma_2=prior_sigma_2,
            prior_pi=prior_pi,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.bias = nn.Parameter(torch.zeros(pred_dim))

    def forward(self, X: Tensor) -> Tensor:
        return self.bayesian_wide_linear(X.long()).sum(dim=1) + self.bias
class BayesianMLP(nn.Module):
    """MLP built from ``BayesianLinear`` layers.

    ``d_hidden`` lists the layer sizes (input size included); the chosen
    activation follows every layer except the last.
    """

    def __init__(
        self,
        d_hidden: List[int],
        activation: str,
        use_bias: bool = True,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianMLP, self).__init__()
        self.d_hidden = d_hidden
        self.activation = activation

        act_fn = get_activation_fn(activation)
        self.bayesian_mlp = nn.Sequential()
        for i in range(1, len(d_hidden)):
            is_last = i == len(d_hidden) - 1
            dense = nn.Sequential(
                bnn.BayesianLinear(
                    d_hidden[i - 1],
                    d_hidden[i],
                    use_bias,
                    prior_sigma_1,
                    prior_sigma_2,
                    prior_pi,
                    posterior_mu_init,
                    posterior_rho_init,
                ),
                # No activation after the output layer.
                nn.Identity() if is_last else act_fn,
            )
            self.bayesian_mlp.add_module(
                "bayesian_dense_layer_{}".format(i - 1), dense
            )

    def forward(self, X: Tensor) -> Tensor:
        return self.bayesian_mlp(X)
class BayesianTabMlp(BaseBayesianModel):
    """Defines a `BayesianTabMlp` model.

    Combines embedding representations of the categorical features with
    numerical (aka continuous) features, embedded or not, and passes them
    through a series of probabilistic dense layers (i.e. a Bayesian MLP).

    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed
        through the model. Required to slice the tensors,
        e.g. _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name, number of unique values and
        embedding dimension, e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    cat_embed_activation: Optional, str, default = None
        Activation function for the categorical embeddings, if any.
        Currently _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are
        supported
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Normalization layer for the continuous features: 'layernorm',
        'batchnorm' or None
    embed_continuous: bool, default = False
        Whether the continuous columns are embedded (each passed through a
        linear layer with or without activation)
    cont_embed_dim: int, default = 32
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1
        Dropout for the continuous embeddings
    use_cont_bias: bool, default = True
        Whether bias is used for the continuous embeddings
    cont_embed_activation: Optional, str, default = None
        Activation function for the continuous embeddings, if any
    mlp_hidden_dims: List, default = [200, 100]
        Number of neurons per dense layer in the mlp
    mlp_activation: str, default = "leaky_relu"
        Activation for the MLP dense layers: _'tanh'_, _'relu'_,
        _'leaky_relu'_ or _'gelu'_
    prior_sigma_1: float, default = 1.0
        sigma of the first Gaussian in the scale-mixture prior
    prior_sigma_2: float, default = 0.002
        sigma of the second Gaussian in the scale-mixture prior
    prior_pi: float, default = 0.8
        mixing factor between the two prior Gaussians
    posterior_mu_init: float = 0.0
        mean used to initialise the posterior mu (std 0.1)
    posterior_rho_init: float = -7.0
        mean used to initialise the posterior rho (std 0.1)

    Attributes
    ----------
    bayesian_cat_and_cont_embed: nn.Module
        module that processes the categorical and continuous columns
    bayesian_tab_mlp: nn.Sequential
        mlp model that receives the concatenation of the embeddings and
        the continuous columns

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import BayesianTabMlp
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = BayesianTabMlp(mlp_hidden_dims=[8,4], column_idx=column_idx, cat_embed_input=cat_embed_input,
    ...     continuous_cols = ['e'])
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,
        cat_embed_dropout: float = 0.1,
        cat_embed_activation: Optional[str] = None,
        continuous_cols: Optional[List[str]] = None,
        embed_continuous: bool = False,
        cont_embed_dim: int = 32,
        cont_embed_dropout: float = 0.1,
        cont_embed_activation: Optional[str] = None,
        use_cont_bias: bool = True,
        cont_norm_layer: str = "batchnorm",
        mlp_hidden_dims: List[int] = [200, 100],
        mlp_activation: str = "leaky_relu",
        prior_sigma_1: float = 1,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
        pred_dim=1,
    ):
        super(BayesianTabMlp, self).__init__()
        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.cat_embed_dropout = cat_embed_dropout
        self.cat_embed_activation = cat_embed_activation
        self.continuous_cols = continuous_cols
        self.cont_norm_layer = cont_norm_layer
        self.embed_continuous = embed_continuous
        self.cont_embed_dim = cont_embed_dim
        self.cont_embed_dropout = cont_embed_dropout
        self.use_cont_bias = use_cont_bias
        self.cont_embed_activation = cont_embed_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.pred_dim = pred_dim

        allowed_activations = ["relu", "leaky_relu", "tanh", "gelu"]
        if self.mlp_activation not in allowed_activations:
            raise ValueError(
                "Currently, only the following activation functions are supported for the Bayesian MLP's dense layers: {}. Got '{}' instead".format(
                    ", ".join(allowed_activations), self.mlp_activation
                )
            )

        self.cat_and_cont_embed = BayesianDiffSizeCatAndContEmbeddings(
            column_idx,
            cat_embed_input,
            continuous_cols,
            embed_continuous,
            cont_embed_dim,
            use_cont_bias,
            cont_norm_layer,
            prior_sigma_1,
            prior_sigma_2,
            prior_pi,
            posterior_mu_init,
            posterior_rho_init,
        )
        self.cat_embed_act_fn = (
            get_activation_fn(cat_embed_activation)
            if cat_embed_activation is not None
            else None
        )
        self.cont_embed_act_fn = (
            get_activation_fn(cont_embed_activation)
            if cont_embed_activation is not None
            else None
        )

        mlp_input_dim = self.cat_and_cont_embed.output_dim
        mlp_hidden_dims = [mlp_input_dim] + mlp_hidden_dims + [pred_dim]
        self.bayesian_tab_mlp = BayesianMLP(
            mlp_hidden_dims,
            mlp_activation,
            True,  # use_bias
            prior_sigma_1,
            prior_sigma_2,
            prior_pi,
            posterior_mu_init,
            posterior_rho_init,
        )

    def forward(self, X: Tensor) -> Tensor:
        x_cat, x_cont = self.cat_and_cont_embed(X)
        if x_cat is not None:
            x = (
                self.cat_embed_act_fn(x_cat)
                if self.cat_embed_act_fn is not None
                else x_cat
            )
        if x_cont is not None:
            if self.cont_embed_act_fn is not None:
                x_cont = self.cont_embed_act_fn(x_cont)
            x = torch.cat([x, x_cont], 1) if x_cat is not None else x_cont
        return self.bayesian_tab_mlp(x)
def _get_current_time(): return datetime.datetime.now().strftime('%B %d, %Y - %I:%M%p')
def _is_metric(monitor: str): if any([(s in monitor) for s in ['acc', 'prec', 'rec', 'fscore', 'f1', 'f2']]): return True else: return False
class CallbackContainer(object):
    """Container holding a list of callbacks, broadcasting every event
    (train/epoch/batch/eval hooks) to each of them.
    """

    def __init__(self, callbacks: Optional[List] = None, queue_length: int = 10):
        instantiated = []
        for cb in callbacks or []:
            # Accept callback classes as well as instances.
            instantiated.append(cb() if isinstance(cb, type) else cb)
        self.callbacks = list(instantiated)
        self.queue_length = queue_length

    def set_params(self, params):
        for cb in self.callbacks:
            cb.set_params(params)

    def set_model(self, model: Any):
        self.model = model
        for cb in self.callbacks:
            cb.set_model(model)

    def set_trainer(self, trainer: Any):
        self.trainer = trainer
        for cb in self.callbacks:
            cb.set_trainer(trainer)

    def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_epoch_begin(epoch, logs)

    def on_epoch_end(
        self,
        epoch: int,
        logs: Optional[Dict] = None,
        metric: Optional[float] = None,
    ):
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_epoch_end(epoch, logs, metric)

    def on_batch_begin(self, batch: int, logs: Optional[Dict] = None):
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_batch_begin(batch, logs)

    def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_batch_end(batch, logs)

    def on_train_begin(self, logs: Optional[Dict] = None):
        logs = logs or {}
        # Stamp the run start so callbacks can log it.
        logs["start_time"] = _get_current_time()
        for cb in self.callbacks:
            cb.on_train_begin(logs)

    def on_train_end(self, logs: Optional[Dict] = None):
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_train_end(logs)

    def on_eval_begin(self, logs: Optional[Dict] = None):
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_eval_begin(logs)
class Callback(object):
    """Base class used to build new callbacks; every hook is a no-op."""

    def __init__(self):
        pass

    def set_params(self, params):
        self.params = params

    def set_model(self, model: Any):
        self.model = model

    def set_trainer(self, trainer: Any):
        self.trainer = trainer

    def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
        pass

    def on_epoch_end(
        self,
        epoch: int,
        logs: Optional[Dict] = None,
        metric: Optional[float] = None,
    ):
        pass

    def on_batch_begin(self, batch: int, logs: Optional[Dict] = None):
        pass

    def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
        pass

    def on_train_begin(self, logs: Optional[Dict] = None):
        pass

    def on_train_end(self, logs: Optional[Dict] = None):
        pass

    def on_eval_begin(self, logs: Optional[Dict] = None):
        pass
class History(Callback):
    """Records the epoch-level ``logs`` into the ``history`` attribute of
    the ``Trainer``.

    This callback runs by default within ``Trainer``, therefore, should
    not be passed to the ``Trainer``. It is included here just for
    completion."""

    def on_train_begin(self, logs: Optional[Dict] = None):
        self.trainer.history = {}

    def on_epoch_end(
        self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
    ):
        for key, value in (logs or {}).items():
            if isinstance(value, np.ndarray):
                value = value.tolist()
            if isinstance(value, list) and len(value) > 1:
                # multi-valued entries are stored as one series per component
                for idx, component in enumerate(value):
                    self.trainer.history.setdefault(key + "_" + str(idx), []).append(
                        component
                    )
            else:
                self.trainer.history.setdefault(key, []).append(value)
class LRShedulerCallback(Callback):
    """Steps the learning rate scheduler(s) at the appropriate moment:
    cyclic schedulers after every batch, all other schedulers after every
    epoch (``ReduceLROnPlateau`` receives the monitored metric).

    This callback runs by default within ``Trainer``, therefore, should
    not be passed to the ``Trainer``. It is included here just for
    completion."""

    def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
        if self.trainer.lr_scheduler is None:
            return
        if self._multiple_scheduler():
            for model_name, sch in self.trainer.lr_scheduler._schedulers.items():
                if self._is_cyclic(model_name):
                    sch.step()
        elif self.trainer.cyclic_lr:
            self.trainer.lr_scheduler.step()

    def on_epoch_end(
        self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
    ):
        if self.trainer.lr_scheduler is None:
            return
        if self._multiple_scheduler():
            for model_name, sch in self.trainer.lr_scheduler._schedulers.items():
                if self._is_cyclic(model_name):
                    continue  # cyclic schedulers already stepped per batch
                if isinstance(sch, ReduceLROnPlateau):
                    sch.step(metric)
                else:
                    sch.step()
        elif not self.trainer.cyclic_lr:
            sch = self.trainer.lr_scheduler
            if isinstance(sch, ReduceLROnPlateau):
                sch.step(metric)
            else:
                sch.step()

    def _multiple_scheduler(self):
        return self.trainer.lr_scheduler.__class__.__name__ == "MultipleLRScheduler"

    def _is_cyclic(self, model_name: str):
        return self._has_scheduler(model_name) and (
            "cycl"
            in self.trainer.lr_scheduler._schedulers[model_name].__class__.__name__.lower()
        )

    def _has_scheduler(self, model_name: str):
        return model_name in self.trainer.lr_scheduler._schedulers
class MetricCallback(Callback):
    """Resets the metric container (if any metric is used) at the start of
    every training epoch and of every evaluation run.

    This callback runs by default within ``Trainer``, therefore, should
    not be passed to the ``Trainer``. It is included here just for
    completion."""

    def __init__(self, container: MultipleMetrics):
        # container holding all the metrics tracked by the Trainer
        self.container = container

    def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
        self.container.reset()

    def on_eval_begin(self, logs: Optional[Dict] = None):
        self.container.reset()
class LRHistory(Callback):
    """Stores the learning rates used during training in the ``lr_history``
    attribute of the ``Trainer``.

    Callbacks are passed as input parameters to the `Trainer` class. See
    `pytorch_widedeep.trainer.Trainer`

    Parameters
    ----------
    n_epochs: int
        number of training epochs

    Examples
    --------
    >>> from pytorch_widedeep.callbacks import LRHistory
    >>> from pytorch_widedeep.models import TabMlp, Wide, WideDeep
    >>> from pytorch_widedeep.training import Trainer
    >>>
    >>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
    >>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
    >>> wide = Wide(10, 1)
    >>> deep = TabMlp(mlp_hidden_dims=[8, 4], column_idx=column_idx, cat_embed_input=embed_input)
    >>> model = WideDeep(wide, deep)
    >>> trainer = Trainer(model, objective="regression", callbacks=[LRHistory(n_epochs=10)])
    """

    def __init__(self, n_epochs: int):
        super(LRHistory, self).__init__()
        self.n_epochs = n_epochs

    def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
        # initialize the history dict and record the starting lr once
        if epoch == 0 and self.trainer.lr_scheduler is not None:
            self.trainer.lr_history = {}
            if self._multiple_scheduler():
                self._save_group_lr_multiple_scheduler(step_location="on_epoch_begin")
            else:
                self._save_group_lr(self.trainer.optimizer)

    def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
        # cyclic schedulers change the lr every batch, so record per batch
        if self.trainer.lr_scheduler is None:
            return
        if self._multiple_scheduler():
            self._save_group_lr_multiple_scheduler(step_location="on_batch_end")
        elif self.trainer.cyclic_lr:
            self._save_group_lr(self.trainer.optimizer)

    def on_epoch_end(
        self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
    ):
        # non-cyclic schedulers change the lr per epoch; skip the last epoch
        if epoch == self.n_epochs - 1 or self.trainer.lr_scheduler is None:
            return
        if self._multiple_scheduler():
            self._save_group_lr_multiple_scheduler(step_location="on_epoch_end")
        elif not self.trainer.cyclic_lr:
            self._save_group_lr(self.trainer.optimizer)

    def _save_group_lr_multiple_scheduler(self, step_location: str):
        # decide, per sub-model, whether its lr should be recorded at this hook
        for model_name, opt in self.trainer.optimizer._optimizers.items():
            record = (
                step_location == "on_epoch_begin"
                or (step_location == "on_batch_end" and self._is_cyclic(model_name))
                or (step_location == "on_epoch_end" and not self._is_cyclic(model_name))
            )
            if record:
                self._save_group_lr(opt, model_name)

    def _save_group_lr(self, opt: Optimizer, model_name: Optional[str] = None):
        for group_idx, group in enumerate(opt.param_groups):
            if model_name is not None:
                group_name = "_".join(["lr", model_name, str(group_idx)])
            else:
                group_name = "_".join(["lr", str(group_idx)])
            self.trainer.lr_history.setdefault(group_name, []).append(group["lr"])

    def _multiple_scheduler(self):
        return self.trainer.lr_scheduler.__class__.__name__ == "MultipleLRScheduler"

    def _is_cyclic(self, model_name: str):
        return self._has_scheduler(model_name) and (
            "cycl"
            in self.trainer.lr_scheduler._schedulers[model_name].__class__.__name__.lower()
        )

    def _has_scheduler(self, model_name: str):
        return model_name in self.trainer.lr_scheduler._schedulers
class ModelCheckpoint(Callback):
    r"""Saves the model after every epoch.

    This class is almost identical to the corresponding keras class.
    Therefore, **credit** to the Keras Team.

    Callbacks are passed as input parameters to the `Trainer` class. See
    `pytorch_widedeep.trainer.Trainer`

    Parameters
    ----------
    filepath: str, default=None
        Full path to save the output weights. It must contain only the root of
        the filenames. Epoch number and `.p` extension (for pytorch) will be
        added. e.g. `filepath="path/to/output_weights/weights_out"` And the
        saved files in that directory will be named: _'weights_out_1.p',
        'weights_out_2.p', ..._. If set to `None` the class just reports the
        best metric and best_epoch.
    monitor: str, default="loss"
        quantity to monitor. Typically _'val_loss'_ or metric name
        (e.g. _'val_acc'_)
    min_delta: float, default=0.
        minimum change in the monitored quantity to qualify as an
        improvement, i.e. an absolute change of less than min_delta, will
        count as no improvement.
    verbose: int, default=0
        verbosity mode
    save_best_only: bool, default=False,
        the latest best model according to the quantity monitored will not be
        overwritten.
    mode: str, default="auto"
        If `save_best_only=True`, the decision to overwrite the current save
        file is made based on either the maximization or the minimization of
        the monitored quantity. For _'acc'_, this should be _'max'_, for
        _'loss'_ this should be _'min'_, etc. In _'auto'_ mode, the direction
        is automatically inferred from the name of the monitored quantity.
    period: int, default=1
        Interval (number of epochs) between checkpoints.
    max_save: int, default=-1
        Maximum number of outputs to save. If -1 will save all outputs

    Attributes
    ----------
    best: float
        best metric
    best_epoch: int
        best epoch
    best_state_dict: dict
        best model state dictionary. To restore model to its best state use
        `Trainer.model.load_state_dict(model_checkpoint.best_state_dict)`
        where `model_checkpoint` is an instance of the class
        `ModelCheckpoint`. See the Examples folder in the repo or the
        Examples section in this documentation for details

    Examples
    --------
    >>> from pytorch_widedeep.callbacks import ModelCheckpoint
    >>> from pytorch_widedeep.models import TabMlp, Wide, WideDeep
    >>> from pytorch_widedeep.training import Trainer
    >>>
    >>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
    >>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
    >>> wide = Wide(10, 1)
    >>> deep = TabMlp(mlp_hidden_dims=[8, 4], column_idx=column_idx, cat_embed_input=embed_input)
    >>> model = WideDeep(wide, deep)
    >>> trainer = Trainer(model, objective="regression", callbacks=[ModelCheckpoint(filepath='checkpoints/weights_out')])
    """

    def __init__(
        self,
        filepath: Optional[str] = None,
        monitor: str = "val_loss",
        min_delta: float = 0.0,
        verbose: int = 0,
        save_best_only: bool = False,
        mode: str = "auto",
        period: int = 1,
        max_save: int = -1,
    ):
        super(ModelCheckpoint, self).__init__()
        self.filepath = filepath
        self.monitor = monitor
        self.min_delta = min_delta
        self.verbose = verbose
        self.save_best_only = save_best_only
        self.mode = mode
        self.period = period
        self.max_save = max_save

        self.epochs_since_last_save = 0

        if self.filepath:
            if len(self.filepath.split("/")[:-1]) == 0:
                raise ValueError(
                    "'filepath' must be the full path to save the output weights,"
                    " including the root of the filenames. e.g. 'checkpoints/weights_out'"
                )
            # create the output directory if it does not exist yet
            root_dir = "/".join(self.filepath.split("/")[:-1])
            if not os.path.exists(root_dir):
                os.makedirs(root_dir)

        if self.max_save > 0:
            self.old_files: List[str] = []

        if self.mode not in ["auto", "min", "max"]:
            warnings.warn(
                "ModelCheckpoint mode %s is unknown, fallback to auto mode." % self.mode,
                RuntimeWarning,
            )
            self.mode = "auto"
        # resolve the comparison direction. NOTE: np.inf (lowercase) is used
        # throughout since the np.Inf alias was removed in numpy 2.0
        if self.mode == "min":
            self.monitor_op = np.less
            self.best = np.inf
        elif self.mode == "max":
            self.monitor_op = np.greater
            self.best = -np.inf
        elif _is_metric(self.monitor):
            # 'auto' mode: metrics are maximized...
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # ...and anything else (e.g. losses) is minimized
            self.monitor_op = np.less
            self.best = np.inf

        if self.monitor_op == np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1

    def on_epoch_end(
        self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
    ):
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            if self.filepath:
                filepath = "{}_{}.p".format(self.filepath, epoch + 1)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn(
                        "Can save best model only with %s available, skipping."
                        % self.monitor,
                        RuntimeWarning,
                    )
                elif self.monitor_op(current - self.min_delta, self.best):
                    if self.verbose > 0:
                        if self.filepath:
                            print(
                                f"\nEpoch {epoch + 1}: {self.monitor} improved from"
                                f" {self.best:.5f} to {current:.5f}"
                                f" Saving model to {filepath}"
                            )
                        else:
                            print(
                                f"\nEpoch {epoch + 1}: {self.monitor} improved from"
                                f" {self.best:.5f} to {current:.5f}"
                            )
                    self.best = current
                    self.best_epoch = epoch
                    # always keep an in-memory copy of the best weights so the
                    # model can be restored even when filepath is None
                    self.best_state_dict = copy.deepcopy(self.model.state_dict())
                    if self.filepath:
                        torch.save(self.best_state_dict, filepath)
                        if self.max_save > 0:
                            # rotate old checkpoints to honour max_save
                            if len(self.old_files) == self.max_save:
                                try:
                                    os.remove(self.old_files[0])
                                except FileNotFoundError:
                                    pass
                                self.old_files = self.old_files[1:]
                            self.old_files.append(filepath)
                elif self.verbose > 0:
                    print(
                        f"\nEpoch {epoch + 1}: {self.monitor} did not improve from"
                        f" {self.best:.5f} considering a 'min_delta' improvement of"
                        f" {self.min_delta:.5f}"
                    )
            if (not self.save_best_only) and self.filepath:
                if self.verbose > 0:
                    print("\nEpoch %05d: saving model to %s" % (epoch + 1, filepath))
                torch.save(self.model.state_dict(), filepath)
                if self.max_save > 0:
                    if len(self.old_files) == self.max_save:
                        try:
                            os.remove(self.old_files[0])
                        except FileNotFoundError:
                            pass
                        self.old_files = self.old_files[1:]
                    self.old_files.append(filepath)

    def __getstate__(self):
        # drop the trainer and model references so the callback is picklable
        d = self.__dict__
        self_dict = {k: d[k] for k in d if k not in ["trainer", "model"]}
        return self_dict

    def __setstate__(self, state):
        self.__dict__ = state
class EarlyStopping(Callback):
    r"""Stop training when a monitored quantity has stopped improving.

    This class is almost identical to the corresponding keras class.
    Therefore, **credit** to the Keras Team.

    Callbacks are passed as input parameters to the `Trainer` class. See
    `pytorch_widedeep.trainer.Trainer`

    Parameters
    ----------
    monitor: str, default='val_loss'.
        Quantity to monitor. Typically _'val_loss'_ or metric name
        (e.g. _'val_acc'_)
    min_delta: float, default=0.
        minimum change in the monitored quantity to qualify as an
        improvement, i.e. an absolute change of less than min_delta, will
        count as no improvement.
    patience: int, default=10.
        Number of epochs that produced the monitored quantity with no
        improvement after which training will be stopped.
    verbose: int.
        verbosity mode.
    mode: str, default='auto'
        one of _{'auto', 'min', 'max'}_. In _'min'_ mode, training will
        stop when the quantity monitored has stopped decreasing; in _'max'_
        mode it will stop when the quantity monitored has stopped increasing;
        in _'auto'_ mode, the direction is automatically inferred from the
        name of the monitored quantity.
    baseline: float, Optional. default=None.
        Baseline value for the monitored quantity to reach. Training will
        stop if the model does not show improvement over the baseline.
    restore_best_weights: bool, default=False
        Whether to restore model weights from the epoch with the best
        value of the monitored quantity. If `False`, the model weights
        obtained at the last step of training are used.

    Attributes
    ----------
    best: float
        best metric
    stopped_epoch: int
        epoch when the training stopped

    Examples
    --------
    >>> from pytorch_widedeep.callbacks import EarlyStopping
    >>> from pytorch_widedeep.models import TabMlp, Wide, WideDeep
    >>> from pytorch_widedeep.training import Trainer
    >>>
    >>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
    >>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
    >>> wide = Wide(10, 1)
    >>> deep = TabMlp(mlp_hidden_dims=[8, 4], column_idx=column_idx, cat_embed_input=embed_input)
    >>> model = WideDeep(wide, deep)
    >>> trainer = Trainer(model, objective="regression", callbacks=[EarlyStopping(patience=10)])
    """

    def __init__(
        self,
        monitor: str = "val_loss",
        min_delta: float = 0.0,
        patience: int = 10,
        verbose: int = 0,
        mode: str = "auto",
        baseline: Optional[float] = None,
        restore_best_weights: bool = False,
    ):
        super(EarlyStopping, self).__init__()
        self.monitor = monitor
        self.min_delta = min_delta
        self.patience = patience
        self.verbose = verbose
        self.mode = mode
        self.baseline = baseline
        self.restore_best_weights = restore_best_weights

        self.wait = 0
        self.stopped_epoch = 0
        self.state_dict = None

        if self.mode not in ["auto", "min", "max"]:
            warnings.warn(
                "EarlyStopping mode %s is unknown, fallback to auto mode." % self.mode,
                RuntimeWarning,
            )
            self.mode = "auto"
        # resolve the comparison direction: metrics are maximized, anything
        # else (e.g. losses) is minimized
        if self.mode == "min":
            self.monitor_op = np.less
        elif self.mode == "max":
            self.monitor_op = np.greater
        elif _is_metric(self.monitor):
            self.monitor_op = np.greater
        else:
            self.monitor_op = np.less

        if self.monitor_op == np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1

    def on_train_begin(self, logs: Optional[Dict] = None):
        # reset state at the beginning of every training run
        self.wait = 0
        self.stopped_epoch = 0
        # fix: initialize best_epoch so that the verbose print in
        # on_train_end cannot raise AttributeError when the monitored
        # quantity never improves over the baseline
        self.best_epoch = 0
        if self.baseline is not None:
            self.best = self.baseline
        else:
            # np.inf (lowercase): the np.Inf alias was removed in numpy 2.0
            self.best = np.inf if self.monitor_op == np.less else -np.inf

    def on_epoch_end(
        self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
    ):
        current = self.get_monitor_value(logs)
        if current is None:
            return
        if self.monitor_op(current - self.min_delta, self.best):
            self.best = current
            self.wait = 0
            self.best_epoch = epoch
            if self.restore_best_weights:
                self.state_dict = copy.deepcopy(self.model.state_dict())
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.trainer.early_stop = True

    def on_train_end(self, logs: Optional[Dict] = None):
        if self.stopped_epoch > 0 and self.verbose > 0:
            print(
                f"Best Epoch: {self.best_epoch + 1}. Best {self.monitor}: {self.best:.5f}"
            )
        if self.restore_best_weights and self.state_dict is not None:
            if self.verbose > 0:
                print("Restoring model weights from the end of the best epoch")
            self.model.load_state_dict(self.state_dict)

    def get_monitor_value(self, logs):
        monitor_value = logs.get(self.monitor)
        if monitor_value is None:
            warnings.warn(
                "Early stopping conditioned on metric `%s` "
                "which is not available. Available metrics are: %s"
                % (self.monitor, ",".join(list(logs.keys()))),
                RuntimeWarning,
            )
        return monitor_value

    def __getstate__(self):
        # drop the trainer and model references so the callback is picklable
        d = self.__dict__
        self_dict = {k: d[k] for k in d if k not in ["trainer", "model"]}
        return self_dict

    def __setstate__(self, state):
        self.__dict__ = state
def get_class_weights(dataset: WideDeepDataset) -> Tuple[np.ndarray, int, int]:
    """Helper function to get weights of classes in an imbalanced dataset.

    Parameters
    ----------
    dataset: `WideDeepDataset`
        dataset containing target classes in dataset.Y

    Returns
    -------
    weights: np.ndarray
        numpy array with the inverse class counts (one weight per class)
    minor_class_count: int
        count of samples in the smallest class, for undersampling
    num_classes: int
        number of classes
    """
    # compute the unique classes and their counts once (the original code
    # called np.unique twice over the full target array)
    classes, counts = np.unique(dataset.Y, return_counts=True)
    weights = 1 / counts
    minor_class_count = min(counts)
    num_classes = len(classes)
    return weights, minor_class_count, num_classes
class DataLoaderDefault(DataLoader):
    """Thin wrapper over the vanilla pytorch ``DataLoader`` that records
    whether the dataset uses label distribution smoothing (LDS) before
    delegating to the parent constructor."""

    def __init__(
        self, dataset: WideDeepDataset, batch_size: int, num_workers: int, **kwargs
    ):
        # expose the dataset's LDS flag on the loader itself
        self.with_lds = dataset.with_lds
        super().__init__(
            dataset=dataset, batch_size=batch_size, num_workers=num_workers, **kwargs
        )
class DataLoaderImbalanced(DataLoader):
    r"""Class to load and shuffle batches with adjusted weights for imbalanced
    datasets. If the classes do not begin from 0 remapping is necessary. See
    [here](https://towardsdatascience.com/pytorch-tabular-multiclass-classification-9f8211a123ab).

    Parameters
    ----------
    dataset: `WideDeepDataset`
        see `pytorch_widedeep.training._wd_dataset`
    batch_size: int
        size of batch
    num_workers: int
        number of workers

    Other Parameters
    ----------------
    **kwargs: Dict
        This can include any parameter that can be passed to the _'standard'_
        pytorch
        [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)
        and that is not already explicitely passed to the class. In addition,
        the dictionary can also include the extra parameter `oversample_mul`
        which will multiply the number of samples of the minority class to be
        sampled by the
        [`WeightedRandomSampler`](https://pytorch.org/docs/stable/data.html#torch.utils.data.WeightedRandomSampler).

        In other words, the `num_samples` param in `WeightedRandomSampler`
        will be defined as:

        $$
        minority \space class \space count \times number \space of \space classes \times oversample\_mul
        $$
    """

    def __init__(
        self, dataset: WideDeepDataset, batch_size: int, num_workers: int, **kwargs
    ):
        self.with_lds = dataset.with_lds
        # pull our extra knob out of kwargs before they reach DataLoader
        oversample_mul = kwargs.pop("oversample_mul", 1)
        weights, minor_cls_cnt, num_clss = get_class_weights(dataset)
        num_samples = int(minor_cls_cnt * num_clss * oversample_mul)
        # one weight per sample: the inverse frequency of its class
        samples_weight = list(np.array([weights[i] for i in dataset.Y]))
        sampler = WeightedRandomSampler(samples_weight, num_samples, replacement=True)
        super().__init__(
            dataset, batch_size, num_workers=num_workers, sampler=sampler, **kwargs
        )
def load_bio_kdd04(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the highly imbalanced binary classification Protein
    Homology Dataset from [KDD cup 2004](https://www.kdd.org/kdd-cup/view/kdd-cup-2004/Data).
    This dataset includes only the bio_train.dat part of the original data.

    * The first element of each line is a BLOCK ID that denotes to which
      native sequence the example belongs. BLOCK IDs are integers running
      from 1 to 303 (one per native sequence, i.e. per query). They were
      assigned before the train/test split, so they do not run
      consecutively in either file.
    * The second element is an EXAMPLE ID that uniquely describes the
      example. Both the EXAMPLE ID and the BLOCK ID are needed when
      submitting results.
    * The third element is the class of the example: proteins homologous to
      the native sequence are denoted by 1, non-homologous proteins (i.e.
      decoys) by 0. Test examples have a "?" in this position.
    * All following elements are the 74 feature values describing the match
      (e.g. the score of a sequence alignment) between the native protein
      sequence and the sequence tested for homology.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "bio_train.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_adult(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the highly imbalanced binary classification
    [adult income dataset](http://www.cs.toronto.edu/~delve/data/adult/desc.html).
    A detailed description can be found
    [here](http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html).
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "adult.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_ecoli(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the highly imbalanced multiclass classification e.coli
    dataset (Protein Localization Sites) from the
    [UCI Machine learning Repository](https://archive.ics.uci.edu/ml/datasets/ecoli).

    Created and maintained by Kenta Nakai (Institute of Molecular and
    Cellular Biology, Osaka University); donated by Paul Horton,
    September 1996. Past usage: "A Probablistic Classification System for
    Predicting the Cellular Localization Sites of Proteins", Paul Horton &
    Kenta Nakai, Intelligent Systems in Molecular Biology, 109-115,
    St. Louis, USA 1996 (81% accuracy for E.coli with an ad hoc structured
    probability model). Predecessor versions of the dataset are described in
    Nakai & Kanehisa, PROTEINS: Structure, Function, and Genetics 11:95-110,
    1991 and Genomics 14:897-911, 1992.

    Predicted attribute: localization site of protein (non-numeric).

    Number of instances: 336. Number of attributes: 8 (7 predictive, 1 name):

    1. Sequence Name: accession number for the SWISS-PROT database
    2. mcg: McGeoch's method for signal sequence recognition
    3. gvh: von Heijne's method for signal sequence recognition
    4. lip: von Heijne's Signal Peptidase II consensus sequence score (binary)
    5. chg: presence of charge on N-terminus of predicted lipoproteins (binary)
    6. aac: score of discriminant analysis of the amino acid content of
       outer membrane and periplasmic proteins
    7. alm1: score of the ALOM membrane spanning region prediction program
    8. alm2: score of ALOM program after excluding putative cleavable signal
       regions from the sequence

    Missing attribute values: none.

    Class distribution (the class is the localization site; see Nakai &
    Kanehisa referenced above for more details):

        cp  (cytoplasm)                                     143
        im  (inner membrane without signal sequence)         77
        pp  (perisplasm)                                     52
        imU (inner membrane, uncleavable signal sequence)    35
        om  (outer membrane)                                 20
        omL (outer membrane lipoprotein)                      5
        imL (inner membrane lipoprotein)                      2
        imS (inner membrane, cleavable signal sequence)       2
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "ecoli.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_california_housing(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the California housing regression dataset.

    Characteristics:
        Number of Instances: 20640
        Number of Attributes: 8 numeric, predictive attributes and the target
        Attribute Information:
            - MedInc      median income in block group
            - HouseAge    median house age in block group
            - AveRooms    average number of rooms per household
            - AveBedrms   average number of bedrooms per household
            - Population  block group population
            - AveOccup    average number of household members
            - Latitude    block group latitude
            - Longitude   block group longitude

    This dataset was obtained from the StatLib repository:
    https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html

    The target variable is the median house value for California districts,
    expressed in hundreds of thousands of dollars ($100,000).

    The data was derived from the 1990 U.S. census, using one row per census
    block group (the smallest geographical unit for which the U.S. Census
    Bureau publishes sample data; typically 600 to 3,000 people). A household
    is a group of people residing within a home; since the average number of
    rooms and bedrooms are provided per household, these columns may take
    surprisingly large values for block groups with few households and many
    empty houses, such as vacation resorts.

    References
    ----------
    Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
    Statistics and Probability Letters, 33 (1997) 291-297.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "california_housing.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_birds(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the multi-label classification bird dataset.

    References
    ----------
    http://mulan.sourceforge.net/datasets-mlc.html

    F. Briggs, Yonghong Huang, R. Raich, K. Eftaxias, Zhong Lei, W. Cukierski,
    S. Hadley, A. Hadley, M. Betts, X. Fern, J. Irvine, L. Neal, A. Thomas,
    G. Fodor, G. Tsoumakas, Hong Wei Ng, Thi Ngoc Tho Nguyen, H. Huttunen,
    P. Ruusuvuori, T. Manninen, A. Diment, T. Virtanen, J. Marzat,
    J. Defretin, D. Callender, C. Hurlburt, K. Larrey, M. Milakov.
    "The 9th annual MLSP competition: New methods for acoustic classification
    of multiple simultaneous bird species in a noisy environment", in proc.
    2013 IEEE International Workshop on Machine Learning for Signal
    Processing (MLSP)
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "birds_train.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_rf1(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the multi-target regression River Flow (RF1) dataset.

    Characteristics:
        The river flow data set (RF1) concerns a prediction task in which
        flows in a river network are predicted for 48 hours in the future at
        8 different locations in the Mississippi River network in the United
        States. RF1 is one of the multi-target regression problems listed in
        the literature survey on multi-target regression problems by Borchani
        et al., and therefore serves as a good test case for the active
        learning algorithm. Each row includes the most recent observation for
        each of the 8 sites as well as time-lagged observations from 6, 12,
        18, 24, 36, 48 and 60 hours in the past, so the data set consists of
        64 attribute variables and 8 target variables in total. It contains
        over 1 year of hourly observations (over 9000 data points) collected
        from September 2011 to September 2012 by the US National Weather
        Service. From these 9000 data points, 1000 have been randomly sampled
        for training and 2000 for evaluation.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "rf1_train.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_womens_ecommerce(as_frame: bool = False) -> Union[np.ndarray, pd.DataFrame]:
    """Load and return the Women's Clothing E-Commerce reviews dataset.

    Context:
        This is a Women's Clothing E-Commerce dataset revolving around the
        reviews written by customers. Its nine supportive features offer a
        great environment to parse out the text through its multiple
        dimensions. Because this is real commercial data, it has been
        anonymized, and references to the company in the review text and
        body have been replaced with "retailer".

    Content:
        This dataset includes 23486 rows and 10 feature variables. Each row
        corresponds to a customer review, and includes the variables:

        - Clothing ID: integer categorical variable referring to the specific
          piece being reviewed.
        - Age: positive integer variable of the reviewer's age.
        - Title: string variable for the title of the review.
        - Review Text: string variable for the review body.
        - Rating: positive ordinal integer variable for the product score
          granted by the customer, from 1 Worst to 5 Best.
        - Recommended IND: binary variable stating whether the customer
          recommends the product (1 recommended, 0 not recommended).
        - Positive Feedback Count: positive integer documenting the number of
          other customers who found this review positive.
        - Division Name: categorical name of the product high level division.
        - Department Name: categorical name of the product department.
        - Class Name: categorical name of the product class.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "WomensClothingE-CommerceReviews.parquet.brotli"
    ) as fpath:
        df = pd.read_parquet(fpath)
    return df if as_frame else df.to_numpy()
def load_movielens100k(
    as_frame: bool = False,
) -> Union[
    Tuple[np.ndarray, np.ndarray, np.ndarray],
    Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame],
]:
    """Load and return the MovieLens 100k dataset in 3 separate files.

    SUMMARY & USAGE LICENSE:
    =============================================
    MovieLens data sets were collected by the GroupLens Research Project
    at the University of Minnesota.

    This data set consists of:
        * 100,000 ratings (1-5) from 943 users on 1682 movies.
        * Each user has rated at least 20 movies.
        * Simple demographic info for the users (age, gender, occupation, zip)

    The data was collected through the MovieLens web site (movielens.umn.edu)
    during the seven-month period from September 19th, 1997 through April
    22nd, 1998. This data has been cleaned up - users who had less than 20
    ratings or did not have complete demographic information were removed
    from this data set.

    Neither the University of Minnesota nor any of the researchers involved
    can guarantee the correctness of the data, its suitability for any
    particular purpose, or the validity of results based on the use of the
    data set. The data set may be used for any research purposes under the
    following conditions:

        * The user may not state or imply any endorsement from the
          University of Minnesota or the GroupLens Research Group.
        * The user must acknowledge the use of the data set in publications
          resulting from the use of the data set (see below for citation
          information).
        * The user may not redistribute the data without separate permission.
        * The user may not use this information for any commercial or
          revenue-bearing purposes without first obtaining permission from a
          faculty member of the GroupLens Research Project at the University
          of Minnesota.

    If you have any further questions or comments, please contact GroupLens
    <grouplens-info@cs.umn.edu>.

    CITATION:
    =============================================
    To acknowledge use of the dataset in publications, please cite the
    following paper:

    F. Maxwell Harper and Joseph A. Konstan. 2015. The MovieLens Datasets:
    History and Context. ACM Transactions on Interactive Intelligent
    Systems (TiiS) 5, 4, Article 19 (December 2015), 19 pages.
    DOI=http://dx.doi.org/10.1145/2827872

    Returns
    -------
    df_data: Union[np.ndarray, pd.DataFrame]
        The full u data set, 100000 ratings by 943 users on 1682 items.
        Each user has rated at least 20 movies. Users and items are
        numbered consecutively from 1. The data is randomly ordered.
        The time stamps are unix seconds since 1/1/1970 UTC
    df_users: Union[np.ndarray, pd.DataFrame]
        Demographic information about the users.
        The user ids are the ones used in the df_data data set.
    df_items: Union[np.ndarray, pd.DataFrame]
        Information about the items (movies).
        The last 19 fields are the genres, a 1 indicates the movie
        is of that genre, a 0 indicates it is not; movies can be in
        several genres at once.
        The movie ids are the ones used in the df_data data set.
    """
    # NOTE: the Returns section above now documents the actual return order
    # (data, users, items); the previous docstring listed items before users,
    # which did not match the code below.
    with resources.path(
        "pytorch_widedeep.datasets.data", "MovieLens100k_data.parquet.brotli"
    ) as fpath:
        df_data = pd.read_parquet(fpath)
    with resources.path(
        "pytorch_widedeep.datasets.data", "MovieLens100k_items.parquet.brotli"
    ) as fpath:
        df_items = pd.read_parquet(fpath)
    with resources.path(
        "pytorch_widedeep.datasets.data", "MovieLens100k_users.parquet.brotli"
    ) as fpath:
        df_users = pd.read_parquet(fpath)

    if as_frame:
        return df_data, df_users, df_items
    else:
        return df_data.to_numpy(), df_users.to_numpy(), df_items.to_numpy()
class Initializer(object):
    """Base class for all parameter initializers.

    Subclasses must implement ``__call__``, which applies the corresponding
    initialization scheme to the parameters of ``submodel``.
    """

    def __call__(self, submodel: nn.Module):
        # subclasses are responsible for providing the actual scheme
        raise NotImplementedError("Initializer must implement this method")
class MultipleInitializer(object):
    """Maps model-component names to initializers and applies each one to
    the matching child module of a model."""

    def __init__(
        self,
        initializers: Dict[str, Union[Initializer, object]],
        verbose=True,
    ):
        self.verbose = verbose
        # classes are instantiated; instances are used as-is
        self._initializers = {
            component: init() if isinstance(init, type) else init
            for component, init in initializers.items()
        }

    def apply(self, submodel: nn.Module):
        """Runs the matching initializer on every named child of ``submodel``."""
        for child_name, child_module in submodel.named_children():
            try:
                self._initializers[child_name](child_module)
            except KeyError:
                # a child without an initializer entry is left untouched
                if self.verbose:
                    warnings.warn(
                        "No initializer found for {}".format(child_name), UserWarning
                    )
class Normal(Initializer):
    """Fills matching parameters with values drawn from ``N(mean, std)``.

    Only parameters whose name matches ``pattern`` are considered; biases
    are touched only when ``bias=True``.
    """

    def __init__(self, mean=0.0, std=1.0, bias=False, pattern="."):
        self.mean = mean
        self.std = std
        self.bias = bias
        self.pattern = pattern
        super(Normal, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                # biases are initialized only when explicitly requested
                if self.bias:
                    nn.init.normal_(param, mean=self.mean, std=self.std)
            elif param.requires_grad:
                nn.init.normal_(param, mean=self.mean, std=self.std)
class Uniform(Initializer):
    """Fills matching parameters with values drawn from ``U(a, b)``.

    Only parameters whose name matches ``pattern`` are considered; biases
    are touched only when ``bias=True``.
    """

    def __init__(self, a=0, b=1, bias=False, pattern="."):
        self.a = a
        self.b = b
        self.bias = bias
        self.pattern = pattern
        super(Uniform, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                # biases are initialized only when explicitly requested
                if self.bias:
                    nn.init.uniform_(param, a=self.a, b=self.b)
            elif param.requires_grad:
                nn.init.uniform_(param, a=self.a, b=self.b)
class ConstantInitializer(Initializer):
    """Fills matching parameters with the constant ``value``.

    Only parameters whose name matches ``pattern`` are considered; biases
    are touched only when ``bias=True``.
    """

    def __init__(self, value, bias=False, pattern="."):
        self.bias = bias
        self.value = value
        self.pattern = pattern
        super(ConstantInitializer, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                # biases are initialized only when explicitly requested
                if self.bias:
                    nn.init.constant_(param, val=self.value)
            elif param.requires_grad:
                nn.init.constant_(param, val=self.value)
class XavierUniform(Initializer):
    """Xavier/Glorot uniform initialization for weights; biases are zeroed.

    Only parameters whose name matches ``pattern`` are considered.
    """

    def __init__(self, gain=1, pattern="."):
        self.gain = gain
        self.pattern = pattern
        super(XavierUniform, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                nn.init.constant_(param, val=0)
            elif param.requires_grad:
                # xavier init fails for parameters with fewer than 2 dims;
                # those are silently left untouched
                try:
                    nn.init.xavier_uniform_(param, gain=self.gain)
                except Exception:
                    pass
class XavierNormal(Initializer):
    """Xavier/Glorot normal initialization for weights; biases are zeroed.

    Only parameters whose name matches ``pattern`` are considered.
    """

    def __init__(self, gain=1, pattern="."):
        self.gain = gain
        self.pattern = pattern
        super(XavierNormal, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                nn.init.constant_(param, val=0)
            elif param.requires_grad:
                # xavier init fails for parameters with fewer than 2 dims;
                # those are silently left untouched
                try:
                    nn.init.xavier_normal_(param, gain=self.gain)
                except Exception:
                    pass
class KaimingUniform(Initializer):
    """Kaiming/He *uniform* initialization for weights; biases are zeroed.

    Parameters
    ----------
    a: float, default = 0
        negative slope of the rectifier used after this layer (passed
        straight to ``nn.init.kaiming_uniform_``)
    mode: str, default = "fan_in"
        either ``'fan_in'`` or ``'fan_out'``
    nonlinearity: str, default = "leaky_relu"
        name of the non-linearity, passed to ``nn.init.kaiming_uniform_``
    pattern: str, default = "."
        regex; only parameters whose name matches are initialized
    """

    def __init__(self, a=0, mode="fan_in", nonlinearity="leaky_relu", pattern="."):
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.pattern = pattern
        super(KaimingUniform, self).__init__()

    def __call__(self, submodel: nn.Module):
        for n, p in submodel.named_parameters():
            if re.search(self.pattern, n):
                if "bias" in n:
                    nn.init.constant_(p, val=0)
                elif p.requires_grad:
                    # kaiming init fails for parameters with fewer than 2
                    # dims; those are silently left untouched
                    try:
                        # Fix: the original erroneously called
                        # nn.init.kaiming_normal_, making this class a
                        # duplicate of KaimingNormal
                        nn.init.kaiming_uniform_(
                            p, a=self.a, mode=self.mode, nonlinearity=self.nonlinearity
                        )
                    except Exception:
                        pass
class KaimingNormal(Initializer):
    """Kaiming/He normal initialization for weights; biases are zeroed.

    Only parameters whose name matches ``pattern`` are considered. ``a``,
    ``mode`` and ``nonlinearity`` are passed to ``nn.init.kaiming_normal_``.
    """

    def __init__(self, a=0, mode="fan_in", nonlinearity="leaky_relu", pattern="."):
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.pattern = pattern
        super(KaimingNormal, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                nn.init.constant_(param, val=0)
            elif param.requires_grad:
                # kaiming init fails for parameters with fewer than 2 dims;
                # those are silently left untouched
                try:
                    nn.init.kaiming_normal_(
                        param, a=self.a, mode=self.mode, nonlinearity=self.nonlinearity
                    )
                except Exception:
                    pass
class Orthogonal(Initializer):
    """Orthogonal initialization for weights; biases are zeroed.

    Only parameters whose name matches ``pattern`` are considered.
    """

    def __init__(self, gain=1, pattern="."):
        self.gain = gain
        self.pattern = pattern
        super(Orthogonal, self).__init__()

    def __call__(self, submodel: nn.Module):
        for param_name, param in submodel.named_parameters():
            if not re.search(self.pattern, param_name):
                continue
            if "bias" in param_name:
                nn.init.constant_(param, val=0)
            elif param.requires_grad:
                # orthogonal init fails for parameters with fewer than 2
                # dims; those are silently left untouched
                try:
                    nn.init.orthogonal_(param, gain=self.gain)
                except Exception:
                    pass
class TabFromFolder:
    """Loads tabular data from disk, one row at a time.

    Current constraints:

    1. The only file format supported right now is csv
    2. The csv file must contain headers

    For examples, please, see the examples folder in the repo.

    Parameters
    ----------
    fname: str
        the name of the csv file
    directory: str, Optional, default = None
        the path to the directory where the csv file is located. If None,
        a `TabFromFolder` reference object must be provided
    target_col: str, Optional, default = None
        the name of the target column. If None, a `TabFromFolder` reference
        object must be provided (or `ignore_target` must be True)
    preprocessor: `TabularPreprocessor`, Optional, default = None
        a fitted `TabularPreprocessor` object. If None, a `TabFromFolder`
        reference object must be provided
    text_col: str, Optional, default = None
        the name of the column with the texts themselves or the names of the
        files that contain the text dataset
    img_col: str, Optional, default = None
        the name of the column with the names of the images
    ignore_target: bool, default = False
        whether to ignore the target column. This is normally set to True
        when this class is used for a test dataset
    reference: `TabFromFolder`, Optional, default = None
        a reference `TabFromFolder` object. If provided, this object is
        built from the attributes of the reference (useful for eval/test)
    verbose: int, default = 1
        verbosity. If 0, no output will be printed during the process.
    """

    def __init__(
        self,
        fname: str,
        directory: Optional[str] = None,
        target_col: Optional[str] = None,
        preprocessor: Optional[TabularPreprocessor] = None,
        text_col: Optional[str] = None,
        img_col: Optional[str] = None,
        ignore_target: bool = False,
        reference: Type["TabFromFolder"] = None,
        verbose: Optional[int] = 1,
    ):
        self.fname = fname
        self.ignore_target = ignore_target
        self.verbose = verbose

        if reference is not None:
            (
                self.directory,
                self.target_col,
                self.preprocessor,
                self.text_col,
                self.img_col,
            ) = self._set_from_reference(reference, preprocessor)
        else:
            # Fix: when the target is ignored, 'target_col' is not required.
            # The original condition ('target_col is not None and not
            # ignore_target') failed whenever ignore_target=True even with
            # every other argument provided.
            assert (
                directory is not None
                and (target_col is not None or ignore_target)
                and preprocessor is not None
            ), (
                "if no reference is provided, 'directory', 'target_col' and "
                "'preprocessor' must be provided"
            )
            self.directory = directory
            self.target_col = target_col
            self.preprocessor = preprocessor
            self.text_col = text_col
            self.img_col = img_col

        assert (
            self.preprocessor.is_fitted
        ), "The preprocessor must be fitted before passing it to this class"

    def get_item(
        self, idx: int
    ) -> Tuple[np.ndarray, Optional[str], Optional[str], Optional[Union[int, float]]]:
        """Reads and preprocesses row ``idx`` of the csv file.

        Returns the processed tabular sample plus (optionally) the text (or
        text filename), the image filename and the target value.
        """
        path = os.path.join(self.directory, self.fname)
        try:
            if not hasattr(self, "colnames"):
                # the header row is read once and cached
                self.colnames = pd.read_csv(path, nrows=0).columns.tolist()
            # skip every row except idx (+1 accounts for the header row)
            _sample = pd.read_csv(
                path, skiprows=lambda x: x != idx + 1, header=None
            ).values
            sample = pd.DataFrame(_sample, columns=self.colnames)
        except Exception:
            raise ValueError("Currently only csv format is supported.")

        text_fname_or_text: Optional[str] = (
            sample[self.text_col].to_list()[0] if self.text_col is not None else None
        )
        img_fname: Optional[str] = (
            sample[self.img_col].to_list()[0] if self.img_col is not None else None
        )

        processed_sample = self.preprocessor.transform_sample(sample)

        target = (
            sample[self.target_col].to_list()[0] if not self.ignore_target else None
        )

        return processed_sample, text_fname_or_text, img_fname, target

    def _set_from_reference(
        self,
        reference: Type["TabFromFolder"],
        preprocessor: Optional[TabularPreprocessor],
    ) -> Tuple[str, str, TabularPreprocessor, Optional[str], Optional[str]]:
        # pull everything from the reference; an explicitly provided
        # preprocessor takes precedence over the reference's
        (
            directory,
            target_col,
            _preprocessor,
            text_col,
            img_col,
        ) = self._get_from_reference(reference)

        if preprocessor is not None:
            # Fix: the original built a UserWarning instance without ever
            # emitting it; the warning is now actually raised
            if self.verbose:
                warnings.warn(
                    "The preprocessor from the reference object is overwritten by the provided preprocessor",
                    UserWarning,
                )
        else:
            preprocessor = _preprocessor

        return directory, target_col, preprocessor, text_col, img_col

    @staticmethod
    def _get_from_reference(
        reference: Type["TabFromFolder"],
    ) -> Tuple[str, str, TabularPreprocessor, Optional[str], Optional[str]]:
        return (
            reference.directory,
            reference.target_col,
            reference.preprocessor,
            reference.text_col,
            reference.img_col,
        )

    def __repr__(self) -> str:
        # Fix: the original mixed '{self.fname}'-style placeholders with
        # str.format(**self.__dict__) (KeyError: 'self' is not in __dict__)
        # and returned the literal text 'self.__class__.__name__(...)'.
        # Interpolate directly with f-strings instead.
        params: List[str] = []
        if self.fname is not None:
            params.append(f"fname={self.fname}")
        if self.directory is not None:
            params.append(f"directory={self.directory}")
        if self.target_col is not None:
            params.append(f"target_col={self.target_col}")
        if self.preprocessor is not None:
            params.append(f"preprocessor={self.preprocessor.__class__.__name__}")
        if self.text_col is not None:
            params.append(f"text_col={self.text_col}")
        if self.img_col is not None:
            params.append(f"img_col={self.img_col}")
        params.append(f"ignore_target={self.ignore_target}")
        if self.verbose is not None:
            params.append(f"verbose={self.verbose}")
        return f"{self.__class__.__name__}({', '.join(params)})"
class WideFromFolder(TabFromFolder):
    """Loads the *wide* component's tabular data from disk.

    Mostly identical to ``TabFromFolder``; it exists only to keep the
    treatment of the wide and the deep tabular components separate.

    Parameters
    ----------
    fname: str
        the name of the csv file
    directory: str, Optional, default = None
        path to the directory where the csv file is located. If None, a
        ``WideFromFolder`` reference object must be provided
    target_col: str, Optional, default = None
        name of the target column. If None, a ``WideFromFolder`` reference
        object must be provided
    preprocessor: ``TabularPreprocessor``, Optional, default = None
        a fitted ``TabularPreprocessor`` object. If None, a
        ``WideFromFolder`` reference object must be provided
    text_col: str, Optional, default = None
        name of the column with the texts themselves or the names of the
        files that contain the text dataset
    img_col: str, Optional, default = None
        name of the column with the names of the images
    ignore_target: bool, default = False
        whether to ignore the target column (normally True for a test set)
    reference: ``WideFromFolder``, Optional, default = None
        reference object whose attributes will be used to build this one
        (useful for eval/test datasets)
    verbose: int, default = 1
        verbosity. If 0, no output will be printed during the process.
    """

    def __init__(
        self,
        fname: str,
        directory: Optional[str] = None,
        target_col: Optional[str] = None,
        preprocessor: Optional[TabularPreprocessor] = None,
        text_col: Optional[str] = None,
        img_col: Optional[str] = None,
        ignore_target: bool = False,
        reference: Type["WideFromFolder"] = None,
        verbose: int = 1,
    ):
        # everything is delegated to the parent class
        super(WideFromFolder, self).__init__(
            fname=fname,
            directory=directory,
            target_col=target_col,
            preprocessor=preprocessor,
            text_col=text_col,
            img_col=img_col,
            ignore_target=ignore_target,
            reference=reference,
            verbose=verbose,
        )
class TextFromFolder:
    """Retrieves a text sample either from a file on disk or directly from
    the value of a texts column, and preprocesses it.

    For examples, please, see the examples folder in the repo.

    Parameters
    ----------
    preprocessor: Union[TextPreprocessor, ChunkTextPreprocessor]
        The preprocessor used to process the text. It must be fitted before
        using this class
    """

    def __init__(self, preprocessor: Union[TextPreprocessor, ChunkTextPreprocessor]):
        assert (
            preprocessor.is_fitted
        ), "The preprocessor must be fitted before using this class"
        self.preprocessor = preprocessor

    def get_item(self, text: str) -> np.ndarray:
        """Returns the processed sample. ``text`` is either the text itself
        or, when the preprocessor defines a ``root_dir``, the name of the
        file containing it."""
        reads_from_disk = (
            isinstance(self.preprocessor, ChunkTextPreprocessor)
            and self.preprocessor.root_dir is not None
        )
        if reads_from_disk:
            fpath = os.path.join(self.preprocessor.root_dir, text)
            with open(fpath, "r") as f:
                sample = f.read().replace("\n", "")
        else:
            sample = text
        return self.preprocessor.transform_sample(sample)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.preprocessor.__class__.__name__})"
class WideDeepDatasetFromFolder(Dataset):
    """Dataset counterpart of the ``WideDeepDataset`` class.

    Given a reference tabular dataset, with columns that indicate the path
    to the images and to the text files or the texts themselves, it uses the
    ``[...]FromFolder`` classes to load the data consistently from disk per
    batch.

    For examples, please, see the examples folder in the repo.

    Parameters
    ----------
    n_samples: int
        Number of samples in the dataset
    tab_from_folder: TabFromFolder
        Instance of the TabFromFolder class
    wide_from_folder: Optional[WideFromFolder], default = None
        Instance of the WideFromFolder class
    text_from_folder: Optional[TextFromFolder], default = None
        Instance of the TextFromFolder class
    img_from_folder: Optional[ImageFromFolder], default = None
        Instance of the ImageFromFolder class
    reference: Type["WideDeepDatasetFromFolder"], default = None
        If not None, 'text_from_folder' and 'img_from_folder' are taken from
        the reference instance, so train/valid/test datasets can share them
        without creating new instances.
    """

    def __init__(
        self,
        n_samples: int,
        tab_from_folder: Optional[TabFromFolder] = None,
        wide_from_folder: Optional[WideFromFolder] = None,
        text_from_folder: Optional[TextFromFolder] = None,
        img_from_folder: Optional[ImageFromFolder] = None,
        reference: Type["WideDeepDatasetFromFolder"] = None,
    ):
        super(WideDeepDatasetFromFolder, self).__init__()

        if tab_from_folder is None and wide_from_folder is None:
            raise ValueError(
                "Either 'tab_from_folder' or 'wide_from_folder' must be not None"
            )

        if reference is not None:
            assert (
                img_from_folder is None and text_from_folder is None
            ), "If reference is not None, 'img_from_folder' and 'text_from_folder' must be None"
            self.text_from_folder, self.img_from_folder = self._get_from_reference(
                reference
            )
        else:
            self.text_from_folder = text_from_folder
            self.img_from_folder = img_from_folder

        self.n_samples = n_samples
        self.tab_from_folder = tab_from_folder
        self.wide_from_folder = wide_from_folder

    def __getitem__(self, idx: int):
        x = Bunch()
        # Defensive defaults: the original relied on branch order to bind
        # these names; pre-binding them guarantees they exist even if the
        # branches below evolve
        text_fname_or_text = img_fname = y = None

        if self.tab_from_folder is not None:
            X_tab, text_fname_or_text, img_fname, y = self.tab_from_folder.get_item(
                idx=idx
            )
            x.deeptabular = X_tab

        if self.wide_from_folder is not None:
            if self.tab_from_folder is None:
                (
                    X_wide,
                    text_fname_or_text,
                    img_fname,
                    y,
                ) = self.wide_from_folder.get_item(idx=idx)
            else:
                # text/image/target were already obtained from the deep side
                X_wide, _, _, _ = self.wide_from_folder.get_item(idx=idx)
            x.wide = X_wide

        if text_fname_or_text is not None:
            x.deeptext = self.text_from_folder.get_item(text_fname_or_text)

        if img_fname is not None:
            x.deepimage = self.img_from_folder.get_item(img_fname)

        # when there is no target (e.g. a test set) only X is returned
        return (x, y) if y is not None else x

    def __len__(self):
        return self.n_samples

    @staticmethod
    def _get_from_reference(
        reference: Type["WideDeepDatasetFromFolder"],
    ) -> Tuple[Optional[TextFromFolder], Optional[ImageFromFolder]]:
        return reference.text_from_folder, reference.img_from_folder

    def __repr__(self) -> str:
        # Fix: consistent f-string interpolation; the original mixed
        # str.format placeholders with already-interpolated f-strings and
        # then re-formatted the joined string, which is fragile
        params: List[str] = [f"n_samples={self.n_samples}"]
        if self.tab_from_folder is not None:
            params.append(f"tab_from_folder={self.tab_from_folder.__class__.__name__}")
        if self.wide_from_folder is not None:
            params.append(
                f"wide_from_folder={self.wide_from_folder.__class__.__name__}"
            )
        if self.text_from_folder is not None:
            params.append(
                f"text_from_folder={self.text_from_folder.__class__.__name__}"
            )
        if self.img_from_folder is not None:
            params.append(f"img_from_folder={self.img_from_folder.__class__.__name__}")
        return f"WideDeepDatasetFromFolder({', '.join(params)})"
class Metric(object):
    """Base class every custom metric must subclass.

    Subclasses implement ``reset`` (clear the internal counters) and
    ``__call__`` (update the counters and return the running metric value).
    """

    def __init__(self):
        # display name used when logging the metric
        self._name = ""

    def reset(self):
        raise NotImplementedError("Custom Metrics must implement this function")

    def __call__(self, y_pred: Tensor, y_true: Tensor):
        raise NotImplementedError("Custom Metrics must implement this function")
class MultipleMetrics(object):
    """Wraps a list of metrics (both pytorch-widedeep ``Metric`` objects and
    ``torchmetrics`` metrics) and computes all of them in one call,
    returning a ``name -> value`` dictionary."""

    def __init__(self, metrics: List[Union[Metric, object]], prefix: str = ""):
        # metric classes are instantiated; metric instances are used as-is
        self._metrics = [m() if isinstance(m, type) else m for m in metrics]
        self.prefix = prefix

    def reset(self):
        for m in self._metrics:
            m.reset()

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> Dict:
        logs = {}
        for m in self._metrics:
            if isinstance(m, Metric):
                logs[self.prefix + m._name] = m(y_pred, y_true)
            elif isinstance(m, TorchMetric):
                # torchmetrics API: accumulate state, then compute aggregate
                m.update(y_pred, y_true.int())
                logs[self.prefix + type(m).__name__] = (
                    m.compute().detach().cpu().numpy()
                )
        return logs
class Accuracy(Metric):
    """Running accuracy for both binary and categorical problems.

    Parameters
    ----------
    top_k: int, default = 1
        In multiclass problems a prediction counts as correct if the true
        label is among the ``top_k`` most likely classes
    """

    def __init__(self, top_k: int = 1):
        super(Accuracy, self).__init__()
        self.top_k = top_k
        self.correct_count = 0
        self.total_count = 0
        self._name = "acc"

    def reset(self):
        """Resets the running counters to 0"""
        self.correct_count = 0
        self.total_count = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        num_classes = y_pred.size(1)

        predictions, targets = y_pred, y_true
        if num_classes == 1:
            # binary case: probabilities rounded to the predicted class
            predictions = y_pred.round()
        elif num_classes > 1:
            # multiclass: hit when the true label is within the top-k
            predictions = y_pred.topk(self.top_k, 1)[1]
            targets = y_true.view(-1, 1).expand_as(predictions)

        self.correct_count += predictions.eq(targets).sum().item()
        self.total_count += len(predictions)
        return np.array(float(self.correct_count) / float(self.total_count))
class Precision(Metric):
    """Running precision for both binary and categorical problems.

    Parameters
    ----------
    average: bool, default = True
        Multiclass only: if ``True``, per-class precisions are averaged
        (unweighted); otherwise the per-class values are returned
    """

    def __init__(self, average: bool = True):
        super(Precision, self).__init__()
        self.average = average
        self.true_positives = 0
        self.all_positives = 0
        self.eps = 1e-20  # avoids division by zero before any positives
        self._name = "prec"

    def reset(self):
        """Resets the running counters to 0"""
        self.true_positives = 0
        self.all_positives = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        num_class = y_pred.size(1)

        if num_class == 1:
            # binary case: probabilities rounded to the predicted class
            y_pred = y_pred.round()
        elif num_class > 1:
            # one-hot encode both the labels and the argmax predictions
            y_true = torch.eye(num_class)[y_true.squeeze().cpu().long()]
            y_pred = torch.eye(num_class)[y_pred.topk(1, 1)[1].view(-1).cpu().long()]

        self.true_positives += (y_true * y_pred).sum(dim=0)
        self.all_positives += y_pred.sum(dim=0)

        precision = self.true_positives / (self.all_positives + self.eps)
        if self.average:
            return np.array(precision.mean().item())
        return precision.detach().cpu().numpy()
class Recall(Metric):
    """Running recall for both binary and categorical problems.

    Parameters
    ----------
    average: bool, default = True
        Multiclass only: if ``True``, per-class recalls are averaged
        (unweighted); otherwise the per-class values are returned
    """

    def __init__(self, average: bool = True):
        super(Recall, self).__init__()
        self.average = average
        self.true_positives = 0
        self.actual_positives = 0
        self.eps = 1e-20  # avoids division by zero before any positives
        self._name = "rec"

    def reset(self):
        """Resets the running counters to 0"""
        self.true_positives = 0
        self.actual_positives = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        num_class = y_pred.size(1)

        if num_class == 1:
            # binary case: probabilities rounded to the predicted class
            y_pred = y_pred.round()
        elif num_class > 1:
            # one-hot encode both the labels and the argmax predictions
            y_true = torch.eye(num_class)[y_true.squeeze().cpu().long()]
            y_pred = torch.eye(num_class)[y_pred.topk(1, 1)[1].view(-1).cpu().long()]

        self.true_positives += (y_true * y_pred).sum(dim=0)
        self.actual_positives += y_true.sum(dim=0)

        recall = self.true_positives / (self.actual_positives + self.eps)
        if self.average:
            return np.array(recall.mean().item())
        return recall.detach().cpu().numpy()
class FBetaScore(Metric):
    """Running F-beta score for both binary and categorical problems:

    $$
    F_{\\beta} = (1 + {\\beta}^2) * \\frac{precision * recall}
    {{\\beta}^2 * precision + recall}
    $$

    Parameters
    ----------
    beta: int
        Coefficient controlling the balance between precision and recall
    average: bool, default = True
        Multiclass only: if ``True``, per-class scores are averaged
        (unweighted); otherwise the per-class values are returned
    """

    def __init__(self, beta: int, average: bool = True):
        super(FBetaScore, self).__init__()
        self.beta = beta
        self.average = average
        # delegate the running counts to per-class precision/recall
        self.precision = Precision(average=False)
        self.recall = Recall(average=False)
        self.eps = 1e-20  # avoids division by zero when prec = rec = 0
        self._name = f"f{self.beta}"

    def reset(self):
        """Resets the internal precision and recall metrics"""
        self.precision.reset()
        self.recall.reset()

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        prec = self.precision(y_pred, y_true)
        rec = self.recall(y_pred, y_true)
        beta2 = self.beta**2

        fbeta = ((1 + beta2) * prec * rec) / (beta2 * prec + rec + self.eps)
        if self.average:
            return np.array(fbeta.mean().item())
        return fbeta
class F1Score(Metric):
    """Running F1 score (F-beta with beta = 1) for both binary and
    categorical problems.

    Parameters
    ----------
    average: bool, default = True
        Multiclass only: if ``True``, per-class scores are averaged
        (unweighted); otherwise the per-class values are returned
    """

    def __init__(self, average: bool = True):
        super(F1Score, self).__init__()
        self.average = average
        # F1 is simply FBeta with beta fixed to 1
        self.f1 = FBetaScore(beta=1, average=average)
        self._name = self.f1._name

    def reset(self):
        """Resets the internal FBeta metric"""
        self.f1.reset()

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        return self.f1(y_pred, y_true)
class R2Score(Metric):
    """Calculates R-Squared, the
    [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination):

    $$
    R^2 = 1 - \\frac{\\sum_{j=1}^n(y_j - \\hat{y_j})^2}{\\sum_{j=1}^n(y_j - \\bar{y})^2}
    $$

    where $y_j$ is the ground truth, $\\hat{y_j}$ is the predicted value and
    $\\bar{y}$ is the mean of the ground truth.
    """

    def __init__(self):
        self.numerator = 0
        self.denominator = 0
        self.num_examples = 0
        self.y_true_sum = 0
        self._name = "r2"

    def reset(self):
        """Resets all running sums to 0"""
        self.numerator = 0
        self.denominator = 0
        self.num_examples = 0
        self.y_true_sum = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        # residual sum of squares, accumulated across batches
        self.numerator += ((y_pred - y_true) ** 2).sum().item()

        # running mean of the ground truth seen so far
        self.num_examples += y_true.shape[0]
        self.y_true_sum += y_true.sum().item()
        running_mean = self.y_true_sum / self.num_examples

        # NOTE(review): each batch's contribution to the total sum of
        # squares uses the running mean at that point; earlier contributions
        # are not revisited, so across multiple batches this is approximate
        self.denominator += ((y_true - running_mean) ** 2).sum().item()

        return np.array(1 - (self.numerator / self.denominator))
class BaseWDModelComponent(nn.Module):
    """Base class for all models passed to the ``WideDeep`` constructor.

    Subclasses must expose an ``output_dim`` property (or attribute): the
    dimension of the tensor produced by the backbone, which is connected to
    the final prediction layer or to the fully connected head.
    """

    @property
    def output_dim(self) -> int:
        # Fix: the original *returned* a NotImplementedError instance
        # instead of raising it, so accessing the property on a subclass
        # that forgot to define it silently produced an exception object
        raise NotImplementedError(
            "All models passed to the WideDeep class must contain an 'output_dim' "
            "property or attribute. This is the dimension of the output tensor "
            "coming from the backbone model that will be connected to the final "
            "prediction layer or fully connected head"
        )
class GEGLU(nn.Module):
    """GELU-gated linear unit: splits the input in two halves along the last
    dimension and gates the first half with the GELU of the second."""

    def forward(self, x):
        values, gates = x.chunk(2, dim=-1)
        return values * F.gelu(gates)
class REGLU(nn.Module):
    """ReLU-gated linear unit (Shazeer 2020, "GLU Variants Improve
    Transformer"): splits the input in two halves along the last dimension
    and gates the first half with the ReLU of the second."""

    def forward(self, x):
        x, gates = x.chunk(2, dim=-1)
        # Fix: REGLU gates with ReLU; the original applied F.gelu, which
        # made this class behave identically to GEGLU
        return x * F.relu(gates)
def get_activation_fn(activation):
    """Returns a new instance of the activation layer named ``activation``.

    Supported names: 'relu', 'leaky_relu', 'tanh', 'gelu', 'geglu', 'reglu'
    and 'softplus'. 'geglu' and 'reglu' should only be used as transformer
    activations. Raises ``ValueError`` for any other name.
    """
    # factories (not instances) so that every call returns a fresh module
    factories = {
        "relu": lambda: nn.ReLU(inplace=True),
        "leaky_relu": lambda: nn.LeakyReLU(inplace=True),
        "tanh": lambda: nn.Tanh(),
        "gelu": lambda: nn.GELU(),
        "geglu": lambda: GEGLU(),
        "reglu": lambda: REGLU(),
        "softplus": lambda: nn.Softplus(),
    }
    if activation not in factories:
        raise ValueError(
            "Only the following activation functions are currently supported: {}. Note that 'geglu' and 'reglu' should only be used as transformer's activations".format(
                ", ".join(allowed_activations)
            )
        )
    return factories[activation]()
def conv_layer(
    ni: int,
    nf: int,
    kernel_size: int = 3,
    stride: int = 1,
    maxpool: bool = True,
    adaptiveavgpool: bool = False,
):
    """Builds a Conv2d -> BatchNorm2d -> LeakyReLU block, optionally
    followed by a 2x2 max-pool and/or an adaptive average-pool to 1x1.

    Parameters
    ----------
    ni: number of input channels
    nf: number of output channels
    kernel_size: convolution kernel size (padding is kernel_size // 2)
    stride: convolution stride
    maxpool: if True, append a MaxPool2d(2, 2) named 'maxpool'
    adaptiveavgpool: if True, append an AdaptiveAvgPool2d((1, 1)) named
        'adaptiveavgpool'
    """
    core = [
        nn.Conv2d(
            ni,
            nf,
            kernel_size=kernel_size,
            stride=stride,
            bias=False,
            padding=kernel_size // 2,
        ),
        nn.BatchNorm2d(nf, momentum=0.01),
        nn.LeakyReLU(negative_slope=0.1, inplace=True),
    ]
    block = nn.Sequential(*core)
    # the pooling submodules are added by name (matching the original
    # state_dict keys '0', '1', '2', 'maxpool', 'adaptiveavgpool')
    if maxpool:
        block.add_module("maxpool", nn.MaxPool2d(2, 2))
    if adaptiveavgpool:
        block.add_module("adaptiveavgpool", nn.AdaptiveAvgPool2d(output_size=(1, 1)))
    return block
class Vision(BaseWDModelComponent):
    r"""Defines a standard image classifier/regressor using a pretrained
    network or a sequence of convolution layers that can be used as the
    `deepimage` component of a Wide & Deep model or independently by itself.

    :information_source: **NOTE**: this class represents the integration
    between `pytorch-widedeep` and `torchvision`. New architectures will be
    available as they are added to `torchvision`.

    Parameters
    ----------
    pretrained_model_setup: Optional, str or dict, default = None
        Name of the pretrained model. Should be a variant of the following
        architectures: _'resnet'_, _'shufflenet'_, _'resnext'_,
        _'wide_resnet'_, _'regnet'_, _'densenet'_, _'mobilenetv3'_,
        _'mobilenetv2'_, _'mnasnet'_, _'efficientnet'_ and _'squeezenet'_. If
        `pretrained_model_setup = None` a basic, fully trainable CNN will be
        used. Alternatively, since Torchvision 0.13, it can also be a
        dictionary with the name of the model and the weights (e.g.
        `{'resnet50': ResNet50_Weights.DEFAULT}` or
        `{'resnet50': "IMAGENET1K_V2"}`). <br/> Aliased as
        `pretrained_model_name`.
    n_trainable: Optional, int, default = None
        Number of trainable layers starting from the layer closer to the
        output neuron(s). This number DOES NOT include the so-called
        _'head'_, which is ALWAYS trainable. Ignored if `trainable_params`
        is not None.
    trainable_params: Optional, list, default = None
        List of strings containing the names (or substrings within the name)
        of the parameters that will be trained. For example, with a
        _'resnet18'_ model, `trainable_params = ['layer4']` trains only the
        parameters of _'layer4'_ (and the head). Using this or the previous
        parameter requires some knowledge of the architecture used.
    channel_sizes: list, default = [64, 128, 256, 512]
        Channel sizes of the basic CNN used when no pretrained model is chosen
    kernel_sizes: list or int, default = [7, 3, 3, 3]
        Kernel sizes of the basic CNN. Must be of length equal to
        `len(channel_sizes)` (one convolution per channel transition,
        including the initial 3-channel input).
    strides: list or int, default = [2, 1, 1, 1]
        Stride sizes of the basic CNN. Must be of length equal to
        `len(channel_sizes)`.
    head_hidden_dims: Optional, list, default = None
        List with the number of neurons per dense layer in the head. e.g: _[64,32]_
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    head_dropout: float, default = 0.1
        float indicating the dropout between the dense layers.
    head_batchnorm: bool, default = False
        Boolean indicating whether or not batch normalization will be applied
        to the dense layers
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not batch normalization will be applied
        to the last of the dense layers
    head_linear_first: bool, default = False
        Boolean indicating the order of the operations in the dense layer.
        If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP -> LIN -> ACT]`

    Attributes
    ----------
    features: nn.Module
        The pretrained model or basic CNN (the optional head is stored
        separately as ``vision_mlp``)

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import Vision
    >>> X_img = torch.rand((2,3,224,224))
    >>> model = Vision(channel_sizes=[64, 128], kernel_sizes = [3, 3], strides=[1, 1], head_hidden_dims=[32, 8])
    >>> out = model(X_img)
    """

    @Alias("pretrained_model_setup", ["pretrained_model_name"])
    def __init__(
        self,
        pretrained_model_setup: Union[str, Dict[str, Union[str, WeightsEnum]]] = None,
        n_trainable: Optional[int] = None,
        trainable_params: Optional[List[str]] = None,
        channel_sizes: List[int] = [64, 128, 256, 512],
        kernel_sizes: Union[int, List[int]] = [7, 3, 3, 3],
        strides: Union[int, List[int]] = [2, 1, 1, 1],
        head_hidden_dims: Optional[List[int]] = None,
        head_activation: str = "relu",
        head_dropout: Union[float, List[float]] = 0.1,
        head_batchnorm: bool = False,
        head_batchnorm_last: bool = False,
        head_linear_first: bool = False,
    ):
        super(Vision, self).__init__()

        self._check_pretrained_model_setup(
            pretrained_model_setup, n_trainable, trainable_params
        )

        self.pretrained_model_setup = pretrained_model_setup
        self.n_trainable = n_trainable
        self.trainable_params = trainable_params
        self.channel_sizes = channel_sizes
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first

        self.features, self.backbone_output_dim = self._get_features()

        # only pretrained backbones are (partially) frozen; the basic CNN is
        # always fully trainable
        if pretrained_model_setup is not None:
            self._freeze(self.features)

        if self.head_hidden_dims is not None:
            head_hidden_dims = [self.backbone_output_dim] + self.head_hidden_dims
            self.vision_mlp = MLP(
                head_hidden_dims,
                self.head_activation,
                self.head_dropout,
                self.head_batchnorm,
                self.head_batchnorm_last,
                self.head_linear_first,
            )

    def forward(self, X: Tensor) -> Tensor:
        x = self.features(X)
        # collapse any remaining spatial dimensions so the output is (batch, dim)
        if len(x.shape) > 2:
            if x.shape[2] > 1:
                x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
            x = torch.flatten(x, 1)
        if self.head_hidden_dims is not None:
            x = self.vision_mlp(x)
        return x

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        return (
            self.head_hidden_dims[-1]
            if self.head_hidden_dims is not None
            else self.backbone_output_dim
        )

    def _get_features(self) -> Tuple[nn.Module, int]:
        """Return the backbone module and its output feature dimension."""
        if self.pretrained_model_setup is not None:
            if isinstance(self.pretrained_model_setup, str):
                if self.pretrained_model_setup in allowed_pretrained_models.keys():
                    # a generic family name was given: pick the default variant
                    model = allowed_pretrained_models[self.pretrained_model_setup]
                    pretrained_model = torchvision.models.__dict__[model](
                        weights=torchvision.models.get_model_weights(model).DEFAULT
                    )
                    warnings.warn(
                        f"{self.pretrained_model_setup} defaulting to {model}",
                        UserWarning,
                    )
                else:
                    pretrained_model = torchvision.models.__dict__[
                        self.pretrained_model_setup
                    ](weights="IMAGENET1K_V1")
            elif isinstance(self.pretrained_model_setup, Dict):
                model_name = next(iter(self.pretrained_model_setup))
                model_weights = self.pretrained_model_setup[model_name]
                if model_name in allowed_pretrained_models.keys():
                    model_name = allowed_pretrained_models[model_name]
                pretrained_model = torchvision.models.__dict__[model_name](
                    weights=model_weights
                )
            output_dim: int = self.get_backbone_output_dim(pretrained_model)
            # drop the final classification layer; `forward` pools/flattens
            # the spatial output
            features = nn.Sequential(*list(pretrained_model.children())[:-1])
        else:
            features = self._basic_cnn()
            output_dim = self.channel_sizes[-1]
        return features, output_dim

    def _basic_cnn(self):
        """Build the fully trainable CNN used when no pretrained model is set."""
        channel_sizes = [3] + self.channel_sizes
        kernel_sizes = (
            [self.kernel_sizes] * len(self.channel_sizes)
            if isinstance(self.kernel_sizes, int)
            else self.kernel_sizes
        )
        strides = (
            [self.strides] * len(self.channel_sizes)
            if isinstance(self.strides, int)
            else self.strides
        )
        basic_cnn = nn.Sequential()
        for i in range(1, len(channel_sizes)):
            basic_cnn.add_module(
                "conv_layer_{}".format(i - 1),
                conv_layer(
                    channel_sizes[i - 1],
                    channel_sizes[i],
                    kernel_sizes[i - 1],
                    strides[i - 1],
                    maxpool=i == 1,
                    adaptiveavgpool=i == len(channel_sizes) - 1,
                ),
            )
        return basic_cnn

    def _freeze(self, features):
        """Freeze all backbone parameters except those selected through
        'trainable_params' or 'n_trainable'."""
        if self.trainable_params is not None:
            for name, param in features.named_parameters():
                # Bug fix: the original inner loop overwrote `requires_grad`
                # on every iteration, so with more than one entry only the
                # *last* element of 'trainable_params' took effect
                param.requires_grad = any(tl in name for tl in self.trainable_params)
        elif self.n_trainable is not None:
            # walk the parameters from the output backwards
            for i, (name, param) in enumerate(
                reversed(list(features.named_parameters()))
            ):
                param.requires_grad = i < self.n_trainable
        else:
            warnings.warn(
                "Both 'trainable_params' and 'n_trainable' are 'None' and the entire network will be trained",
                UserWarning,
            )

    @staticmethod
    def get_backbone_output_dim(features):
        """Infer the backbone's output feature dimension across the different
        torchvision classifier layouts (fc / classifier[0] / classifier[1])."""
        try:
            return features.fc.in_features
        except AttributeError:
            try:
                # Bug fix: the original was missing this `return`, so models
                # whose classifier[0] exposes `in_features` got None back
                return features.classifier.__dict__["_modules"]["0"].in_features
            except AttributeError:
                try:
                    return features.classifier.__dict__["_modules"]["1"].in_features
                except AttributeError:
                    # e.g. squeezenet, whose classifier is convolutional
                    return features.classifier.__dict__["_modules"]["1"].in_channels

    @staticmethod
    def _check_pretrained_model_setup(
        pretrained_model_setup, n_trainable, trainable_params
    ):
        """Validate the constructor arguments before the model is built."""
        if pretrained_model_setup is not None:
            if isinstance(pretrained_model_setup, str):
                pretrained_model_name = pretrained_model_setup
            elif isinstance(pretrained_model_setup, Dict):
                pretrained_model_name = list(pretrained_model_setup.keys())[0]
            else:
                pretrained_model_name = None
            if pretrained_model_name is not None:
                valid_pretrained_model_name = any(
                    name in pretrained_model_name for name in allowed_pretrained_models
                )
                if not valid_pretrained_model_name:
                    raise ValueError(
                        f"{pretrained_model_setup} is not among the allowed pretrained models. These are {allowed_pretrained_models.keys()}. Please choose a variant of these architectures"
                    )
        if n_trainable is not None and trainable_params is not None:
            # Bug fix: the original *raised* UserWarning here, contradicting
            # its own message; warn and proceed ('trainable_params' wins,
            # see `_freeze`)
            warnings.warn(
                "Both 'n_trainable' and 'trainable_params' are not None. 'trainable_params' will be used",
                UserWarning,
            )
class BaseTabularModelWithoutAttention(BaseWDModelComponent):
    """Common base for tabular models without an attention mechanism.

    Builds per-column (variable size) categorical embeddings and, optionally,
    continuous-column embeddings, plus their optional activations. Subclasses
    call ``_get_embeddings`` to obtain the concatenated representation.
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int, int]]],
        cat_embed_dropout: float,
        use_cat_bias: bool,
        cat_embed_activation: Optional[str],
        continuous_cols: Optional[List[str]],
        cont_norm_layer: str,
        embed_continuous: bool,
        cont_embed_dim: int,
        cont_embed_dropout: float,
        use_cont_bias: bool,
        cont_embed_activation: Optional[str],
    ):
        super().__init__()

        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.cat_embed_dropout = cat_embed_dropout
        self.use_cat_bias = use_cat_bias
        self.cat_embed_activation = cat_embed_activation
        self.continuous_cols = continuous_cols
        self.cont_norm_layer = cont_norm_layer
        self.embed_continuous = embed_continuous
        self.cont_embed_dim = cont_embed_dim
        self.cont_embed_dropout = cont_embed_dropout
        self.use_cont_bias = use_cont_bias
        self.cont_embed_activation = cont_embed_activation

        # handles slicing, normalisation and embedding of both column types
        self.cat_and_cont_embed = DiffSizeCatAndContEmbeddings(
            column_idx,
            cat_embed_input,
            cat_embed_dropout,
            use_cat_bias,
            continuous_cols,
            cont_norm_layer,
            embed_continuous,
            cont_embed_dim,
            cont_embed_dropout,
            use_cont_bias,
        )
        self.cat_embed_act_fn = (
            get_activation_fn(cat_embed_activation)
            if cat_embed_activation is not None
            else None
        )
        self.cont_embed_act_fn = (
            get_activation_fn(cont_embed_activation)
            if cont_embed_activation is not None
            else None
        )

    def _get_embeddings(self, X: Tensor) -> Tensor:
        """Embed (and optionally activate) the categorical and continuous
        columns and concatenate them along the feature dimension."""
        x_cat, x_cont = self.cat_and_cont_embed(X)
        if x_cat is not None:
            if self.cat_embed_act_fn is not None:
                x_cat = self.cat_embed_act_fn(x_cat)
            x = x_cat
        if x_cont is not None:
            if self.cont_embed_act_fn is not None:
                x_cont = self.cont_embed_act_fn(x_cont)
            x = x_cont if x_cat is None else torch.cat([x, x_cont], 1)
        return x
class BaseTabularModelWithAttention(BaseWDModelComponent):
    """Common base for attention-based tabular models.

    All columns are embedded to the *same* dimension (``input_dim``) so they
    can be treated as a sequence of tokens by the attention blocks. Subclasses
    call ``_get_embeddings`` and must implement ``attention_weights``.
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int]]],
        cat_embed_dropout: float,
        use_cat_bias: bool,
        cat_embed_activation: Optional[str],
        full_embed_dropout: bool,
        shared_embed: bool,
        add_shared_embed: bool,
        frac_shared_embed: float,
        continuous_cols: Optional[List[str]],
        cont_norm_layer: str,
        embed_continuous: bool,
        cont_embed_dropout: float,
        use_cont_bias: bool,
        cont_embed_activation: Optional[str],
        input_dim: int,
    ):
        super().__init__()

        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.cat_embed_dropout = cat_embed_dropout
        self.use_cat_bias = use_cat_bias
        self.cat_embed_activation = cat_embed_activation
        self.full_embed_dropout = full_embed_dropout
        self.shared_embed = shared_embed
        self.add_shared_embed = add_shared_embed
        self.frac_shared_embed = frac_shared_embed
        self.continuous_cols = continuous_cols
        self.cont_norm_layer = cont_norm_layer
        self.embed_continuous = embed_continuous
        self.cont_embed_dropout = cont_embed_dropout
        self.use_cont_bias = use_cont_bias
        self.cont_embed_activation = cont_embed_activation
        self.input_dim = input_dim

        # embeds every column to dimension `input_dim`
        self.cat_and_cont_embed = SameSizeCatAndContEmbeddings(
            input_dim,
            column_idx,
            cat_embed_input,
            cat_embed_dropout,
            use_cat_bias,
            full_embed_dropout,
            shared_embed,
            add_shared_embed,
            frac_shared_embed,
            continuous_cols,
            cont_norm_layer,
            embed_continuous,
            cont_embed_dropout,
            use_cont_bias,
        )
        self.cat_embed_act_fn = (
            get_activation_fn(cat_embed_activation)
            if cat_embed_activation is not None
            else None
        )
        self.cont_embed_act_fn = (
            get_activation_fn(cont_embed_activation)
            if cont_embed_activation is not None
            else None
        )

    def _get_embeddings(self, X: Tensor) -> Tensor:
        """Embed (and optionally activate) the categorical and continuous
        columns and concatenate them along the 'token' dimension."""
        x_cat, x_cont = self.cat_and_cont_embed(X)
        if x_cat is not None:
            if self.cat_embed_act_fn is not None:
                x_cat = self.cat_embed_act_fn(x_cat)
            x = x_cat
        if x_cont is not None:
            if self.cont_embed_act_fn is not None:
                x_cont = self.cont_embed_act_fn(x_cont)
            x = x_cont if x_cat is None else torch.cat([x, x_cont], 1)
        return x

    @property
    def attention_weights(self):
        # each attention-based subclass exposes its own weights
        raise NotImplementedError
class Wide(nn.Module):
    """Wide (linear) model where non-linearities are captured via the
    so-called crossed-columns. Can be used as the `wide` component of a
    Wide & Deep model.

    Parameters
    ----------
    input_dim: int
        size of the Embedding layer: the summation of all the individual
        values for all the features that go through the wide model. For
        example, 2 features with 5 individual values each give
        `input_dim = 10`
    pred_dim: int, default = 1
        size of the output tensor containing the predictions. Unlike all the
        other models, the wide model is connected directly to the output
        neuron(s) when used to build a Wide and Deep model, hence this
        parameter.

    Attributes
    ----------
    wide_linear: nn.Module
        the linear layer that comprises the wide branch of the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import Wide
    >>> X = torch.empty(4, 4).random_(4)
    >>> wide = Wide(input_dim=X.unique().size(0), pred_dim=1)
    >>> out = wide(X)
    """

    @Alias("pred_dim", ["pred_size", "num_class"])
    def __init__(self, input_dim: int, pred_dim: int = 1):
        super(Wide, self).__init__()

        self.input_dim = input_dim
        self.pred_dim = pred_dim

        # one extra row reserved for the padding index 0
        self.wide_linear = nn.Embedding(input_dim + 1, pred_dim, padding_idx=0)
        self.bias = nn.Parameter(torch.zeros(pred_dim))
        self._reset_parameters()

    def _reset_parameters(self) -> None:
        """Initialise the Embedding and bias the same way nn.Linear does. See
        [original implementation](https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear).
        """
        nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
        fan_in = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.weight)[0]
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, X: Tensor) -> Tensor:
        """Forward pass: sum the per-feature embeddings and add the bias."""
        embedded = self.wide_linear(X.long())
        return embedded.sum(dim=1) + self.bias
class ContextAttention(nn.Module):
    """Attention mechanism inspired by `Hierarchical Attention Networks for
    Document Classification
    <https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf>`_
    """

    def __init__(self, input_dim: int, dropout: float, sum_along_seq: bool = False):
        super(ContextAttention, self).__init__()
        self.inp_proj = nn.Linear(input_dim, input_dim)
        # learnable context vector, implemented as a bias-free linear layer
        self.context = nn.Linear(input_dim, 1, bias=False)
        self.dropout = nn.Dropout(dropout)
        self.sum_along_seq = sum_along_seq

    def forward(self, X: Tensor) -> Tensor:
        projected = self.inp_proj(X).tanh_()
        # softmax over the sequence dimension
        weights = torch.softmax(self.context(projected), dim=1)
        # keep the pre-dropout weights for inspection
        self.attn_weights = weights.squeeze(2)
        weights = self.dropout(weights)
        weighted = weights * X
        if self.sum_along_seq:
            return weighted.sum(1)
        return weighted
class QueryKeySelfAttention(nn.Module):
    """Attention mechanism inspired by the well known multi-head attention.
    Rather than learning a value projection matrix that is multiplied by the
    attention weights, the weights are applied directly to the input tensor.

    Among other considerations, the rationale is that Transformer based
    models tend to heavily overfit tabular data; dropping the value
    projection reduces the number of trainable parameters and helps mitigate
    that overfitting.
    """

    def __init__(self, input_dim: int, dropout: float, use_bias: bool, n_heads: int):
        super(QueryKeySelfAttention, self).__init__()
        assert input_dim % n_heads == 0, "'input_dim' must be divisible by 'n_heads'"
        self.head_dim = input_dim // n_heads
        self.n_heads = n_heads
        # queries and keys produced by a single projection
        self.qk_proj = nn.Linear(input_dim, input_dim * 2, bias=use_bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, X: Tensor) -> Tensor:
        bsz, seq_len, _ = X.shape

        def to_heads(t: Tensor) -> Tensor:
            # equivalent to einops 'b s (h d) -> b h s d'
            return t.view(bsz, seq_len, self.n_heads, self.head_dim).transpose(1, 2)

        q, k = self.qk_proj(X).chunk(2, dim=-1)
        q, k, v = to_heads(q), to_heads(k), to_heads(X)

        # scaled dot-product scores: 'b h s d, b h l d -> b h s l'
        scores = q.matmul(k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        attn_weights = scores.softmax(dim=-1)
        self.attn_weights = attn_weights
        attn_weights = self.dropout(attn_weights)

        # weights applied directly to the (head-split) input tensor
        attn_output = attn_weights.matmul(v)
        # equivalent to einops 'b h s d -> b s (h d)'
        return attn_output.transpose(1, 2).reshape(
            bsz, seq_len, self.n_heads * self.head_dim
        )
class ContextAttentionEncoder(nn.Module):
    """Context-attention block followed by a single-layer perceptron, each
    optionally wrapped in an Add&Norm (residual + layer norm)."""

    def __init__(
        self, input_dim: int, dropout: float, with_addnorm: bool, activation: str
    ):
        super(ContextAttentionEncoder, self).__init__()
        self.with_addnorm = with_addnorm
        self.attn = ContextAttention(input_dim, dropout)
        if with_addnorm:
            self.attn_addnorm = AddNorm(input_dim, dropout)
            self.slp_addnorm = AddNorm(input_dim, dropout)
        # the SLP normalises internally only when Add&Norm is NOT used
        self.slp = SLP(input_dim, dropout, activation, not with_addnorm)

    def forward(self, X: Tensor) -> Tensor:
        if not self.with_addnorm:
            return self.slp(self.attn(X))
        attended = self.attn_addnorm(X, self.attn)
        return self.slp_addnorm(attended, self.slp)
class SelfAttentionEncoder(nn.Module):
    """Query-key self-attention block followed by a single-layer perceptron,
    each optionally wrapped in an Add&Norm (residual + layer norm)."""

    def __init__(
        self,
        input_dim: int,
        dropout: float,
        use_bias: bool,
        n_heads: int,
        with_addnorm: bool,
        activation: str,
    ):
        super(SelfAttentionEncoder, self).__init__()
        self.with_addnorm = with_addnorm
        self.attn = QueryKeySelfAttention(input_dim, dropout, use_bias, n_heads)
        if with_addnorm:
            self.attn_addnorm = AddNorm(input_dim, dropout)
            self.slp_addnorm = AddNorm(input_dim, dropout)
        # the SLP normalises internally only when Add&Norm is NOT used
        self.slp = SLP(input_dim, dropout, activation, not with_addnorm)

    def forward(self, X: Tensor) -> Tensor:
        if not self.with_addnorm:
            return self.slp(self.attn(X))
        attended = self.attn_addnorm(X, self.attn)
        return self.slp_addnorm(attended, self.slp)
def dense_layer(inp: int, out: int, activation: str, p: float, bn: bool, linear_first: bool):
    """Build one dense block: Linear + activation, with optional BatchNorm1d
    and Dropout either before (default) or after the linear part.

    The Linear layer drops its bias when batchnorm is used, since the
    batchnorm shift makes it redundant. When ``linear_first`` the batchnorm
    normalises the linear output (`out` features), otherwise its input
    (`inp` features).
    """
    act_fn = get_activation_fn(activation)
    linear_part = [nn.Linear(inp, out, bias=not bn), act_fn]
    norm_part = []
    if bn:
        norm_part.append(nn.BatchNorm1d(out if linear_first else inp))
    if p != 0:
        norm_part.append(nn.Dropout(p))
    ordered = linear_part + norm_part if linear_first else norm_part + linear_part
    return nn.Sequential(*ordered)
class SLP(nn.Module):
    """Single-layer perceptron: Linear -> activation -> (LayerNorm|Identity)
    -> Dropout.

    For 'glu'-style activations the linear layer doubles its width, since the
    gating halves the dimension again.
    """

    def __init__(self, input_dim: int, dropout: float, activation: str, normalise: bool):
        super(SLP, self).__init__()
        out_dim = input_dim * 2 if activation.endswith("glu") else input_dim
        self.lin = nn.Linear(input_dim, out_dim)
        self.dropout = nn.Dropout(dropout)
        self.activation = get_activation_fn(activation)
        self.norm: Union[nn.LayerNorm, nn.Identity] = (
            nn.LayerNorm(input_dim) if normalise else nn.Identity()
        )

    def forward(self, X: Tensor) -> Tensor:
        out = self.lin(X)
        out = self.activation(out)
        out = self.norm(out)
        return self.dropout(out)
class MLP(nn.Module):
    """Multi-layer perceptron built from ``dense_layer`` blocks.

    ``d_hidden`` includes the input dimension as its first element. Batchnorm
    is applied to every layer but the last unless ``batchnorm_last`` is set.
    """

    def __init__(
        self,
        d_hidden: List[int],
        activation: str,
        dropout: Optional[Union[float, List[float]]],
        batchnorm: bool,
        batchnorm_last: bool,
        linear_first: bool,
    ):
        super(MLP, self).__init__()

        # normalise dropout to one value per layer
        if not dropout:
            dropout = [0.0] * len(d_hidden)
        elif isinstance(dropout, float):
            dropout = [dropout] * len(d_hidden)

        self.mlp = nn.Sequential()
        for idx, (d_in, d_out) in enumerate(zip(d_hidden[:-1], d_hidden[1:])):
            is_last = idx == len(d_hidden) - 2
            self.mlp.add_module(
                "dense_layer_{}".format(idx),
                dense_layer(
                    d_in,
                    d_out,
                    activation,
                    dropout[idx],
                    batchnorm and (not is_last or batchnorm_last),
                    linear_first,
                ),
            )

    def forward(self, X: Tensor) -> Tensor:
        return self.mlp(X)
class TabMlp(BaseTabularModelWithoutAttention):
    """MLP model for tabular data, usable as the `deeptabular` component of a
    Wide & Deep model or independently by itself.

    Embedding representations of the categorical features are combined with
    the numerical (aka continuous) features, embedded or not, and passed
    through a series of dense layers (i.e. an MLP).

    Parameters
    ----------
    column_idx: Dict
        Dict mapping the column names to their index in the input tensor,
        e.g. _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        Tuples of (column name, number of unique values, embedding dim),
        e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False
        If True, bias is used in the categorical embeddings
    cat_embed_activation: Optional, str, default = None
        Activation for the categorical embeddings, if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    continuous_cols: List, Optional, default = None
        Names of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Normalisation applied to the continuous features: _'layernorm'_,
        _'batchnorm'_ or None
    embed_continuous: bool, default = False
        If True the continuous columns are embedded (each passed through a
        linear layer with or without activation)
    cont_embed_dim: int, default = 32
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1
        Dropout for the continuous embeddings
    use_cont_bias: bool, default = True
        If True, bias is used in the continuous embeddings
    cont_embed_activation: Optional, str, default = None
        Activation for the continuous embeddings, if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_hidden_dims: List, default = [200, 100]
        Number of neurons per dense layer in the mlp
    mlp_activation: str, default = "relu"
        Activation for the dense layers of the MLP. Currently _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_dropout: float or List, default = 0.1
        float or List of floats with the dropout between the dense layers,
        e.g: _[0.5,0.5]_
    mlp_batchnorm: bool, default = False
        If True, batch normalization is applied to the dense layers
    mlp_batchnorm_last: bool, default = False
        If True, batch normalization is also applied to the last dense layer
    mlp_linear_first: bool, default = False
        Order of the operations in the dense layer. If
        `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP -> LIN -> ACT]`

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        module that processes the categorical and continuous columns
    encoder: nn.Module
        mlp that receives the concatenation of the embeddings and the
        continuous columns

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabMlp
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabMlp(mlp_hidden_dims=[8,4], column_idx=column_idx, cat_embed_input=cat_embed_input,
    ... continuous_cols = ['e'])
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = "batchnorm",
        embed_continuous: bool = False,
        cont_embed_dim: int = 32,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        mlp_hidden_dims: List[int] = [200, 100],
        mlp_activation: str = "relu",
        mlp_dropout: Union[float, List[float]] = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = False,
    ):
        super(TabMlp, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=embed_continuous,
            cont_embed_dim=cont_embed_dim,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
        )

        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first

        # the MLP input is the concatenation of all embeddings and the
        # (possibly non-embedded) continuous columns
        self.encoder = MLP(
            [self.cat_and_cont_embed.output_dim] + mlp_hidden_dims,
            mlp_activation,
            mlp_dropout,
            mlp_batchnorm,
            mlp_batchnorm_last,
            mlp_linear_first,
        )

    def forward(self, X: Tensor) -> Tensor:
        return self.encoder(self._get_embeddings(X))

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class"""
        return self.mlp_hidden_dims[-1]
class TabMlpDecoder(nn.Module):
    """Companion decoder model for the `TabMlp` model (which can be
    considered an encoder itself).

    Designed to be used with the `EncoderDecoderTrainer` for self-supervised
    pre-training (see the corresponding section in the docs): it receives the
    output of the MLP and '_reconstructs_' the embeddings.

    Parameters
    ----------
    embed_dim: int
        Size of the embeddings tensor that needs to be reconstructed.
    mlp_hidden_dims: List, default = [100, 200]
        Number of neurons per dense layer in the mlp.
    mlp_activation: str, default = "relu"
        Activation for the dense layers of the MLP. Currently _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_dropout: float or List, default = 0.1
        float or List of floats with the dropout between the dense layers,
        e.g: _[0.5,0.5]_
    mlp_batchnorm: bool, default = False
        If True, batch normalization is applied to the dense layers
    mlp_batchnorm_last: bool, default = False
        If True, batch normalization is also applied to the last dense layer
    mlp_linear_first: bool, default = False
        Order of the operations in the dense layer. If
        `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP -> LIN -> ACT]`

    Attributes
    ----------
    decoder: nn.Module
        mlp model that will receive the output of the encoder

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabMlpDecoder
    >>> x_inp = torch.rand(3, 8)
    >>> decoder = TabMlpDecoder(embed_dim=32, mlp_hidden_dims=[8,16])
    >>> res = decoder(x_inp)
    >>> res.shape
    torch.Size([3, 32])
    """

    def __init__(
        self,
        embed_dim: int,
        mlp_hidden_dims: List[int] = [100, 200],
        mlp_activation: str = "relu",
        mlp_dropout: Union[float, List[float]] = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = False,
    ):
        super(TabMlpDecoder, self).__init__()

        self.embed_dim = embed_dim
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first

        # mirror of the encoder: hidden dims, then back up to embed_dim
        self.decoder = MLP(
            mlp_hidden_dims + [self.embed_dim],
            mlp_activation,
            mlp_dropout,
            mlp_batchnorm,
            mlp_batchnorm_last,
            mlp_linear_first,
        )

    def forward(self, X: Tensor) -> Tensor:
        return self.decoder(X)
class BasicBlock(nn.Module):
    """Dense residual block: ``[LIN1, BN1, ACT, (DP)] -> [LIN2, BN2]`` plus a
    skip connection, or the simplified ``[LIN1, BN1, ACT, (DP)]`` plus skip
    when ``simplify`` is True. ``resize`` adapts the identity path when
    input and output dims differ.
    """

    def __init__(
        self,
        inp: int,
        out: int,
        dropout: float = 0.0,
        simplify: bool = False,
        resize: nn.Module = None,
    ):
        super(BasicBlock, self).__init__()

        self.simplify = simplify
        self.resize = resize

        # NOTE: module creation order is kept stable so seeded parameter
        # initialisation is reproducible
        self.lin1 = nn.Linear(inp, out, bias=False)
        self.bn1 = nn.BatchNorm1d(out)
        self.leaky_relu = nn.LeakyReLU(inplace=True)
        if dropout > 0.0:
            self.dropout = True
            self.dp = nn.Dropout(dropout)
        else:
            self.dropout = False
        if not self.simplify:
            self.lin2 = nn.Linear(out, out, bias=False)
            self.bn2 = nn.BatchNorm1d(out)

    def forward(self, X: Tensor) -> Tensor:
        shortcut = X if self.resize is None else self.resize(X)

        out = self.leaky_relu(self.bn1(self.lin1(X)))
        if self.dropout:
            out = self.dp(out)
        if not self.simplify:
            out = self.bn2(self.lin2(out))

        return self.leaky_relu(out + shortcut)
class DenseResnet(nn.Module):
    """Stack of ``BasicBlock``s. If ``input_dim`` differs from the first
    block dimension, a Linear+BatchNorm adapter is prepended."""

    def __init__(
        self, input_dim: int, blocks_dims: List[int], dropout: float, simplify: bool
    ):
        super(DenseResnet, self).__init__()

        if input_dim != blocks_dims[0]:
            # adapt the input to the first block's dimension
            self.dense_resnet = nn.Sequential(
                OrderedDict(
                    [
                        ("lin_inp", nn.Linear(input_dim, blocks_dims[0], bias=False)),
                        ("bn_inp", nn.BatchNorm1d(blocks_dims[0])),
                    ]
                )
            )
        else:
            self.dense_resnet = nn.Sequential()

        for idx, (d_in, d_out) in enumerate(zip(blocks_dims[:-1], blocks_dims[1:])):
            # blocks that change dimension need a resize shortcut on the
            # identity path
            resize = None
            if d_in != d_out:
                resize = nn.Sequential(
                    nn.Linear(d_in, d_out, bias=False), nn.BatchNorm1d(d_out)
                )
            self.dense_resnet.add_module(
                "block_{}".format(idx), BasicBlock(d_in, d_out, dropout, simplify, resize)
            )

    def forward(self, X: Tensor) -> Tensor:
        return self.dense_resnet(X)
class TabResnet(BaseTabularModelWithoutAttention):
    """ResNet-flavoured model for tabular data, usable as the `deeptabular`
    component of a Wide & Deep model or on its own.

    Categorical features are embedded and concatenated with the continuous
    features (embedded or simply normalised). The result is passed through a
    stack of dense residual blocks (see
    `pytorch_widedeep.models.tab_resnet._layers` for the block structure)
    and, optionally, through a final MLP.

    Parameters
    ----------
    column_idx: Dict
        Column name to tensor-index mapping, e.g. _{'education': 0, ...}_
    cat_embed_input: List
        Tuples of (column name, number of unique values, embedding dim),
        e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False
        If ``True`` a bias is added to the categorical embeddings
    cat_embed_activation: Optional, str, default = None
        Activation for the categorical embeddings: _'tanh'_, _'relu'_,
        _'leaky_relu'_ or _'gelu'_
    continuous_cols: List, Optional, default = None
        Names of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Normalisation for continuous features: _'layernorm'_, _'batchnorm'_
        or ``None``
    embed_continuous: bool, default = False
        If ``True`` continuous columns are embedded (each passed through a
        linear layer, with or without activation)
    cont_embed_dim: int, default = 32
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1
        Continuous embeddings dropout
    use_cont_bias: bool, default = True
        If ``True`` a bias is added to the continuous embeddings
    cont_embed_activation: Optional, str, default = None
        Activation for the continuous embeddings: _'tanh'_, _'relu'_,
        _'leaky_relu'_ or _'gelu'_
    blocks_dims: List, default = [200, 100, 100]
        Input/output units of each residual block: _[200, 100, 100]_ builds
        two blocks, 200 -> 100 and 100 -> 100
    blocks_dropout: float, default = 0.1
        Block's internal dropout
    simplify_blocks: bool, default = False
        If ``True`` the simplest residual block (`X -> [ [LIN, BN, ACT] + X ]`)
        is used instead of the standard two-layer one
        (`X -> [ [LIN1, BN1, ACT1] -> [LIN2, BN2] + X ]`)
    mlp_hidden_dims: List, Optional, default = None
        Hidden units of the optional MLP on top of the residual blocks, e.g.
        _[64, 32]_. If ``None`` the blocks' output is the model output
    mlp_activation: str, default = "relu"
        Activation for the MLP layers: _'tanh'_, _'relu'_, _'leaky_relu'_ or
        _'gelu'_
    mlp_dropout: float, default = 0.1
        Dropout between the MLP dense layers
    mlp_batchnorm: bool, default = False
        If ``True`` batchnorm is applied to the MLP dense layers
    mlp_batchnorm_last: bool, default = False
        If ``True`` batchnorm is applied to the last MLP dense layer
    mlp_linear_first: bool, default = False
        Layer order in the MLP. If ``True``: `[LIN -> ACT -> BN -> DP]`;
        if ``False``: `[BN -> DP -> LIN -> ACT]`

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        processes the categorical and continuous columns
    encoder: nn.Module
        dense Resnet receiving the concatenation of the embeddings and the
        continuous columns
    mlp: nn.Module
        optional MLP on top of the encoder (``None`` when ``mlp_hidden_dims``
        is ``None``)

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabResnet
    >>> X_deep = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabResnet(blocks_dims=[16,4], column_idx=column_idx, cat_embed_input=cat_embed_input,
    ... continuous_cols = ['e'])
    >>> out = model(X_deep)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = "batchnorm",
        embed_continuous: bool = False,
        cont_embed_dim: int = 32,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        blocks_dims: List[int] = [200, 100, 100],
        blocks_dropout: float = 0.1,
        simplify_blocks: bool = False,
        mlp_hidden_dims: Optional[List[int]] = None,
        mlp_activation: str = "relu",
        mlp_dropout: float = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = False,
    ):
        super(TabResnet, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=embed_continuous,
            cont_embed_dim=cont_embed_dim,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
        )

        if len(blocks_dims) < 2:
            raise ValueError(
                "'blocks' must contain at least two elements, e.g. [256, 128]"
            )

        self.blocks_dims = blocks_dims
        self.blocks_dropout = blocks_dropout
        self.simplify_blocks = simplify_blocks
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first

        # the residual blocks receive the concatenation of the categorical
        # embeddings and the (possibly embedded) continuous columns
        resnet_inp_dim = (
            self.cat_and_cont_embed.cat_out_dim + self.cat_and_cont_embed.cont_out_dim
        )
        self.encoder = DenseResnet(
            resnet_inp_dim, blocks_dims, blocks_dropout, self.simplify_blocks
        )

        if self.mlp_hidden_dims is not None:
            # prepend the blocks' output dim so the MLP connects seamlessly
            full_mlp_dims = [self.blocks_dims[-1]] + mlp_hidden_dims
            self.mlp = MLP(
                full_mlp_dims,
                mlp_activation,
                mlp_dropout,
                mlp_batchnorm,
                mlp_batchnorm_last,
                mlp_linear_first,
            )
        else:
            self.mlp = None

    def forward(self, X: Tensor) -> Tensor:
        out = self.encoder(self._get_embeddings(X))
        return out if self.mlp is None else self.mlp(out)

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        neccesary to build the `WideDeep` class
        """
        if self.mlp_hidden_dims is not None:
            return self.mlp_hidden_dims[-1]
        return self.blocks_dims[-1]
class TabResnetDecoder(nn.Module):
    """Companion decoder model for the `TabResnet` model (which can be
    considered an encoder itself).

    Designed for self-supervised pre-training with the
    `EncoderDecoderTrainer` (see the corresponding section in the docs): it
    receives the output from the ResNet blocks or the MLP (if present) and
    '_reconstructs_' the embeddings.

    Parameters
    ----------
    embed_dim: int
        Size of the embeddings tensor to be reconstructed
    blocks_dims: List, default = [100, 100, 200]
        Input/output units of each residual block (see
        `pytorch_widedeep.models.tab_resnet._layers`)
    blocks_dropout: float, default = 0.1
        Block's internal dropout
    simplify_blocks: bool, default = False
        If ``True`` the simplest residual block (`X -> [ [LIN, BN, ACT] + X ]`)
        is used instead of the standard two-layer one
    mlp_hidden_dims: List, Optional, default = None
        Hidden units of the optional MLP applied before the residual blocks.
        If ``None`` the encoder output feeds the blocks directly
    mlp_activation: str, default = "relu"
        Activation for the MLP layers
    mlp_dropout: float, default = 0.1
        Dropout between the MLP dense layers
    mlp_batchnorm: bool, default = False
        If ``True`` batchnorm is applied to the MLP dense layers
    mlp_batchnorm_last: bool, default = False
        If ``True`` batchnorm is applied to the last MLP dense layer
    mlp_linear_first: bool, default = False
        Layer order in the MLP. If ``True``: `[LIN -> ACT -> BN -> DP]`;
        if ``False``: `[BN -> DP -> LIN -> ACT]`

    Attributes
    ----------
    decoder: nn.Module
        dense Resnet that receives the encoder output (after the MLP, if any)
    mlp: nn.Module
        optional MLP applied before the Resnet (``None`` when
        ``mlp_hidden_dims`` is ``None``)

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabResnetDecoder
    >>> x_inp = torch.rand(3, 8)
    >>> decoder = TabResnetDecoder(embed_dim=32, blocks_dims=[8, 16, 16])
    >>> res = decoder(x_inp)
    >>> res.shape
    torch.Size([3, 32])
    """

    def __init__(
        self,
        embed_dim: int,
        blocks_dims: List[int] = [100, 100, 200],
        blocks_dropout: float = 0.1,
        simplify_blocks: bool = False,
        mlp_hidden_dims: Optional[List[int]] = None,
        mlp_activation: str = "relu",
        mlp_dropout: float = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = False,
    ):
        super(TabResnetDecoder, self).__init__()

        if len(blocks_dims) < 2:
            raise ValueError(
                "'blocks' must contain at least two elements, e.g. [256, 128]"
            )

        self.embed_dim = embed_dim
        self.blocks_dims = blocks_dims
        self.blocks_dropout = blocks_dropout
        self.simplify_blocks = simplify_blocks
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first

        if self.mlp_hidden_dims is None:
            self.mlp = None
            resnet_inp_dim = blocks_dims[0]
        else:
            self.mlp = MLP(
                mlp_hidden_dims,
                mlp_activation,
                mlp_dropout,
                mlp_batchnorm,
                mlp_batchnorm_last,
                mlp_linear_first,
            )
            # the Resnet follows the MLP, so it starts from the MLP's output
            resnet_inp_dim = mlp_hidden_dims[-1]

        self.decoder = DenseResnet(
            resnet_inp_dim, blocks_dims, blocks_dropout, self.simplify_blocks
        )
        # final projection back to the embeddings dimension
        self.reconstruction_layer = nn.Linear(blocks_dims[-1], embed_dim, bias=False)

    def forward(self, X: Tensor) -> Tensor:
        h = X if self.mlp is None else self.mlp(X)
        return self.reconstruction_layer(self.decoder(h))
def cut_mix(x: Tensor, lam: float=0.8) -> Tensor: batch_size = x.size()[0] mask = torch.from_numpy(np.random.choice(2, x.shape, p=[lam, (1 - lam)])).to(x.device) rand_idx = torch.randperm(batch_size).to(x.device) x_ = x[rand_idx].clone() x_[(mask == 0)] = x[(mask == 0)] return x_
def mix_up(p: Tensor, lam: float=0.8) -> Tensor: batch_size = p.size()[0] rand_idx = torch.randperm(batch_size).to(p.device) p_ = ((lam * p) + ((1 - lam) * p[(rand_idx, ...)])) return p_
class RandomObfuscator(nn.Module):
    """Creates and applies an obfuscation (masking) mask.

    The returned mask contains 1s at the positions that were masked out,
    i.e. at the feature values considered for reconstruction.

    Parameters
    ----------
    p: float
        Ratio of features that will be discarded for reconstruction
    """

    def __init__(self, p: float):
        super(RandomObfuscator, self).__init__()
        self.p = p

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Bernoulli(p) per entry: 1 -> feature is discarded (to reconstruct)
        obf_mask = torch.bernoulli(self.p * torch.ones(x.shape)).to(x.device)
        masked = (1 - obf_mask) * x
        return (masked, obf_mask)
class EncoderDecoderModel(nn.Module):
    """This Class, which is referred as a 'Model', implements an Encoder-Decoder
    Self Supervised 'routine' inspired by `TabNet: Attentive Interpretable
    Tabular Learning <https://arxiv.org/abs/1908.07442>_`

    This class is in principle not exposed to the user and its documentation
    is detailed in its corresponding Trainer: see
    ``pytorch_widedeep.self_supervised_training.EncoderDecoderTrainer``
    """

    def __init__(self, encoder: ModelWithoutAttention, decoder: Optional[DecoderWithoutAttention], masked_prob: float):
        super(EncoderDecoderModel, self).__init__()

        self.encoder = encoder
        # when no decoder is supplied, build a 'mirror' decoder out of the
        # encoder's own hyperparameters
        if (decoder is None):
            self.decoder = self._build_decoder(encoder)
        else:
            self.decoder = decoder
        # masks (zeroes out) each embedded feature with probability `masked_prob`
        self.masker = RandomObfuscator(p=masked_prob)
        # TabNet needs a dedicated forward: its encoder takes a prior and
        # returns per-step outputs instead of a single tensor
        self.is_tabnet = isinstance(self.encoder, TabNet)

    def forward(self, X: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        # dispatches to the TabNet-specific routine when needed. See the
        # NOTE(review) comments in the two private methods below
        if self.is_tabnet:
            return self._forward_tabnet(X)
        else:
            return self._forward(X)

    def _forward(self, X: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        x_embed = self.encoder._get_embeddings(X)
        if self.training:
            (masked_x, mask) = self.masker(x_embed)
            # NOTE(review): `masked_x` is computed but never used -- this
            # branch encodes the raw `X`, so the masking has no effect on the
            # reconstruction here. Presumably the masked embeddings should be
            # run through the encoder's post-embedding stack (as
            # `_forward_tabnet` does) -- TODO confirm against upstream
            x_embed_rec = self.decoder(self.encoder(X))
        else:
            x_embed_rec = self.decoder(self.encoder(X))
            # at eval time nothing is masked: all-ones mask means every
            # feature is considered for reconstruction
            mask = torch.ones(x_embed.shape).to(X.device)
        # NOTE(review): returns (target, reconstruction, mask) whereas
        # `_forward_tabnet` returns (reconstruction, target, mask) -- the
        # inconsistent ordering looks suspicious; verify against the loss
        return (x_embed, x_embed_rec, mask)

    def _forward_tabnet(self, X: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        x_embed = self.encoder._get_embeddings(X)
        if self.training:
            (masked_x, mask) = self.masker(x_embed)
            # masked-out features must not be picked by the attentive
            # transformer, hence the complementary prior
            prior = (1 - mask)
            (steps_out, _) = self.encoder.encoder(masked_x, prior=prior)
            x_embed_rec = self.decoder(steps_out)
        else:
            # NOTE(review): passes the already-embedded `x_embed` through the
            # full TabNet forward, which embeds again via `_get_embeddings`;
            # the training branch calls `self.encoder.encoder` instead --
            # TODO confirm this is intended
            (steps_out, _) = self.encoder(x_embed)
            x_embed_rec = self.decoder(steps_out)
            mask = torch.ones(x_embed.shape).to(X.device)
        return (x_embed_rec, x_embed, mask)

    def _build_decoder(self, encoder: ModelWithoutAttention) -> DecoderWithoutAttention:
        # one builder per supported encoder type. NOTE(review): these are
        # independent `if`s with no fallback -- an unsupported encoder type
        # would leave `decoder` unbound and raise UnboundLocalError on return
        if isinstance(encoder, TabMlp):
            decoder = self._build_tabmlp_decoder()
        if isinstance(encoder, TabResnet):
            decoder = self._build_tabresnet_decoder()
        if isinstance(encoder, TabNet):
            decoder = self._build_tabnet_decoder()
        return decoder

    def _build_tabmlp_decoder(self) -> DecoderWithoutAttention:
        # copy the hyperparameters shared by encoder and decoder classes...
        common_params = (inspect.signature(TabMlp).parameters.keys() & inspect.signature(TabMlpDecoder).parameters.keys())
        decoder_param = {}
        for cpn in common_params:
            decoder_param[cpn] = getattr(self.encoder, cpn)
        # ...and reverse the MLP so the decoder upsamples back to the
        # embeddings dimension
        decoder_param['mlp_hidden_dims'] = decoder_param['mlp_hidden_dims'][::(- 1)]
        decoder_param['embed_dim'] = self.encoder.cat_and_cont_embed.output_dim
        return TabMlpDecoder(**decoder_param)

    def _build_tabresnet_decoder(self) -> DecoderWithoutAttention:
        common_params = (inspect.signature(TabResnet).parameters.keys() & inspect.signature(TabResnetDecoder).parameters.keys())
        decoder_param = {}
        for cpn in common_params:
            decoder_param[cpn] = getattr(self.encoder, cpn)
        # reverse the resnet blocks (and the MLP, if present) for decoding
        decoder_param['blocks_dims'] = decoder_param['blocks_dims'][::(- 1)]
        if (decoder_param['mlp_hidden_dims'] is not None):
            decoder_param['mlp_hidden_dims'] = decoder_param['mlp_hidden_dims'][::(- 1)]
        decoder_param['embed_dim'] = self.encoder.cat_and_cont_embed.output_dim
        return TabResnetDecoder(**decoder_param)

    def _build_tabnet_decoder(self) -> DecoderWithoutAttention:
        common_params = (inspect.signature(TabNet).parameters.keys() & inspect.signature(TabNetDecoder).parameters.keys())
        decoder_param = {}
        for cpn in common_params:
            decoder_param[cpn] = getattr(self.encoder, cpn)
        decoder_param['embed_dim'] = self.encoder.cat_and_cont_embed.output_dim
        return TabNetDecoder(**decoder_param)
def create_explain_matrix(model: WideDeep) -> csc_matrix:
    """
    Returns a sparse matrix used to compute the feature importances after
    training

    Parameters
    ----------
    model: WideDeep
        object of type ``WideDeep``

    Examples
    --------
    >>> from pytorch_widedeep.models import TabNet, WideDeep
    >>> from pytorch_widedeep.models.tabular.tabnet._utils import create_explain_matrix
    >>> embed_input = [("a", 4, 2), ("b", 4, 2), ("c", 4, 2)]
    >>> cont_cols = ["d", "e"]
    >>> column_idx = {k: v for v, k in enumerate(["a", "b", "c", "d", "e"])}
    >>> deeptabular = TabNet(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=cont_cols)
    >>> model = WideDeep(deeptabular=deeptabular)
    >>> reduce_mtx = create_explain_matrix(model)
    >>> reduce_mtx.todense()
    matrix([[1., 0., 0., 0., 0.],
            [1., 0., 0., 0., 0.],
            [0., 1., 0., 0., 0.],
            [0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 0.],
            [0., 0., 1., 0., 0.],
            [0., 0., 0., 1., 0.],
            [0., 0., 0., 0., 1.]])
    """
    # the TabNet backbone is the first child of the deeptabular component
    tabnet_backbone = list(model.deeptabular.children())[0]

    embed_out_dim: int = tabnet_backbone.embed_out_dim
    column_idx: Dict = tabnet_backbone.column_idx

    cat_setup = extract_cat_setup(tabnet_backbone)
    cont_setup = extract_cont_setup(tabnet_backbone)

    # total number of original (pre-embedding) features
    n_feat = len((cat_setup + cont_setup))

    # col_embeds maps column name -> (embedding dim - 1), i.e. the number of
    # EXTRA embedding slots the column occupies beyond its own index
    col_embeds = {}
    embeds_colname = []
    for cats in cat_setup:
        col_embeds[cats[0]] = (cats[2] - 1)
        embeds_colname.append(cats[0])
    if (len(cont_setup) > 0):
        if isinstance(cont_setup[0], tuple):
            # continuous columns are embedded: treat them like the
            # categorical ones
            for conts in cont_setup:
                col_embeds[conts[0]] = (conts[1] - 1)
                embeds_colname.append(conts[0])
            cont_colname = []
        else:
            # continuous columns are used raw: one slot per column
            cont_colname = cont_setup
    else:
        cont_colname = []

    # build, per original feature, the range of positions it occupies in the
    # embeddings output. `embed_cum_counter` accumulates the extra slots
    # introduced by the embedded columns processed so far
    embed_cum_counter = 0
    indices_trick = []
    for (colname, idx) in column_idx.items():
        if (colname in cont_colname):
            indices_trick.append([(idx + embed_cum_counter)])
        elif (colname in embeds_colname):
            indices_trick.append(range((idx + embed_cum_counter), (((idx + embed_cum_counter) + col_embeds[colname]) + 1)))
            embed_cum_counter += col_embeds[colname]

    # one column per original feature, with 1s at the embedding positions it
    # maps to; multiplying the (embed_out_dim,) importances by this matrix
    # aggregates them back to per-feature importances
    reducing_matrix = np.zeros((embed_out_dim, n_feat))
    for (i, cols) in enumerate(indices_trick):
        reducing_matrix[(cols, i)] = 1

    return csc_matrix(reducing_matrix)
def extract_cat_setup(backbone: nn.Module) -> List:
    """Return the backbone's categorical embedding setup
    (``cat_embed_input``), or an empty list when there is none."""
    cat_cols: List = backbone.cat_embed_input
    return [] if cat_cols is None else cat_cols
def extract_cont_setup(backbone: nn.Module) -> List:
    """Return the backbone's continuous-column setup.

    When the backbone embeds continuous features the result is a list of
    ``(column name, embedding dim)`` tuples; otherwise it is the plain list
    of column names. Returns an empty list when there are no continuous
    columns.
    """
    cont_cols: List = backbone.continuous_cols
    if cont_cols is None:
        return []
    if backbone.embed_continuous:
        # all continuous columns share the same embedding dimension
        return [(colname, backbone.cont_embed_dim) for colname in cont_cols]
    return cont_cols
def _make_ix_like(input, dim=0): d = input.size(dim) rho = torch.arange(1, (d + 1), device=input.device, dtype=input.dtype) view = ([1] * input.dim()) view[0] = (- 1) return rho.view(view).transpose(0, dim)
class SparsemaxFunction(Function):
    """
    An implementation of sparsemax (Martins & Astudillo, 2016). See
    :cite:`DBLP:journals/corr/MartinsA16` for detailed description.
    By Ben Peters and Vlad Niculae
    """

    @staticmethod
    def forward(ctx, input, dim=-1):
        """sparsemax: normalizing sparse transform (a la softmax)

        Parameters
        ----------
        ctx : torch.autograd.function._ContextMethodMixin
        input : torch.Tensor
            any shape
        dim : int
            dimension along which to apply sparsemax

        Returns
        -------
        output : torch.Tensor
            same shape as input
        """
        ctx.dim = dim
        max_val, _ = input.max(dim=dim, keepdim=True)
        # subtract the max for numerical stability (same trick as softmax).
        # FIX: the original used in-place `input -= max_val`, which silently
        # mutated the caller's tensor; the out-of-place subtraction is
        # numerically identical without the side effect
        input = input - max_val
        tau, supp_size = SparsemaxFunction._threshold_and_support(input, dim=dim)
        # values below the threshold are clipped to exactly zero (sparsity)
        output = torch.clamp(input - tau, min=0)
        ctx.save_for_backward(supp_size, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        supp_size, output = ctx.saved_tensors
        dim = ctx.dim
        grad_input = grad_output.clone()
        # gradient is zero outside the support
        grad_input[output == 0] = 0
        # within the support, subtract the mean gradient over the support
        v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze()
        v_hat = v_hat.unsqueeze(dim)
        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
        return grad_input, None

    @staticmethod
    def _threshold_and_support(input, dim=-1):
        """Sparsemax building block: compute the threshold

        Parameters
        ----------
        input: torch.Tensor
            any dimension
        dim : int
            dimension along which to apply the sparsemax

        Returns
        -------
        tau : torch.Tensor
            the threshold value
        support_size : torch.Tensor
        """
        input_srt, _ = torch.sort(input, descending=True, dim=dim)
        input_cumsum = input_srt.cumsum(dim) - 1
        rhos = _make_ix_like(input, dim)
        # k is in the support iff rho_k * z_(k) > cumsum_(k) - 1
        support = rhos * input_srt > input_cumsum
        support_size = support.sum(dim=dim).unsqueeze(dim)
        tau = input_cumsum.gather(dim, support_size - 1)
        tau /= support_size.to(input.dtype)
        return tau, support_size
class Sparsemax(nn.Module):
    """Module wrapper around the ``sparsemax`` autograd function."""

    def __init__(self, dim: int = -1):
        super(Sparsemax, self).__init__()
        # dimension along which sparsemax is applied
        self.dim = dim

    def forward(self, input):
        return sparsemax(input, self.dim)
class Entmax15Function(Function):
    """
    An implementation of exact Entmax with alpha=1.5 (B. Peters, V. Niculae,
    A. Martins). See :cite:`https://arxiv.org/abs/1905.05702 for detailed
    description.
    Source: https://github.com/deep-spin/entmax
    """

    @staticmethod
    def forward(ctx, input, dim=-1):
        ctx.dim = dim
        max_val, _ = input.max(dim=dim, keepdim=True)
        # shift for numerical stability; the division by 2 is specific to
        # alpha = 1.5
        input = (input - max_val) / 2
        tau_star, _ = Entmax15Function._threshold_and_support(input, dim)
        output = torch.clamp(input - tau_star, min=0) ** 2
        ctx.save_for_backward(output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (Y,) = ctx.saved_tensors
        gppr = Y.sqrt()  # = 1 / g''(Y) for the 1.5-entmax mapping
        dX = grad_output * gppr
        q = (dX.sum(ctx.dim) / gppr.sum(ctx.dim)).unsqueeze(ctx.dim)
        return dX - q * gppr, None

    @staticmethod
    def _threshold_and_support(input, dim=-1):
        z_sorted, _ = torch.sort(input, descending=True, dim=dim)
        rho = _make_ix_like(input, dim)
        # running mean and mean-of-squares over the top-k sorted entries
        mean = z_sorted.cumsum(dim) / rho
        mean_sq = (z_sorted ** 2).cumsum(dim) / rho
        ss = rho * (mean_sq - mean ** 2)
        delta = (1 - ss) / rho
        # negative deltas can only come from numerical noise
        delta_nz = torch.clamp(delta, 0)
        tau = mean - torch.sqrt(delta_nz)
        support_size = (tau <= z_sorted).sum(dim).unsqueeze(dim)
        tau_star = tau.gather(dim, support_size - 1)
        return tau_star, support_size
class Entmax15(nn.Module):
    """Module wrapper around the ``entmax15`` autograd function."""

    def __init__(self, dim: int = -1):
        super(Entmax15, self).__init__()
        # dimension along which 1.5-entmax is applied
        self.dim = dim

    def forward(self, input):
        return entmax15(input, self.dim)
class TabNet(BaseTabularModelWithoutAttention):
    """Defines a [TabNet model](https://arxiv.org/abs/1908.07442) that can be
    used as the `deeptabular` component of a Wide & Deep model or
    independently by itself.

    The implementation in this library is fully based on that
    [here](https://github.com/dreamquark-ai/tabnet) by the dreamquark-ai
    team, simply adapted so that it can work within the `WideDeep` frame.
    Therefore, **ALL CREDIT TO THE DREAMQUARK-AI TEAM**.

    Parameters
    ----------
    column_idx: Dict
        Column name to tensor-index mapping, e.g. _{'education': 0, ...}_
    cat_embed_input: List, Optional, default = None
        Tuples of (column name, number of unique values, embedding dim),
        e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False
        If ``True`` a bias is used for the categorical embeddings
    cat_embed_activation: Optional, str, default = None
        Activation for the categorical embeddings: _'tanh'_, _'relu'_,
        _'leaky_relu'_ or _'gelu'_
    continuous_cols: List, Optional, default = None
        Names of the numeric (aka continuous) columns
    cont_norm_layer: str, default = None
        Normalisation for continuous features: _'layernorm'_, _'batchnorm'_
        or ``None``
    embed_continuous: bool, default = False
        If ``True`` continuous columns are embedded (each passed through a
        linear layer, with or without activation)
    cont_embed_dim: int, default = 32
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1
        Dropout for the continuous embeddings
    use_cont_bias: bool, default = True
        If ``True`` a bias is used for the continuous embeddings
    cont_embed_activation: Optional, str, default = None
        Activation for the continuous embeddings: _'tanh'_, _'relu'_,
        _'leaky_relu'_ or _'gelu'_
    n_steps: int, default = 3
        number of decision steps. For details on this and the parameters
        below, please see the [paper](https://arxiv.org/abs/1908.07442)
    step_dim: int, default = 8
        Step's output dimension. This is the output dimension that
        `WideDeep` will collect and connect to the output neuron(s)
    attn_dim: int, default = 8
        Attention dimension
    dropout: float, default = 0.0
        GLU block's internal dropout
    n_glu_step_dependent: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) that are step dependent
    n_glu_shared: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) shared across decision
        steps
    ghost_bn: bool, default = True
        If ``True``, [Ghost Batch Normalization](https://arxiv.org/abs/1705.08741)
        is used
    virtual_batch_size: int, default = 128
        Batch size when using Ghost Batch Normalization
    momentum: float, default = 0.02
        Ghost Batch Normalization's momentum. The dreamquark-ai team advises
        very low values; however high values are used in the original
        publication, and during our tests higher values lead to better
        results
    gamma: float, default = 1.3
        Relaxation parameter in the paper. When gamma = 1, a feature is
        enforced to be used only at one decision step; as gamma increases,
        features may be reused at multiple decision steps
    epsilon: float, default = 1e-15
        Float to avoid log(0). Always keep low
    mask_type: str, default = "sparsemax"
        Mask function to use: either _'sparsemax'_ or _'entmax'_

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        processes the categorical and continuous columns
    encoder: nn.Module
        the TabNet encoder. For details see the
        [original publication](https://arxiv.org/abs/1908.07442)

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabNet
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabNet(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols = ['e'])
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = None,
        embed_continuous: bool = False,
        cont_embed_dim: int = 32,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        n_steps: int = 3,
        step_dim: int = 8,
        attn_dim: int = 8,
        dropout: float = 0.0,
        n_glu_step_dependent: int = 2,
        n_glu_shared: int = 2,
        ghost_bn: bool = True,
        virtual_batch_size: int = 128,
        momentum: float = 0.02,
        gamma: float = 1.3,
        epsilon: float = 1e-15,
        mask_type: str = "sparsemax",
    ):
        super(TabNet, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=embed_continuous,
            cont_embed_dim=cont_embed_dim,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
        )

        self.n_steps = n_steps
        self.step_dim = step_dim
        self.attn_dim = attn_dim
        self.dropout = dropout
        self.n_glu_step_dependent = n_glu_step_dependent
        self.n_glu_shared = n_glu_shared
        self.ghost_bn = ghost_bn
        self.virtual_batch_size = virtual_batch_size
        self.momentum = momentum
        self.gamma = gamma
        self.epsilon = epsilon
        self.mask_type = mask_type

        # the encoder consumes the concatenated embeddings output
        self.embed_out_dim = self.cat_and_cont_embed.output_dim
        self.encoder = TabNetEncoder(
            self.embed_out_dim,
            n_steps,
            step_dim,
            attn_dim,
            dropout,
            n_glu_step_dependent,
            n_glu_shared,
            ghost_bn,
            virtual_batch_size,
            momentum,
            gamma,
            epsilon,
            mask_type,
        )

    def forward(self, X: Tensor, prior: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        embeds = self._get_embeddings(X)
        steps_output, M_loss = self.encoder(embeds, prior)
        # the final representation is the sum of the per-step outputs
        res = torch.stack(steps_output, dim=0).sum(dim=0)
        return (res, M_loss)

    def forward_masks(self, X: Tensor) -> Tuple[Tensor, Dict[int, Tensor]]:
        embeds = self._get_embeddings(X)
        return self.encoder.forward_masks(embeds)

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        neccesary to build the `WideDeep` class
        """
        return self.step_dim
class TabNetPredLayer(nn.Module):
    def __init__(self, inp, out):
        """This class is a 'hack' required because TabNet is a very particular
        model within `WideDeep`.

        TabNet's forward method within `WideDeep` outputs two tensors: the
        last layer's activations and the sparse regularization factor. Since
        `WideDeep` builds the output layer (connection to the output
        neuron(s)) sequentially, a custom prediction layer accepting a
        two-tensor tuple is needed.
        """
        super(TabNetPredLayer, self).__init__()
        self.pred_layer = nn.Linear(inp, out, bias=False)
        initialize_non_glu(self.pred_layer, inp, out)

    def forward(self, tabnet_tuple: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
        # unpack (activations, sparse regularization) and pass the sparse
        # term through untouched
        res, M_loss = tabnet_tuple
        return (self.pred_layer(res), M_loss)
class TabNetDecoder(nn.Module):
    """Companion decoder model for the `TabNet` model (which can be
    considered an encoder itself).

    This class is designed to be used with the `EncoderDecoderTrainer` when
    using self-supervised pre-training (see the corresponding section in the
    docs). It receives the output from the `TabNet` encoder (i.e. the output
    from the so called 'steps') and '_reconstructs_' the embeddings.

    Parameters
    ----------
    embed_dim: int
        Size of the embeddings tensor to be reconstructed
    n_steps: int, default = 3
        number of decision steps (see the
        [paper](https://arxiv.org/abs/1908.07442))
    step_dim: int, default = 8
        Step's output dimension
    dropout: float, default = 0.0
        GLU block's internal dropout
    n_glu_step_dependent: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) that are step dependent
    n_glu_shared: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) shared across decision
        steps
    ghost_bn: bool, default = True
        If ``True``, [Ghost Batch Normalization](https://arxiv.org/abs/1705.08741)
        is used
    virtual_batch_size: int, default = 128
        Batch size when using Ghost Batch Normalization
    momentum: float, default = 0.02
        Ghost Batch Normalization's momentum

    Attributes
    ----------
    decoder: nn.Module
        receives the output from the encoder's steps and reconstructs the
        embeddings

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabNetDecoder
    >>> x_inp = [torch.rand(3, 8), torch.rand(3, 8), torch.rand(3, 8)]
    >>> decoder = TabNetDecoder(embed_dim=32, ghost_bn=False)
    >>> res = decoder(x_inp)
    >>> res.shape
    torch.Size([3, 32])
    """

    def __init__(
        self,
        embed_dim: int,
        n_steps: int = 3,
        step_dim: int = 8,
        dropout: float = 0.0,
        n_glu_step_dependent: int = 2,
        n_glu_shared: int = 2,
        ghost_bn: bool = True,
        virtual_batch_size: int = 128,
        momentum: float = 0.02,
    ):
        super(TabNetDecoder, self).__init__()

        self.n_steps = n_steps
        self.step_dim = step_dim
        self.dropout = dropout
        self.n_glu_step_dependent = n_glu_step_dependent
        self.n_glu_shared = n_glu_shared
        self.ghost_bn = ghost_bn
        self.virtual_batch_size = virtual_batch_size
        self.momentum = momentum

        # GLU layers shared across all decision steps.
        # FIX: the original loop branched on the first iteration but both
        # branches appended an identical nn.Linear, so the conditional was
        # redundant and has been removed (behavior unchanged)
        shared_layers = nn.ModuleList(
            nn.Linear(step_dim, 2 * step_dim, bias=False) for _ in range(n_glu_shared)
        )

        # one feature transformer per decision step
        self.decoder = nn.ModuleList()
        for step in range(n_steps):
            transformer = FeatTransformer(
                step_dim,
                step_dim,
                dropout,
                shared_layers,
                n_glu_step_dependent,
                ghost_bn,
                virtual_batch_size,
                momentum=momentum,
            )
            self.decoder.append(transformer)

        # projects the summed step outputs back to the embeddings dimension
        self.reconstruction_layer = nn.Linear(step_dim, embed_dim, bias=False)
        initialize_non_glu(self.reconstruction_layer, step_dim, embed_dim)

    def forward(self, X: List[Tensor]) -> Tensor:
        # sum the per-step transformed outputs. torch.tensor(0.0) is a 0-dim
        # CPU scalar, which PyTorch broadcasts against tensors on any device
        out = torch.tensor(0.0)
        for i, x in enumerate(X):
            out = torch.add(out, self.decoder[i](x))
        return self.reconstruction_layer(out)
class FeedForward(nn.Module):
    """Position-wise feed-forward block: ``LIN -> activation -> dropout -> LIN``.

    GLU-family activations halve their input along the last dimension, so the
    first linear layer doubles the hidden size when ``activation`` ends with
    'glu'. The hidden size is ``ff_hidden_dim`` if given, otherwise
    ``int(input_dim * mult)``.
    """

    def __init__(
        self,
        input_dim: int,
        dropout: float,
        mult: float,
        activation: str,
        *,
        ff_hidden_dim: Optional[int] = None,
    ):
        super(FeedForward, self).__init__()
        hidden = int(input_dim * mult) if ff_hidden_dim is None else ff_hidden_dim
        # GLU activations split their input in two, so double the width
        first_out = hidden * 2 if activation.endswith("glu") else hidden
        self.w_1 = nn.Linear(input_dim, first_out)
        self.w_2 = nn.Linear(hidden, input_dim)
        self.dropout = nn.Dropout(dropout)
        self.activation = get_activation_fn(activation)

    def forward(self, X: Tensor) -> Tensor:
        h = self.activation(self.w_1(X))
        h = self.dropout(h)
        return self.w_2(h)