repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
rankpredictor | rankpredictor-master/sub/gluonts/distribution/box_cox_tranform.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, List, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.core.component import validated
from gluonts.distribution.bijection import Bijection, InverseBijection
from gluonts.distribution.bijection_output import BijectionOutput
from gluonts.model.common import Tensor
# Relative imports
from .distribution import getF, softplus
class BoxCoxTranform(Bijection):
    r"""
    Box-Cox transformation of a univariate random variable.

    For an observation :math:`z`,

    .. math::
        BoxCox(z; \lambda_1, \lambda_2) = \begin{cases}
            ((z + \lambda_2)^{\lambda_1} - 1) / \lambda_1,
                \quad & \text{if } \lambda_1 \neq 0, \\
            \log (z + \lambda_2), \quad & \text{otherwise,}
        \end{cases}

    where :math:`\lambda_1` and :math:`\lambda_2` are learnable parameters.
    The domain of the transformation is not restricted.  For numerical
    stability the case :math:`\lambda_1 = 0` is detected via
    :math:`|\lambda_1| < tol\_lambda\_1` for a pre-specified tolerance.

    The inverse transform is

    .. math::
        BoxCox^{-1}(y; \lambda_1, \lambda_2) = \begin{cases}
            (y \lambda_1 + 1)^{1/\lambda_1} - \lambda_2,
                \quad & \text{if } \lambda_1 \neq 0, \\
            \exp (y) - \lambda_2, \quad & \text{otherwise.}
        \end{cases}

    **Notes on numerical stability:**

    1. The forward transform requires :math:`z + \lambda_2 > 0`.  Since
       :math:`\lambda_2` is learnable, this is arranged in
       `BoxCoxTransformOutput` via an a-priori lower bound on the
       observations.
    2. The inverse transform is reliable only when
       :math:`y \lambda_1 + 1 \geq 0`.  This cannot be guaranteed when
       :math:`y` is a sample from a distribution fitted to transformed
       observations (e.g. a Gaussian whose mean is close to the edge of the
       forward map's image can produce samples outside it, making the
       inverse complex-valued).  Hence :math:`y \lambda_1 + 1` is always
       truncated at zero before exponentiation.

    Parameters
    ----------
    lambda_1
    lambda_2
    tol_lambda_1
        For numerical stability, treat `lambda_1` as zero if it is less than
        `tol_lambda_1`
    F
    """

    arg_names = ["box_cox.lambda_1", "box_cox.lambda_2"]

    @validated()
    def __init__(
        self,
        lambda_1: Tensor,
        lambda_2: Tensor,
        tol_lambda_1: float = 1e-2,
        F=None,
    ) -> None:
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.tol_lambda_1 = tol_lambda_1
        self.F = getF(lambda_1) if not F else F
        # The power operator is spelled differently in the two mxnet APIs.
        self._power = self.F.power if self.F == mx.nd else self.F.pow

    @property
    def args(self) -> List:
        r"""
        List: current values of the parameters
        """
        return [self.lambda_1, self.lambda_2]

    @property
    def event_dim(self) -> int:
        # Univariate transform: events are scalars.
        return 0

    @property
    def sign(self) -> Tensor:
        # The Box-Cox map is monotonically increasing.
        return 1.0

    def f(self, z: Tensor) -> Tensor:
        r"""
        Forward transformation of observations `z`

        Parameters
        ----------
        z
            observations

        Returns
        -------
        Tensor
            Transformed observations
        """
        F = self.F
        lam1 = self.lambda_1
        lam2 = self.lambda_2
        # Elementwise switch between the power branch and the log branch,
        # depending on whether lambda_1 is (numerically) zero.
        nonzero_lam1 = (F.abs(lam1) >= self.tol_lambda_1).broadcast_like(z)
        return F.where(
            condition=nonzero_lam1,
            x=(self._power(z + lam2, lam1) - 1.0) / lam1,
            y=F.log(z + lam2),
            name="Box_Cox_trans",
        )

    def f_inv(self, y: Tensor) -> Tensor:
        r"""Inverse of the Box-Cox Transform

        Parameters
        ----------
        y
            Transformed observations

        Returns
        -------
        Tensor
            Observations
        """
        F = self.F
        lam1 = self.lambda_1
        lam2 = self.lambda_2
        # Truncate y * lambda_1 + 1 at zero for numerical stability (see
        # class docstring, note 2).
        base = F.relu(y * lam1 + 1.0)
        nonzero_lam1 = (F.abs(lam1) >= self.tol_lambda_1).broadcast_like(y)
        return F.where(
            condition=nonzero_lam1,
            x=self._power(base, 1.0 / lam1) - lam2,
            y=F.exp(y) - lam2,
            name="Box_Cox_inverse_trans",
        )

    def log_abs_det_jac(self, z: Tensor, y: Tensor = None) -> Tensor:
        r"""
        Logarithm of the absolute value of the Jacobian determinant of the
        Box-Cox transform:

        .. math::
            \log \frac{d}{dz} BoxCox(z; \lambda_1, \lambda_2) = \begin{cases}
                \log (z + \lambda_2) (\lambda_1 - 1),
                    \quad & \text{if } \lambda_1 \neq 0, \\
                -\log (z + \lambda_2), \quad & \text{otherwise.}
            \end{cases}

        Note that the derivative of the transformation is always
        non-negative.

        Parameters
        ----------
        z
            observations
        y
            not used

        Returns
        -------
        Tensor
        """
        F = self.F
        lam1 = self.lambda_1
        lam2 = self.lambda_2
        return F.where(
            condition=F.abs(lam1) >= self.tol_lambda_1,
            x=F.log(z + lam2) * (lam1 - 1.0),
            y=-F.log(z + lam2),
            name="Box_Cox_trans_log_det_jac",
        )
class BoxCoxTransformOutput(BijectionOutput):
    """Output class constructing a :class:`BoxCoxTranform` bijection.

    ``lb_obs`` is an a-priori lower bound on the observations, used to shift
    ``lambda_2`` so that ``z + lambda_2 > 0`` holds in the forward transform.
    When ``fix_lambda_2`` is True, ``lambda_2`` is pinned to ``-lb_obs``
    instead of being learned.
    """

    bij_cls: type = BoxCoxTranform
    # One scalar per parameter, keyed by the shared parameter names.
    args_dim: Dict[str, int] = {name: 1 for name in BoxCoxTranform.arg_names}

    @validated()
    def __init__(self, lb_obs: float = 0.0, fix_lambda_2: bool = True) -> None:
        super().__init__()
        self.lb_obs = lb_obs
        self.fix_lambda_2 = fix_lambda_2

    def domain_map(self, F, *args: Tensor) -> Tuple[Tensor, ...]:
        lam1, lam2 = args
        shift = self.lb_obs * F.ones_like(lam2)
        if self.fix_lambda_2:
            # lambda_2 is not learned: fix it to -lb_obs.
            lam2 = -shift
        else:
            # Guarantees z + lambda_2 > 0 whenever z > lb_obs.
            lam2 = softplus(F, lam2) - shift
        # event_shape is (), so drop the trailing parameter axis.
        return lam1.squeeze(axis=-1), lam2.squeeze(axis=-1)

    @property
    def event_shape(self) -> Tuple:
        return ()
class InverseBoxCoxTransform(InverseBijection):
    """
    The inverse Box-Cox transformation, exposed as a bijection.

    Internally this wraps a :class:`BoxCoxTranform` in
    :class:`InverseBijection`, which swaps its forward and inverse maps.
    """

    # Same parameter names as the forward transform.
    arg_names = ["box_cox.lambda_1", "box_cox.lambda_2"]

    @validated()
    def __init__(
        self,
        lambda_1: Tensor,
        lambda_2: Tensor,
        tol_lambda_1: float = 1e-2,
        F=None,
    ) -> None:
        forward_transform = BoxCoxTranform(lambda_1, lambda_2, tol_lambda_1, F)
        super().__init__(forward_transform)

    @property
    def event_dim(self) -> int:
        # Univariate: events are scalars.
        return 0
class InverseBoxCoxTransformOutput(BoxCoxTransformOutput):
    """Output class constructing an :class:`InverseBoxCoxTransform`."""

    bij_cls: type = InverseBoxCoxTransform
    # One scalar per parameter, keyed by the shared parameter names.
    args_dim: Dict[str, int] = {
        name: 1 for name in InverseBoxCoxTransform.arg_names
    }

    @property
    def event_shape(self) -> Tuple:
        return ()
| 9,282 | 29.536184 | 113 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/mixture.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
# Third-party imports
from mxnet import gluon
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
# Relative imports
from .distribution import Distribution, _expand_param, getF
from .distribution_output import DistributionOutput
class MixtureDistribution(Distribution):
    r"""
    A mixture distribution where each component is a Distribution.
    Parameters
    ----------
    mixture_probs
        A tensor of mixing probabilities. The entries should all be positive
        and sum to 1 across the last dimension. Shape: (..., k), where k is
        the number of distributions to be mixed. All axis except the last one
        should either coincide with the ones from the component distributions,
        or be 1 (in which case, the mixing coefficient is shared across
        the axis).
    components
        A list of k Distribution objects representing the mixture components.
        Distributions can be of different types. Each component's support
        should be made of tensors of shape (..., d).
    F
        A module that can either refer to the Symbol API or the NDArray
        API in MXNet
    """
    is_reparameterizable = False
    @validated()
    def __init__(
        self, mixture_probs: Tensor, components: List[Distribution], F=None
    ) -> None:
        self.F = F if F else getF(mixture_probs)
        # TODO: handle case with all components of the same type more efficiently when sampling
        # self.all_same = len(set(c.__class__.__name__ for c in components)) == 1
        self.mixture_probs = mixture_probs
        self.components = components
    @property
    def batch_shape(self) -> Tuple:
        # Batch/event shapes are read off the first component; all
        # components are expected to agree on them.
        return self.components[0].batch_shape
    @property
    def event_shape(self) -> Tuple:
        return self.components[0].event_shape
    @property
    def event_dim(self) -> int:
        return self.components[0].event_dim
    def log_prob(self, x: Tensor) -> Tensor:
        """Log-density of the mixture at `x`: log-sum-exp over the
        per-component log-densities plus log mixing weights.
        """
        F = self.F
        log_mix_weights = F.log(self.mixture_probs)
        # compute log probabilities of components
        component_log_likelihood = F.stack(
            *[c.log_prob(x) for c in self.components], axis=-1
        )
        # compute mixture log probability by log-sum-exp;
        # subtracting the per-position max before exponentiating keeps the
        # computation numerically stable, and it is added back after the log.
        summands = log_mix_weights + component_log_likelihood
        max_val = F.max_axis(summands, axis=-1, keepdims=True)
        sum_exp = F.sum(
            F.exp(F.broadcast_minus(summands, max_val)), axis=-1, keepdims=True
        )
        log_sum_exp = F.log(sum_exp) + max_val
        return log_sum_exp.squeeze(axis=-1)
    @property
    def mean(self) -> Tensor:
        # Mixture mean: probability-weighted sum of the component means.
        # NOTE(review): `broadcast_mul` is called with an `axis` keyword here
        # and in `cdf`/`stddev` — confirm the targeted MXNet version accepts
        # this argument.
        F = self.F
        mean_values = F.stack(*[c.mean for c in self.components], axis=-1)
        return F.sum(
            F.broadcast_mul(mean_values, self.mixture_probs, axis=-1), axis=-1
        )
    def cdf(self, x: Tensor) -> Tensor:
        # Mixture CDF: probability-weighted sum of the component CDFs.
        F = self.F
        cdf_values = F.stack(*[c.cdf(x) for c in self.components], axis=-1)
        erg = F.sum(
            F.broadcast_mul(cdf_values, self.mixture_probs, axis=-1), axis=-1
        )
        return erg
    @property
    def stddev(self) -> Tensor:
        # NOTE(review): this computes a probability-weighted sum of the
        # component standard deviations, which is not the true mixture
        # standard deviation (that also involves the spread of the component
        # means) — verify the intended semantics.
        F = self.F
        stddev_values = F.stack(*[c.stddev for c in self.components], axis=-1)
        return F.sum(
            F.broadcast_mul(stddev_values, self.mixture_probs, axis=-1),
            axis=-1,
        )
    def sample(
        self, num_samples: Optional[int] = None, dtype=np.float32
    ) -> Tensor:
        """Draw samples: sample every component, then select per position
        according to a categorical draw from `mixture_probs`.
        """
        # Sample all components; stack along a new trailing component axis.
        samples_list = [c.sample(num_samples, dtype) for c in self.components]
        samples = self.F.stack(*samples_list, axis=-1)
        mixture_probs = _expand_param(self.mixture_probs, num_samples)
        # Categorical draw of the component index for each position.
        idx = self.F.random.multinomial(mixture_probs)
        # Align `idx` with the per-component sample shape so `pick` can
        # select along the trailing component axis.
        for _ in range(self.event_dim):
            idx = idx.expand_dims(axis=-1)
        idx = idx.broadcast_like(samples_list[0])
        selected_samples = self.F.pick(data=samples, index=idx, axis=-1)
        return selected_samples
class MixtureArgs(gluon.HybridBlock):
    """Projects network output to the arguments of a mixture distribution.

    Produces the mixing probabilities (dense layer followed by a softmax)
    together with the projected arguments of every mixture component.
    """

    def __init__(
        self,
        distr_outputs: List[DistributionOutput],
        prefix: Optional[str] = None,
    ) -> None:
        super().__init__()
        self.num_components = len(distr_outputs)
        self.component_projections: List[gluon.HybridBlock] = []
        with self.name_scope():
            # Dense -> softmax yields valid mixing weights.
            self.proj_mixture_probs = gluon.nn.HybridSequential()
            self.proj_mixture_probs.add(
                gluon.nn.Dense(
                    self.num_components, prefix=f"{prefix}_pi_", flatten=False
                )
            )
            self.proj_mixture_probs.add(gluon.nn.HybridLambda("softmax"))
            # One argument projection per mixture component.
            for k, distr_output in enumerate(distr_outputs):
                proj = distr_output.get_args_proj(prefix=str(k))
                self.component_projections.append(proj)
                self.register_child(proj)

    def hybrid_forward(self, F, x: Tensor) -> Tuple[Tensor, ...]:
        mixture_probs = self.proj_mixture_probs(x)
        return (
            mixture_probs,
            *(proj(x) for proj in self.component_projections),
        )
class MixtureDistributionOutput(DistributionOutput):
    """Distribution output combining several component outputs into a mixture."""

    @validated()
    def __init__(self, distr_outputs: List[DistributionOutput]) -> None:
        self.num_components = len(distr_outputs)
        self.distr_outputs = distr_outputs

    def get_args_proj(self, prefix: Optional[str] = None) -> MixtureArgs:
        return MixtureArgs(self.distr_outputs, prefix=prefix)

    # Overrides the parent class method.
    def distribution(
        self, distr_args, scale: Optional[Tensor] = None, **kwargs
    ) -> MixtureDistribution:
        # First entry holds the mixing weights; the rest are per-component
        # argument tuples, in the same order as `distr_outputs`.
        mixture_probs, component_args = distr_args[0], distr_args[1:]
        components = [
            output.distribution(args, scale=scale)
            for output, args in zip(self.distr_outputs, component_args)
        ]
        return MixtureDistribution(
            mixture_probs=mixture_probs, components=components
        )

    @property
    def event_shape(self) -> Tuple:
        return self.distr_outputs[0].event_shape
| 6,791 | 33.477157 | 95 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/lds.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, Optional, NamedTuple
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.distribution import Distribution, Gaussian, MultivariateGaussian
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.core.component import validated
from gluonts.support.util import make_nd_diag, _broadcast_param
from gluonts.support.linalg_util import jitter_cholesky
class ParameterBounds:
    """A closed interval ``[lower, upper]`` used to bound a learnable parameter."""

    @validated()
    def __init__(self, lower, upper) -> None:
        # Reject empty intervals up front.
        msg = "lower bound should be smaller or equal to upper bound"
        assert lower <= upper, msg
        self.lower, self.upper = lower, upper
class LDS(Distribution):
    r"""
    Implements Linear Dynamical System (LDS) as a distribution.
    The LDS is given by
    .. math::
        z_t = A_t l_{t-1} + b_t + \epsilon_t \\
        l_t = C_t l_{t-1} + g_t \nu
    where
    .. math::
        \epsilon_t = N(0, S_v) \\
        \nu = N(0, 1)
    :math:`A_t`, :math:`C_t` and :math:`g_t` are the emission, transition and
    innovation coefficients respectively. The residual terms are denoted
    by :math:`b_t`.
    The target :math:`z_t` can be :math:`d`-dimensional in which case
    .. math::
        A_t \in R^{d \times h}, b_t \in R^{d}, C_t \in R^{h \times h}, g_t \in R^{h}
    where :math:`h` is dimension of the latent state.
    Parameters
    ----------
    emission_coeff
        Tensor of shape (batch_size, seq_length, obs_dim, latent_dim)
    transition_coeff
        Tensor of shape (batch_size, seq_length, latent_dim, latent_dim)
    innovation_coeff
        Tensor of shape (batch_size, seq_length, latent_dim)
    noise_std
        Tensor of shape (batch_size, seq_length, obs_dim)
    residuals
        Tensor of shape (batch_size, seq_length, obs_dim)
    prior_mean
        Tensor of shape (batch_size, latent_dim)
    prior_cov
        Tensor of shape (batch_size, latent_dim, latent_dim)
    latent_dim
        Dimension of the latent state
    output_dim
        Dimension of the output
    seq_length
        Sequence length
    F
    """
    @validated()
    def __init__(
        self,
        emission_coeff: Tensor,
        transition_coeff: Tensor,
        innovation_coeff: Tensor,
        noise_std: Tensor,
        residuals: Tensor,
        prior_mean: Tensor,
        prior_cov: Tensor,
        latent_dim: int,
        output_dim: int,
        seq_length: int,
        F=None,
    ) -> None:
        self.latent_dim = latent_dim
        self.output_dim = output_dim
        self.seq_length = seq_length
        # Split coefficients along time axis for easy access
        # emission_coef[t]: (batch_size, obs_dim, latent_dim)
        self.emission_coeff = emission_coeff.split(
            axis=1, num_outputs=self.seq_length, squeeze_axis=True
        )
        # innovation_coef[t]: (batch_size, latent_dim)
        # Note: squeeze_axis=False, unlike the other splits — the kept time
        # axis later serves as the row axis in gemm2 calls.
        self.innovation_coeff = innovation_coeff.split(
            axis=1, num_outputs=self.seq_length, squeeze_axis=False
        )
        # transition_coeff: (batch_size, latent_dim, latent_dim)
        self.transition_coeff = transition_coeff.split(
            axis=1, num_outputs=self.seq_length, squeeze_axis=True
        )
        # noise_std[t]: (batch_size, obs_dim)
        self.noise_std = noise_std.split(
            axis=1, num_outputs=self.seq_length, squeeze_axis=True
        )
        # residuals[t]: (batch_size, obs_dim)
        self.residuals = residuals.split(
            axis=1, num_outputs=self.seq_length, squeeze_axis=True
        )
        self.prior_mean = prior_mean
        self.prior_cov = prior_cov
        self.F = F if F else getF(noise_std)
    @property
    def batch_shape(self) -> Tuple:
        return self.emission_coeff[0].shape[:1] + (self.seq_length,)
    @property
    def event_shape(self) -> Tuple:
        return (self.output_dim,)
    @property
    def event_dim(self) -> int:
        # NOTE(review): event_shape has length 1 but event_dim is 2 —
        # confirm this mismatch is intentional (the time axis appears to be
        # counted as part of the event here).
        return 2
    def log_prob(
        self,
        x: Tensor,
        scale: Optional[Tensor] = None,
        observed: Optional[Tensor] = None,
    ):
        """
        Compute the log probability of observations.
        This method also returns the final state of the system.
        Parameters
        ----------
        x
            Observations, shape (batch_size, seq_length, output_dim)
        scale
            Scale of each sequence in x, shape (batch_size, output_dim)
        observed
            Flag tensor indicating which observations are genuine (1.0) and
            which are missing (0.0)
        Returns
        -------
        Tensor
            Log probabilities, shape (batch_size, seq_length)
        Tensor
            Final mean, shape (batch_size, latent_dim)
        Tensor
            Final covariance, shape (batch_size, latent_dim, latent_dim)
        """
        # Rescale observations to the model's internal scale before
        # filtering; the returned log-density is thus w.r.t. scaled data.
        if scale is not None:
            x = self.F.broadcast_div(x, scale.expand_dims(axis=1))
        # TODO: Based on form of the prior decide to do either filtering
        # or residual-sum-of-squares
        log_p, final_mean, final_cov = self.kalman_filter(x, observed)
        return log_p, final_mean, final_cov
    def kalman_filter(
        self, targets: Tensor, observed: Tensor
    ) -> Tuple[Tensor, ...]:
        """
        Performs Kalman filtering given observations.
        Parameters
        ----------
        targets
            Observations, shape (batch_size, seq_length, output_dim)
        observed
            Flag tensor indicating which observations are genuine (1.0) and
            which are missing (0.0)
        Returns
        -------
        Tensor
            Log probabilities, shape (batch_size, seq_length)
        Tensor
            Mean of p(l_T | l_{T-1}), where T is seq_length, with shape
            (batch_size, latent_dim)
        Tensor
            Covariance of p(l_T | l_{T-1}), where T is seq_length, with shape
            (batch_size, latent_dim, latent_dim)
        """
        F = self.F
        # targets[t]: (batch_size, obs_dim)
        targets = targets.split(
            axis=1, num_outputs=self.seq_length, squeeze_axis=True
        )
        log_p_seq = []
        # Running predictive state p(l_t | z_{<t}), initialized at the prior.
        mean = self.prior_mean
        cov = self.prior_cov
        observed = (
            observed.split(
                axis=1, num_outputs=self.seq_length, squeeze_axis=True
            )
            if observed is not None
            else None
        )
        for t in range(self.seq_length):
            # Compute the filtered distribution
            # p(l_t | z_1, ..., z_{t + 1})
            # and log - probability
            # log p(z_t | z_0, z_{t - 1})
            filtered_mean, filtered_cov, log_p = kalman_filter_step(
                F,
                target=targets[t],
                prior_mean=mean,
                prior_cov=cov,
                emission_coeff=self.emission_coeff[t],
                residual=self.residuals[t],
                noise_std=self.noise_std[t],
                latent_dim=self.latent_dim,
                output_dim=self.output_dim,
            )
            log_p_seq.append(log_p.expand_dims(axis=1))
            # Mean of p(l_{t+1} | l_t)
            # If the observation at t is missing (observed flag 0.0), the
            # filtered update is discarded and the predictive state is
            # propagated instead.
            mean = F.linalg_gemm2(
                self.transition_coeff[t],
                (
                    filtered_mean.expand_dims(axis=-1)
                    if observed is None
                    else F.where(
                        observed[t], x=filtered_mean, y=mean
                    ).expand_dims(axis=-1)
                ),
            ).squeeze(axis=-1)
            # Covariance of p(l_{t+1} | l_t)
            cov = F.linalg_gemm2(
                self.transition_coeff[t],
                F.linalg_gemm2(
                    (
                        filtered_cov
                        if observed is None
                        else F.where(observed[t], x=filtered_cov, y=cov)
                    ),
                    self.transition_coeff[t],
                    transpose_b=True,
                ),
            ) + F.linalg_gemm2(
                self.innovation_coeff[t],
                self.innovation_coeff[t],
                transpose_a=True,
            )
        # Return sequence of log likelihoods, as well as
        # final mean and covariance of p(l_T | l_{T-1} where T is seq_length
        return F.concat(*log_p_seq, dim=1), mean, cov
    def sample(
        self, num_samples: Optional[int] = None, scale: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Generates samples from the LDS: p(z_1, z_2, \ldots, z_{`seq_length`}).
        Parameters
        ----------
        num_samples
            Number of samples to generate
        scale
            Scale of each sequence in x, shape (batch_size, output_dim)
        Returns
        -------
        Tensor
            Samples, shape (num_samples, batch_size, seq_length, output_dim)
        """
        F = self.F
        # Note on shapes: here we work with tensors of the following shape
        # in each time step t: (num_samples, batch_size, dim, dim),
        # where dim can be obs_dim or latent_dim or a constant 1 to facilitate
        # generalized matrix multiplication (gemm2)
        # Sample observation noise for all time steps
        # noise_std: (batch_size, seq_length, obs_dim, 1)
        noise_std = F.stack(*self.noise_std, axis=1).expand_dims(axis=-1)
        # samples_eps_obs[t]: (num_samples, batch_size, obs_dim, 1)
        samples_eps_obs = (
            Gaussian(noise_std.zeros_like(), noise_std)
            .sample(num_samples)
            .split(axis=-3, num_outputs=self.seq_length, squeeze_axis=True)
        )
        # Sample standard normal for all time steps
        # samples_eps_std_normal[t]: (num_samples, batch_size, obs_dim, 1)
        samples_std_normal = (
            Gaussian(noise_std.zeros_like(), noise_std.ones_like())
            .sample(num_samples)
            .split(axis=-3, num_outputs=self.seq_length, squeeze_axis=True)
        )
        # Sample the prior state.
        # samples_lat_state: (num_samples, batch_size, latent_dim, 1)
        # The prior covariance is observed to be slightly negative definite whenever there is
        # excessive zero padding at the beginning of the time series.
        # We add positive tolerance to the diagonal to avoid numerical issues.
        # Note that `jitter_cholesky` adds positive tolerance only if the decomposition without jitter fails.
        state = MultivariateGaussian(
            self.prior_mean,
            jitter_cholesky(
                F, self.prior_cov, self.latent_dim, float_type=np.float32
            ),
        )
        samples_lat_state = state.sample(num_samples).expand_dims(axis=-1)
        samples_seq = []
        for t in range(self.seq_length):
            # Expand all coefficients to include samples in axis 0
            # emission_coeff_t: (num_samples, batch_size, obs_dim, latent_dim)
            # transition_coeff_t:
            #   (num_samples, batch_size, latent_dim, latent_dim)
            # innovation_coeff_t: (num_samples, batch_size, 1, latent_dim)
            emission_coeff_t, transition_coeff_t, innovation_coeff_t = [
                _broadcast_param(coeff, axes=[0], sizes=[num_samples])
                if num_samples is not None
                else coeff
                for coeff in [
                    self.emission_coeff[t],
                    self.transition_coeff[t],
                    self.innovation_coeff[t],
                ]
            ]
            # Expand residuals as well
            # residual_t: (num_samples, batch_size, obs_dim, 1)
            residual_t = (
                _broadcast_param(
                    self.residuals[t].expand_dims(axis=-1),
                    axes=[0],
                    sizes=[num_samples],
                )
                if num_samples is not None
                else self.residuals[t].expand_dims(axis=-1)
            )
            # Emission step: z_t = A_t l_t + b_t + eps_t.
            # (num_samples, batch_size, 1, obs_dim)
            samples_t = (
                F.linalg_gemm2(emission_coeff_t, samples_lat_state)
                + residual_t
                + samples_eps_obs[t]
            )
            samples_t = (
                samples_t.swapaxes(dim1=2, dim2=3)
                if num_samples is not None
                else samples_t.swapaxes(dim1=1, dim2=2)
            )
            samples_seq.append(samples_t)
            # sample next state: (num_samples, batch_size, latent_dim, 1)
            samples_lat_state = F.linalg_gemm2(
                transition_coeff_t, samples_lat_state
            ) + F.linalg_gemm2(
                innovation_coeff_t, samples_std_normal[t], transpose_a=True
            )
        # (num_samples, batch_size, seq_length, obs_dim)
        samples = F.concat(*samples_seq, dim=-2)
        # Undo the internal rescaling before returning, if a scale was given.
        return (
            samples
            if scale is None
            else F.broadcast_mul(
                samples,
                scale.expand_dims(axis=1).expand_dims(axis=0)
                if num_samples is not None
                else scale.expand_dims(axis=1),
            )
        )
    def sample_marginals(
        self, num_samples: Optional[int] = None, scale: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Generates samples from the marginals p(z_t),
        t = 1, \ldots, `seq_length`.
        Parameters
        ----------
        num_samples
            Number of samples to generate
        scale
            Scale of each sequence in x, shape (batch_size, output_dim)
        Returns
        -------
        Tensor
            Samples, shape (num_samples, batch_size, seq_length, output_dim)
        """
        F = self.F
        # Propagate the prior moments through the system, collecting the
        # observation mean and covariance at each step.
        state_mean = self.prior_mean.expand_dims(axis=-1)
        state_cov = self.prior_cov
        output_mean_seq = []
        output_cov_seq = []
        for t in range(self.seq_length):
            # compute and store observation mean at time t
            output_mean = F.linalg_gemm2(
                self.emission_coeff[t], state_mean
            ) + self.residuals[t].expand_dims(axis=-1)
            output_mean_seq.append(output_mean)
            # compute and store observation cov at time t
            output_cov = F.linalg_gemm2(
                self.emission_coeff[t],
                F.linalg_gemm2(
                    state_cov, self.emission_coeff[t], transpose_b=True
                ),
            ) + make_nd_diag(
                F=F, x=self.noise_std[t] * self.noise_std[t], d=self.output_dim
            )
            output_cov_seq.append(output_cov.expand_dims(axis=1))
            # Advance the latent state moments to time t+1.
            state_mean = F.linalg_gemm2(self.transition_coeff[t], state_mean)
            state_cov = F.linalg_gemm2(
                self.transition_coeff[t],
                F.linalg_gemm2(
                    state_cov, self.transition_coeff[t], transpose_b=True
                ),
            ) + F.linalg_gemm2(
                self.innovation_coeff[t],
                self.innovation_coeff[t],
                transpose_a=True,
            )
        output_mean = F.concat(*output_mean_seq, dim=1)
        output_cov = F.concat(*output_cov_seq, dim=1)
        # Sample each marginal from its Gaussian via a Cholesky factor.
        L = F.linalg_potrf(output_cov)
        output_distribution = MultivariateGaussian(output_mean, L)
        samples = output_distribution.sample(num_samples=num_samples)
        return (
            samples
            if scale is None
            else F.broadcast_mul(samples, scale.expand_dims(axis=1))
        )
class LDSArgsProj(mx.gluon.HybridBlock):
    """Projects network output to the (bounded) LDS parameters.

    Noise std and innovation come from sigmoid-activated dense layers whose
    (0, 1) outputs are affinely rescaled into the given bounds; the residual
    is an unconstrained dense projection of size ``output_dim``.
    """

    def __init__(
        self,
        output_dim: int,
        noise_std_bounds: ParameterBounds,
        innovation_bounds: ParameterBounds,
    ) -> None:
        super().__init__()
        self.output_dim = output_dim
        # Sigmoid outputs are rescaled into their bounds in hybrid_forward.
        self.dense_noise_std = mx.gluon.nn.Dense(
            units=1, flatten=False, activation="sigmoid"
        )
        self.dense_innovation = mx.gluon.nn.Dense(
            units=1, flatten=False, activation="sigmoid"
        )
        self.dense_residual = mx.gluon.nn.Dense(
            units=output_dim, flatten=False
        )
        self.innovation_bounds = innovation_bounds
        self.noise_std_bounds = noise_std_bounds

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        def rescale(unit: Tensor, bounds: ParameterBounds) -> Tensor:
            # Map a (0, 1)-valued tensor into [bounds.lower, bounds.upper].
            return unit * (bounds.upper - bounds.lower) + bounds.lower

        noise_std = rescale(self.dense_noise_std(x), self.noise_std_bounds)
        innovation = rescale(self.dense_innovation(x), self.innovation_bounds)
        return noise_std, innovation, self.dense_residual(x)
def kalman_filter_step(
    F,
    target: Tensor,
    prior_mean: Tensor,
    prior_cov: Tensor,
    emission_coeff: Tensor,
    residual: Tensor,
    noise_std: Tensor,
    latent_dim: int,
    output_dim: int,
):
    """
    One step of the Kalman filter.
    This function computes the filtered state (mean and covariance) given the
    linear system coefficients the prior state (mean and variance),
    as well as observations.
    Parameters
    ----------
    F
    target
        Observations of the system output, shape (batch_size, output_dim)
    prior_mean
        Prior mean of the latent state, shape (batch_size, latent_dim)
    prior_cov
        Prior covariance of the latent state, shape
        (batch_size, latent_dim, latent_dim)
    emission_coeff
        Emission coefficient, shape (batch_size, output_dim, latent_dim)
    residual
        Residual component, shape (batch_size, output_dim)
    noise_std
        Standard deviation of the output noise, shape (batch_size, output_dim)
    latent_dim
        Dimension of the latent state vector
    output_dim
        Dimension of the system output
    Returns
    -------
    Tensor
        Filtered_mean, shape (batch_size, latent_dim)
    Tensor
        Filtered_covariance, shape (batch_size, latent_dim, latent_dim)
    Tensor
        Log probability, shape (batch_size, )
    """
    # output_mean: mean of the target (batch_size, obs_dim)
    output_mean = F.linalg_gemm2(
        emission_coeff, prior_mean.expand_dims(axis=-1)
    ).squeeze(axis=-1)
    # noise covariance (diagonal, built from the per-dimension std)
    noise_cov = make_nd_diag(F=F, x=noise_std * noise_std, d=output_dim)
    # S_hh A^T, reused below for both the output covariance and the gain
    S_hh_x_A_tr = F.linalg_gemm2(prior_cov, emission_coeff, transpose_b=True)
    # covariance of the target
    output_cov = F.linalg_gemm2(emission_coeff, S_hh_x_A_tr) + noise_cov
    # compute the Cholesky decomposition output_cov = LL^T
    L_output_cov = F.linalg_potrf(output_cov)
    # Compute Kalman gain matrix K:
    # K = S_hh X with X = A^T output_cov^{-1}
    # We have X = A^T output_cov^{-1} => X output_cov = A^T => X LL^T = A^T
    # We can thus obtain X by solving two linear systems involving L
    kalman_gain = F.linalg_trsm(
        L_output_cov,
        F.linalg_trsm(
            L_output_cov, S_hh_x_A_tr, rightside=True, transpose=True
        ),
        rightside=True,
    )
    # compute the error (innovation) between prediction and observation
    target_minus_residual = target - residual
    delta = target_minus_residual - output_mean
    # filtered estimates
    filtered_mean = prior_mean.expand_dims(axis=-1) + F.linalg_gemm2(
        kalman_gain, delta.expand_dims(axis=-1)
    )
    filtered_mean = filtered_mean.squeeze(axis=-1)
    # Joseph's symmetrized update for covariance:
    # (I - KA) S_hh (I - KA)^T + K S_v K^T, which stays symmetric and
    # positive semi-definite even under round-off.
    ImKA = F.broadcast_sub(
        F.eye(latent_dim), F.linalg_gemm2(kalman_gain, emission_coeff)
    )
    filtered_cov = F.linalg_gemm2(
        ImKA, F.linalg_gemm2(prior_cov, ImKA, transpose_b=True)
    ) + F.linalg_gemm2(
        kalman_gain, F.linalg_gemm2(noise_cov, kalman_gain, transpose_b=True)
    )
    # likelihood term: (batch_size,)
    log_p = MultivariateGaussian(output_mean, L_output_cov).log_prob(
        target_minus_residual
    )
    return filtered_mean, filtered_cov, log_p
| 20,583 | 31.881789 | 109 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/distribution_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Callable, Dict, Optional, Tuple
# Third-party imports
import numpy as np
from mxnet import gluon
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.distribution.bijection import AffineTransformation
from gluonts.model.common import Tensor
# Relative imports
from .distribution import Distribution
from .transformed_distribution import TransformedDistribution
class ArgProj(gluon.HybridBlock):
    r"""
    A block projecting a dense layer's output to distribution arguments.

    Parameters
    ----------
    args_dim
        Dictionary mapping each argument name to its dimension; the names
        are used as parameter prefixes and the values size the dense
        projections whose outputs are handed to the domain map.
    domain_map
        Function (or HybridBlock) called with one unbounded tensor per
        argument; returns the tuple of outputs used when calling the
        distribution constructor.
    """

    def __init__(
        self,
        args_dim: Dict[str, int],
        domain_map: Callable[..., Tuple[Tensor]],
        dtype: DType = np.float32,
        prefix: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.dtype = dtype
        # One dense projection per distribution argument, each registered
        # as a child block.
        self.proj = []
        for name, dim in args_dim.items():
            dense = gluon.nn.Dense(
                dim,
                flatten=False,
                dtype=self.dtype,
                prefix=f"{prefix}_distr_{name}_",
            )
            self.register_child(dense)
            self.proj.append(dense)
        self.domain_map = domain_map

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, x: Tensor) -> Tuple[Tensor]:
        unbounded = (layer(x) for layer in self.proj)
        return self.domain_map(*unbounded)
class Output:
    r"""
    Base class connecting a network to some output.

    Subclasses declare ``args_dim`` and implement :meth:`domain_map`.
    """

    args_dim: Dict[str, int]
    _dtype: DType = np.float32

    @property
    def dtype(self):
        # Data type used by the projection layers.
        return self._dtype

    @dtype.setter
    def dtype(self, dtype: DType):
        self._dtype = dtype

    def get_args_proj(self, prefix: Optional[str] = None) -> ArgProj:
        # Wrap domain_map so it can participate in a hybridized graph.
        mapped = gluon.nn.HybridLambda(self.domain_map)
        return ArgProj(
            args_dim=self.args_dim,
            domain_map=mapped,
            prefix=prefix,
            dtype=self.dtype,
        )

    def domain_map(self, F, *args: Tensor):
        raise NotImplementedError()
class DistributionOutput(Output):
    r"""
    Class to construct a distribution given the output of a network.
    """
    distr_cls: type
    @validated()
    def __init__(self) -> None:
        pass
    def distribution(
        self, distr_args, scale: Optional[Tensor] = None
    ) -> Distribution:
        r"""
        Construct the associated distribution, given the collection of
        constructor arguments and, optionally, a scale tensor.
        Parameters
        ----------
        distr_args
            Constructor arguments for the underlying Distribution type.
        scale
            Optional tensor, of the same shape as the
            batch_shape+event_shape of the resulting distribution.
        """
        distr = self.distr_cls(*distr_args)
        if scale is None:
            return distr
        # Rescale the base distribution via an affine bijection.
        return TransformedDistribution(
            distr, [AffineTransformation(scale=scale)]
        )
    @property
    def event_shape(self) -> Tuple:
        r"""
        Shape of each individual event contemplated by the distributions
        that this object constructs.
        """
        raise NotImplementedError()
    @property
    def event_dim(self) -> int:
        r"""
        Number of event dimensions, i.e., length of the `event_shape` tuple,
        of the distributions that this object constructs.
        """
        return len(self.event_shape)
    def domain_map(self, F, *args: Tensor):
        r"""
        Converts arguments to the right shape and domain. The domain depends
        on the type of distribution, while the correct shape is obtained by
        reshaping the trailing axis in such a way that the returned tensors
        define a distribution of the right event_shape.
        """
        raise NotImplementedError()
| 5,021 | 29.253012 | 76 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/transformed_distribution_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from collections import ChainMap
from typing import Optional, Tuple, List
# Third-party imports
import numpy as np
from mxnet import gluon
# First-party imports
from gluonts.distribution import Distribution
from gluonts.distribution.bijection import AffineTransformation
from gluonts.distribution.bijection_output import BijectionOutput
from gluonts.distribution.distribution_output import (
ArgProj,
DistributionOutput,
)
from gluonts.distribution.transformed_distribution import (
TransformedDistribution,
)
from gluonts.model.common import Tensor
from gluonts.core.component import validated
class TransformedDistributionOutput(DistributionOutput):
    r"""
    Connects a network to a distribution whose samples are passed through a
    sequence of learnable bijections.
    """
    @validated()
    def __init__(
        self,
        base_distr_output: DistributionOutput,
        transforms_output: List[BijectionOutput],
    ) -> None:
        super().__init__()
        self.base_distr_output = base_distr_output
        self.transforms_output = transforms_output
        self.base_distr_args_dim = base_distr_output.args_dim
        self.transforms_args_dim = [
            t_out.args_dim for t_out in transforms_output
        ]
        def _fuse(shape_a: Tuple, shape_b: Tuple) -> Tuple:
            # Combine two event shapes; the shorter must be a suffix of the
            # longer, which is then kept.
            if len(shape_a) > len(shape_b):
                shape_a, shape_b = shape_b, shape_a
            # from here on len(shape_b) >= len(shape_a)
            assert shape_b[-len(shape_a) :] == shape_a
            return shape_b
        self._event_shape: Tuple[int, ...] = ()
        for t_out in self.transforms_output:
            self._event_shape = _fuse(self._event_shape, t_out.event_shape)
    def get_args_proj(self, prefix: Optional[str] = None) -> ArgProj:
        # ChainMap gives earlier transforms precedence on duplicate names.
        transforms_args_dim = dict(ChainMap(*self.transforms_args_dim))
        return ArgProj(
            args_dim=dict(self.base_distr_args_dim, **transforms_args_dim),
            domain_map=gluon.nn.HybridLambda(self.domain_map),
            prefix=prefix,
        )
    def _split_args(self, args):
        # hybrid_forward passes a flat tuple of tensors (dicts are not
        # supported), so recover which slice belongs to the base
        # distribution and which slice to each transformation.
        n_base = len(self.base_distr_args_dim)
        distr_args = args[0:n_base]
        counts = [
            len(transform_args_dim)
            for transform_args_dim in self.transforms_args_dim
        ]
        # cumulative start indices of the argument slices
        bounds = np.cumsum([n_base] + counts)
        transforms_args = [
            args[start:stop] for start, stop in zip(bounds, bounds[1:])
        ]
        return distr_args, transforms_args
    def domain_map(self, F, *args: Tensor):
        distr_args, transforms_args = self._split_args(args)
        # Map the base-distribution arguments, then append each
        # transformation's mapped arguments, flattening into one tuple.
        mapped = tuple(self.base_distr_output.domain_map(F, *distr_args))
        for t_out, t_args in zip(self.transforms_output, transforms_args):
            mapped = mapped + tuple(t_out.domain_map(F, *t_args))
        return mapped
    def distribution(
        self, distr_args, scale: Optional[Tensor] = None, **kwargs
    ) -> Distribution:
        distr_args, transforms_args = self._split_args(distr_args)
        base = self.base_distr_output.distr_cls(*distr_args)
        bijections = [
            t_out.bij_cls(*t_args)
            for t_out, t_args in zip(self.transforms_output, transforms_args)
        ]
        trans_distr = TransformedDistribution(base, bijections)
        if scale is None:
            return trans_distr
        # Apply the scaling on top of the learned transformations.
        return TransformedDistribution(
            trans_distr, [AffineTransformation(scale=scale)]
        )
    @property
    def event_shape(self) -> Tuple:
        return self._event_shape
| 4,855 | 33.197183 | 81 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/piecewise_linear.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple, cast, List
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.distribution.bijection import AffineTransformation, Bijection
from gluonts.distribution.distribution import Distribution, getF
from gluonts.distribution.distribution_output import DistributionOutput
from gluonts.distribution.transformed_distribution import (
TransformedDistribution,
)
from gluonts.model.common import Tensor
from gluonts.support import util
class PiecewiseLinear(Distribution):
    r"""
    Piecewise linear distribution.
    This class represents the *quantile function* (i.e., the inverse CDF)
    associated with the a distribution, as a continuous, non-decreasing,
    piecewise linear function defined in the [0, 1] interval:
    .. math::
        q(x; \gamma, b, d) = \gamma + \sum_{l=0}^L b_l (x_l - d_l)_+
    where the input :math:`x \in [0,1]` and the parameters are
    - :math:`\gamma`: intercept at 0
    - :math:`b`: differences of the slopes in consecutive pieces
    - :math:`d`: knot positions
    Parameters
    ----------
    gamma
        Tensor containing the intercepts at zero
    slopes
        Tensor containing the slopes of each linear piece.
        All coefficients must be positive.
        Shape: ``(*gamma.shape, num_pieces)``
    knot_spacings
        Tensor containing the spacings between knots in the splines.
        All coefficients must be positive and sum to one on the last axis.
        Shape: ``(*gamma.shape, num_pieces)``
    F
    """
    # Sampling draws a uniform and maps it through the quantile function,
    # so reparameterized gradients through samples are not provided.
    is_reparameterizable = False
    @validated()
    def __init__(
        self, gamma: Tensor, slopes: Tensor, knot_spacings: Tensor, F=None
    ) -> None:
        self.F = F if F else getF(gamma)
        self.gamma = gamma
        # Since most of the calculations are easily expressed in the original parameters, we transform the
        # learned parameters back
        self.b, self.knot_positions = PiecewiseLinear._to_orig_params(
            self.F, slopes, knot_spacings
        )
    @staticmethod
    def _to_orig_params(
        F, slopes: Tensor, knot_spacings: Tensor
    ) -> Tuple[Tensor, Tensor]:
        r"""
        Convert the trainable parameters to the original parameters of the
        splines, i.e., convert the slopes of each piece to the difference
        between slopes of consecutive pieces and knot spacings to knot
        positions.
        Parameters
        ----------
        F
        slopes
            Tensor of shape (*gamma.shape, num_pieces)
        knot_spacings
            Tensor of shape (*gamma.shape, num_pieces)
        Returns
        -------
        Tensor
            Tensor of shape (*gamma.shape, num_pieces)
        Tensor
            Tensor of shape (*gamma.shape, num_pieces)
        """
        # b: the difference between slopes of consecutive pieces
        # shape (..., num_pieces - 1)
        b = F.slice_axis(slopes, axis=-1, begin=1, end=None) - F.slice_axis(
            slopes, axis=-1, begin=0, end=-1
        )
        # Add slope of first piece to b: b_0 = m_0
        m_0 = F.slice_axis(slopes, axis=-1, begin=0, end=1)
        b = F.concat(m_0, b, dim=-1)
        # The actual position of the knots is obtained by cumulative sum of
        # the knot spacings. The first knot position is always 0 for quantile
        # functions; cumsum will take care of that.
        knot_positions = util.cumsum(F, knot_spacings, exclusive=True)
        return b, knot_positions
    def sample(
        self, num_samples: Optional[int] = None, dtype=np.float32
    ) -> Tensor:
        """
        Draw samples via inverse-CDF sampling: sample uniform levels and
        evaluate the (piecewise linear) quantile function at them.
        """
        F = self.F
        # if num_samples=None then u should have the same shape as gamma, i.e., (dim,)
        # else u should be (num_samples, dim)
        # Note: there is no need to extend the parameters to (num_samples, dim, ...)
        # Thankfully samples returned by `uniform_like` have the expected datatype.
        u = F.random.uniform_like(
            data=(
                self.gamma
                if num_samples is None
                else self.gamma.expand_dims(axis=0).repeat(
                    axis=0, repeats=num_samples
                )
            )
        )
        sample = self.quantile(u)
        # NOTE(review): the squeeze below assumes `quantile` introduces a
        # leading axis when num_samples is None — confirm against
        # quantile_internal's axis=0 expansion.
        if num_samples is None:
            sample = F.squeeze(sample, axis=0)
        return sample
    # overwrites the loss method of the Distribution class
    # (training minimizes CRPS instead of negative log-likelihood, since
    # this class models the quantile function rather than a density)
    def loss(self, x: Tensor) -> Tensor:
        return self.crps(x)
    def crps(self, x: Tensor) -> Tensor:
        r"""
        Compute CRPS in analytical form.
        Parameters
        ----------
        x
            Observation to evaluate. Shape equals to gamma.shape.
        Returns
        -------
        Tensor
            Tensor containing the CRPS.
        """
        F = self.F
        gamma, b, knot_positions = self.gamma, self.b, self.knot_positions
        # a_tilde: quantile level at which the quantile function equals x
        a_tilde = self.cdf(x)
        max_a_tilde_knots = F.broadcast_maximum(
            a_tilde.expand_dims(axis=-1), knot_positions
        )
        knots_cubed = F.broadcast_power(self.knot_positions, F.ones(1) * 3.0)
        # per-knot coefficient of the closed-form CRPS integral
        coeff = (
            (1.0 - knots_cubed) / 3.0
            - knot_positions
            - F.square(max_a_tilde_knots)
            + 2 * max_a_tilde_knots * knot_positions
        )
        crps = (
            (2 * a_tilde - 1) * x
            + (1 - 2 * a_tilde) * gamma
            + F.sum(b * coeff, axis=-1, keepdims=False)
        )
        return crps
    def cdf(self, x: Tensor) -> Tensor:
        r"""
        Computes the quantile level :math:`\alpha` such that
        :math:`q(\alpha) = x`.
        Parameters
        ----------
        x
            Tensor of shape gamma.shape
        Returns
        -------
        Tensor
            Tensor of shape gamma.shape
        """
        F = self.F
        gamma, b, knot_positions = self.gamma, self.b, self.knot_positions
        quantiles_at_knots = self.quantile_internal(knot_positions, axis=-2)
        # Mask to nullify the terms corresponding to knots larger than l_0, which is the largest knot
        # (quantile level) such that the quantile at l_0, s(l_0) < x.
        # (..., num_pieces)
        mask = F.broadcast_lesser(quantiles_at_knots, x.expand_dims(axis=-1))
        slope_l0 = F.sum(b * mask, axis=-1, keepdims=False)
        # slope_l0 can be zero in which case a_tilde = 0.
        # The following is to circumvent mxnet issue with "where" operator which returns nan even if the statement
        # you are interested in does not result in nan (but the "else" statement evaluates to nan).
        slope_l0_nz = F.where(
            slope_l0 == F.zeros_like(slope_l0), F.ones_like(x), slope_l0
        )
        a_tilde = F.where(
            slope_l0 == F.zeros_like(slope_l0),
            F.zeros_like(x),
            (
                x
                - gamma
                + F.sum(b * knot_positions * mask, axis=-1, keepdims=False)
            )
            / slope_l0_nz,
        )
        return a_tilde
    def quantile(self, level: Tensor) -> Tensor:
        # Levels carry an extra leading (sample) axis, hence axis=0.
        return self.quantile_internal(level, axis=0)
    def quantile_internal(
        self, x: Tensor, axis: Optional[int] = None
    ) -> Tensor:
        r"""
        Evaluates the quantile function at the quantile levels contained in `x`.
        Parameters
        ----------
        x
            Tensor of shape ``*gamma.shape`` if axis=None, or containing an
            additional axis on the specified position, otherwise.
        axis
            Index of the axis containing the different quantile levels which
            are to be computed.
        Returns
        -------
        Tensor
            Quantiles tensor, of the same shape as x.
        """
        F = self.F
        # shapes of self
        # self.gamma: (*batch_shape)
        # self.knot_positions, self.b: (*batch_shape, num_pieces)
        # axis=None - passed at inference when num_samples is None
        # The shape of x is (*batch_shape).
        # The shapes of the parameters should be:
        # gamma: (*batch_shape), knot_positions, b: (*batch_shape, num_pieces)
        # They match the self. counterparts so no reshaping is needed
        # axis=0 - passed at inference when num_samples is not None
        # The shape of x is (num_samples, *batch_shape).
        # The shapes of the parameters should be:
        # gamma: (num_samples, *batch_shape), knot_positions, b: (num_samples, *batch_shape, num_pieces),
        # They do not match the self. counterparts and we need to expand the axis=0 to all of them.
        # axis=-2 - passed at training when we evaluate quantiles at knot_positions in order to compute a_tilde
        # The shape of x is shape(x) = shape(knot_positions) = (*batch_shape, num_pieces).
        # The shape of the parameters should be:
        # gamma: (*batch_shape, 1), knot_positions: (*batch_shape, 1, num_pieces), b: (*batch_shape, 1, num_pieces)
        # They do not match the self. counterparts and we need to expand axis=-1 for gamma and axis=-2 for the rest.
        if axis is not None:
            gamma = self.gamma.expand_dims(axis=axis if axis == 0 else -1)
            knot_positions = self.knot_positions.expand_dims(axis=axis)
            b = self.b.expand_dims(axis=axis)
        else:
            gamma, knot_positions, b = self.gamma, self.knot_positions, self.b
        # q(x) = gamma + sum_l b_l * relu(x - d_l)
        x_minus_knots = F.broadcast_minus(
            x.expand_dims(axis=-1), knot_positions
        )
        quantile = F.broadcast_add(
            gamma, F.sum(F.broadcast_mul(b, F.relu(x_minus_knots)), axis=-1)
        )
        return quantile
    @property
    def batch_shape(self) -> Tuple:
        # One distribution per entry of gamma.
        return self.gamma.shape
    @property
    def event_shape(self) -> Tuple:
        # Univariate: events are scalars.
        return ()
    @property
    def event_dim(self) -> int:
        return 0
class PiecewiseLinearOutput(DistributionOutput):
    # Output head producing PiecewiseLinear (spline quantile function)
    # distributions with a fixed number of linear pieces.
    distr_cls: type = PiecewiseLinear
    @validated()
    def __init__(self, num_pieces: int) -> None:
        assert (
            isinstance(num_pieces, int) and num_pieces > 1
        ), "num_pieces should be an integer larger than 1"
        self.num_pieces = num_pieces
        self.args_dim = cast(
            Dict[str, int],
            {"gamma": 1, "slopes": num_pieces, "knot_spacings": num_pieces},
        )
    @classmethod
    def domain_map(cls, F, gamma, slopes, knot_spacings):
        # Slopes must be non-negative: apply softplus below a threshold and
        # pass large values through unchanged (softplus is numerically the
        # identity there, and masking avoids overflow inside the softrelu).
        threshold = F.zeros_like(slopes) + 20.0
        above = F.broadcast_greater_equal(slopes, threshold)
        below = F.broadcast_lesser(slopes, threshold)
        slopes_proj = (
            below * F.Activation(data=(below * slopes), act_type="softrelu")
            + above * slopes
        )
        # Knot spacings live on the simplex: in [0, 1] and summing to 1.
        knot_spacings_proj = F.softmax(knot_spacings)
        return gamma.squeeze(axis=-1), slopes_proj, knot_spacings_proj
    def distribution(
        self, distr_args, scale: Optional[Tensor] = None, **kwargs
    ) -> PiecewiseLinear:
        distr = self.distr_cls(*distr_args)
        if scale is None:
            return distr
        # Wrap in the CRPS-aware transformed variant so training still works.
        return TransformedPiecewiseLinear(
            distr, [AffineTransformation(scale=scale)]
        )
    @property
    def event_shape(self) -> Tuple:
        return ()
# Need to inherit from PiecewiseLinear to get the overwritten loss method.
class TransformedPiecewiseLinear(TransformedDistribution, PiecewiseLinear):
    @validated()
    def __init__(
        self, base_distribution: PiecewiseLinear, transforms: List[Bijection]
    ) -> None:
        super().__init__(base_distribution, transforms)
    def crps(self, y: Tensor) -> Tensor:
        # TODO: use event_shape
        F = getF(y)
        # Only a single scaling transformation is supported; walk the
        # transforms in reverse and map the observation back.
        for transform in reversed(self.transforms):
            assert isinstance(
                transform, AffineTransformation
            ), "Not an AffineTransformation"
            assert (
                transform.scale is not None and transform.loc is None
            ), "Not a scaling transformation"
            scale = transform.scale
            x = transform.f_inv(y)
        # CRPS of the base distribution, rescaled by the scale factor.
        p = self.base_distribution.crps(x)
        return F.broadcast_mul(p, scale)
| 12,984 | 31.873418 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/support/linalg_util.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Optional
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.core.component import DType
from gluonts.model.common import Tensor
def batch_diagonal(
    F,
    matrix: Tensor,
    num_data_points: Optional[int] = None,
    ctx=mx.cpu(),
    float_type=np.float32,
) -> Tensor:
    """
    Extract the main diagonal of every matrix in a batch.
    Parameters
    ----------
    F
        A module that can either refer to the Symbol API or the NDArray
        API in MXNet.
    matrix
        Batch of square matrices, shape
        (batch_size, num_data_points, num_data_points).
    num_data_points
        Number of rows in each matrix.
    Returns
    -------
    Tensor
        Diagonals, shape (batch_size, num_data_points, 1).
    """
    # Zero out the off-diagonal entries, then sum each row by multiplying
    # with a column of ones.
    identity = F.eye(num_data_points, ctx=ctx, dtype=float_type)
    diag_only = F.broadcast_mul(identity, matrix)
    ones_column = F.ones_like(F.slice_axis(matrix, axis=2, begin=0, end=1))
    return F.linalg.gemm2(diag_only, ones_column)
def lower_triangular_ones(F, d: int, offset: int = 0) -> Tensor:
    """
    Constructs a lower triangular matrix of ones.
    Parameters
    ----------
    F
    d
        Dimension of the output tensor, whose shape will be (d, d).
    offset
        Number of diagonals (starting from the main one) to leave as zero.
        With the default offset = 0 the main diagonal is included; with
        offset = 1 the result is strictly lower triangular.
    Returns
    -------
    Tensor
        Tensor of shape (d, d) with ones in the lower triangular part
        (subject to `offset`) and zeros elsewhere.
    """
    # Accumulate one sub-diagonal of ones at a time.
    result = F.zeros_like(F.eye(d))
    for diag_index in range(offset, d):
        result = result + F.eye(d, d, -diag_index)
    return result
# noinspection PyMethodOverriding,PyPep8Naming
def jitter_cholesky_eig(
    F,
    matrix: Tensor,
    num_data_points: Optional[int] = None,
    ctx: mx.Context = mx.Context("cpu"),
    float_type: DType = np.float64,
    diag_weight: float = 1e-6,
) -> Tensor:
    """
    Jitter method based on the eigenvalue decomposition: eigenvalues are
    clamped from below by a jitter proportional to the mean of the diagonal
    entries, and the Cholesky factor of the reconstructed matrix is returned.
    Parameters
    ----------
    F
        A module that can either refer to the Symbol API or the NDArray
        API in MXNet.
    matrix
        Matrix of shape (batch_size, num_data_points, num_data_points).
    num_data_points
        Number of rows in the kernel_matrix.
    ctx
        Determines whether to compute on the cpu or gpu.
    float_type
        Determines whether to use single or double precision.
    Returns
    -------
    Tensor
        Approximate lower triangular Cholesky factor `L` of shape
        (batch_size, num_data_points, num_data_points).
    """
    # (batch_size, num_data_points, 1)
    diag = batch_diagonal(F, matrix, num_data_points, ctx, float_type)
    # (batch_size, 1, 1)
    diag_mean = diag.mean(axis=1).expand_dims(axis=2)
    U, Lambda = F.linalg.syevd(matrix)
    jitter = F.broadcast_mul(diag_mean, F.ones_like(diag)) * diag_weight
    # Clamp the eigenvalues from below by the jitter.
    clamped_eigvals = F.maximum(jitter, Lambda.expand_dims(axis=2))
    eig_diag = F.broadcast_mul(
        F.eye(num_data_points, ctx=ctx, dtype=float_type), clamped_eigvals
    )
    # `K = U^T Lambda U`, where the rows of `U` are the eigenvectors of `K`.
    # The form U^T Lambda U (instead of U Lambda U^T) utilizes row-based
    # computation (see Section 4, Seeger et al., 2018).
    reconstructed = F.linalg.gemm2(
        U, F.linalg.gemm2(eig_diag, U), transpose_a=True
    )
    return F.linalg.potrf(reconstructed)
# noinspection PyMethodOverriding,PyPep8Naming
def jitter_cholesky(
    F,
    matrix: Tensor,
    num_data_points: Optional[int] = None,
    ctx: mx.Context = mx.Context("cpu"),
    float_type: DType = np.float64,
    max_iter_jitter: int = 10,
    neg_tol: float = -1e-8,
    diag_weight: float = 1e-6,
    increase_jitter: int = 10,
) -> Optional[Tensor]:
    """
    This function applies the jitter method. It iteratively tries to compute the Cholesky decomposition and
    adds a positive tolerance to the diagonal that increases at each iteration until the matrix is positive definite
    or the maximum number of iterations has been reached.
    Parameters
    ----------
    matrix
        Kernel matrix of shape (batch_size, num_data_points, num_data_points).
    num_data_points
        Number of rows in the kernel_matrix.
    ctx
        Determines whether to compute on the cpu or gpu.
    float_type
        Determines whether to use single or double precision.
    max_iter_jitter
        Maximum number of iterations for jitter to iteratively make the matrix positive definite.
    neg_tol
        Parameter in the jitter methods to eliminate matrices with diagonal elements smaller than this
        when checking if a matrix is positive definite.
    diag_weight
        Multiple of mean of diagonal entries to initialize the jitter.
    increase_jitter
        Each iteration multiply the jitter by this amount.
    Returns
    -------
    Optional[Tensor]
        The method either fails to make the matrix positive definite within the maximum number of iterations
        and raises an error, or succeeds and returns the lower triangular Cholesky factor `L`
        of shape (batch_size, num_data_points, num_data_points).
    Raises
    ------
    mx.base.MXNetError
        If the matrix has negative diagonal elements, or is still not
        positive definite after `max_iter_jitter` iterations.
    """
    num_iter = 0
    diag = batch_diagonal(
        F, matrix, num_data_points, ctx, float_type
    )  # shape (batch_size, num_data_points, 1)
    diag_mean = diag.mean(axis=1).expand_dims(
        axis=2
    )  # shape (batch_size, 1, 1)
    jitter = F.zeros_like(diag)  # shape (batch_size, num_data_points, 1)
    # Ensure that diagonal entries are numerically non-negative, as defined by neg_tol
    # TODO: Add support for symbolic case: Cannot use < operator with symbolic variables
    if F.sum(diag <= neg_tol) > 0:
        raise mx.base.MXNetError(
            " Matrix is not positive definite: negative diagonal elements"
        )
    while num_iter <= max_iter_jitter:
        try:
            L = F.linalg.potrf(
                F.broadcast_add(
                    matrix,
                    F.broadcast_mul(
                        F.eye(num_data_points, ctx=ctx, dtype=float_type),
                        jitter,
                    ),
                )
            )
            # gpu will not throw error but will store nans. If nan, L.sum() = nan and
            # L.nansum() computes the sum treating nans as zeros so the error tolerance can be large.
            # for axis = Null, nansum() and sum() will sum over all elements and return scalar array with shape (1,)
            # TODO: Add support for symbolic case: Cannot use <= operator with symbolic variables
            assert F.abs(L.nansum() - L.sum()) <= 1e-1
            return L
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit; only genuine failures of the
        # factorization (MXNetError) or the nan-check (AssertionError)
        # should trigger another jitter iteration.
        except Exception:
            if num_iter == 0:
                # Initialize the jitter: constant jitter per each batch
                jitter = (
                    F.broadcast_mul(diag_mean, F.ones_like(jitter))
                    * diag_weight
                )
            else:
                jitter = jitter * increase_jitter
        finally:
            num_iter += 1
    raise mx.base.MXNetError(
        f" Matrix is not positive definite after the maximum number of iterations = {max_iter_jitter} "
        f"with a maximum jitter = {F.max(jitter)}"
    )
| 8,278 | 33.932489 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/support/util.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import inspect
import os
import signal
import tempfile
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, cast
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.core.serde import dump_json, load_json
from gluonts.model.common import Tensor
MXNET_HAS_ERF = hasattr(mx.nd, "erf")
MXNET_HAS_ERFINV = hasattr(mx.nd, "erfinv")
class Timer:
    """Context manager for measuring the time of enclosed code fragments.

    On exit, `self.interval` holds the elapsed wall-clock time in seconds.
    """

    def __enter__(self):
        # BUGFIX: time.clock() was deprecated since Python 3.3 and removed
        # in Python 3.8; perf_counter() is the recommended monotonic,
        # high-resolution replacement for interval measurement.
        self.start = time.perf_counter()
        self.interval = None
        return self

    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start
class SignalHandler:
    """
    A context manager that attaches a set of signal handlers within its scope
    and restores the previously-installed handlers on exit.
    Parameters
    ----------
    handlers_map
        A dictionary mapping signal numbers to associated signal handlers to
        be attached within the scope of the enclosing `SignalHandler` instance.
    """
    Callback = Optional[Callable[[int, Any], None]]
    def __init__(self, handlers_map: Dict[int, Callback]) -> None:
        self.handlers_map = handlers_map
    def __enter__(self):
        # Install the requested handlers, remembering the previous ones so
        # they can be restored on exit.
        self.default_handlers = {}
        for signum, handler in self.handlers_map.items():
            self.default_handlers[signum] = signal.signal(signum, handler)
        return self
    def __exit__(self, *args):
        # Restore the handlers that were active before entering the context.
        for signum, previous_handler in self.default_handlers.items():
            signal.signal(signum, previous_handler)
class HybridContext:
    """
    A context manager that ensures that an MXNet network is operating in a
    hybridized / not hybridized mode, restoring the original mode on exit.
    Parameters
    ----------
    net
        The network whose hybrid mode has to be modified within the enclosing
        context.
    hybridize
        A boolean flag indicating whether the hybrid mode should be set or
        not.
    data_batch
        Optional batch of arrays; if given, a forward pass is run on entry
        so that the symbolic graph is actually built.
    kwargs
        A dictionary of optional arguments to pass to the `hybridize()` call
        of the enclosed `HybridBlock` network.
    """
    def __init__(
        self,
        net: mx.gluon.HybridBlock,
        hybridize: bool,
        data_batch: Optional[List[mx.nd.NDArray]] = None,
        **kwargs,
    ) -> None:
        self.net = net
        self.required_mode = hybridize
        # Remember the network's current hybridization state so it can be
        # restored when leaving the context; `_active` is gluon's internal
        # flag and may be absent, hence the default.
        self.original_mode = getattr(net, "_active", False)
        self.data_batch = data_batch
        self.kwargs = kwargs
    def __enter__(self):
        self.net.hybridize(active=self.required_mode, **self.kwargs)
        # Trigger one forward pass so the symbolic graph gets built.
        if self.data_batch is not None:
            self.net(*self.data_batch)
    def __exit__(self, *args):
        # Restore the hybridization mode found on entry.
        self.net.hybridize(active=self.original_mode, **self.kwargs)
def copy_parameters(
    net_source: mx.gluon.Block,
    net_dest: mx.gluon.Block,
    ignore_extra: bool = False,
    allow_missing: bool = False,
) -> None:
    """
    Copies parameters from one network to another by saving them to a
    temporary file and loading them back into the destination network.
    Parameters
    ----------
    net_source
        Input network.
    net_dest
        Output network.
    ignore_extra
        Whether to ignore parameters from the source that are not
        present in the target.
    allow_missing
        Whether to allow additional parameters in the target not
        present in the source.
    """
    with tempfile.TemporaryDirectory(
        prefix="gluonts-estimator-temp-"
    ) as model_dir:
        params_file = str(Path(model_dir) / "tmp_model")
        net_source.save_parameters(params_file)
        net_dest.load_parameters(
            params_file,
            ctx=mx.current_context(),
            allow_missing=allow_missing,
            ignore_extra=ignore_extra,
        )
def get_hybrid_forward_input_names(hb: mx.gluon.HybridBlock):
    # Return the argument names of `hb.hybrid_forward`, excluding the
    # mandatory leading `F` (the Symbol/NDArray namespace argument).
    arg_names = list(inspect.signature(hb.hybrid_forward).parameters)
    first = arg_names[0]
    assert first == "F", (
        f"Expected first argument of HybridBlock to be `F`, "
        f"but found `{first}`"
    )
    return arg_names[1:]  # skip: F
def hybrid_block_to_symbol_block(
    hb: mx.gluon.HybridBlock, data_batch: List[mx.nd.NDArray]
) -> mx.gluon.SymbolBlock:
    """
    Converts a Gluon `HybridBlock` to a `SymbolBlock`. Following the Gluon API,
    this is achieved by a `hybridize()` call on the passed `HybridBlock`, a
    single forward pass (using the provided data batch), and a combination of
    an `export()` and an `import()` calls of the input block.
    Note that MXNet has `problems with this method
    <https://github.com/apache/incubator-mxnet/issues/12783>`_.
    Parameters
    ----------
    hb
        The Gluon `HybridBlock` to convert.
    data_batch
        Data to use for the forward pass after the `hybridize()` call.
    Returns
    -------
    mx.gluon.SymbolBlock
        The resulting Gluon block backed by an MXNet symbol graph.
    """
    with tempfile.TemporaryDirectory(
        prefix="gluonts-estimator-temp-"
    ) as model_dir:
        model_dir_path = Path(model_dir)
        model_name = "gluonts-model"
        # Hybridize, run one forward pass, and serialize the symbol graph.
        with HybridContext(
            net=hb,
            hybridize=True,
            data_batch=data_batch,
            static_alloc=True,
            static_shape=True,
        ):
            export_symb_block(hb, model_dir_path, model_name)
        # Re-import the serialized graph as a SymbolBlock.
        return import_symb_block(len(data_batch), model_dir_path, model_name)
def export_symb_block(
    hb: mx.gluon.HybridBlock, model_dir: Path, model_name: str, epoch: int = 0
) -> None:
    """
    Serializes a hybridized Gluon `HybridBlock`.
    Parameters
    ----------
    hb
        The block to export.
    model_dir
        The path where the model will be saved.
    model_name
        The name identifying the model.
    epoch
        The epoch number, which together with the `model_name` identifies the
        model parameters.
    """
    # `export` writes "<prefix>-symbol.json" and "<prefix>-<epoch>.params".
    export_prefix = model_dir / model_name
    hb.export(path=str(export_prefix), epoch=epoch)
def import_symb_block(
    num_inputs: int, model_dir: Path, model_name: str, epoch: int = 0
) -> mx.gluon.SymbolBlock:
    """
    Deserializes a hybridized Gluon `HybridBlock` as a `SymbolBlock`.
    Parameters
    ----------
    num_inputs
        The number of inputs of the serialized block.
    model_dir
        The path where the model is saved.
    model_name
        The name identifying the model.
    epoch
        The epoch number, which together with the `model_name` identifies the
        model parameters.
    Returns
    -------
    mx.gluon.SymbolBlock
        The deserialized block.
    """
    # A single-input graph uses the plain "data" name; multi-input graphs
    # use "data0", "data1", ...
    input_names = (
        ["data"]
        if num_inputs == 1
        else [f"data{i}" for i in range(num_inputs)]
    )
    # FIXME: mx.gluon.SymbolBlock cannot infer float_type and uses default np.float32
    # FIXME: https://github.com/apache/incubator-mxnet/issues/11849
    return mx.gluon.SymbolBlock.imports(
        symbol_file=str(model_dir / f"{model_name}-symbol.json"),
        input_names=input_names,
        param_file=str(model_dir / f"{model_name}-{epoch:04}.params"),
        ctx=mx.current_context(),
    )
def export_repr_block(
    rb: mx.gluon.HybridBlock, model_dir: Path, model_name: str, epoch: int = 0
) -> None:
    """
    Serializes a representable Gluon block.
    Parameters
    ----------
    rb
        The block to export.
    model_dir
        The path where the model will be saved.
    model_name
        The name identifying the model.
    epoch
        The epoch number, which together with the `model_name` identifies the
        model parameters.
    """
    # Write the JSON representation of the network (newline-terminated),
    # then the parameters alongside it.
    network_file = model_dir / f"{model_name}-network.json"
    with network_file.open("w") as fp:
        fp.write(dump_json(rb) + "\n")
    rb.save_parameters(str(model_dir / f"{model_name}-{epoch:04}.params"))
def import_repr_block(
    model_dir: Path, model_name: str, epoch: int = 0
) -> mx.gluon.HybridBlock:
    """
    Deserializes a representable Gluon block.
    Parameters
    ----------
    model_dir
        The path where the model is saved.
    model_name
        The name identifying the model.
    epoch
        The epoch number, which together with the `model_name` identifies the
        model parameters.
    Returns
    -------
    mx.gluon.HybridBlock:
        The deserialized block.
    """
    # Rebuild the network structure from its JSON representation ...
    network_file = model_dir / f"{model_name}-network.json"
    with network_file.open("r") as fp:
        rb = cast(mx.gluon.HybridBlock, load_json(fp.read()))
    # ... then load the matching parameters into it.
    rb.load_parameters(
        str(model_dir / f"{model_name}-{epoch:04}.params"),
        ctx=mx.current_context(),
        allow_missing=False,
        ignore_extra=False,
    )
    return rb
def cumsum(
    F, x: Tensor, exclusive: bool = False, reverse: bool = False
) -> Tensor:
    r"""
    Find cumulative sum on the last axis by multiplying with lower triangular
    ones-matrix:
    .. math::
       \operatorname{cumsum}(x) =
       \begin{cases}
         \operatorname{ltr\_ones} \times x
              & \text{for cumulative sum}\\
         x \times \operatorname{ltr\_ones}
              & \text{for cumulative sum in the reverse order}
       \end{cases}
    Also supports `exclusive` flag to start the cumsum with zero.
    For example, if :math:`x = [a, b, c]`, we have
    .. math::
       \operatorname{cumsum}(x) =
       \begin{cases}
         [a, a + b, a + b + c]
              & \text{if }\mathit{reverse = False, exclusive = False}\\
         [0, a, a + b]
              & \text{if }\mathit{reverse = False, exclusive = True}\\
         [a + b + c, b + c, c]
              & \text{if }\mathit{reverse = True, exclusive = False}\\
         [b + c, c, 0]
              & \text{if }\mathit{reverse = True, exclusive = True}\\
       \end{cases}
    Parameters
    ----------
    F
        The function space to use.
    x
        A tensor with shape :math:`(..., n)`.
    exclusive
        If `True`, the cumulative sum starts with zero.
    reverse
        If `True`, the cumulative sum is performed in the opposite direction.
    Returns
    -------
    Tensor:
        A modified tensor with identical shape and cumulative sums in the last
        axis.
    """
    # Insert a singleton axis for the matrix multiplication: last position
    # in the forward case, last-but-one in the reverse case.
    expand_axis = -2 if reverse else -1
    # (..., 1, n) if reverse is True and (..., n, 1) otherwise
    x = x.expand_dims(axis=expand_axis)
    # All-ones matrix of shape (..., n, n); the triangular multiply below
    # only touches its (lower) triangular part.
    ones_matrix = F.linalg_gemm2(
        F.ones_like(x),
        F.ones_like(x),
        transpose_a=reverse,
        transpose_b=not reverse,
    )
    result = F.linalg_trmm(ones_matrix, x, rightside=reverse)
    if exclusive:
        # Shift by one position by subtracting the original values.
        result = result - x
    return result.squeeze(axis=expand_axis)
def weighted_average(
    F, x: Tensor, weights: Optional[Tensor] = None, axis=None
) -> Tensor:
    """
    Computes the weighted average of a given tensor across a given axis.
    Parameters
    ----------
    F
        The function space to use.
    x
        Input tensor, of which the average must be computed.
    weights
        Weights tensor, of the same shape as `x`. If omitted, a plain
        (unweighted) mean is returned.
    axis
        The axis along which to average `x`
    Returns
    -------
    Tensor:
        The tensor with values averaged along the specified `axis`.
    """
    if weights is None:
        return x.mean(axis=axis)
    weighted_sum = (x * weights).sum(axis=axis)
    # Clamp the normalizer at 1 so that an all-zero weight slice yields 0
    # rather than dividing by zero.
    normalizer = F.maximum(1.0, weights.sum(axis=axis))
    return weighted_sum / normalizer
def make_nd_diag(F, x: Tensor, d: int) -> Tensor:
    """
    Make a diagonal tensor, given the diagonal
    Parameters
    ----------
    F
        The function space to use.
    x
        Diagonal to use, shape :math:`(..., d)`.
    d
        Last dimension of `x`.
    Returns
    -------
    Tensor
        A tensor y of shape :math:`(..., d, d)` such that
        :math:`y[..., i, i] = x[..., i]`.
    """
    # Broadcasting the (..., d, 1) column of diagonal entries against the
    # (d, d) identity zeroes every off-diagonal element and places x on
    # the diagonal.
    identity = F.eye(d)
    diag_as_column = x.expand_dims(axis=-1)
    return F.broadcast_mul(identity, diag_as_column)
def _broadcast_param(param, axes, sizes):
for axis, size in zip(axes, sizes):
param = param.expand_dims(axis=axis).broadcast_axes(
axis=axis, size=size
)
return param
def erf(F, x: Tensor):
    """
    Elementwise error function.

    Uses the native operator when the installed MXNet provides one
    (``MXNET_HAS_ERF`` is a module-level flag defined elsewhere in this
    file); otherwise falls back to the Numerical Recipes polynomial
    approximation, accurate to about 1e-7 per the original comment.
    """
    if MXNET_HAS_ERF:
        return F.erf(x)
    # Using numerical recipes approximation for erf function
    # accurate to 1E-7
    ones = x.ones_like()
    zeros = x.zeros_like()
    # Rational transform of |x| used by the approximation.
    t = ones / (ones + 0.5 * x.abs())
    coefficients = [
        1.00002368,
        0.37409196,
        0.09678418,
        -0.18628806,
        0.27886807,
        -1.13520398,
        1.48851587,
        -0.82215223,
        0.17087277,
    ]
    # Horner's scheme in t, iterating from the highest-order coefficient.
    inner = zeros
    for c in coefficients[::-1]:
        inner = t * (c + inner)
    res = ones - t * (inner - 1.26551223 - x.square()).exp()
    # erf is an odd function: mirror the sign for negative inputs.
    return F.where(F.broadcast_greater_equal(x, zeros), res, -1.0 * res)
def erfinv(F, x: Tensor) -> Tensor:
    """
    Elementwise inverse error function.

    Uses the native operator when available (``MXNET_HAS_ERFINV`` is a
    module-level flag defined elsewhere in this file); otherwise evaluates
    a two-branch polynomial approximation, selecting the coefficient set
    per element. The coefficients appear to follow M. Giles' erfinv
    approximation — unverified.
    """
    if MXNET_HAS_ERFINV:
        return F.erfinv(x)
    zeros = x.zeros_like()
    # w = -log(1 - x^2); large w corresponds to x near the +/-1 tails.
    w = -F.log(F.broadcast_mul((1.0 - x), (1.0 + x)))
    mask_lesser = F.broadcast_lesser(w, zeros + 5.0)
    # Re-center w differently in the central (w < 5) and tail branches.
    w = F.where(mask_lesser, w - 2.5, F.sqrt(w) - 3.0)
    coefficients_lesser = [
        2.81022636e-08,
        3.43273939e-07,
        -3.5233877e-06,
        -4.39150654e-06,
        0.00021858087,
        -0.00125372503,
        -0.00417768164,
        0.246640727,
        1.50140941,
    ]
    coefficients_greater_equal = [
        -0.000200214257,
        0.000100950558,
        0.00134934322,
        -0.00367342844,
        0.00573950773,
        -0.0076224613,
        0.00943887047,
        1.00167406,
        2.83297682,
    ]
    # Horner's scheme in w with the coefficient chosen per element
    # according to the branch mask.
    p = F.where(
        mask_lesser,
        coefficients_lesser[0] + zeros,
        coefficients_greater_equal[0] + zeros,
    )
    for c_l, c_ge in zip(
        coefficients_lesser[1:], coefficients_greater_equal[1:]
    ):
        c = F.where(mask_lesser, c_l + zeros, c_ge + zeros)
        p = c + F.broadcast_mul(p, w)
    # Final multiplication by x restores the odd symmetry of erfinv.
    return F.broadcast_mul(p, x)
def get_download_path() -> Path:
    """
    Returns
    -------
    Path
        default path to download datasets or models of gluon-ts.
        The path is either $MXNET_HOME if the environment variable is defined or
        /home/username/.mxnet/gluon-ts/
    """
    fallback = Path.home() / ".mxnet" / "gluon-ts"
    return Path(os.environ.get("MXNET_HOME", str(fallback)))
def map_dct_values(fn: Callable, dct: dict) -> dict:
    """Return a new dict with `fn` applied to every value of `dct`."""
    return dict((key, fn(val)) for key, val in dct.items())
| 15,017 | 26.255898 | 85 | py |
rankpredictor | rankpredictor-master/sub/gluonts/gp/gaussian_process.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.core.component import DType
from gluonts.distribution import MultivariateGaussian
from gluonts.distribution.distribution import getF
from gluonts.kernels import Kernel
from gluonts.model.common import Tensor
from gluonts.support.linalg_util import (
batch_diagonal,
jitter_cholesky,
jitter_cholesky_eig,
)
class GaussianProcess:
    """
    Exact Gaussian-process model with a parametric kernel and i.i.d.
    Gaussian observation noise. Provides marginal log-likelihood
    evaluation (`log_prob`), posterior inference (`exact_inference`),
    sampling (`sample`) and a plotting helper (`plot`).
    """
    # noinspection PyMethodOverriding, PyPep8Naming
    def __init__(
        self,
        sigma: Tensor,
        kernel: Kernel,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        num_samples: Optional[int] = None,
        ctx: mx.Context = mx.Context("cpu"),
        float_type: DType = np.float64,
        jitter_method: str = "iter",
        max_iter_jitter: int = 10,
        neg_tol: float = -1e-8,
        diag_weight: float = 1e-6,
        increase_jitter: int = 10,
        sample_noise: bool = True,
        F=None,
    ) -> None:
        r"""
        Parameters
        ----------
        sigma
            Noise parameter of shape (batch_size, num_data_points, 1),
            where num_data_points is the number of rows in the Cholesky matrix.
        kernel
            Kernel object.
        prediction_length
            Prediction length.
        context_length
            Training length.
        num_samples
            The number of samples to be drawn.
        ctx
            Determines whether to compute on the cpu or gpu.
        float_type
            Determines whether to use single or double precision.
        jitter_method
            Iteratively jitter method or use eigenvalue decomposition depending on problem size.
        max_iter_jitter
            Maximum number of iterations for jitter to iteratively make the matrix positive definite.
        neg_tol
            Parameter in the jitter methods to eliminate matrices with diagonal elements smaller than this
            when checking if a matrix is positive definite.
        diag_weight
            Multiple of mean of diagonal entries to initialize the jitter.
        increase_jitter
            Each iteration multiply by jitter by this amount
        sample_noise
            Boolean to determine whether to add :math:`\sigma^2I` to the predictive covariance matrix.
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        """
        assert (
            prediction_length is None or prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert (
            num_samples is None or num_samples > 0
        ), "The value of `num_samples` should be > 0"
        self.sigma = sigma
        self.kernel = kernel
        self.prediction_length = prediction_length
        # If no training length is given, fall back to the prediction length.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.num_samples = num_samples
        # Infer the MXNet function space (nd vs sym) from sigma when not given.
        self.F = F if F else getF(sigma)
        self.ctx = ctx
        self.float_type = float_type
        self.jitter_method = jitter_method
        self.max_iter_jitter = max_iter_jitter
        self.neg_tol = neg_tol
        self.diag_weight = diag_weight
        self.increase_jitter = increase_jitter
        self.sample_noise = sample_noise
    # noinspection PyMethodOverriding,PyPep8Naming
    def _compute_cholesky_gp(
        self,
        kernel_matrix: Tensor,
        num_data_points: Optional[int] = None,
        noise: bool = True,
    ) -> Tensor:
        r"""
        Parameters
        --------------------
        kernel_matrix
            Kernel matrix of shape (batch_size, num_data_points, num_data_points).
        num_data_points
            Number of rows in the kernel_matrix.
        noise
            Boolean to determine whether to add :math:`\sigma^2I` to the kernel matrix.
            This is used in the predictive step if you would like to sample the predictive
            covariance matrix without noise. It is set to True in every other case.
        Returns
        --------------------
        Tensor
            Cholesky factor :math:`L` of the kernel matrix with added noise :math:`LL^T = K + \sigma^2 I`
            of shape (batch_size, num_data_points, num_data_points).
        """
        if noise:  # Add sigma
            kernel_matrix = self.F.broadcast_plus(
                kernel_matrix,
                self.F.broadcast_mul(
                    self.sigma ** 2,
                    self.F.eye(
                        num_data_points, ctx=self.ctx, dtype=self.float_type
                    ),
                ),
            )
        # Warning: This method is more expensive than the iterative jitter
        # but it works for mx.sym
        if self.jitter_method == "eig":
            return jitter_cholesky_eig(
                self.F,
                kernel_matrix,
                num_data_points,
                self.ctx,
                self.float_type,
                self.diag_weight,
            )
        elif self.jitter_method == "iter" and self.F is mx.nd:
            return jitter_cholesky(
                self.F,
                kernel_matrix,
                num_data_points,
                self.ctx,
                self.float_type,
                self.max_iter_jitter,
                self.neg_tol,
                self.diag_weight,
                self.increase_jitter,
            )
        else:
            # No jittering possible/requested: plain Cholesky factorization.
            return self.F.linalg.potrf(kernel_matrix)
    def log_prob(self, x_train: Tensor, y_train: Tensor) -> Tensor:
        r"""
        This method computes the negative marginal log likelihood

        .. math::
            :nowrap:

            \begin{aligned}
                \frac{1}{2} [d \log(2\pi) + \log(|K|) + y^TK^{-1}y],
            \end{aligned}

        where :math:`d` is the number of data points.
        This can be written in terms of the Cholesky factor :math:`L` as

        .. math::
            :nowrap:

            \begin{aligned}
                \log(|K|) = \log(|LL^T|) &= \log(|L||L|^T) = \log(|L|^2) = 2\log(|L|) \\
                &= 2\log\big(\prod_i^n L_{ii}\big) = 2 \sum_i^N \log(L_{ii})
            \end{aligned}

        and

        .. math::
            :nowrap:

            \begin{aligned}
                y^TK^{-1}y = (y^TL^{-T})(L^{-1}y) = (L^{-1}y)^T(L^{-1}y) = ||L^{-1}y||_2^2.
            \end{aligned}

        Parameters
        --------------------
        x_train
            Training set of features of shape (batch_size, context_length, num_features).
        y_train
            Training labels of shape (batch_size, context_length).
        Returns
        --------------------
        Tensor
            The negative log marginal likelihood of shape (batch_size,)
        """
        assert (
            self.context_length is not None
        ), "The value of `context_length` must be set."
        # NOTE(review): despite the method name, the leading minus sign means
        # this returns the *negative* log likelihood, as the docstring states.
        return -MultivariateGaussian(
            self.F.zeros_like(y_train),  # 0 mean gaussian process prior
            self._compute_cholesky_gp(
                self.kernel.kernel_matrix(x_train, x_train),
                self.context_length,
            ),
        ).log_prob(y_train)
    def sample(self, mean: Tensor, covariance: Tensor) -> Tensor:
        r"""
        Parameters
        ----------
        covariance
            The covariance matrix of the GP of shape (batch_size, prediction_length, prediction_length).
        mean
            The mean vector of the GP of shape (batch_size, prediction_length).
        Returns
        -------
        Tensor
            Samples from a Gaussian Process of shape (batch_size, prediction_length, num_samples), where :math:`L`
            is the matrix square root, Cholesky Factor of the covariance matrix with the added noise tolerance on the
            diagonal, :math:`Lz`, where :math:`z \sim N(0,I)` and assumes the mean is zero.
        """
        assert (
            self.num_samples is not None
        ), "The value of `num_samples` must be set."
        assert (
            self.prediction_length is not None
        ), "The value of `prediction_length` must be set."
        samples = MultivariateGaussian(
            mean,
            self._compute_cholesky_gp(
                covariance, self.prediction_length, self.sample_noise
            ),
        ).sample_rep(
            self.num_samples, dtype=self.float_type
        )  # Shape (num_samples, batch_size, prediction_length)
        # Move the sample axis last:
        # (num_samples, batch, pred_len) -> (batch, pred_len, num_samples).
        return self.F.transpose(samples, axes=(1, 2, 0))
    # noinspection PyMethodOverriding,PyPep8Naming
    def exact_inference(
        self, x_train: Tensor, y_train: Tensor, x_test: Tensor
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Parameters
        ----------
        x_train
            Training set of features of shape (batch_size, context_length, num_features).
        y_train
            Training labels of shape (batch_size, context_length).
        x_test
            Test set of features of shape (batch_size, prediction_length, num_features).
        Returns
        -------
        Tuple
            Tensor
                Predictive GP samples of shape (batch_size, prediction_length, num_samples).
            Tensor
                Predictive mean of the GP of shape (batch_size, prediction_length).
            Tensor
                Predictive standard deviation of the GP of shape (batch_size, prediction_length).
        """
        assert (
            self.context_length is not None
        ), "The value of `context_length` must be set."
        assert (
            self.prediction_length is not None
        ), "The value of `prediction_length` must be set."
        # Compute Cholesky factorization of training kernel matrix
        l_train = self._compute_cholesky_gp(
            self.kernel.kernel_matrix(x_train, x_train), self.context_length
        )
        # Solve L * A = K(train, test) for A, used for both mean and covariance.
        lower_tri_solve = self.F.linalg.trsm(
            l_train, self.kernel.kernel_matrix(x_train, x_test)
        )
        predictive_mean = self.F.linalg.gemm2(
            lower_tri_solve,
            self.F.linalg.trsm(l_train, y_train.expand_dims(axis=-1)),
            transpose_a=True,
        ).squeeze(axis=-1)
        # Can rewrite second term as
        # :math:`||L^-1 * K(x_train,x_test||_2^2`
        # and only solve 1 equation
        predictive_covariance = self.kernel.kernel_matrix(
            x_test, x_test
        ) - self.F.linalg.gemm2(
            lower_tri_solve, lower_tri_solve, transpose_a=True
        )
        # Extract diagonal entries of covariance matrix
        # (at this point `predictive_std` actually holds the *variance*;
        # the sqrt below converts it to a standard deviation).
        predictive_std = batch_diagonal(
            self.F,
            predictive_covariance,
            self.prediction_length,
            self.ctx,
            self.float_type,
        )
        # If self.sample_noise = True, predictive covariance has sigma^2 on the diagonal
        if self.sample_noise:
            predictive_std = self.F.broadcast_add(
                predictive_std, self.sigma ** 2
            )
        predictive_std = self.F.sqrt(predictive_std).squeeze(axis=-1)
        # Compute sample from GP predictive distribution
        return (
            self.sample(predictive_mean, predictive_covariance),
            predictive_mean,
            predictive_std,
        )
    @staticmethod
    def plot(
        ts_idx: int,
        x_train: Optional[Tensor] = None,
        y_train: Optional[Tensor] = None,
        x_test: Optional[Tensor] = None,
        mean: Optional[Tensor] = None,
        std: Optional[Tensor] = None,
        samples: Optional[Tensor] = None,
        axis: Optional[List] = None,
    ) -> None:
        """
        This method plots the sampled GP distribution at the test points in solid colors, as well as the predictive
        mean as the dashed red line. Plus and minus 2 predictive standard deviations are shown in the grey region.
        The training points are shown as the blue dots.
        Parameters
        ----------
        ts_idx
            Time series index to plot
        x_train
            Training set of features of shape (batch_size, context_length, num_features).
        y_train
            Training labels of shape (batch_size, context_length).
        x_test
            Test set of features of shape (batch_size, prediction_length, num_features).
        mean
            Mean of the GP of shape (batch_size, prediction_length).
        std
            Standard deviation of the GP of shape (batch_size, prediction_length, 1).
        samples
            GP samples of shape (batch_size, prediction_length, num_samples).
        axis
            Plot axes limits
        """
        # matplotlib==2.0.* gives errors in Brazil builds and has to be
        # imported locally
        import matplotlib.pyplot as plt
        # NOTE(review): the branches below assume `x_train` is non-None when
        # `y_train` is given, and `x_test`/`mean` are non-None when `samples`
        # or `std` are given — confirm at call sites.
        if x_train is not None:
            x_train = x_train[ts_idx, :, :].asnumpy()
        if y_train is not None:
            y_train = y_train[ts_idx, :].asnumpy()
            plt.plot(x_train, y_train, "bs", ms=8)
        if x_test is not None:
            x_test = x_test[ts_idx, :, :].asnumpy()
        if samples is not None:
            samples = samples[ts_idx, :, :].asnumpy()
            plt.plot(x_test, samples)
        if mean is not None:
            mean = mean[ts_idx, :].asnumpy()
            plt.plot(x_test, mean, "r--", lw=2)
        if std is not None:
            std = std[ts_idx, :].asnumpy()
            plt.gca().fill_between(
                x_test.flat,
                mean - 2 * std,
                mean + 2 * std,
                color="#dddddd",
            )
        if axis is not None:
            plt.axis(axis)
        plt.title(f"Samples from GP for time series {ts_idx}")
        plt.show()
| 14,687 | 36.279188 | 117 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/forecast-raw.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import re
from enum import Enum
from typing import Dict, List, NamedTuple, Optional, Set, Union, Callable
# Third-party imports
import mxnet as mx
import numpy as np
import pandas as pd
import pydantic
# First-party imports
from gluonts.core.exception import GluonTSUserError
from gluonts.distribution import Distribution
from gluonts.core.component import validated
class Quantile(NamedTuple):
    """
    A quantile level together with its canonical string name.

    `value` is the numeric level in [0, 1]; `name` is the string used to
    build metric keys such as ``QuantileLoss[0.5]``.
    """

    value: float
    name: str

    @property
    def loss_name(self):
        return f"QuantileLoss[{self.name}]"

    @property
    def weighted_loss_name(self):
        return f"wQuantileLoss[{self.name}]"

    @property
    def coverage_name(self):
        return f"Coverage[{self.name}]"

    @classmethod
    def checked(cls, value: float, name: str) -> "Quantile":
        # Reject levels outside the unit interval before constructing.
        if 0 <= value <= 1:
            return Quantile(value, name)
        raise GluonTSUserError(
            f"quantile value should be in [0, 1] but found {value}"
        )

    @classmethod
    def from_float(cls, quantile: float) -> "Quantile":
        assert isinstance(quantile, float)
        return cls.checked(value=quantile, name=str(quantile))

    @classmethod
    def from_str(cls, quantile: str) -> "Quantile":
        assert isinstance(quantile, str)
        try:
            # Decimal strings like "0.20" keep their original spelling as
            # the name.
            return cls.checked(value=float(quantile), name=quantile)
        except ValueError:
            # Fall back to the "pXX" percentile notation.
            match = re.match(r"^p(\d{2})$", quantile)
            if match:
                level = int(match.group(1)) / 100
                return cls(value=level, name=str(level))
            raise GluonTSUserError(
                "Quantile string should be of the form "
                f'"p10", "p50", ... or "0.1", "0.5", ... but found {quantile}'
            )

    @classmethod
    def parse(cls, quantile: Union["Quantile", float, str]) -> "Quantile":
        """Produces equivalent float and string representation of a given
        quantile level.
        >>> Quantile.parse(0.1)
        Quantile(value=0.1, name='0.1')
        >>> Quantile.parse('0.2')
        Quantile(value=0.2, name='0.2')
        >>> Quantile.parse('0.20')
        Quantile(value=0.2, name='0.20')
        >>> Quantile.parse('p99')
        Quantile(value=0.99, name='0.99')
        Parameters
        ----------
        quantile
            Quantile, can be a float a str representing a float e.g. '0.1' or a
            quantile string of the form 'p0.1'.
        Returns
        -------
        Quantile
            A tuple containing both a float and a string representation of the
            input quantile level.
        """
        if isinstance(quantile, Quantile):
            return quantile
        if isinstance(quantile, float):
            return cls.from_float(quantile)
        return cls.from_str(quantile)
class Forecast:
    """
    An abstract class representing predictions.
    """
    # Attributes expected to be provided by concrete subclasses:
    start_date: pd.Timestamp  # first timestamp of the forecast horizon
    freq: str  # pandas frequency string of the forecast
    item_id: Optional[str]  # identifier of the forecasted item, if any
    info: Optional[Dict]  # free-form metadata from the forecaster
    prediction_length: int  # number of predicted time steps
    mean: np.ndarray  # point (mean) forecast per time step
    _index = None  # lazily-built DatetimeIndex cache (see `index`)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """
        Computes a quantile from the predicted distribution.
        Parameters
        ----------
        q
            Quantile to compute.
        Returns
        -------
        numpy.ndarray
            Value of the quantile across the prediction range.
        """
        raise NotImplementedError()
    @property
    def median(self) -> np.ndarray:
        """Median (0.5-quantile) of the forecast."""
        return self.quantile(0.5)
    def plot(
        self,
        prediction_intervals=(50.0, 90.0),
        show_mean=False,
        color="b",
        label=None,
        output_file=None,
        *args,
        **kwargs,
    ):
        """
        Plots the median of the forecast as well as confidence bounds.
        (requires matplotlib and pandas).
        Parameters
        ----------
        prediction_intervals : float or list of floats in [0, 100]
            Confidence interval size(s). If a list, it will stack the error
            plots for each confidence interval. Only relevant for error styles
            with "ci" in the name.
        show_mean : boolean
            Whether to also show the mean of the forecast.
        color : matplotlib color name or dictionary
            The color used for plotting the forecast.
        label : string
            A label (prefix) that is used for the forecast
        output_file : str or None, default None
            Output path for the plot file. If None, plot is not saved to file.
        args :
            Other arguments are passed to main plot() call
        kwargs :
            Other keyword arguments are passed to main plot() call
        """
        # matplotlib==2.0.* gives errors in Brazil builds and has to be
        # imported locally
        import matplotlib.pyplot as plt
        label_prefix = "" if label is None else label + "-"
        for c in prediction_intervals:
            assert 0.0 <= c <= 100.0
        # Convert each interval width c into its two bounding percentiles
        # (50 -/+ c/2), plus the median itself.
        ps = [50.0] + [
            50.0 + f * c / 2.0
            for c in prediction_intervals
            for f in [-1.0, +1.0]
        ]
        percentiles_sorted = sorted(set(ps))
        def alpha_for_percentile(p):
            # Lower percentiles (wider intervals) get a fainter shade.
            return (p / 100.0) ** 0.3
        ps_data = [self.quantile(p / 100.0) for p in percentiles_sorted]
        i_p50 = len(percentiles_sorted) // 2
        p50_data = ps_data[i_p50]
        p50_series = pd.Series(data=p50_data, index=self.index)
        p50_series.plot(color=color, ls="-", label=f"{label_prefix}median")
        if show_mean:
            # NOTE(review): `_sorted_samples` is defined by SampleForecast,
            # not on this base class — `show_mean=True` assumes a
            # sample-based subclass. Confirm intended.
            mean_data = np.mean(self._sorted_samples, axis=0)
            pd.Series(data=mean_data, index=self.index).plot(
                color=color,
                ls=":",
                label=f"{label_prefix}mean",
                *args,
                **kwargs,
            )
        # Shade each interval between its symmetric lower/upper percentiles.
        for i in range(len(percentiles_sorted) // 2):
            ptile = percentiles_sorted[i]
            alpha = alpha_for_percentile(ptile)
            plt.fill_between(
                self.index,
                ps_data[i],
                ps_data[-i - 1],
                facecolor=color,
                alpha=alpha,
                interpolate=True,
                *args,
                **kwargs,
            )
            # Hack to create labels for the error intervals.
            # Doesn't actually plot anything, because we only pass a single data point
            pd.Series(data=p50_data[:1], index=self.index[:1]).plot(
                color=color,
                alpha=alpha,
                linewidth=10,
                label=f"{label_prefix}{100 - ptile * 2}%",
                *args,
                **kwargs,
            )
        if output_file:
            plt.savefig(output_file)
    @property
    def index(self) -> pd.DatetimeIndex:
        # Built lazily on first access and cached in `_index`.
        if self._index is None:
            self._index = pd.date_range(
                self.start_date, periods=self.prediction_length, freq=self.freq
            )
        return self._index
    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        raise NotImplementedError()
    def copy_dim(self, dim: int):
        """
        Returns a new Forecast object with only the selected sub-dimension.
        Parameters
        ----------
        dim
            The returned forecast object will only represent this dimension.
        """
        raise NotImplementedError()
    def copy_aggregate(self, agg_fun: Callable):
        """
        Returns a new Forecast object with a time series aggregated over the
        dimension axis.
        Parameters
        ----------
        agg_fun
            Aggregation function that defines the aggregation operation
            (typically mean or sum).
        """
        raise NotImplementedError()
    def as_json_dict(self, config: "Config") -> dict:
        """
        Serialize the forecast to a JSON-compatible dict, including the
        outputs requested in `config.output_types`.
        """
        result = {}
        if OutputType.mean in config.output_types:
            result["mean"] = self.mean.tolist()
        if OutputType.quantiles in config.output_types:
            quantiles = map(Quantile.parse, config.quantiles)
            result["quantiles"] = {
                quantile.name: self.quantile(quantile.value).tolist()
                for quantile in quantiles
            }
        if OutputType.samples in config.output_types:
            # The base class has no samples; subclasses override to fill this.
            result["samples"] = []
        return result
class SampleForecast(Forecast):
    """
    A `Forecast` object, where the predicted distribution is represented
    internally as samples.
    Parameters
    ----------
    samples
        Array of size (num_samples, prediction_length) (1D case) or
        (num_samples, prediction_length, target_dim) (multivariate case)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    @validated()
    def __init__(
        self,
        samples: Union[mx.nd.NDArray, np.ndarray],
        start_date,
        freq,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ):
        assert isinstance(
            samples, (np.ndarray, mx.ndarray.ndarray.NDArray)
        ), "samples should be either a numpy or an mxnet array"
        assert (
            len(np.shape(samples)) == 2 or len(np.shape(samples)) == 3
        ), "samples should be a 2-dimensional or 3-dimensional array. Dimensions found: {}".format(
            len(np.shape(samples))
        )
        # Normalize to a numpy array regardless of the input framework.
        self.samples = (
            samples if (isinstance(samples, np.ndarray)) else samples.asnumpy()
        )
        # Lazily-computed caches (see `_sorted_samples`, `mean`, `dim`).
        self._sorted_samples_value = None
        self._mean = None
        self._dim = None
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
    @property
    def _sorted_samples(self):
        # Sorting per time step is needed for empirical quantiles; cache it.
        if self._sorted_samples_value is None:
            self._sorted_samples_value = np.sort(self.samples, axis=0)
        return self._sorted_samples_value
    @property
    def num_samples(self):
        """
        The number of samples representing the forecast.
        """
        return self.samples.shape[0]
    @property
    def prediction_length(self):
        """
        Time length of the forecast.
        """
        return self.samples.shape[-1]
    @property
    def mean(self):
        """
        Forecast mean.
        """
        if self._mean is None:
            # Cache the sample mean so repeated accesses do not recompute it
            # (the `_mean` slot initialized in __init__ exists for this).
            self._mean = np.mean(self.samples, axis=0)
        return self._mean
    @property
    def mean_ts(self):
        """
        Forecast mean, as a pandas.Series object.
        """
        # Fixed: pd.Series takes (data, index); the previous code passed the
        # time index as data and the mean values as index, producing a Series
        # of timestamps indexed by forecast values.
        return pd.Series(self.mean, index=self.index)
    def quantile(self, q):
        """
        Empirical quantile of the samples, per time step (nearest-rank).
        """
        q = Quantile.parse(q).value
        sample_idx = int(np.round((self.num_samples - 1) * q))
        return self._sorted_samples[sample_idx, :]
    def copy_dim(self, dim: int):
        """
        Returns a new SampleForecast restricted to target dimension `dim`.
        """
        if len(self.samples.shape) == 2:
            # Univariate forecast: there is only one dimension to keep.
            samples = self.samples
        else:
            target_dim = self.samples.shape[2]
            assert dim < target_dim, (
                f"must set 0 <= dim < target_dim, but got dim={dim},"
                f" target_dim={target_dim}"
            )
            samples = self.samples[:, :, dim]
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def copy_aggregate(self, agg_fun: Callable):
        """
        Returns a new SampleForecast aggregated over the target dimension
        with `agg_fun` (e.g. np.mean or np.sum).
        """
        if len(self.samples.shape) == 2:
            samples = self.samples
        else:
            # Aggregate over target dimension axis
            samples = agg_fun(self.samples, axis=2)
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def dim(self) -> int:
        """
        Dimensionality of the target (1 for univariate forecasts).
        """
        if self._dim is not None:
            return self._dim
        else:
            if len(self.samples.shape) == 2:
                # univariate target
                # shape: (num_samples, prediction_length)
                return 1
            else:
                # multivariate target
                # shape: (num_samples, prediction_length, target_dim)
                return self.samples.shape[2]
    def as_json_dict(self, config: "Config") -> dict:
        result = super().as_json_dict(config)
        if OutputType.samples in config.output_types:
            result["samples"] = self.samples.tolist()
        return result
    def __repr__(self):
        return ", ".join(
            [
                f"SampleForecast({self.samples!r})",
                f"{self.start_date!r}",
                f"{self.freq!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class QuantileForecast(Forecast):
    """
    A Forecast that contains arrays (i.e. time series) for quantiles and mean
    Parameters
    ----------
    forecast_arrays
        An array of forecasts
    start_date
        start of the forecast
    freq
        forecast frequency
    forecast_keys
        A list of quantiles of the form '0.1', '0.9', etc.,
        and potentially 'mean'. Each entry corresponds to one array in
        forecast_arrays.
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        forecast_arrays: np.ndarray,
        start_date: pd.Timestamp,
        freq: str,
        forecast_keys: List[str],
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ):
        self.forecast_array = forecast_arrays
        self.start_date = pd.Timestamp(start_date, freq=freq)
        self.freq = freq
        # normalize keys
        # Every quantile key is rewritten to its canonical float spelling
        # (e.g. "p10" -> "0.1"); "mean" is kept verbatim.
        self.forecast_keys = [
            Quantile.from_str(key).name if key != "mean" else key
            for key in forecast_keys
        ]
        self.item_id = item_id
        self.info = info
        self._dim = None  # lazily-resolved dimensionality cache (see `dim`)
        shape = self.forecast_array.shape
        assert shape[0] == len(self.forecast_keys), (
            f"The forecast_array (shape={shape} should have the same "
            f"length as the forecast_keys (len={len(self.forecast_keys)})."
        )
        self.prediction_length = shape[-1]
        # Map each normalized key to its row of the forecast array.
        self._forecast_dict = {
            k: self.forecast_array[i] for i, k in enumerate(self.forecast_keys)
        }
        # Returned for quantile levels that were not predicted, so that
        # downstream evaluation keeps running (see `quantile`).
        self._nan_out = np.array([np.nan] * self.prediction_length)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        q_str = Quantile.parse(q).name
        # We return nan here such that evaluation runs through
        return self._forecast_dict.get(q_str, self._nan_out)
    @property
    def mean(self):
        """
        Forecast mean.
        """
        # NaNs are returned when no "mean" array was provided.
        return self._forecast_dict.get("mean", self._nan_out)
    def dim(self) -> int:
        if self._dim is not None:
            return self._dim
        else:
            if (
                len(self.forecast_array.shape) == 2
            ):  # 1D target. shape: (num_samples, prediction_length)
                return 1
            else:
                return self.forecast_array.shape[
                    1
                ]  # 2D target. shape: (num_samples, target_dim, prediction_length)
    def __repr__(self):
        return ", ".join(
            [
                f"QuantileForecast({self.forecast_array!r})",
                f"start_date={self.start_date!r}",
                f"freq={self.freq!r}",
                f"forecast_keys={self.forecast_keys!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class DistributionForecast(Forecast):
    """
    A `Forecast` object that uses a GluonTS distribution directly.
    This can for instance be used to represent marginal probability
    distributions for each time point -- although joint distributions are
    also possible, e.g. when using MultiVariateGaussian).
    Parameters
    ----------
    distribution
        Distribution object. This should represent the entire prediction
        length, i.e., if we draw `num_samples` samples from the distribution,
        the sample shape should be
        samples = trans_dist.sample(num_samples)
        samples.shape -> (num_samples, prediction_length)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    @validated()
    def __init__(
        self,
        distribution: Distribution,
        start_date,
        freq,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ):
        self.distribution = distribution
        self.shape = (
            self.distribution.batch_shape + self.distribution.event_shape
        )
        self.prediction_length = self.shape[0]
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
        self._mean = None  # cache for the `mean` property
    @property
    def mean(self):
        """
        Forecast mean.
        """
        if self._mean is not None:
            return self._mean
        else:
            self._mean = self.distribution.mean.asnumpy()
            return self._mean
    @property
    def mean_ts(self):
        """
        Forecast mean, as a pandas.Series object.
        """
        # Fixed: pd.Series takes (data, index); the previous code passed the
        # time index as data and the mean values as index, producing a Series
        # of timestamps indexed by forecast values.
        return pd.Series(self.mean, index=self.index)
    def quantile(self, level):
        level = Quantile.parse(level).value
        # Query the distribution for a single level and drop the level axis.
        q = self.distribution.quantile(mx.nd.array([level])).asnumpy()[0]
        return q
    def to_sample_forecast(self, num_samples: int = 200) -> SampleForecast:
        """
        Draw `num_samples` samples and wrap them in a `SampleForecast`.
        """
        return SampleForecast(
            samples=self.distribution.sample(num_samples),
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
class OutputType(str, Enum):
    """
    Forecast representations that can be requested via `Config.output_types`.
    """
    # Subclassing `str` makes members compare and serialize as plain strings.
    mean = "mean"
    samples = "samples"
    quantiles = "quantiles"
class Config(pydantic.BaseModel):
    """
    Configuration describing which outputs a forecast should serialize
    (consumed by `Forecast.as_json_dict`).
    """
    # Number of sample paths to include when "samples" is requested.
    num_samples: int = pydantic.Field(100, alias="num_eval_samples")
    # Which representations of the forecast to emit.
    output_types: Set[OutputType] = {"quantiles", "mean"}
    # FIXME: validate list elements
    quantiles: List[str] = ["0.1", "0.5", "0.9"]
    class Config:
        allow_population_by_field_name = True
        # store additional fields
        extra = "allow"
| 19,665 | 28.933029 | 99 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/predictor.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
import json
from typing import (
TYPE_CHECKING,
Tuple,
Union,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
)
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
import gluonts
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import (
DType,
equals,
from_hyperparameters,
get_mxnet_context,
validated,
)
from gluonts.core.exception import GluonTSException
from gluonts.core.serde import dump_json, fqname_for, load_json
from gluonts.dataset.common import DataEntry, Dataset, ListDataset
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from gluonts.dataset.loader import DataBatch, InferenceDataLoader
from gluonts.model.forecast import Forecast
from gluonts.support.util import (
export_repr_block,
export_symb_block,
get_hybrid_forward_input_names,
hybrid_block_to_symbol_block,
import_repr_block,
import_symb_block,
)
from gluonts.transform import Transformation
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
    """
    Abstract class representing predictor objects.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """
    __version__: str = gluonts.__version__
    def __init__(self, prediction_length: int, freq: str) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        self.prediction_length = prediction_length
        self.freq = freq
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Compute forecasts for the time series in the provided dataset.
        This method is not implemented in this abstract class; please
        use one of the subclasses.
        Parameters
        ----------
        dataset
            The dataset containing the time series to predict.
        Returns
        -------
        Iterator[Forecast]
            Iterator over the forecasts, in the same order as the dataset
            iterable was provided.
        """
        raise NotImplementedError
    def serialize(self, path: Path) -> None:
        """
        Write the predictor's fully-qualified type name and version
        metadata under `path`, for later use by `deserialize`.
        """
        # serialize Predictor type
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "gluonts": gluonts.__version__}, fp
            )
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path
        Parameters
        ----------
        path
            Path to the serialized files predictor.
        ctx
            Optional mxnet context to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # deserialize Predictor type
        with (path / "type.txt").open("r") as fp:
            tpe = locate(fp.readline())
        # NOTE(review): `locate` returns None for an unresolvable name, in
        # which case `issubclass` below raises TypeError instead of the
        # IOError — confirm this is the intended failure mode.
        # ensure that predictor_cls is a subtype of Predictor
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # call deserialize() for the concrete Predictor type
        return tpe.deserialize(path, ctx)
    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        """
        Construct an instance via the shared `from_hyperparameters` helper.
        """
        return from_hyperparameters(cls, **hyperparameters)
class RepresentablePredictor(Predictor):
    """
    An abstract predictor that can be subclassed by models that are not based
    on Gluon. Subclasses should have @validated() constructors.
    (De)serialization and value equality are all implemented on top of the
    @validated() logic.

    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length, freq)

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # Forecast each dataset entry independently, preserving order.
        yield from map(self.predict_item, dataset)

    def predict_item(self, item: DataEntry) -> Forecast:
        # Concrete subclasses provide the per-item forecasting logic.
        raise NotImplementedError

    def __eq__(self, that):
        """
        Two RepresentablePredictor instances are considered equal if they
        have the same constructor arguments.
        """
        return equals(self, that)

    def serialize(self, path: Path) -> None:
        # Predictor.serialize() records the concrete class name first.
        super().serialize(path)
        with (path / "predictor.json").open("w") as fp:
            fp.write(dump_json(self) + "\n")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentablePredictor":
        # The JSON payload reconstructs the predictor via @validated().
        with (path / "predictor.json").open("r") as fp:
            return load_json(fp.read())
class GluonPredictor(Predictor):
    """
    Base predictor type for Gluon-based models.

    Parameters
    ----------
    input_names
        Input tensor names for the graph
    prediction_net
        Network that will be called for prediction
    batch_size
        Number of time series to predict in a single batch
    prediction_length
        Number of time steps to predict
    freq
        Frequency of the input data
    input_transform
        Input transformation pipeline
    output_transform
        Output transformation
    ctx
        MXNet context to use for computation
    forecast_generator
        Class to generate forecasts from network outputs
    """

    # Overridden by subclasses with the concrete Gluon block type they wrap.
    BlockType = mx.gluon.Block

    def __init__(
        self,
        input_names: List[str],
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.input_names = input_names
        self.prediction_net = prediction_net
        self.batch_size = batch_size
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.ctx = ctx
        self.dtype = dtype

    def hybridize(self, batch: DataBatch) -> None:
        """
        Hybridizes the underlying prediction network.

        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call.
        """
        self.prediction_net.hybridize(active=True)
        # A forward pass is required for the hybridization to take effect.
        self.prediction_net(*[batch[k] for k in self.input_names])

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        """
        Returns a variant of the current :class:`GluonPredictor` backed
        by a Gluon `SymbolBlock`. If the current predictor is already a
        :class:`SymbolBlockPredictor`, it just returns itself.

        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call of the underlying network.

        Returns
        -------
        SymbolBlockPredictor
            A predictor derived from the current one backed by a `SymbolBlock`.
        """
        raise NotImplementedError

    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        """
        Run the input transformation over `dataset` in batches and delegate
        the conversion of network outputs into Forecast objects to
        `self.forecast_generator`.
        """
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            self.batch_size,
            ctx=self.ctx,
            dtype=self.dtype,
        )
        yield from self.forecast_generator(
            inference_data_loader=inference_data_loader,
            prediction_net=self.prediction_net,
            input_names=self.input_names,
            freq=self.freq,
            output_transform=self.output_transform,
            num_samples=num_samples,
        )

    def __eq__(self, that):
        if type(self) != type(that):
            return False

        # TODO: also consider equality of the pipelines
        # if not equals(self.input_transform, that.input_transform):
        #    return False

        # Equality is decided on the network parameters only.
        return equals(
            self.prediction_net.collect_params(),
            that.prediction_net.collect_params(),
        )

    def serialize(self, path: Path) -> None:
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)

        # serialize every GluonPredictor-specific parameters
        # serialize the prediction network
        self.serialize_prediction_net(path)

        # serialize transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)

        # FIXME: also needs to serialize the output_transform

        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                ctx=self.ctx,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)

    def serialize_prediction_net(self, path: Path) -> None:
        # Network serialization differs per subclass (symbolic vs. repr).
        raise NotImplementedError()
class SymbolBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure as an MXNet symbolic
    graph. Should be used for models deployed in production in order to
    ensure forward-compatibility as GluonTS models evolve.

    Used by the training shell if training is invoked with a hyperparameter
    `use_symbol_block_predictor = True`.
    """

    BlockType = mx.gluon.SymbolBlock

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        # Already backed by a SymbolBlock; nothing to convert.
        return self

    def serialize_prediction_net(self, path: Path) -> None:
        export_symb_block(self.prediction_net, path, "prediction_net")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "SymbolBlockPredictor":
        """
        Restore a serialized SymbolBlockPredictor, placing the network on
        `ctx` (GPU if available, else CPU, when `ctx` is None).
        """
        ctx = ctx if ctx is not None else get_mxnet_context()

        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
                parameters["ctx"] = ctx

            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())

            # deserialize prediction network
            # the number of inputs is needed to rebuild the symbolic graph
            num_inputs = len(parameters["input_names"])
            prediction_net = import_symb_block(
                num_inputs, path, "prediction_net"
            )

            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class RepresentableBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure using the
    JSON-serialization methods located in `gluonts.core.serde`. Use the following
    logic to create a `RepresentableBlockPredictor` from a trained prediction
    network.

    >>> def create_representable_block_predictor(
    ...     prediction_network: mx.gluon.HybridBlock,
    ...     **kwargs
    ... ) -> RepresentableBlockPredictor:
    ...     return RepresentableBlockPredictor(
    ...         prediction_net=prediction_network,
    ...         **kwargs
    ...     )
    """

    BlockType = mx.gluon.HybridBlock

    def __init__(
        self,
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[
            Callable[[DataEntry, np.ndarray], np.ndarray]
        ] = None,
        dtype: DType = np.float32,
    ) -> None:
        # input_names is derived from the network's hybrid_forward signature
        # rather than passed in by the caller.
        super().__init__(
            input_names=get_hybrid_forward_input_names(prediction_net),
            prediction_net=prediction_net,
            batch_size=batch_size,
            prediction_length=prediction_length,
            freq=freq,
            ctx=ctx,
            input_transform=input_transform,
            forecast_generator=forecast_generator,
            output_transform=output_transform,
            dtype=dtype,
        )

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> SymbolBlockPredictor:
        # The forward pass on `batch` traces the HybridBlock into a
        # SymbolBlock with an identical interface.
        symbol_block_net = hybrid_block_to_symbol_block(
            hb=self.prediction_net,
            data_batch=[batch[k] for k in self.input_names],
        )

        return SymbolBlockPredictor(
            input_names=self.input_names,
            prediction_net=symbol_block_net,
            batch_size=self.batch_size,
            prediction_length=self.prediction_length,
            freq=self.freq,
            ctx=self.ctx,
            input_transform=self.input_transform,
            forecast_generator=self.forecast_generator,
            output_transform=self.output_transform,
            dtype=self.dtype,
        )

    def serialize(self, path: Path) -> None:
        # The repr-based format is tied to the current class definitions and
        # is therefore not guaranteed to load under future gluonts versions.
        logging.warning(
            "Serializing RepresentableBlockPredictor instances does not save "
            "the prediction network structure in a backwards-compatible "
            "manner. Be careful not to use this method in production."
        )
        super().serialize(path)

    def serialize_prediction_net(self, path: Path) -> None:
        export_repr_block(self.prediction_net, path, "prediction_net")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentableBlockPredictor":
        """
        Restore a serialized RepresentableBlockPredictor, placing the network
        on `ctx` (GPU if available, else CPU, when `ctx` is None).
        """
        ctx = ctx if ctx is not None else get_mxnet_context()

        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())

            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())

            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")

            # input_names is derived from the prediction_net
            if "input_names" in parameters:
                del parameters["input_names"]

            parameters["ctx"] = ctx

            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class WorkerError:
    """Carries a formatted worker-process traceback back to the parent,
    since exception objects cannot always cross the process boundary."""

    def __init__(self, msg):
        self.msg = msg
def _worker_loop(
    predictor_path: Path,
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    worker_id,
    **kwargs,
):
    """
    Worker loop for multiprocessing Predictor.
    Loads the predictor serialized in predictor_path
    reads inputs from input_queue and writes forecasts to output_queue
    """

    predictor = Predictor.deserialize(predictor_path)
    while True:
        idx, data_chunk = input_queue.get()
        # (None, None) is the shutdown sentinel sent by the parent; reply
        # with (None, None, None) so the parent can count finished workers.
        if idx is None:
            output_queue.put((None, None, None))
            break
        try:
            # Materialize the forecasts so errors surface here, not later.
            result = list(predictor.predict(data_chunk, **kwargs))
        except Exception:
            # Ship the formatted traceback to the parent as a WorkerError
            # (in place of the chunk index) and stop this worker.
            we = WorkerError(
                "".join(traceback.format_exception(*sys.exc_info()))
            )
            output_queue.put((we, None, None))
            break
        output_queue.put((idx, worker_id, result))
class ParallelizedPredictor(Predictor):
    """
    Runs multiple instances (workers) of a predictor in parallel.

    Exceptions are propagated from the workers.

    Note: That there is currently an issue with tqdm that will cause things
    to hang if the ParallelizedPredictor is used with tqdm and an exception
    occurs during prediction.

    https://github.com/tqdm/tqdm/issues/548

    Parameters
    ----------
    base_predictor
        A representable predictor that will be used
    num_workers
        Number of workers (processes) to use. If set to
        None, one worker per CPU will be used.
    chunk_size
        Number of items to pass per call
    """

    def __init__(
        self,
        base_predictor: Predictor,
        num_workers: Optional[int] = None,
        chunk_size=1,
    ) -> None:
        super().__init__(base_predictor.prediction_length, base_predictor.freq)

        self._base_predictor = base_predictor
        self._num_workers = (
            num_workers if num_workers is not None else mp.cpu_count()
        )
        self._chunk_size = chunk_size
        self._num_running_workers = 0
        self._input_queues = []
        self._output_queue = None

    def _grouper(self, iterable, n):
        # Yield successive n-sized tuples from `iterable`; the final group
        # may be shorter than n.
        iterator = iter(iterable)
        group = tuple(itertools.islice(iterator, n))
        while group:
            yield group
            group = tuple(itertools.islice(iterator, n))

    def terminate(self):
        # Send the shutdown sentinel, then force-terminate and reap workers.
        for q in self._input_queues:
            q.put((None, None))
        for w in self._workers:
            w.terminate()
        for i, w in enumerate(self._workers):
            w.join()

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # The base predictor is shipped to workers via on-disk serialization.
        with TemporaryDirectory() as tempdir:
            predictor_path = Path(tempdir)
            self._base_predictor.serialize(predictor_path)

            # TODO: Consider using shared memory for the data transfer.

            # One input queue per worker, one shared output queue.
            self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
            self._output_queue = mp.Queue()

            workers = []
            for worker_id, in_q in enumerate(self._input_queues):
                worker = mp.Process(
                    target=_worker_loop,
                    args=(predictor_path, in_q, self._output_queue, worker_id),
                    kwargs=kwargs,
                )
                worker.daemon = True
                worker.start()
                workers.append(worker)
                self._num_running_workers += 1

            self._workers = workers

            chunked_data = self._grouper(dataset, self._chunk_size)

            # Chunks are tagged with a send index; results are re-ordered on
            # receipt so output order matches the input dataset order.
            self._send_idx = 0
            self._next_idx = 0

            self._data_buffer = {}

            worker_ids = list(range(self._num_workers))

            def receive():
                # Block for one worker message; a WorkerError in place of the
                # index means a worker crashed: tear down and re-raise.
                idx, worker_id, result = self._output_queue.get()
                if isinstance(idx, WorkerError):
                    self._num_running_workers -= 1
                    self.terminate()
                    raise Exception(idx.msg)
                if idx is not None:
                    self._data_buffer[idx] = result
                return idx, worker_id, result

            def get_next_from_buffer():
                # Emit buffered results strictly in submission order.
                while self._next_idx in self._data_buffer:
                    result_batch = self._data_buffer.pop(self._next_idx)
                    self._next_idx += 1
                    for result in result_batch:
                        yield result

            def send(worker_id, chunk):
                q = self._input_queues[worker_id]
                q.put((self._send_idx, chunk))
                self._send_idx += 1

            try:
                # prime the queues
                for wid in worker_ids:
                    chunk = next(chunked_data)
                    send(wid, chunk)

                # Steady state: whichever worker answered gets the next chunk.
                while True:
                    idx, wid, result = receive()
                    for res in get_next_from_buffer():
                        yield res
                    chunk = next(chunked_data)
                    send(wid, chunk)
            except StopIteration:
                # signal workers end of data
                for q in self._input_queues:
                    q.put((None, None))

            # collect any outstanding results
            while self._num_running_workers > 0:
                idx, worker_id, result = receive()
                if idx is None:
                    # A worker acknowledged the shutdown sentinel.
                    self._num_running_workers -= 1
                    continue
                for res in get_next_from_buffer():
                    yield res

            assert len(self._data_buffer) == 0
            assert self._send_idx == self._next_idx
class Localizer(Predictor):
    """
    A Predictor that uses an estimator to train a local model per time series and
    immediatly calls this to predict.

    Parameters
    ----------
    estimator
        The estimator object to train on each dataset entry at prediction time.
    """

    def __init__(self, estimator: "Estimator"):
        super().__init__(estimator.prediction_length, estimator.freq)
        self.estimator = estimator

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        logger = logging.getLogger(__name__)
        for idx, entry in enumerate(dataset, start=1):
            logger.info(f"training for time series {idx} / {len(dataset)}")
            # Wrap the single entry so the estimator sees a regular dataset.
            single_series = ListDataset([entry], freq=self.freq)
            trained_predictor = self.estimator.train(single_series)
            logger.info(f"predicting for time series {idx} / {len(dataset)}")
            yield from trained_predictor.predict(single_series, **kwargs)
class FallbackPredictor(Predictor):
    @classmethod
    def from_predictor(
        cls, base: RepresentablePredictor, **overrides
    ) -> Predictor:
        """
        Create a predictor of this class from an existing predictor's
        constructor arguments, e.g. a MeanPredictor as a fallback on the fly.
        Keyword arguments in `overrides` take precedence over `base`'s.
        """
        # Merge into one dict first: calling with ``**a, **b`` directly
        # raises TypeError on duplicate keywords, so `overrides` could never
        # actually override any of the base arguments.
        hyperparameters = {**getattr(base, "__init_args__"), **overrides}
        return cls.from_hyperparameters(**hyperparameters)
def fallback(fallback_cls: Type[FallbackPredictor]):
    """
    Decorator factory: wrap a `predict_item` method so that any unexpected
    failure is retried with a freshly-built `fallback_cls` predictor.
    GluonTSException is re-raised unchanged, since it signals a user or
    configuration error rather than a model failure.
    """

    def decorator(predict_item):
        @functools.wraps(predict_item)
        def fallback_predict(self, item: DataEntry) -> Forecast:
            try:
                return predict_item(self, item)
            except GluonTSException:
                raise
            except Exception:
                logging.warning(
                    f"Base predictor failed with: {traceback.format_exc()}"
                )
                return fallback_cls.from_predictor(self).predict_item(item)

        return fallback_predict

    return decorator
| 23,995 | 30.994667 | 81 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/forecast_generator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Any, Callable, Iterator, List, Optional
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import validated
from gluonts.dataset.common import DataEntry
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.loader import InferenceDataLoader
from gluonts.model.forecast import (
Forecast,
SampleForecast,
QuantileForecast,
DistributionForecast,
)
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
BlockType = mx.gluon.Block
# Messages that have already been logged once by ``log_once``.
LOG_CACHE = set()


def log_once(msg):
    """Log `msg` at INFO level only the first time it is seen.

    Subsequent calls with the same message are silently ignored; seen
    messages are remembered in the module-level ``LOG_CACHE`` set.
    """
    # Mutating the set does not rebind the name, so no ``global`` is needed.
    if msg not in LOG_CACHE:
        logging.info(msg)
        LOG_CACHE.add(msg)
def _extract_instances(x: Any) -> Any:
    """
    Helper generator to extract individual instances from batched
    mxnet results.

    For a tensor `a`
      _extract_instances(a) -> [a[0], a[1], ...]

    For (nested) tuples of tensors `(a, (b, c))`
      _extract_instances((a, (b, c)) -> [(a[0], (b[0], c[0])), (a[1], (b[1], c[1])), ...]

    A ``None`` input yields ``None`` indefinitely, so it can be zipped
    against real batches of unknown length.
    """
    if isinstance(x, (np.ndarray, mx.nd.NDArray)):
        # Instances are yielded without the batch axis
        # (x[i : i + 1] would keep it).
        yield from (x[idx] for idx in range(x.shape[0]))
    elif isinstance(x, tuple):
        # Recurse into each element and zip the per-instance pieces back up.
        yield from (
            tuple(group) for group in zip(*map(_extract_instances, x))
        )
    elif isinstance(x, list):
        yield from (
            list(group) for group in zip(*map(_extract_instances, x))
        )
    elif x is None:
        while True:
            yield None
    else:
        assert False
class ForecastGenerator:
    """
    Classes used to bring the output of a network into a class.
    """

    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: BlockType,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        """
        Turn batched network outputs into Forecast objects; implemented by
        the concrete subclasses below.
        """
        raise NotImplementedError()
class DistributionForecastGenerator(ForecastGenerator):
    """
    Produces DistributionForecast objects by interpreting the network
    outputs as parameters of `distr_output`'s distribution type.
    """

    @validated()
    def __init__(self, distr_output: DistributionOutput) -> None:
        self.distr_output = distr_output

    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: BlockType,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[DistributionForecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            outputs = prediction_net(*inputs)
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            if num_samples:
                # Distribution forecasts are analytic; sampling does not apply.
                log_once(
                    "Forecast is not sample based. Ignoring parameter `num_samples` from predict method."
                )

            # One distribution per series, built from the per-instance
            # parameter slices of the batched outputs.
            distributions = [
                self.distr_output.distribution(*u)
                for u in _extract_instances(outputs)
            ]

            # i = -1 keeps the final sanity check valid for empty batches.
            i = -1
            for i, distr in enumerate(distributions):
                yield DistributionForecast(
                    distr,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                )
            assert i + 1 == len(batch["forecast_start"])
class QuantileForecastGenerator(ForecastGenerator):
    """
    Produces QuantileForecast objects; each network output row corresponds
    to one of the configured `quantiles`.
    """

    @validated()
    def __init__(self, quantiles: List[str]) -> None:
        self.quantiles = quantiles

    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: BlockType,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            outputs = prediction_net(*inputs).asnumpy()
            if output_transform is not None:
                outputs = output_transform(batch, outputs)

            if num_samples:
                # Quantile forecasts are not sample based; nothing to draw.
                log_once(
                    "Forecast is not sample based. Ignoring parameter `num_samples` from predict method."
                )

            # i = -1 keeps the final sanity check valid for empty batches.
            i = -1
            for i, output in enumerate(outputs):
                yield QuantileForecast(
                    output,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                    forecast_keys=self.quantiles,
                )
            assert i + 1 == len(batch["forecast_start"])
class SampleForecastGenerator(ForecastGenerator):
    """
    Produces SampleForecast objects, re-running the network as often as
    needed to accumulate the requested number of sample paths.
    """

    @validated()
    def __init__(self):
        pass

    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: BlockType,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            outputs = prediction_net(*inputs).asnumpy()
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            if num_samples:
                # Each forward pass yields a fixed number of samples; keep
                # collecting until every series has at least num_samples.
                num_collected_samples = outputs[0].shape[0]
                collected_samples = [outputs]
                while num_collected_samples < num_samples:
                    outputs = prediction_net(*inputs).asnumpy()
                    if output_transform is not None:
                        outputs = output_transform(batch, outputs)
                    collected_samples.append(outputs)
                    num_collected_samples += outputs[0].shape[0]
                # Concatenate per series and truncate to exactly num_samples.
                outputs = [
                    np.concatenate(s)[:num_samples]
                    for s in zip(*collected_samples)
                ]
                assert len(outputs[0]) == num_samples
            # i = -1 keeps the final sanity check valid for empty batches.
            i = -1
            for i, output in enumerate(outputs):
                yield SampleForecast(
                    output,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                )
            assert i + 1 == len(batch["forecast_start"])
| 7,578 | 32.535398 | 105 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/forecast.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import re
from enum import Enum
from typing import Dict, List, NamedTuple, Optional, Set, Union, Callable
# Third-party imports
import mxnet as mx
import numpy as np
import pandas as pd
import pydantic
# First-party imports
from gluonts.core.exception import GluonTSUserError
from gluonts.distribution import Distribution
from gluonts.core.component import validated
class Quantile(NamedTuple):
    value: float
    name: str

    @property
    def loss_name(self):
        return f"QuantileLoss[{self.name}]"

    @property
    def weighted_loss_name(self):
        return f"wQuantileLoss[{self.name}]"

    @property
    def coverage_name(self):
        return f"Coverage[{self.name}]"

    @classmethod
    def checked(cls, value: float, name: str) -> "Quantile":
        """Construct a Quantile, rejecting levels outside [0, 1]."""
        if 0 <= value <= 1:
            return Quantile(value, name)
        raise GluonTSUserError(
            f"quantile value should be in [0, 1] but found {value}"
        )

    @classmethod
    def from_float(cls, quantile: float) -> "Quantile":
        """Build from a float level, e.g. ``0.5`` -> name ``'0.5'``."""
        assert isinstance(quantile, float)
        return cls.checked(value=quantile, name=str(quantile))

    @classmethod
    def from_str(cls, quantile: str) -> "Quantile":
        """Build from ``'0.5'``-style or ``'p50'``-style strings."""
        assert isinstance(quantile, str)
        try:
            return cls.checked(value=float(quantile), name=quantile)
        except ValueError:
            # Not a plain float: accept the "pNN" percentile notation.
            match = re.match(r"^p(\d{2})$", quantile)
            if match is None:
                raise GluonTSUserError(
                    "Quantile string should be of the form "
                    f'"p10", "p50", ... or "0.1", "0.5", ... but found {quantile}'
                )
            level = int(match.group(1)) / 100
            return cls(value=level, name=str(level))

    @classmethod
    def parse(cls, quantile: Union["Quantile", float, str]) -> "Quantile":
        """Produces equivalent float and string representation of a given
        quantile level.

        >>> Quantile.parse(0.1)
        Quantile(value=0.1, name='0.1')

        >>> Quantile.parse('0.2')
        Quantile(value=0.2, name='0.2')

        >>> Quantile.parse('0.20')
        Quantile(value=0.2, name='0.20')

        >>> Quantile.parse('p99')
        Quantile(value=0.99, name='0.99')

        Parameters
        ----------
        quantile
            Quantile, can be a float a str representing a float e.g. '0.1' or a
            quantile string of the form 'p0.1'.

        Returns
        -------
        Quantile
            A tuple containing both a float and a string representation of the
            input quantile level.
        """
        if isinstance(quantile, Quantile):
            return quantile
        if isinstance(quantile, float):
            return cls.from_float(quantile)
        return cls.from_str(quantile)
class Forecast:
    """
    A abstract class representing predictions.
    """

    # Attributes expected to be provided by concrete subclasses:
    start_date: pd.Timestamp
    freq: str
    item_id: Optional[str]
    info: Optional[Dict]
    prediction_length: int
    mean: np.ndarray
    # Lazily-built DatetimeIndex cache backing the `index` property.
    _index = None

    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """
        Computes a quantile from the predicted distribution.

        Parameters
        ----------
        q
            Quantile to compute.

        Returns
        -------
        numpy.ndarray
            Value of the quantile across the prediction range.
        """
        raise NotImplementedError()

    @property
    def median(self) -> np.ndarray:
        return self.quantile(0.5)

    def plot(
        self,
        prediction_intervals=(50.0, 90.0),
        show_mean=False,
        color="b",
        label=None,
        output_file=None,
        *args,
        **kwargs,
    ):
        """
        Plots the median of the forecast as well as confidence bounds.
        (requires matplotlib and pandas).

        Parameters
        ----------
        prediction_intervals : float or list of floats in [0, 100]
            Confidence interval size(s). If a list, it will stack the error
            plots for each confidence interval. Only relevant for error styles
            with "ci" in the name.
        show_mean : boolean
            Whether to also show the mean of the forecast.
        color : matplotlib color name or dictionary
            The color used for plotting the forecast.
        label : string
            A label (prefix) that is used for the forecast
        output_file : str or None, default None
            Output path for the plot file. If None, plot is not saved to file.
        args :
            Other arguments are passed to main plot() call
        kwargs :
            Other keyword arguments are passed to main plot() call
        """
        # matplotlib==2.0.* gives errors in Brazil builds and has to be
        # imported locally
        import matplotlib.pyplot as plt

        label_prefix = "" if label is None else label + "-"

        for c in prediction_intervals:
            assert 0.0 <= c <= 100.0

        # Convert interval widths into the percentile levels to query, e.g.
        # 90% -> [5.0, 95.0]; 50.0 is always included for the median.
        ps = [50.0] + [
            50.0 + f * c / 2.0
            for c in prediction_intervals
            for f in [-1.0, +1.0]
        ]
        percentiles_sorted = sorted(set(ps))

        def alpha_for_percentile(p):
            # Darker shading towards the median.
            return (p / 100.0) ** 0.3

        ps_data = [self.quantile(p / 100.0) for p in percentiles_sorted]
        i_p50 = len(percentiles_sorted) // 2

        p50_data = ps_data[i_p50]
        p50_series = pd.Series(data=p50_data, index=self.index)
        p50_series.plot(color=color, ls="-", label=f"{label_prefix}median", **kwargs)

        if show_mean:
            # NOTE(review): relies on self._sorted_samples, which only
            # sample-based subclasses provide — confirm plot(show_mean=True)
            # is not used on other Forecast types.
            mean_data = np.mean(self._sorted_samples, axis=0)
            pd.Series(data=mean_data, index=self.index).plot(
                color=color,
                ls=":",
                label=f"{label_prefix}mean",
                *args,
                **kwargs,
            )

        # Shade each symmetric band between the i-th lowest and i-th highest
        # percentile series.
        for i in range(len(percentiles_sorted) // 2):
            ptile = percentiles_sorted[i]
            alpha = alpha_for_percentile(ptile)
            plt.fill_between(
                self.index,
                ps_data[i],
                ps_data[-i - 1],
                facecolor=color,
                alpha=alpha,
                interpolate=True,
                *args,
                **kwargs,
            )
            # Hack to create labels for the error intervals.
            # Doesn't actually plot anything, because we only pass a single data point
            pd.Series(data=p50_data[:1], index=self.index[:1]).plot(
                color=color,
                alpha=alpha,
                label=f"{label_prefix}{100 - ptile * 2}%",
                *args,
                **kwargs,
            )
        if output_file:
            plt.savefig(output_file)

    @property
    def index(self) -> pd.DatetimeIndex:
        # Built lazily from start_date / prediction_length / freq and cached.
        if self._index is None:
            self._index = pd.date_range(
                self.start_date, periods=self.prediction_length, freq=self.freq
            )
        return self._index

    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        raise NotImplementedError()

    def copy_dim(self, dim: int):
        """
        Returns a new Forecast object with only the selected sub-dimension.

        Parameters
        ----------
        dim
            The returned forecast object will only represent this dimension.
        """
        raise NotImplementedError()

    def copy_aggregate(self, agg_fun: Callable):
        """
        Returns a new Forecast object with a time series aggregated over the
        dimension axis.

        Parameters
        ----------
        agg_fun
            Aggregation function that defines the aggregation operation
            (typically mean or sum).
        """
        raise NotImplementedError()

    def as_json_dict(self, config: "Config") -> dict:
        # NOTE(review): OutputType and Config are presumably defined
        # elsewhere in this module — confirm.
        result = {}

        if OutputType.mean in config.output_types:
            result["mean"] = self.mean.tolist()

        if OutputType.quantiles in config.output_types:
            quantiles = map(Quantile.parse, config.quantiles)

            result["quantiles"] = {
                quantile.name: self.quantile(quantile.value).tolist()
                for quantile in quantiles
            }

        if OutputType.samples in config.output_types:
            # Subclasses with samples (e.g. SampleForecast) override this.
            result["samples"] = []

        return result
class SampleForecast(Forecast):
    """
    A `Forecast` object, where the predicted distribution is represented
    internally as samples.

    Parameters
    ----------
    samples
        Array of size (num_samples, prediction_length) (1D case) or
        (num_samples, prediction_length, target_dim) (multivariate case)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """

    @validated()
    def __init__(
        self,
        samples: Union[mx.nd.NDArray, np.ndarray],
        start_date,
        freq,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ):
        assert isinstance(
            samples, (np.ndarray, mx.ndarray.ndarray.NDArray)
        ), "samples should be either a numpy or an mxnet array"
        assert (
            len(np.shape(samples)) == 2 or len(np.shape(samples)) == 3
        ), "samples should be a 2-dimensional or 3-dimensional array. Dimensions found: {}".format(
            len(np.shape(samples))
        )
        # Samples are always stored as a numpy array internally.
        self.samples = (
            samples if (isinstance(samples, np.ndarray)) else samples.asnumpy()
        )
        # Lazily-computed caches.
        self._sorted_samples_value = None
        self._mean = None
        self._dim = None
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq

    @property
    def _sorted_samples(self):
        # Sorted along the sample axis, on demand; backs empirical quantiles.
        if self._sorted_samples_value is None:
            self._sorted_samples_value = np.sort(self.samples, axis=0)
        return self._sorted_samples_value

    @property
    def num_samples(self):
        """
        The number of samples representing the forecast.
        """
        return self.samples.shape[0]

    @property
    def prediction_length(self):
        """
        Time length of the forecast.
        """
        # The time axis is axis 1 for both the (num_samples, T) and the
        # (num_samples, T, target_dim) layouts documented above; shape[-1]
        # would wrongly return target_dim in the multivariate case.
        return self.samples.shape[1]

    @property
    def mean(self):
        """
        Forecast mean.
        """
        if self._mean is None:
            # Cache so repeated accesses do not recompute the mean.
            self._mean = np.mean(self.samples, axis=0)
        return self._mean

    @property
    def mean_ts(self):
        """
        Forecast mean, as a pandas.Series object.
        """
        # pd.Series takes (data, index); the previous implementation passed
        # the index as the data argument and the mean as the index.
        return pd.Series(self.mean, index=self.index)

    def quantile(self, q):
        """
        Empirical quantile across samples at level `q` (float, str, or
        Quantile).
        """
        q = Quantile.parse(q).value
        sample_idx = int(np.round((self.num_samples - 1) * q))
        return self._sorted_samples[sample_idx, :]

    def copy_dim(self, dim: int):
        """
        Returns a new SampleForecast restricted to target dimension `dim`
        (a no-op for univariate forecasts).
        """
        if len(self.samples.shape) == 2:
            samples = self.samples
        else:
            target_dim = self.samples.shape[2]
            assert dim < target_dim, (
                f"must set 0 <= dim < target_dim, but got dim={dim},"
                f" target_dim={target_dim}"
            )
            samples = self.samples[:, :, dim]

        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )

    def copy_aggregate(self, agg_fun: Callable):
        """
        Returns a new SampleForecast aggregated over the target dimension
        with `agg_fun` (typically np.sum or np.mean).
        """
        if len(self.samples.shape) == 2:
            samples = self.samples
        else:
            # Aggregate over target dimension axis
            samples = agg_fun(self.samples, axis=2)

        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )

    def dim(self) -> int:
        """
        Dimensionality of the target (1 for univariate forecasts).
        """
        if self._dim is not None:
            return self._dim
        else:
            if len(self.samples.shape) == 2:
                # univariate target
                # shape: (num_samples, prediction_length)
                return 1
            else:
                # multivariate target
                # shape: (num_samples, prediction_length, target_dim)
                return self.samples.shape[2]

    def as_json_dict(self, config: "Config") -> dict:
        result = super().as_json_dict(config)

        if OutputType.samples in config.output_types:
            result["samples"] = self.samples.tolist()

        return result

    def __repr__(self):
        return ", ".join(
            [
                f"SampleForecast({self.samples!r})",
                f"{self.start_date!r}",
                f"{self.freq!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class QuantileForecast(Forecast):
    """
    A Forecast that contains arrays (i.e. time series) for quantiles and mean
    Parameters
    ----------
    forecast_arrays
        An array of forecasts
    start_date
        start of the forecast
    freq
        forecast frequency
    forecast_keys
        A list of quantiles of the form '0.1', '0.9', etc.,
        and potentially 'mean'. Each entry corresponds to one array in
        forecast_arrays.
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """

    def __init__(
        self,
        forecast_arrays: np.ndarray,
        start_date: pd.Timestamp,
        freq: str,
        forecast_keys: List[str],
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ):
        self.forecast_array = forecast_arrays
        self.start_date = pd.Timestamp(start_date, freq=freq)
        self.freq = freq
        # Normalize quantile keys to canonical names; 'mean' passes through.
        self.forecast_keys = [
            key if key == "mean" else Quantile.from_str(key).name
            for key in forecast_keys
        ]
        self.item_id = item_id
        self.info = info
        self._dim = None

        shape = self.forecast_array.shape
        assert shape[0] == len(self.forecast_keys), (
            f"The forecast_array (shape={shape} should have the same "
            f"length as the forecast_keys (len={len(self.forecast_keys)})."
        )
        self.prediction_length = shape[-1]
        # Map each key to its corresponding row of the forecast array.
        self._forecast_dict = dict(
            zip(self.forecast_keys, self.forecast_array)
        )
        # Returned for quantiles we do not have, so evaluation still runs.
        self._nan_out = np.array([np.nan] * self.prediction_length)

    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """Return the stored array for quantile `q`, or NaNs if absent."""
        # We return nan here such that evaluation runs through
        return self._forecast_dict.get(Quantile.parse(q).name, self._nan_out)

    @property
    def mean(self):
        """
        Forecast mean.
        """
        return self._forecast_dict.get("mean", self._nan_out)

    def dim(self) -> int:
        """Return the dimensionality of the forecast target."""
        if self._dim is not None:
            return self._dim
        # 2D array: (num_keys, prediction_length) -> univariate target;
        # 3D array: (num_keys, target_dim, prediction_length).
        shape = self.forecast_array.shape
        return 1 if len(shape) == 2 else shape[1]

    def __repr__(self):
        """Debug representation mirroring the constructor arguments."""
        fields = [
            f"QuantileForecast({self.forecast_array!r})",
            f"start_date={self.start_date!r}",
            f"freq={self.freq!r}",
            f"forecast_keys={self.forecast_keys!r}",
            f"item_id={self.item_id!r}",
            f"info={self.info!r})",
        ]
        return ", ".join(fields)
class DistributionForecast(Forecast):
    """
    A `Forecast` object that uses a GluonTS distribution directly.
    This can for instance be used to represent marginal probability
    distributions for each time point -- although joint distributions are
    also possible, e.g. when using MultiVariateGaussian).
    Parameters
    ----------
    distribution
        Distribution object. This should represent the entire prediction
        length, i.e., if we draw `num_samples` samples from the distribution,
        the sample shape should be
        samples = trans_dist.sample(num_samples)
        samples.shape -> (num_samples, prediction_length)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """

    @validated()
    def __init__(
        self,
        distribution: Distribution,
        start_date,
        freq,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ):
        self.distribution = distribution
        # Joint shape of one draw; the first axis is the prediction length.
        self.shape = (
            self.distribution.batch_shape + self.distribution.event_shape
        )
        self.prediction_length = self.shape[0]
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
        # Lazily computed cache for the `mean` property.
        self._mean = None

    @property
    def mean(self):
        """
        Forecast mean, computed once from the distribution and cached.
        """
        if self._mean is None:
            self._mean = self.distribution.mean.asnumpy()
        return self._mean

    @property
    def mean_ts(self):
        """
        Forecast mean, as a pandas.Series object.
        """
        # Bug fix: pd.Series takes (data, index); the previous code passed
        # self.index as the data and self.mean as the index.
        return pd.Series(self.mean, index=self.index)

    def quantile(self, level):
        """Return the quantile curve at `level` as a numpy array."""
        level = Quantile.parse(level).value
        q = self.distribution.quantile(mx.nd.array([level])).asnumpy()[0]
        return q

    def to_sample_forecast(self, num_samples: int = 200) -> SampleForecast:
        """Draw `num_samples` paths and wrap them as a SampleForecast."""
        return SampleForecast(
            samples=self.distribution.sample(num_samples),
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
class OutputType(str, Enum):
    # Forecast representations that can be requested for serialization
    # (see `Config.output_types` and `as_json_dict`).
    mean = "mean"
    samples = "samples"
    quantiles = "quantiles"
class Config(pydantic.BaseModel):
    # Serialization configuration consumed by `Forecast.as_json_dict`.
    # Number of sample paths to emit; `num_eval_samples` is accepted as an
    # input alias for backwards compatibility.
    num_samples: int = pydantic.Field(100, alias="num_eval_samples")
    # Which forecast representations to include in the output.
    output_types: Set[OutputType] = {"quantiles", "mean"}
    # FIXME: validate list elements
    quantiles: List[str] = ["0.1", "0.5", "0.9"]

    class Config:
        # pydantic settings: accept both field names and aliases on input,
        allow_population_by_field_name = True
        # store additional fields
        extra = "allow"
| 19,676 | 28.949772 | 99 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/common.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import types
import typing
# Third-party imports
import mxnet as mx
import numpy as np
# Tensor type for HybridBlocks in Gluon
Tensor = typing.Union[mx.nd.NDArray, mx.sym.Symbol]
# Type of tensor-transforming functions in Gluon
TensorTransformer = typing.Callable[[types.ModuleType, Tensor], Tensor]
# untyped global configuration passed to model components
GlobalConfig = typing.Dict[str, typing.Any]
# to annotate Numpy parameter
NPArrayLike = typing.Union[int, float, np.ndarray]
| 1,091 | 32.090909 | 75 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import NamedTuple, Optional
# Third-party imports
import numpy as np
from mxnet.gluon import HybridBlock
from pydantic import ValidationError
# First-party imports
import gluonts
from gluonts.core import fqname_for
from gluonts.core.component import DType, from_hyperparameters, validated
from gluonts.core.exception import GluonTSHyperparametersError
from gluonts.dataset.common import Dataset
from gluonts.dataset.loader import TrainDataLoader, ValidationDataLoader
from gluonts.model.predictor import Predictor
from gluonts.support.util import get_hybrid_forward_input_names
from gluonts.trainer import Trainer
from gluonts.transform import Transformation
class Estimator:
    """
    An abstract class representing a trainable model.
    The underlying model is trained by calling the `train` method with
    a training `Dataset`, producing a `Predictor` object.
    """

    # GluonTS version the estimator was created with; recorded for
    # serialization compatibility checks.
    __version__: str = gluonts.__version__

    # Subclasses are expected to set these attributes.
    prediction_length: int
    freq: str

    def train(
        self, training_data: Dataset, validation_data: Optional[Dataset] = None
    ) -> Predictor:
        """
        Train the estimator on the given data.
        Parameters
        ----------
        training_data
            Dataset to train the model on.
        validation_data
            Dataset to validate the model on during training.
        Returns
        -------
        Predictor
            The predictor containing the trained model.
        """
        raise NotImplementedError

    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        # Build an instance from a flat hyperparameter dict, relying on the
        # @validated() constructor for type conversion and validation.
        return from_hyperparameters(cls, **hyperparameters)
class DummyEstimator(Estimator):
    """
    An `Estimator` that, upon training, simply returns a pre-constructed
    `Predictor`.

    Parameters
    ----------
    predictor_cls
        `Predictor` class to instantiate.
    **kwargs
        Keyword arguments to pass to the predictor constructor.
    """

    @validated()
    def __init__(self, predictor_cls: type, **kwargs) -> None:
        self.predictor = predictor_cls(**kwargs)

    def train(
        self,
        training_data: Dataset,
        validation_data: Optional[Dataset] = None,
    ) -> Predictor:
        """Return the pre-constructed predictor; both datasets are ignored.

        Fix: the parameter was previously named `validation_dataset`, which
        broke keyword calls written against the base `Estimator.train`
        signature (`validation_data`).
        """
        return self.predictor
class TrainOutput(NamedTuple):
    # Bundle returned by `GluonEstimator.train_model`: the fitted data
    # transformation, the network after training, and a predictor that
    # wraps that network for inference.
    transformation: Transformation
    trained_net: HybridBlock
    predictor: Predictor
class GluonEstimator(Estimator):
    """
    An `Estimator` type with utilities for creating Gluon-based models.
    To extend this class, one needs to implement three methods:
    `create_transformation`, `create_training_network`, `create_predictor`.
    """

    @validated()
    def __init__(self, trainer: Trainer, dtype: DType = np.float32) -> None:
        self.trainer = trainer
        self.dtype = dtype

    @classmethod
    def from_hyperparameters(cls, **hyperparameters) -> "GluonEstimator":
        # `@validated()` attaches a pydantic `Model` to the constructor;
        # without it hyperparameters cannot be validated/converted.
        Model = getattr(cls.__init__, "Model", None)
        if not Model:
            raise AttributeError(
                f"Cannot find attribute Model attached to the "
                f"{fqname_for(cls)}. Most probably you have forgotten to mark "
                f"the class constructor as @validated()."
            )
        try:
            # The trainer is built from the same flat hyperparameter dict,
            # then everything is validated through the constructor's model.
            trainer = from_hyperparameters(Trainer, **hyperparameters)
            return cls(
                **Model(**{**hyperparameters, "trainer": trainer}).__dict__
            )
        except ValidationError as e:
            raise GluonTSHyperparametersError from e

    def create_transformation(self) -> Transformation:
        """
        Create and return the transformation needed for training and inference.
        Returns
        -------
        Transformation
            The transformation that will be applied entry-wise to datasets,
            at training and inference time.
        """
        raise NotImplementedError

    def create_training_network(self) -> HybridBlock:
        """
        Create and return the network used for training (i.e., computing the
        loss).
        Returns
        -------
        HybridBlock
            The network that computes the loss given input data.
        """
        raise NotImplementedError

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Create and return a predictor object.
        Returns
        -------
        Predictor
            A predictor wrapping a `HybridBlock` used for inference.
        """
        raise NotImplementedError

    def train_model(
        self, training_data: Dataset, validation_data: Optional[Dataset] = None
    ) -> TrainOutput:
        # Full training pipeline: build the transformation, fit any of its
        # data-dependent parameters, construct the loaders, train the
        # network, then wrap it into a predictor.
        transformation = self.create_transformation()
        transformation.estimate(iter(training_data))
        training_data_loader = TrainDataLoader(
            dataset=training_data,
            transform=transformation,
            batch_size=self.trainer.batch_size,
            num_batches_per_epoch=self.trainer.num_batches_per_epoch,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
        validation_data_loader = None
        if validation_data is not None:
            validation_data_loader = ValidationDataLoader(
                dataset=validation_data,
                transform=transformation,
                batch_size=self.trainer.batch_size,
                ctx=self.trainer.ctx,
                dtype=self.dtype,
            )
        # ensure that the training network is created within the same MXNet
        # context as the one that will be used during training
        with self.trainer.ctx:
            trained_net = self.create_training_network()
        self.trainer(
            net=trained_net,
            input_names=get_hybrid_forward_input_names(trained_net),
            train_iter=training_data_loader,
            validation_iter=validation_data_loader,
        )
        with self.trainer.ctx:
            # ensure that the prediction network is created within the same MXNet
            # context as the one that was used during training
            return TrainOutput(
                transformation=transformation,
                trained_net=trained_net,
                predictor=self.create_predictor(transformation, trained_net),
            )

    def train(
        self, training_data: Dataset, validation_data: Optional[Dataset] = None
    ) -> Predictor:
        # Convenience wrapper: run the full pipeline, return only the predictor.
        return self.train_model(training_data, validation_data).predictor
| 7,061 | 30.526786 | 81 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/canonical/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
class CanonicalNetworkBase(HybridBlock):
    # Shared wiring for the canonical training/prediction networks: holds
    # the sequence model, the static-feature embedder, the projection from
    # model outputs to distribution parameters, and a mean scaler.

    @validated()
    def __init__(
        self,
        model: HybridBlock,
        embedder: FeatureEmbedder,
        distr_output: DistributionOutput,
        is_sequential: bool,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.distr_output = distr_output
        self.is_sequential = is_sequential
        self.model = model
        self.embedder = embedder
        with self.name_scope():
            # Learnable projection: model outputs -> distribution args.
            self.proj_distr_args = self.distr_output.get_args_proj()
            self.scaler = MeanScaler(keepdims=True)

    def assemble_features(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> Tensor:
        """Tile the embedded static features over time and concatenate them
        with the time features into a single network input."""
        embedded_cat = self.embedder(
            feat_static_cat
        )  # (batch_size, num_features * embedding_size)
        # a workaround when you wish to repeat without knowing the number
        # of repeats
        helper_ones = F.ones_like(
            F.slice_axis(time_feat, axis=2, begin=-1, end=None)
        )
        # (batch_size, history_length, num_features * embedding_size)
        repeated_cat = F.batch_dot(
            helper_ones, F.expand_dims(embedded_cat, axis=1)
        )
        # putting together all the features
        input_feat = F.concat(repeated_cat, time_feat, dim=2)
        return input_feat

    def hybrid_forward(self, F, x, *args, **kwargs):
        # Implemented by the training/prediction subclasses.
        raise NotImplementedError
class CanonicalTrainingNetwork(CanonicalNetworkBase):
    # Computes the training loss (distribution negative log-likelihood)
    # over the context window.

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,
        # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length)
    ) -> Tensor:
        """
        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, num_features)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        past_target
            Shape: (batch_size, history_length)
        Returns
        -------
        Tensor
            A batch of negative log likelihoods.
        """
        # Scale is estimated from the context window; the ones tensor
        # currently marks every point as observed.
        _, target_scale = self.scaler(
            past_target,
            F.ones_like(past_target),  # TODO: pass the actual observed here
        )
        input_feat = self.assemble_features(F, feat_static_cat, past_time_feat)
        outputs = self.model(input_feat)
        # Project outputs to distribution parameters, rescaled per series.
        distr = self.distr_output.distribution(
            self.proj_distr_args(outputs), scale=target_scale
        )
        loss = distr.loss(past_target)
        return loss
class CanonicalPredictionNetwork(CanonicalNetworkBase):
    # Draws sample paths from the predictive distribution over the
    # prediction horizon.

    @validated()
    def __init__(
        self, prediction_len: int, num_parallel_samples: int, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        # Length of the forecast horizon.
        self.prediction_len = prediction_len
        # Number of sample paths drawn per time series.
        self.num_parallel_samples = num_parallel_samples

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        past_target: Tensor,
    ) -> Tensor:
        """
        Parameters
        ----------
        F
            Function space module.
        feat_static_cat
            Shape: (batch_size, num_features).
        past_time_feat
            Shape: (batch_size, history_length, num_features).
        future_time_feat
            Shape: (batch_size, history_length, num_features).
        past_target
            Shape: (batch_size, history_length).
        Returns
        -------
        Tensor
            a batch of prediction samples
            Shape: (batch_size, prediction_length, num_sample_paths)
        """
        # Scale comes from the context window only.
        _, target_scale = self.scaler(
            past_target,
            F.ones_like(past_target),  # TODO: pass the actual observed here
        )
        # Sequential models (e.g. RNNs) consume the full history plus the
        # horizon; non-sequential models only the horizon's features.
        time_feat = (
            F.concat(past_time_feat, future_time_feat, dim=1)
            if self.is_sequential
            else future_time_feat
        )
        input_feat = self.assemble_features(F, feat_static_cat, time_feat)
        outputs = self.model(input_feat)
        if self.is_sequential:
            # Keep only the outputs that correspond to the horizon.
            outputs = F.slice_axis(
                outputs, axis=1, begin=-self.prediction_len, end=None
            )
        distr = self.distr_output.distribution(
            self.proj_distr_args(outputs), scale=target_scale
        )
        samples = distr.sample(
            self.num_parallel_samples
        )  # (num_samples, batch_size, prediction_length, 1)
        # Move the batch axis first for downstream consumers.
        return samples.swapaxes(0, 1)
| 5,687 | 29.745946 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/canonical/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
from mxnet.gluon import HybridBlock, nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.rnn import RNN
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.time_feature import time_features_from_frequency_str
from gluonts.trainer import Trainer
from gluonts.transform import (
AddTimeFeatures,
AsNumpyArray,
Chain,
InstanceSplitter,
SetFieldIfNotPresent,
TestSplitSampler,
Transformation,
)
# Relative imports
from ._network import CanonicalPredictionNetwork, CanonicalTrainingNetwork
class CanonicalEstimator(GluonEstimator):
    # Estimator for the "canonical" model family: a user-supplied Gluon
    # model consumes time features (plus embedded static features) and
    # parameterizes an output distribution over the target.

    @validated()
    def __init__(
        self,
        model: HybridBlock,
        is_sequential: bool,
        freq: str,
        context_length: int,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
        cardinality: List[int] = list([1]),
        embedding_dimension: int = 10,
        distr_output: DistributionOutput = StudentTOutput(),
    ) -> None:
        super().__init__(trainer=trainer)
        # TODO: error checking
        self.freq = freq
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.num_parallel_samples = num_parallel_samples
        self.cardinality = cardinality
        # One embedding dimension per categorical feature.
        self.embedding_dimensions = [embedding_dimension for _ in cardinality]
        self.model = model
        # Whether the model consumes the full history (e.g. an RNN) or
        # only the prediction horizon's features (e.g. an MLP).
        self.is_sequential = is_sequential

    def create_transformation(self) -> Transformation:
        # Pipeline: coerce target -> add calendar features -> default the
        # static categorical feature -> split context/horizon windows.
        return Chain(
            trans=[
                AsNumpyArray(field=FieldName.TARGET, expected_ndim=1),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=time_features_from_frequency_str(self.freq),
                    pred_length=self.prediction_length,
                ),
                SetFieldIfNotPresent(
                    field=FieldName.FEAT_STATIC_CAT, value=[0.0]
                ),
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=TestSplitSampler(),
                    time_series_fields=[FieldName.FEAT_TIME],
                    past_length=self.context_length,
                    future_length=self.prediction_length,
                ),
            ]
        )

    def create_training_network(self) -> CanonicalTrainingNetwork:
        return CanonicalTrainingNetwork(
            embedder=FeatureEmbedder(
                cardinalities=self.cardinality,
                embedding_dims=self.embedding_dimensions,
            ),
            model=self.model,
            distr_output=self.distr_output,
            is_sequential=self.is_sequential,
        )

    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: CanonicalTrainingNetwork,
    ) -> Predictor:
        # The prediction network shares the trained parameters via
        # `params=trained_network.collect_params()`.
        prediction_net = CanonicalPredictionNetwork(
            embedder=trained_network.embedder,
            model=trained_network.model,
            distr_output=trained_network.distr_output,
            is_sequential=trained_network.is_sequential,
            prediction_len=self.prediction_length,
            num_parallel_samples=self.num_parallel_samples,
            params=trained_network.collect_params(),
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_net,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
class CanonicalRNNEstimator(CanonicalEstimator):
    """Canonical estimator backed by a recurrent model (RNN/LSTM/GRU)."""

    @validated()
    def __init__(
        self,
        freq: str,
        context_length: int,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        num_layers: int = 1,
        num_cells: int = 50,
        cell_type: str = "lstm",
        num_parallel_samples: int = 100,
        cardinality: List[int] = list([1]),
        embedding_dimension: int = 10,
        distr_output: DistributionOutput = StudentTOutput(),
    ) -> None:
        # The sequential model consumed by the canonical estimator.
        rnn_model = RNN(
            mode=cell_type, num_layers=num_layers, num_hidden=num_cells
        )
        super().__init__(
            model=rnn_model,
            is_sequential=True,
            freq=freq,
            context_length=context_length,
            prediction_length=prediction_length,
            trainer=trainer,
            num_parallel_samples=num_parallel_samples,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            distr_output=distr_output,
        )
class MLPForecasterEstimator(CanonicalEstimator):
    """Canonical estimator backed by a feed-forward (MLP) model."""

    @validated()
    def __init__(
        self,
        freq: str,
        context_length: int,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        hidden_dim_sequence=list([50]),
        num_parallel_samples: int = 100,
        cardinality: List[int] = list([1]),
        embedding_dimension: int = 10,
        distr_output: DistributionOutput = StudentTOutput(),
    ) -> None:
        # Stack one ReLU Dense layer per requested hidden dimension.
        mlp = nn.HybridSequential()
        for layer, layer_dim in enumerate(hidden_dim_sequence):
            dense = nn.Dense(
                layer_dim,
                flatten=False,
                activation="relu",
                prefix="mlp_%d_" % layer,
            )
            mlp.add(dense)
        super().__init__(
            model=mlp,
            is_sequential=False,
            freq=freq,
            context_length=context_length,
            prediction_length=prediction_length,
            trainer=trainer,
            num_parallel_samples=num_parallel_samples,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            distr_output=distr_output,
        )
| 7,229 | 33.759615 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-original/predictor.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
import json
from typing import (
TYPE_CHECKING,
Tuple,
Union,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
)
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
import gluonts
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import (
DType,
equals,
from_hyperparameters,
get_mxnet_context,
validated,
)
from gluonts.core.exception import GluonTSException
from gluonts.core.serde import dump_json, fqname_for, load_json
from gluonts.dataset.common import DataEntry, Dataset, ListDataset
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from gluonts.dataset.loader import DataBatch, InferenceDataLoader
from gluonts.model.forecast import Forecast
from gluonts.support.util import (
export_repr_block,
export_symb_block,
get_hybrid_forward_input_names,
hybrid_block_to_symbol_block,
import_repr_block,
import_symb_block,
)
from gluonts.transform import Transformation
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
    """
    Abstract class representing predictor objects.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    # GluonTS version the predictor was created with; stored next to
    # serialized models for compatibility checks.
    __version__: str = gluonts.__version__

    def __init__(self, prediction_length: int, freq: str) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        self.prediction_length = prediction_length
        self.freq = freq

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Compute forecasts for the time series in the provided dataset.
        This method is not implemented in this abstract class; please
        use one of the subclasses.
        Parameters
        ----------
        dataset
            The dataset containing the time series to predict.
        Returns
        -------
        Iterator[Forecast]
            Iterator over the forecasts, in the same order as the dataset
            iterable was provided.
        """
        raise NotImplementedError

    def serialize(self, path: Path) -> None:
        # Record the concrete Predictor class name so `deserialize` can
        # dispatch to it, plus version info for compatibility checks.
        # serialize Predictor type
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "gluonts": gluonts.__version__}, fp
            )

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path
        Parameters
        ----------
        path
            Path to the serialized files predictor.
        ctx
            Optional mxnet context to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # deserialize Predictor type
        with (path / "type.txt").open("r") as fp:
            tpe = locate(fp.readline())
        # ensure that predictor_cls is a subtype of Predictor
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # call deserialize() for the concrete Predictor type
        return tpe.deserialize(path, ctx)

    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        # Construct via the @validated() machinery from a flat dict.
        return from_hyperparameters(cls, **hyperparameters)
class RepresentablePredictor(Predictor):
    """
    An abstract predictor that can be subclassed by models that are not based
    on Gluon. Subclasses should have @validated() constructors.
    (De)serialization and value equality are all implemented on top of the
    @validated() logic.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length, freq)

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # Forecast the dataset one entry at a time, preserving order.
        for item in dataset:
            yield self.predict_item(item)

    def predict_item(self, item: DataEntry) -> Forecast:
        # Subclasses implement the per-entry forecasting logic here.
        raise NotImplementedError

    def __eq__(self, that):
        """
        Two RepresentablePredictor instances are considered equal if they
        have the same constructor arguments.
        """
        return equals(self, that)

    def serialize(self, path: Path) -> None:
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)
        # The @validated() constructor arguments fully describe the object.
        with (path / "predictor.json").open("w") as fp:
            print(dump_json(self), file=fp)

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentablePredictor":
        # `ctx` is unused: non-Gluon predictors have no MXNet context.
        with (path / "predictor.json").open("r") as fp:
            return load_json(fp.read())
class GluonPredictor(Predictor):
    """
    Base predictor type for Gluon-based models.
    Parameters
    ----------
    input_names
        Input tensor names for the graph
    prediction_net
        Network that will be called for prediction
    batch_size
        Number of time series to predict in a single batch
    prediction_length
        Number of time steps to predict
    freq
        Frequency of the input data
    input_transform
        Input transformation pipeline
    output_transform
        Output transformation
    ctx
        MXNet context to use for computation
    forecast_generator
        Class to generate forecasts from network ouputs
    """

    BlockType = mx.gluon.Block

    def __init__(
        self,
        input_names: List[str],
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.input_names = input_names
        self.prediction_net = prediction_net
        self.batch_size = batch_size
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.ctx = ctx
        self.dtype = dtype

    def hybridize(self, batch: DataBatch) -> None:
        """
        Hybridizes the underlying prediction network.
        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call.
        """
        self.prediction_net.hybridize(active=True)
        # One forward pass is needed to trigger graph construction.
        self.prediction_net(*[batch[k] for k in self.input_names])

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        """
        Returns a variant of the current :class:`GluonPredictor` backed
        by a Gluon `SymbolBlock`. If the current predictor is already a
        :class:`SymbolBlockPredictor`, it just returns itself.
        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call of the underlying network.
        Returns
        -------
        SymbolBlockPredictor
            A predictor derived from the current one backed by a `SymbolBlock`.
        """
        raise NotImplementedError

    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        # Stream batches through the transformation pipeline and delegate
        # forecast construction to the configured generator.
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            self.batch_size,
            ctx=self.ctx,
            dtype=self.dtype,
        )
        yield from self.forecast_generator(
            inference_data_loader=inference_data_loader,
            prediction_net=self.prediction_net,
            input_names=self.input_names,
            freq=self.freq,
            output_transform=self.output_transform,
            num_samples=num_samples,
        )

    def __eq__(self, that):
        # Equality is based on the learned parameters only.
        if type(self) != type(that):
            return False
        # TODO: also consider equality of the pipelines
        # if not equals(self.input_transform, that.input_transform):
        #    return False
        return equals(
            self.prediction_net.collect_params(),
            that.prediction_net.collect_params(),
        )

    def serialize(self, path: Path) -> None:
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)
        # serialize every GluonPredictor-specific parameters
        # serialize the prediction network
        self.serialize_prediction_net(path)
        # serialize transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)
        # FIXME: also needs to serialize the output_transform
        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                ctx=self.ctx,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)

    def serialize_prediction_net(self, path: Path) -> None:
        # Subclasses choose the network serialization format.
        raise NotImplementedError()
class SymbolBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure as an MXNet symbolic
    graph. Should be used for models deployed in production in order to
    ensure forward-compatibility as GluonTS models evolve.
    Used by the training shell if training is invoked with a hyperparameter
    `use_symbol_block_predictor = True`.
    """

    BlockType = mx.gluon.SymbolBlock

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        # Already symbol-block-backed; nothing to convert.
        return self

    def serialize_prediction_net(self, path: Path) -> None:
        # Export the network as a symbolic graph plus parameters.
        export_symb_block(self.prediction_net, path, "prediction_net")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "SymbolBlockPredictor":
        # Default to GPU when available, CPU otherwise.
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
                parameters["ctx"] = ctx
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            num_inputs = len(parameters["input_names"])
            prediction_net = import_symb_block(
                num_inputs, path, "prediction_net"
            )
            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class RepresentableBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure using the
    JSON-serialization methods located in `gluonts.core.serde`. Use the following
    logic to create a `RepresentableBlockPredictor` from a trained prediction
    network.

    >>> def create_representable_block_predictor(
    ...     prediction_network: mx.gluon.HybridBlock,
    ...     **kwargs
    ... ) -> RepresentableBlockPredictor:
    ...     return RepresentableBlockPredictor(
    ...         prediction_net=prediction_network,
    ...         **kwargs
    ...     )
    """

    BlockType = mx.gluon.HybridBlock

    def __init__(
        self,
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[
            Callable[[DataEntry, np.ndarray], np.ndarray]
        ] = None,
        dtype: DType = np.float32,
    ) -> None:
        # unlike the base class, `input_names` is not a constructor argument:
        # it is derived from the signature of the network's hybrid_forward
        super().__init__(
            input_names=get_hybrid_forward_input_names(prediction_net),
            prediction_net=prediction_net,
            batch_size=batch_size,
            prediction_length=prediction_length,
            freq=freq,
            ctx=ctx,
            input_transform=input_transform,
            forecast_generator=forecast_generator,
            output_transform=output_transform,
            dtype=dtype,
        )

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> SymbolBlockPredictor:
        """
        Convert this predictor into a ``SymbolBlockPredictor`` by running
        ``hybrid_block_to_symbol_block`` on the given data batch.
        """
        symbol_block_net = hybrid_block_to_symbol_block(
            hb=self.prediction_net,
            data_batch=[batch[k] for k in self.input_names],
        )

        return SymbolBlockPredictor(
            input_names=self.input_names,
            prediction_net=symbol_block_net,
            batch_size=self.batch_size,
            prediction_length=self.prediction_length,
            freq=self.freq,
            ctx=self.ctx,
            input_transform=self.input_transform,
            forecast_generator=self.forecast_generator,
            output_transform=self.output_transform,
            dtype=self.dtype,
        )

    def serialize(self, path: Path) -> None:
        # warn because the network structure itself is serialized via serde,
        # which is not guaranteed to stay readable across GluonTS versions
        logging.warning(
            "Serializing RepresentableBlockPredictor instances does not save "
            "the prediction network structure in a backwards-compatible "
            "manner. Be careful not to use this method in production."
        )
        super().serialize(path)

    def serialize_prediction_net(self, path: Path) -> None:
        export_repr_block(self.prediction_net, path, "prediction_net")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentableBlockPredictor":
        """Rebuild a predictor previously written by ``serialize``."""
        ctx = ctx if ctx is not None else get_mxnet_context()

        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())

            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())

            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")

            # input_names is derived from the prediction_net (see __init__),
            # so a stale serialized value must not be passed through
            if "input_names" in parameters:
                del parameters["input_names"]

            parameters["ctx"] = ctx

            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class WorkerError:
    """Marker object sent through the output queue to signal a worker failure."""

    def __init__(self, msg):
        # `msg` holds the formatted traceback of the failing worker
        self.msg = msg
def _worker_loop(
    predictor_path: Path,
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    worker_id,
    **kwargs,
):
    """
    Worker loop for multiprocessing Predictor.

    Loads the predictor serialized in predictor_path,
    reads inputs from input_queue and writes forecasts to output_queue.
    """

    predictor = Predictor.deserialize(predictor_path)
    while True:
        idx, data_chunk = input_queue.get()
        # (None, None) is the shutdown sentinel sent by the parent; echo a
        # (None, None, None) back so the parent can count stopped workers
        if idx is None:
            output_queue.put((None, None, None))
            break
        try:
            result = list(predictor.predict(data_chunk, **kwargs))
        except Exception:
            # ship the formatted traceback back to the parent (which re-raises)
            # and stop this worker
            we = WorkerError(
                "".join(traceback.format_exception(*sys.exc_info()))
            )
            output_queue.put((we, None, None))
            break
        output_queue.put((idx, worker_id, result))
class ParallelizedPredictor(Predictor):
    """
    Runs multiple instances (workers) of a predictor in parallel.

    Exceptions are propagated from the workers.

    Note: That there is currently an issue with tqdm that will cause things
    to hang if the ParallelizedPredictor is used with tqdm and an exception
    occurs during prediction.

    https://github.com/tqdm/tqdm/issues/548

    Parameters
    ----------
    base_predictor
        A representable predictor that will be used
    num_workers
        Number of workers (processes) to use. If set to
        None, one worker per CPU will be used.
    chunk_size
        Number of items to pass per call
    """

    def __init__(
        self,
        base_predictor: Predictor,
        num_workers: Optional[int] = None,
        chunk_size=1,
    ) -> None:
        super().__init__(base_predictor.prediction_length, base_predictor.freq)

        self._base_predictor = base_predictor
        self._num_workers = (
            num_workers if num_workers is not None else mp.cpu_count()
        )
        self._chunk_size = chunk_size
        self._num_running_workers = 0
        self._input_queues = []
        self._output_queue = None

    def _grouper(self, iterable, n):
        # yield successive tuples of up to `n` items until exhaustion
        iterator = iter(iterable)
        group = tuple(itertools.islice(iterator, n))
        while group:
            yield group
            group = tuple(itertools.islice(iterator, n))

    def terminate(self):
        # send the shutdown sentinel, then force-stop and join every worker
        for q in self._input_queues:
            q.put((None, None))
        for w in self._workers:
            w.terminate()
        for i, w in enumerate(self._workers):
            w.join()

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Predict over `dataset` by fanning chunks out to worker processes;
        forecasts are yielded in the original dataset order.
        """
        with TemporaryDirectory() as tempdir:
            predictor_path = Path(tempdir)
            # workers re-load the predictor from disk (see _worker_loop)
            self._base_predictor.serialize(predictor_path)

            # TODO: Consider using shared memory for the data transfer.

            self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
            self._output_queue = mp.Queue()

            workers = []
            for worker_id, in_q in enumerate(self._input_queues):
                worker = mp.Process(
                    target=_worker_loop,
                    args=(predictor_path, in_q, self._output_queue, worker_id),
                    kwargs=kwargs,
                )
                worker.daemon = True
                worker.start()
                workers.append(worker)
                self._num_running_workers += 1

            self._workers = workers

            chunked_data = self._grouper(dataset, self._chunk_size)

            # `_send_idx` numbers outgoing chunks; `_next_idx` is the next
            # chunk index due to be yielded (results may arrive out of order)
            self._send_idx = 0
            self._next_idx = 0

            # out-of-order results parked until their index comes up
            self._data_buffer = {}

            worker_ids = list(range(self._num_workers))

            def receive():
                # block until some worker reports a result, an error, or
                # a shutdown acknowledgement
                idx, worker_id, result = self._output_queue.get()
                if isinstance(idx, WorkerError):
                    self._num_running_workers -= 1
                    self.terminate()
                    raise Exception(idx.msg)
                if idx is not None:
                    self._data_buffer[idx] = result
                return idx, worker_id, result

            def get_next_from_buffer():
                # drain buffered chunks in send order to keep forecasts
                # aligned with the input dataset order
                while self._next_idx in self._data_buffer:
                    result_batch = self._data_buffer.pop(self._next_idx)
                    self._next_idx += 1
                    for result in result_batch:
                        yield result

            def send(worker_id, chunk):
                q = self._input_queues[worker_id]
                q.put((self._send_idx, chunk))
                self._send_idx += 1

            try:
                # prime the queues
                for wid in worker_ids:
                    chunk = next(chunked_data)
                    send(wid, chunk)

                while True:
                    idx, wid, result = receive()
                    for res in get_next_from_buffer():
                        yield res
                    # re-feed the worker that just finished a chunk
                    chunk = next(chunked_data)
                    send(wid, chunk)
            except StopIteration:
                # signal workers end of data
                for q in self._input_queues:
                    q.put((None, None))

                # collect any outstanding results
                while self._num_running_workers > 0:
                    idx, worker_id, result = receive()
                    if idx is None:
                        self._num_running_workers -= 1
                        continue
                    for res in get_next_from_buffer():
                        yield res
                assert len(self._data_buffer) == 0
                assert self._send_idx == self._next_idx
class Localizer(Predictor):
    """
    A Predictor that uses an estimator to train a local model per time series
    and immediately calls this to predict.

    Parameters
    ----------
    estimator
        The estimator object to train on each dataset entry at prediction time.
    """

    def __init__(self, estimator: "Estimator"):
        super().__init__(estimator.prediction_length, estimator.freq)
        self.estimator = estimator

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        logger = logging.getLogger(__name__)
        for idx, entry in enumerate(dataset, start=1):
            # train a fresh model on this single series, then forecast it
            logger.info(f"training for time series {idx} / {len(dataset)}")
            single_ts = ListDataset([entry], freq=self.freq)
            local_predictor = self.estimator.train(single_ts)
            logger.info(f"predicting for time series {idx} / {len(dataset)}")
            yield from local_predictor.predict(single_ts, **kwargs)
class FallbackPredictor(Predictor):
    @classmethod
    def from_predictor(
        cls, base: RepresentablePredictor, **overrides
    ) -> Predictor:
        """
        Build a fallback predictor on the fly from an existing predictor,
        re-using `base`'s recorded constructor arguments (overrides win).
        """
        base_args = getattr(base, "__init_args__")
        return cls.from_hyperparameters(**base_args, **overrides)
def fallback(fallback_cls: Type[FallbackPredictor]):
    """
    Decorator factory for ``predict_item`` methods: on an unexpected error,
    log the traceback and delegate the item to a predictor built from
    ``fallback_cls``.
    """

    def decorator(predict_item):
        @functools.wraps(predict_item)
        def fallback_predict(self, item: DataEntry) -> Forecast:
            try:
                return predict_item(self, item)
            except GluonTSException:
                # deliberate domain errors propagate untouched
                raise
            except Exception:
                logging.warning(
                    f"Base predictor failed with: {traceback.format_exc()}"
                )
                return fallback_cls.from_predictor(self).predict_item(item)

        return fallback_predict

    return decorator
| 23,995 | 30.994667 | 81 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-original/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of the elements of `xs` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARNetwork(mx.gluon.HybridBlock):
    """
    Base network shared by the DeepAR training and prediction networks.

    Holds the recurrent stack (LSTM/GRU cells with residual connections on
    all but the first layer, and optional zoneout), the embedder for static
    categorical features, the target scaler, and the projection onto the
    parameters of the output distribution.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype

        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"

        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # note: sorts the caller's list in place; self.lags_seq is ascending
        lags_seq.sort()

        self.lags_seq = lags_seq

        self.distr_output = distr_output
        # raises KeyError for any cell_type other than 'lstm' or 'gru'
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]

        self.target_shape = distr_output.event_shape

        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"

        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # residual connections on all layers but the first
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # zoneout on the cell states serves as dropout regularization
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.

        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.

        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)

        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            # end=None selects through the end of the sequence when lag is 0
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.

        Returns outputs and state of the encoder, plus the scale of
        past_target and a vector of static features that was constructed and
        fed as input to the encoder. All tensor arguments should have NTC
        layout.
        """

        # passing both future tensors selects "training mode" (unroll over
        # context + prediction range); passing neither selects inference mode
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )

        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )

        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)

        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )

        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))

        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )

        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)

        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # batch_size 0 is a placeholder used in the symbolic case,
                # where the concrete batch size is unknown at this point
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )

        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """DeepAR network used at training time; computes the likelihood loss."""

    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.

        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.

        Input arguments are the same as for the hybrid_forward method.

        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)

        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )

        # project RNN outputs to the parameters of the output distribution
        distr_args = self.proj_distr_args(rnn_outputs)

        return self.distr_output.distribution(distr_args, scale=scale)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)

        Returns
        -------
        a tuple of (weighted_loss, loss), where `loss` is the per-time-step
        negative log-likelihood over context + prediction range and
        `weighted_loss` is its observed-values-weighted average over time.
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )

        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )

        # (batch_size, seq_len)
        loss = distr.loss(target)

        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )

        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )

        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )

        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """DeepAR network used at inference time; draws ancestral sample paths."""

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of sample paths drawn per series in one forward pass
        self.num_parallel_samples = num_parallel_samples

        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.

        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """

        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]

        future_samples = []

        # for each future time-units we draw new samples for this time-unit
        # and update the state (ancestral sampling)
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                # the sequence grows by one sampled step per iteration
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )

            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )

            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )

            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )

            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )

            distr_args = self.proj_distr_args(rnn_outputs)

            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )

            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)

            # (batch_size * num_samples, seq_len, *target_shape)
            # append the drawn sample so it can serve as a lag input for the
            # next time step
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)

        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)

        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)

        Returns
        -------
        Tensor
            Predicted samples
        """

        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )

        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 21,761 | 34.85173 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-original/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
"""
Construct a DeepAR estimator.
This implements an RNN-based model, close to the one described in
[SFG17]_.
*Note:* the code of this model is unrelated to the implementation behind
`SageMaker's DeepAR Forecasting Algorithm
<https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
Parameters
----------
freq
Frequency of the data to train on and predict
prediction_length
Length of the prediction horizon
trainer
Trainer object to be used (default: Trainer())
context_length
Number of steps to unroll the RNN for before computing predictions
(default: None, in which case context_length = prediction_length)
num_layers
Number of RNN layers (default: 2)
num_cells
Number of RNN cells for each layer (default: 40)
cell_type
Type of recurrent cells to use (available: 'lstm' or 'gru';
default: 'lstm')
dropout_rate
Dropout regularization parameter (default: 0.1)
use_feat_dynamic_real
Whether to use the ``feat_dynamic_real`` field from the data
(default: False)
use_feat_static_cat
Whether to use the ``feat_static_cat`` field from the data
(default: False)
use_feat_static_real
Whether to use the ``feat_static_real`` field from the data
(default: False)
cardinality
Number of values of each categorical feature.
This must be set if ``use_feat_static_cat == True`` (default: None)
embedding_dimension
Dimension of the embeddings for categorical features
(default: [min(50, (cat+1)//2) for cat in cardinality])
distr_output
Distribution to use to evaluate observations and sample predictions
(default: StudentTOutput())
scaling
Whether to automatically scale the target values (default: true)
lags_seq
Indices of the lagged target values to use as inputs of the RNN
(default: None, in which case these are automatically determined
based on freq)
time_features
Time features to use as inputs of the RNN (default: None, in which
case these are automatically determined based on freq)
num_parallel_samples
Number of evaluation samples per time series to increase parallelism during inference.
This is a model optimization that does not affect the accuracy (default: 100)
"""
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        """See the class docstring for the description of each parameter."""
        super().__init__(trainer=trainer, dtype=dtype)

        # validate hyperparameters before storing anything
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"

        self.freq = freq
        # default: context window as long as the prediction horizon
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # dummy single-category feature when static cat features are disabled
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        # default embedding size heuristic: min(50, (cat + 1) // 2)
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        # lags and time features default to frequency-derived choices
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )

        # enough history so that the largest lag is always available
        self.history_length = self.context_length + max(self.lags_seq)

        self.num_parallel_samples = num_parallel_samples
def create_transformation(self) -> Transformation:
remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
if not self.use_feat_static_real:
remove_field_names.append(FieldName.FEAT_STATIC_REAL)
if not self.use_feat_dynamic_real:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
return Chain(
[RemoveFields(field_names=remove_field_names)]
+ (
[SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
if not self.use_feat_static_cat
else []
)
+ (
[
SetField(
output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
)
]
if not self.use_feat_static_real
else []
)
+ [
AsNumpyArray(
field=FieldName.FEAT_STATIC_CAT,
expected_ndim=1,
dtype=self.dtype,
),
AsNumpyArray(
field=FieldName.FEAT_STATIC_REAL,
expected_ndim=1,
dtype=self.dtype,
),
AsNumpyArray(
field=FieldName.TARGET,
# in the following line, we add 1 for the time dimension
expected_ndim=1 + len(self.distr_output.event_shape),
dtype=self.dtype,
),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
dtype=self.dtype,
),
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=self.time_features,
pred_length=self.prediction_length,
),
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
log_scale=True,
dtype=self.dtype,
),
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
+ (
[FieldName.FEAT_DYNAMIC_REAL]
if self.use_feat_dynamic_real
else []
),
),
InstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
train_sampler=ExpectedNumInstanceSampler(num_instances=1),
past_length=self.history_length,
future_length=self.prediction_length,
time_series_fields=[
FieldName.FEAT_TIME,
FieldName.OBSERVED_VALUES,
],
),
]
)
def create_training_network(self) -> DeepARTrainingNetwork:
return DeepARTrainingNetwork(
num_layers=self.num_layers,
num_cells=self.num_cells,
cell_type=self.cell_type,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
dropout_rate=self.dropout_rate,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=self.scaling,
dtype=self.dtype,
)
def create_predictor(
self, transformation: Transformation, trained_network: HybridBlock
) -> Predictor:
prediction_network = DeepARPredictionNetwork(
num_parallel_samples=self.num_parallel_samples,
num_layers=self.num_layers,
num_cells=self.num_cells,
cell_type=self.cell_type,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
dropout_rate=self.dropout_rate,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=self.scaling,
dtype=self.dtype,
)
copy_parameters(trained_network, prediction_network)
return RepresentableBlockPredictor(
input_transform=transformation,
prediction_net=prediction_network,
batch_size=self.trainer.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
dtype=self.dtype,
)
| 12,645 | 37.090361 | 94 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/simple_feedforward/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import validated
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.model.common import Tensor
class SimpleFeedForwardNetworkBase(mx.gluon.HybridBlock):
    """
    Shared base for the feed-forward probabilistic forecasting networks.

    It builds the MLP and the projection to distribution parameters;
    subclasses implement ``hybrid_forward`` on top of :meth:`get_distr`,
    defining respectively how the loss is computed (training) and how
    predictions are sampled (prediction).

    Parameters
    ----------
    num_hidden_dimensions
        Number of hidden nodes in each layer.
    prediction_length
        Number of time units to predict.
    context_length
        Number of time units that condition the predictions.
    batch_normalization
        Whether to use batch normalization.
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse.
    distr_output
        Distribution to fit.
    kwargs
    """

    # The validated decorator checks argument types and makes the block
    # serializable.
    @validated()
    def __init__(
        self,
        num_hidden_dimensions: List[int],
        prediction_length: int,
        context_length: int,
        batch_normalization: bool,
        mean_scaling: bool,
        distr_output: DistributionOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_hidden_dimensions = num_hidden_dimensions
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.distr_output = distr_output
        with self.name_scope():
            self.distr_args_proj = self.distr_output.get_args_proj()
            self.mlp = mx.gluon.nn.HybridSequential()
            hidden_dims = self.num_hidden_dimensions
            # hidden layers: Dense + ReLU, each optionally followed by
            # batch normalization
            for units in hidden_dims[:-1]:
                self.mlp.add(
                    mx.gluon.nn.Dense(units=units, activation="relu")
                )
                if self.batch_normalization:
                    self.mlp.add(mx.gluon.nn.BatchNorm())
            final_dim = hidden_dims[-1]
            # final projection emits prediction_length * final_dim values,
            # reshaped to (batch, prediction_length, final_dim)
            self.mlp.add(
                mx.gluon.nn.Dense(units=prediction_length * final_dim)
            )
            self.mlp.add(
                mx.gluon.nn.HybridLambda(
                    lambda F, o: F.reshape(
                        o, (-1, prediction_length, final_dim)
                    )
                )
            )
            self.scaler = MeanScaler() if mean_scaling else NOPScaler()

    def get_distr(self, F, past_target: Tensor) -> Distribution:
        """
        Apply the feed-forward network to past target values and map its
        output to a probability distribution for future observations.

        Parameters
        ----------
        F
        past_target
            Tensor containing past target observations.
            Shape: (batch_size, context_length, target_dim).

        Returns
        -------
        Distribution
            The predicted probability distribution for future observations.
        """
        # rescale the input; every value is treated as observed for now
        scaled_target, target_scale = self.scaler(
            past_target,
            F.ones_like(past_target),  # TODO: pass the actual observed here
        )
        net_output = self.mlp(scaled_target)
        projected_args = self.distr_args_proj(net_output)
        return self.distr_output.distribution(
            projected_args, scale=target_scale.expand_dims(axis=1)
        )
class SimpleFeedForwardTrainingNetwork(SimpleFeedForwardNetworkBase):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, past_target: Tensor, future_target: Tensor
    ) -> Tensor:
        """
        Predict a distribution for the future given the past, and score the
        actually observed future values under that distribution.

        Parameters
        ----------
        F
        past_target
            Tensor with past observations.
            Shape: (batch_size, context_length, target_dim).
        future_target
            Tensor with future observations.
            Shape: (batch_size, prediction_length, target_dim).

        Returns
        -------
        Tensor
            Loss tensor. Shape: (batch_size, ).
        """
        predicted_distr = self.get_distr(F, past_target)
        # per-time-step loss: (batch_size, prediction_length, target_dim)
        step_loss = predicted_distr.loss(future_target)
        # average over the prediction horizon -> (batch_size, )
        return step_loss.mean(axis=1)
class SimpleFeedForwardPredictionNetwork(SimpleFeedForwardNetworkBase):
    @validated()
    def __init__(
        self, num_parallel_samples: int = 100, *args, **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        # number of sample paths drawn per time series
        self.num_parallel_samples = num_parallel_samples

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, past_target: Tensor) -> Tensor:
        """
        Predict a distribution for the future given the past, and draw
        sample paths from it.

        Parameters
        ----------
        F
        past_target
            Tensor with past observations.
            Shape: (batch_size, context_length, target_dim).

        Returns
        -------
        Tensor
            Prediction sample. Shape: (samples, batch_size, prediction_length).
        """
        predicted_distr = self.get_distr(F, past_target)
        # draw samples: (num_samples, batch_size, prediction_length)
        drawn = predicted_distr.sample(self.num_parallel_samples)
        # reorder to (batch_size, num_samples, prediction_length)
        return drawn.swapaxes(0, 1)
| 6,491 | 32.989529 | 81 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/simple_feedforward/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.trainer import Trainer
from gluonts.transform import (
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
Transformation,
)
# Relative imports
from ._network import (
SimpleFeedForwardPredictionNetwork,
SimpleFeedForwardTrainingNetwork,
)
class SimpleFeedForwardEstimator(GluonEstimator):
    """
    SimpleFeedForwardEstimator shows how to build a simple MLP model predicting
    the next target time-steps given the previous ones.
    Given that we want to define a gluon model trainable by SGD, we inherit the
    parent class `GluonEstimator` that handles most of the logic for fitting a
    neural-network.
    We thus only have to define:
    1. How the data is transformed before being fed to our model::
        def create_transformation(self) -> Transformation
    2. How the training happens::
        def create_training_network(self) -> HybridBlock
    3. how the predictions can be made for a batch given a trained network::
        def create_predictor(
            self,
            transformation: Transformation,
            trained_net: HybridBlock,
        ) -> Predictor
    Parameters
    ----------
    freq
        Time granularity of the data
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    num_hidden_dimensions
        Number of hidden nodes in each layer (default: [40, 40])
    context_length
        Number of time units that condition the predictions
        (default: None, in which case context_length = prediction_length)
    distr_output
        Distribution to fit (default: StudentTOutput())
    batch_normalization
        Whether to use batch normalization (default: False)
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse (default: True)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    # The validated() decorator makes sure that parameters are checked by
    # Pydantic and allows to serialize/print models. Note that all parameters
    # have defaults except for `freq` and `prediction_length`, which is
    # recommended in GluonTS to allow to compare models easily.
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        num_hidden_dimensions: Optional[List[int]] = None,
        context_length: Optional[int] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        batch_normalization: bool = False,
        mean_scaling: bool = True,
        num_parallel_samples: int = 100,
    ) -> None:
        """
        Defines an estimator. All parameters should be serializable.
        """
        super().__init__(trainer=trainer)
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        # BUGFIX: the original wrapped the per-element checks in a list
        # comprehension without `all(...)`; a non-empty list is always
        # truthy, so the validation could never fail.
        assert num_hidden_dimensions is None or all(
            d > 0 for d in num_hidden_dimensions
        ), "Elements of `num_hidden_dimensions` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.num_hidden_dimensions = (
            num_hidden_dimensions
            if num_hidden_dimensions is not None
            else [40, 40]
        )
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.freq = freq
        self.distr_output = distr_output
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.num_parallel_samples = num_parallel_samples

    # here we do only a simple operation to convert the input data to a form
    # that can be digested by our model by only splitting the target in two, a
    # conditioning part and a to-predict part, for each training example.
    # For a more complex transformation example, see the `gluonts.model.deepar`
    # transformation that includes time features, age feature, observed values
    # indicator, ...
    def create_transformation(self) -> Transformation:
        """Split each series into a conditioning and a to-predict window."""
        return Chain(
            [
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.context_length,
                    future_length=self.prediction_length,
                    time_series_fields=[],  # [FieldName.FEAT_DYNAMIC_REAL]
                )
            ]
        )

    # defines the network, we get to see one batch to initialize it.
    # the network should return at least one tensor that is used as a loss to minimize in the training loop.
    # several tensors can be returned for instance for analysis, see DeepARTrainingNetwork for an example.
    def create_training_network(self) -> HybridBlock:
        """Build the network whose output is minimized during training."""
        return SimpleFeedForwardTrainingNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
        )

    # we now define how the prediction happens given that we are provided a
    # training network.
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Wrap a prediction network sharing the trained parameters."""
        prediction_network = SimpleFeedForwardPredictionNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            # reuse the trained parameters directly instead of copying
            params=trained_network.collect_params(),
            num_parallel_samples=self.num_parallel_samples,
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 7,852 | 37.684729 | 108 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-inuse/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of the elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARNetwork(mx.gluon.HybridBlock):
    """
    Base network for DeepAR, shared by the training and prediction variants.

    Owns the stacked RNN (LSTM or GRU cells, with residual connections
    above the first layer and optional zoneout regularization), the
    embedder for static categorical features, the target scaler, and the
    projection from RNN outputs to distribution parameters. Also
    implements the encoder unrolling used by both subclasses.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # sorted in place so that lag handling can rely on ascending order;
        # note this mutates the caller's list
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # residual (skip) connections for every layer above the first
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # zoneout on the recurrent states is used as dropout
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            # end=None means "through the end of the axis" (lag_index == 0)
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        # prediction mode: no future data — unroll only over the context
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        # training mode: concatenate past and future along the time axis
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            # imperative mode reads the batch size off the input array;
            # in symbolic mode 0 is passed — presumably inferred later,
            # TODO confirm against the mxnet unroll/begin_state semantics
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """
    DeepAR network used during training: the encoder is unrolled over both
    past and future targets (teacher forcing) and the loss is the
    distribution loss of the observed values, masked by the observation
    indicators.
    """

    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Note: future_observed_values is accepted to mirror hybrid_forward's
        signature but is not used here.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Tensor
            Weighted loss, averaged over the time axis. Shape: (batch_size, ).
        Tensor
            Unweighted per-time-step loss.
            Shape: (batch_size, context_length + prediction_length).
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # return both the time-averaged (weighted) loss and the raw
        # per-time-step loss
        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """
    DeepAR network used at inference time: unrolls the encoder over the
    past data only, then draws sample paths autoregressively with
    ``sampling_decoder``, feeding each drawn sample back as the newest
    "past" value.
    """

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of sample paths drawn in parallel per time series
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # feed the sample back as the newest "past" value for the
            # next decoding step
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 21,761 | 34.85173 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-inuse/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate all constructor arguments up front so misconfiguration
        # fails fast rather than deep inside training.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        # context_length defaults to the prediction horizon when not given.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # When static categorical features are disabled, a dummy cardinality
        # of [1] is used (a constant feature is injected by the transform).
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        # Lags and time features are derived from the frequency string when
        # not given explicitly.
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # The encoder needs enough history to look up the largest lag for
        # every position of the context window.
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> Transformation:
        """
        Build the preprocessing chain applied to every dataset entry:
        drop unused fields, inject dummy static features when disabled,
        convert fields to numpy arrays, add observed-value indicators,
        time/age features, stack dynamic features, and finally split each
        series into (past, future) training instances.
        """
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )
    def create_training_network(self) -> DeepARTrainingNetwork:
        """Instantiate the training network with this estimator's settings."""
        return DeepARTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Build a prediction network mirroring the training network's
        configuration, copy the trained parameters into it, and wrap it in
        a predictor together with the preprocessing chain.
        """
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        # Training and prediction networks share parameter names, so the
        # trained weights can be copied over directly.
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,645 | 37.090361 | 94 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-savedata/predictor.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
import json
from typing import (
TYPE_CHECKING,
Tuple,
Union,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
)
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
import gluonts
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import (
DType,
equals,
from_hyperparameters,
get_mxnet_context,
validated,
)
from gluonts.core.exception import GluonTSException
from gluonts.core.serde import dump_json, fqname_for, load_json
from gluonts.dataset.common import DataEntry, Dataset, ListDataset
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from gluonts.dataset.loader import DataBatch, InferenceDataLoader
from gluonts.model.forecast import Forecast
from gluonts.support.util import (
export_repr_block,
export_symb_block,
get_hybrid_forward_input_names,
hybrid_block_to_symbol_block,
import_repr_block,
import_symb_block,
)
from gluonts.transform import Transformation
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
    """
    Abstract class representing predictor objects.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """
    # Version stamp written out by serialize(); defaults to the gluonts
    # package version.
    __version__: str = gluonts.__version__
    def __init__(self, prediction_length: int, freq: str) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        self.prediction_length = prediction_length
        self.freq = freq
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Compute forecasts for the time series in the provided dataset.
        This method is not implemented in this abstract class; please
        use one of the subclasses.
        Parameters
        ----------
        dataset
            The dataset containing the time series to predict.
        Returns
        -------
        Iterator[Forecast]
            Iterator over the forecasts, in the same order as the dataset
            iterable was provided.
        """
        raise NotImplementedError
    def serialize(self, path: Path) -> None:
        """Write the concrete class name and version info into ``path``."""
        # serialize Predictor type
        # Note: written without a trailing newline, so deserialize() can
        # feed readline()'s result straight into pydoc.locate().
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "gluonts": gluonts.__version__}, fp
            )
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path
        Parameters
        ----------
        path
            Path to the serialized files predictor.
        ctx
            Optional mxnet context to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # deserialize Predictor type
        with (path / "type.txt").open("r") as fp:
            tpe = locate(fp.readline())
        # ensure that predictor_cls is a subtype of Predictor
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # call deserialize() for the concrete Predictor type
        return tpe.deserialize(path, ctx)
    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        # Delegate to the module-level factory that builds an instance from
        # keyword hyperparameters.
        return from_hyperparameters(cls, **hyperparameters)
class RepresentablePredictor(Predictor):
    """
    Abstract base for predictors that are not backed by Gluon networks.

    Subclasses must declare a ``@validated()`` constructor; serialization,
    deserialization and value equality are then all derived from the
    recorded constructor arguments.

    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length, freq)

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # Forecast each dataset entry individually via predict_item().
        yield from (self.predict_item(entry) for entry in dataset)

    def predict_item(self, item: DataEntry) -> Forecast:
        raise NotImplementedError

    def __eq__(self, that):
        """
        Two RepresentablePredictor instances are considered equal if they
        have the same constructor arguments.
        """
        return equals(self, that)

    def serialize(self, path: Path) -> None:
        # Predictor.serialize() records the concrete class name first.
        super().serialize(path)
        with (path / "predictor.json").open("w") as fp:
            print(dump_json(self), file=fp)

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentablePredictor":
        # The JSON payload fully encodes the instance, ctx is not needed.
        with (path / "predictor.json").open("r") as fp:
            return load_json(fp.read())
class GluonPredictor(Predictor):
    """
    Base predictor type for Gluon-based models.
    Parameters
    ----------
    input_names
        Input tensor names for the graph
    prediction_net
        Network that will be called for prediction
    batch_size
        Number of time series to predict in a single batch
    prediction_length
        Number of time steps to predict
    freq
        Frequency of the input data
    input_transform
        Input transformation pipeline
    output_transform
        Output transformation
    ctx
        MXNet context to use for computation
    forecast_generator
        Class to generate forecasts from network ouputs
    """
    BlockType = mx.gluon.Block
    def __init__(
        self,
        input_names: List[str],
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.input_names = input_names
        self.prediction_net = prediction_net
        #self.batch_size = batch_size
        # NOTE(review): the `batch_size` argument is deliberately ignored and
        # the batch size pinned to 1 here (and again in predict() below) --
        # presumably so the "savedata" instrumentation in the network sees one
        # series per batch; confirm before reusing this class elsewhere.
        self.batch_size = 1
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.ctx = ctx
        self.dtype = dtype
    def hybridize(self, batch: DataBatch) -> None:
        """
        Hybridizes the underlying prediction network.
        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call.
        """
        self.prediction_net.hybridize(active=True)
        # A forward pass is required for MXNet to build the cached graph.
        self.prediction_net(*[batch[k] for k in self.input_names])
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        """
        Returns a variant of the current :class:`GluonPredictor` backed
        by a Gluon `SymbolBlock`. If the current predictor is already a
        :class:`SymbolBlockPredictor`, it just returns itself.
        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call of the underlying network.
        Returns
        -------
        SymbolBlockPredictor
            A predictor derived from the current one backed by a `SymbolBlock`.
        """
        raise NotImplementedError
    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        """
        Transform the dataset, run it through the prediction network in
        batches, and yield one Forecast per input series via the configured
        forecast generator.
        """
        #print('predict')
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            #self.batch_size,
            # NOTE(review): batch size hard-coded to 1, mirroring the
            # override in __init__ above.
            1,
            ctx=self.ctx,
            dtype=self.dtype,
        )
        yield from self.forecast_generator(
            inference_data_loader=inference_data_loader,
            prediction_net=self.prediction_net,
            input_names=self.input_names,
            freq=self.freq,
            output_transform=self.output_transform,
            num_samples=num_samples,
        )
    def __eq__(self, that):
        # Equality is based solely on the network parameters.
        if type(self) != type(that):
            return False
        # TODO: also consider equality of the pipelines
        # if not equals(self.input_transform, that.input_transform):
        #     return False
        return equals(
            self.prediction_net.collect_params(),
            that.prediction_net.collect_params(),
        )
    def serialize(self, path: Path) -> None:
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)
        # serialize every GluonPredictor-specific parameters
        # serialize the prediction network
        self.serialize_prediction_net(path)
        # serialize transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)
        # FIXME: also needs to serialize the output_transform
        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                ctx=self.ctx,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)
    def serialize_prediction_net(self, path: Path) -> None:
        # Subclasses choose the network serialization format.
        raise NotImplementedError()
class SymbolBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure as an MXNet symbolic
    graph. Should be used for models deployed in production in order to
    ensure forward-compatibility as GluonTS models evolve.
    Used by the training shell if training is invoked with a hyperparameter
    `use_symbol_block_predictor = True`.
    """
    BlockType = mx.gluon.SymbolBlock
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        # Already symbol-backed; nothing to convert.
        return self
    def serialize_prediction_net(self, path: Path) -> None:
        # Export the network as a symbolic graph plus parameters.
        export_symb_block(self.prediction_net, path, "prediction_net")
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "SymbolBlockPredictor":
        """Rebuild a predictor from the files written by serialize()."""
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
            parameters["ctx"] = ctx
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            # the number of graph inputs must match the serialized input names
            num_inputs = len(parameters["input_names"])
            prediction_net = import_symb_block(
                num_inputs, path, "prediction_net"
            )
            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class RepresentableBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure using the
    JSON-serialization methods located in `gluonts.core.serde`. Use the following
    logic to create a `RepresentableBlockPredictor` from a trained prediction
    network.
    >>> def create_representable_block_predictor(
    ...     prediction_network: mx.gluon.HybridBlock,
    ...     **kwargs
    ... ) -> RepresentableBlockPredictor:
    ...     return RepresentableBlockPredictor(
    ...         prediction_net=prediction_network,
    ...         **kwargs
    ...     )
    """
    BlockType = mx.gluon.HybridBlock
    def __init__(
        self,
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[
            Callable[[DataEntry, np.ndarray], np.ndarray]
        ] = None,
        dtype: DType = np.float32,
    ) -> None:
        # input_names is not a parameter here: it is derived from the
        # hybrid_forward signature of the prediction network.
        super().__init__(
            input_names=get_hybrid_forward_input_names(prediction_net),
            prediction_net=prediction_net,
            batch_size=batch_size,
            prediction_length=prediction_length,
            freq=freq,
            ctx=ctx,
            input_transform=input_transform,
            forecast_generator=forecast_generator,
            output_transform=output_transform,
            dtype=dtype,
        )
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> SymbolBlockPredictor:
        """Convert to a symbol-backed predictor via a traced forward pass."""
        symbol_block_net = hybrid_block_to_symbol_block(
            hb=self.prediction_net,
            data_batch=[batch[k] for k in self.input_names],
        )
        return SymbolBlockPredictor(
            input_names=self.input_names,
            prediction_net=symbol_block_net,
            batch_size=self.batch_size,
            prediction_length=self.prediction_length,
            freq=self.freq,
            ctx=self.ctx,
            input_transform=self.input_transform,
            forecast_generator=self.forecast_generator,
            output_transform=self.output_transform,
            dtype=self.dtype,
        )
    def serialize(self, path: Path) -> None:
        # The repr-based format can break across gluonts versions, hence
        # the warning below.
        logging.warning(
            "Serializing RepresentableBlockPredictor instances does not save "
            "the prediction network structure in a backwards-compatible "
            "manner. Be careful not to use this method in production."
        )
        super().serialize(path)
    def serialize_prediction_net(self, path: Path) -> None:
        export_repr_block(self.prediction_net, path, "prediction_net")
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentableBlockPredictor":
        """Rebuild a predictor from the files written by serialize()."""
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")
            # input_names is derived from the prediction_net
            if "input_names" in parameters:
                del parameters["input_names"]
            parameters["ctx"] = ctx
            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class WorkerError:
    """Carries a formatted traceback string from a worker process back to
    the parent, since exception objects may not survive pickling."""
    def __init__(self, msg):
        self.msg = msg
def _worker_loop(
    predictor_path: Path,
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    worker_id,
    **kwargs,
):
    """
    Body of a forecasting worker process.

    Deserializes the predictor stored at ``predictor_path``, then repeatedly
    pulls ``(index, chunk)`` pairs from ``input_queue`` and pushes
    ``(index, worker_id, forecasts)`` triples onto ``output_queue``.  An
    index of ``None`` is the shutdown sentinel (acknowledged with a
    ``(None, None, None)`` triple); any prediction failure is reported as a
    ``WorkerError`` carrying the formatted traceback, after which the worker
    stops.
    """
    predictor = Predictor.deserialize(predictor_path)
    while True:
        chunk_idx, chunk = input_queue.get()
        if chunk_idx is None:
            # Shutdown sentinel: acknowledge and exit the loop.
            output_queue.put((None, None, None))
            break
        try:
            forecasts = list(predictor.predict(chunk, **kwargs))
        except Exception:
            # Ship the traceback to the parent instead of dying silently.
            trace = "".join(traceback.format_exception(*sys.exc_info()))
            output_queue.put((WorkerError(trace), None, None))
            break
        output_queue.put((chunk_idx, worker_id, forecasts))
class ParallelizedPredictor(Predictor):
    """
    Runs multiple instances (workers) of a predictor in parallel.
    Exceptions are propagated from the workers.
    Note: That there is currently an issue with tqdm that will cause things
    to hang if the ParallelizedPredictor is used with tqdm and an exception
    occurs during prediction.
    https://github.com/tqdm/tqdm/issues/548
    Parameters
    ----------
    base_predictor
        A representable predictor that will be used
    num_workers
        Number of workers (processes) to use. If set to
        None, one worker per CPU will be used.
    chunk_size
        Number of items to pass per call
    """
    def __init__(
        self,
        base_predictor: Predictor,
        num_workers: Optional[int] = None,
        chunk_size=1,
    ) -> None:
        super().__init__(base_predictor.prediction_length, base_predictor.freq)
        self._base_predictor = base_predictor
        self._num_workers = (
            num_workers if num_workers is not None else mp.cpu_count()
        )
        self._chunk_size = chunk_size
        self._num_running_workers = 0
        self._input_queues = []
        self._output_queue = None
    def _grouper(self, iterable, n):
        # Yield successive tuples of up to n items from iterable.
        iterator = iter(iterable)
        group = tuple(itertools.islice(iterator, n))
        while group:
            yield group
            group = tuple(itertools.islice(iterator, n))
    def terminate(self):
        # Send sentinels, then force-kill and reap all worker processes.
        for q in self._input_queues:
            q.put((None, None))
        for w in self._workers:
            w.terminate()
        for i, w in enumerate(self._workers):
            w.join()
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Serialize the base predictor to a temp dir, spawn worker processes
        that each deserialize it, and stream chunks of the dataset through
        them, yielding forecasts in the original dataset order.
        """
        with TemporaryDirectory() as tempdir:
            predictor_path = Path(tempdir)
            self._base_predictor.serialize(predictor_path)
            # TODO: Consider using shared memory for the data transfer.
            self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
            self._output_queue = mp.Queue()
            workers = []
            for worker_id, in_q in enumerate(self._input_queues):
                worker = mp.Process(
                    target=_worker_loop,
                    args=(predictor_path, in_q, self._output_queue, worker_id),
                    kwargs=kwargs,
                )
                worker.daemon = True
                worker.start()
                workers.append(worker)
                self._num_running_workers += 1
            self._workers = workers
            chunked_data = self._grouper(dataset, self._chunk_size)
            # _send_idx / _next_idx and _data_buffer implement in-order
            # delivery: results may arrive out of order, so they are
            # buffered until their index is the next one to emit.
            self._send_idx = 0
            self._next_idx = 0
            self._data_buffer = {}
            worker_ids = list(range(self._num_workers))
            def receive():
                # Block for one worker message; re-raise worker failures.
                idx, worker_id, result = self._output_queue.get()
                if isinstance(idx, WorkerError):
                    self._num_running_workers -= 1
                    self.terminate()
                    raise Exception(idx.msg)
                if idx is not None:
                    self._data_buffer[idx] = result
                return idx, worker_id, result
            def get_next_from_buffer():
                # Drain the buffer while consecutive indices are available.
                while self._next_idx in self._data_buffer:
                    result_batch = self._data_buffer.pop(self._next_idx)
                    self._next_idx += 1
                    for result in result_batch:
                        yield result
            def send(worker_id, chunk):
                q = self._input_queues[worker_id]
                q.put((self._send_idx, chunk))
                self._send_idx += 1
            try:
                # prime the queues
                for wid in worker_ids:
                    chunk = next(chunked_data)
                    send(wid, chunk)
                while True:
                    idx, wid, result = receive()
                    for res in get_next_from_buffer():
                        yield res
                    # refill the worker that just finished
                    chunk = next(chunked_data)
                    send(wid, chunk)
            except StopIteration:
                # signal workers end of data
                for q in self._input_queues:
                    q.put((None, None))
                # collect any outstanding results
                while self._num_running_workers > 0:
                    idx, worker_id, result = receive()
                    if idx is None:
                        self._num_running_workers -= 1
                        continue
                    for res in get_next_from_buffer():
                        yield res
            # sanity check: everything sent has been delivered
            assert len(self._data_buffer) == 0
            assert self._send_idx == self._next_idx
class Localizer(Predictor):
    """
    A Predictor that trains a fresh local model for each time series at
    prediction time and immediately uses it to forecast that series.

    Parameters
    ----------
    estimator
        The estimator object to train on each dataset entry at prediction time.
    """

    def __init__(self, estimator: "Estimator"):
        super().__init__(estimator.prediction_length, estimator.freq)
        self.estimator = estimator

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        logger = logging.getLogger(__name__)
        for index, entry in enumerate(dataset, start=1):
            logger.info(f"training for time series {index} / {len(dataset)}")
            # Wrap the single entry so it can be fed to train()/predict().
            single_series = ListDataset([entry], freq=self.freq)
            local_predictor = self.estimator.train(single_series)
            logger.info(f"predicting for time series {index} / {len(dataset)}")
            yield from local_predictor.predict(single_series, **kwargs)
class FallbackPredictor(Predictor):
    """Base for predictors that can be constructed from another predictor,
    used to build a fallback (e.g. a MeanPredictor) on the fly."""
    @classmethod
    def from_predictor(
        cls, base: RepresentablePredictor, **overrides
    ) -> Predictor:
        """
        Create a predictor of type ``cls`` reusing the constructor arguments
        of ``base`` (recorded by ``@validated()`` in ``__init_args__``), with
        ``overrides`` taking precedence over the base's arguments.
        """
        # Merge via a dict so overlapping keys are overridden instead of
        # raising "got multiple values for keyword argument", which the
        # previous `f(**base_args, **overrides)` form did.
        params = {**getattr(base, "__init_args__"), **overrides}
        return cls.from_hyperparameters(**params)
def fallback(fallback_cls: Type[FallbackPredictor]):
    """
    Decorator factory for ``predict_item`` implementations.

    The decorated method is wrapped so that ``GluonTSException`` propagates
    unchanged, while any other exception is logged and the prediction is
    retried with a freshly built ``fallback_cls`` derived from the current
    predictor.
    """

    def decorator(predict_item):
        @functools.wraps(predict_item)
        def fallback_predict(self, item: DataEntry) -> Forecast:
            try:
                return predict_item(self, item)
            except GluonTSException:
                # Domain errors are intentional; let them propagate.
                raise
            except Exception:
                logging.warning(
                    f"Base predictor failed with: {traceback.format_exc()}"
                )
                return fallback_cls.from_predictor(self).predict_item(item)

        return fallback_predict

    return decorator
| 24,040 | 30.969415 | 81 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-savedata/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of the elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class DeepARNetwork(mx.gluon.HybridBlock):
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        """
        Build the shared DeepAR network: a stacked (residual, optionally
        zoneout-regularized) RNN, a projection to the distribution
        parameters, an embedder for static categorical features, and a
        target scaler. Also initializes the `savedata` capture buffers.
        """
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE(review): sorts the caller-supplied list in place -- presumably
        # @validated() already hands in a fresh list; confirm callers do not
        # rely on their original ordering.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # Unsupported cell_type values raise KeyError here.
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # Residual connections on every layer after the first.
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
        #save data
        self.reset_savedata()
def reset_savedata(self):
self.savedata = {}
self.savedata['input'] = []
self.savedata['target'] = []
self.savedata['lags'] = []
self.savedata['theta'] = []
self.savedata['hstate'] = []
self.savedata['rnnoutput'] = []
@staticmethod
def get_lagged_subsequences(
F,
sequence: Tensor,
sequence_length: int,
indices: List[int],
subsequences_length: int = 1,
) -> Tensor:
"""
Returns lagged subsequences of a given sequence.
Parameters
----------
sequence : Tensor
the sequence from which lagged subsequences should be extracted.
Shape: (N, T, C).
sequence_length : int
length of sequence in the T (time) dimension (axis = 1).
indices : List[int]
list of lag indices to be used.
subsequences_length : int
length of the subsequences to be extracted.
Returns
--------
lagged : Tensor
a tensor of shape (N, S, C, I), where S = subsequences_length and
I = len(indices), containing lagged subsequences. Specifically,
lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
"""
# we must have: sequence_length - lag_index - subsequences_length >= 0
# for all lag_index, hence the following assert
assert max(indices) + subsequences_length <= sequence_length, (
f"lags cannot go further than history length, "
f"found lag {max(indices)} while history length is only "
f"{sequence_length}"
)
assert all(lag_index >= 0 for lag_index in indices)
lagged_values = []
for lag_index in indices:
begin_index = -lag_index - subsequences_length
end_index = -lag_index if lag_index > 0 else None
lagged_values.append(
F.slice_axis(
sequence, axis=1, begin=begin_index, end=end_index
)
)
return F.stack(*lagged_values, axis=-1)
    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        if future_time_feat is None or future_target is None:
            # prediction mode: condition only on the last context_length
            # steps of the observed past
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training mode: condition on the context window plus the known
            # future targets (teacher forcing)
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        #save data here
        # NOTE(review): .asnumpy() only works in imperative mode, so this
        # instrumentation presumably assumes the network is never
        # hybridized -- confirm before calling .hybridize().
        self.savedata['input'].append(inputs.asnumpy().copy())
        #self.savedata.append(inputs)
        self.savedata['lags'].append(lags.asnumpy().copy())
        #print(self.lags_seq)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # batch size is only known in imperative (NDArray) mode;
                # symbolic graphs get a 0 placeholder
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """
    Training-time DeepAR network: unrolls the encoder over the concatenated
    (context, future) range and computes the likelihood-based loss of the
    observed targets under the predicted distribution.
    """
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # project the RNN outputs to the distribution's parameter space
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        #save target in training
        # NOTE(review): .asnumpy() requires imperative mode; this assumes
        # the network is never hybridized -- confirm.
        self.savedata['target'].append(target.asnumpy().copy())
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # NOTE(review): returns a (weighted_loss, loss) pair although the
        # annotation says Tensor; downstream code appears to use both.
        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """
    Inference-time DeepAR network: unrolls the encoder on the observed past
    and then draws sample paths auto-regressively with ``sampling_decoder``.
    """
    @validated()
    #def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
    def __init__(self, num_parallel_samples: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        #self.num_parallel_samples = num_parallel_samples
        # NOTE(review): the constructor argument is deliberately ignored and
        # the number of sample paths forced to 1 (presumably to keep the
        # saved debug tensors simple) -- confirm this is intended.
        self.num_parallel_samples = 1
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
            #save only the last output
            # NOTE(review): .asnumpy() requires imperative mode; only the
            # final decoding step's state/output/params/sample are recorded.
            if k == self.prediction_length -1:
                self.savedata['hstate'].append(repeated_states)
                self.savedata['rnnoutput'].append(rnn_outputs.asnumpy().copy())
                self.savedata['theta'].append(distr_args)
                self.savedata['target'].append(new_samples.asnumpy().copy())
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # generate sample paths starting from the encoder's final state
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,891 | 34.601866 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepar-savedata/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # validate all hyper-parameters up front, before training begins
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # fall back to a single dummy category when static cats are unused
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # the encoder must see context_length steps plus the largest lag
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> Transformation:
        """
        Build the chain of transformations applied to each dataset entry:
        drop unused fields, inject dummy static features when disabled,
        coerce dtypes, add observed-value/time/age features, and split
        each series into (past, future) instance windows.
        """
        # dynamic categorical features are never consumed by this model
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                # provide a dummy static categorical feature when disabled
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                # provide a dummy static real feature when disabled
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                # merge all dynamic features into a single FEAT_TIME array
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )
    def create_training_network(self) -> DeepARTrainingNetwork:
        """Instantiate the network that computes the training loss."""
        return DeepARTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Build a prediction network with the same architecture, copy the
        trained weights into it, and wrap it in a predictor.
        """
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        # transfer the learned weights into the sampling network
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,645 | 37.090361 | 94 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/wavenet/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import math
from typing import List
# Third-party imports
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.model.common import Tensor
from gluonts.core.component import validated
class LookupValues(gluon.HybridBlock):
    """Lookup table mapping integer bin indices to their bin values."""
    def __init__(self, values: mx.nd.NDArray, **kwargs):
        super().__init__(**kwargs)
        with self.name_scope():
            # store the bin values as a non-trainable model constant
            self.bin_values = self.params.get_constant("bin_values", values)
    def hybrid_forward(self, F, indices, bin_values):
        # gather: output[...] = bin_values[indices[...]]
        return F.take(bin_values, indices)
def conv1d(channels, kernel_size, in_channels, use_bias=True, **kwargs):
    """
    Conv1D with better default initialization.

    Weights (and bias, if used) are initialized uniformly in
    [-stdv, stdv] with stdv = 1 / sqrt(fan_in), where
    fan_in = in_channels * prod(kernel_size).

    Parameters
    ----------
    channels
        Number of output channels.
    kernel_size
        Kernel size; an int or a list/tuple of ints.
    in_channels
        Number of input channels.
    use_bias
        Whether the layer has a bias term (initialized like the weights).
    kwargs
        Forwarded to ``mxnet.gluon.nn.Conv1D`` (e.g. dilation, activation).
    """
    # Normalize to a list; also accept tuples (the original only handled
    # lists, so a tuple would break the fan-in product below with a
    # TypeError on `n *= k`).
    kernel_size = (
        list(kernel_size)
        if isinstance(kernel_size, (list, tuple))
        else [kernel_size]
    )
    # fan_in = in_channels * prod(kernel_size)
    n = in_channels
    for k in kernel_size:
        n *= k
    stdv = 1.0 / math.sqrt(n)
    winit = mx.initializer.Uniform(stdv)
    # "zeros" is a placeholder when use_bias=False; Conv1D still requires a
    # bias initializer argument.
    binit = mx.initializer.Uniform(stdv) if use_bias else "zeros"
    return nn.Conv1D(
        channels=channels,
        kernel_size=kernel_size,
        in_channels=in_channels,
        use_bias=use_bias,
        weight_initializer=winit,
        bias_initializer=binit,
        **kwargs,
    )
class CausalDilatedResidue(nn.HybridBlock):
    """
    One WaveNet residual layer: a gated causal dilated convolution
    (sigmoid gate * tanh filter) followed by 1x1 projections to the skip
    channel and, except for the last layer, to the residual output.
    """
    def __init__(
        self,
        n_residue,
        n_skip,
        dilation,
        return_dense_out,
        kernel_size,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.n_residue = n_residue
        self.n_skip = n_skip
        self.dilation = dilation
        self.kernel_size = kernel_size
        self.return_dense_out = return_dense_out
        with self.name_scope():
            # gate branch of the gated activation unit
            self.conv_sigmoid = conv1d(
                in_channels=n_residue,
                channels=n_residue,
                kernel_size=kernel_size,
                dilation=dilation,
                activation="sigmoid",
            )
            # filter branch of the gated activation unit
            self.conv_tanh = conv1d(
                in_channels=n_residue,
                channels=n_residue,
                kernel_size=kernel_size,
                dilation=dilation,
                activation="tanh",
            )
            # 1x1 projection feeding the skip connection
            self.skip = conv1d(
                in_channels=n_residue, channels=n_skip, kernel_size=1
            )
            # 1x1 projection feeding the next layer (absent on the last one)
            self.residue = (
                conv1d(
                    in_channels=n_residue, channels=n_residue, kernel_size=1
                )
                if self.return_dense_out
                else None
            )
    def hybrid_forward(self, F, x):
        # gated activation: sigmoid(conv(x)) * tanh(conv(x))
        u = self.conv_sigmoid(x) * self.conv_tanh(x)
        s = self.skip(u)
        if not self.return_dense_out:
            # last layer: no residual path; return a dummy tensor so the
            # (skip, residual) output signature stays uniform
            return s, F.zeros(shape=(1,))
        output = self.residue(u)
        # residual connection; trim the input so both operands cover the
        # same (causally valid) trailing time steps
        output = output + F.slice_axis(
            x, begin=(self.kernel_size - 1) * self.dilation, end=None, axis=-1
        )
        return s, output
class WaveNet(nn.HybridBlock):
    """
    WaveNet over a discretized (binned) target: a stack of gated causal
    dilated convolutions trained with softmax cross-entropy against the
    bin index of the next time step.
    """
    def __init__(
        self,
        bin_values: List[float],
        n_residue: int,
        n_skip: int,
        dilation_depth: int,
        n_stacks: int,
        act_type: str,
        cardinality: List[int],
        embedding_dimension: int,
        pred_length: int,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.dilation_depth = dilation_depth
        self.pred_length = pred_length
        # number of bins, i.e. number of classes of the softmax output
        self.mu = len(bin_values)
        self.dilations = WaveNet._get_dilations(
            dilation_depth=dilation_depth, n_stacks=n_stacks
        )
        self.receptive_field = WaveNet.get_receptive_field(
            dilation_depth=dilation_depth, n_stacks=n_stacks
        )
        # how much to trim each layer's skip output so every skip tensor
        # aligns with the deepest (shortest) one
        self.trim_lengths = [
            sum(self.dilations) - sum(self.dilations[: i + 1])
            for i, _ in enumerate(self.dilations)
        ]
        with self.name_scope():
            self.feature_embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            # self.post_transform = LookupValues(mx.nd.array(bin_values))
            # embedding of the discretized target values
            self.target_embed = nn.Embedding(
                input_dim=self.mu, output_dim=n_residue
            )
            self.residuals = nn.HybridSequential()
            for i, d in enumerate(self.dilations):
                is_not_last = i + 1 < len(self.dilations)
                self.residuals.add(
                    CausalDilatedResidue(
                        n_residue=n_residue,
                        n_skip=n_skip,
                        dilation=d,
                        return_dense_out=is_not_last,
                        kernel_size=2,
                    )
                )
            std = 1.0 / math.sqrt(n_residue)
            # 1x1 conv projecting (embedded target + features) down to the
            # residual channel width
            self.conv_project = nn.Conv1D(
                channels=n_residue,
                kernel_size=1,
                use_bias=True,
                weight_initializer=mx.init.Uniform(std),
                bias_initializer="zero",
            )
            self.conv1 = conv1d(
                in_channels=n_skip, channels=n_skip, kernel_size=1
            )
            # final 1x1 projection to the per-bin logits
            self.conv2 = conv1d(
                in_channels=n_skip, channels=self.mu, kernel_size=1
            )
            self.output_act = (
                nn.ELU()
                if act_type == "elu"
                else nn.Activation(activation=act_type)
            )
            self.cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()
    @staticmethod
    def _get_dilations(dilation_depth, n_stacks):
        # n_stacks repetitions of [1, 2, 4, ..., 2**(dilation_depth-1)]
        return [2 ** i for i in range(dilation_depth)] * n_stacks
    @staticmethod
    def get_receptive_field(dilation_depth, n_stacks):
        """
        Return the length of the receptive field
        """
        dilations = WaveNet._get_dilations(
            dilation_depth=dilation_depth, n_stacks=n_stacks
        )
        return sum(dilations) + 1
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
        scale: Tensor,
    ) -> Tensor:
        """
        Compute the softmax cross-entropy training loss, conditioning on
        the true (past and future) binned targets (teacher forcing).
        """
        embedded_cat = self.feature_embedder(feat_static_cat)
        # static features: category embeddings plus log of the scale
        static_feat = F.concat(embedded_cat, F.log(scale + 1.0), dim=1)
        # bin indices of the full (past + future) target
        full_target = F.concat(past_target, future_target, dim=-1).astype(
            "int32"
        )
        full_observed = F.expand_dims(
            F.concat(past_observed_values, future_observed_values, dim=-1),
            axis=1,
        )
        full_time_features = F.concat(past_time_feat, future_time_feat, dim=-1)
        # broadcast static features over the whole unrolled time axis
        repeated_static_feat = F.repeat(
            F.expand_dims(static_feat, axis=-1),
            repeats=self.pred_length + self.receptive_field,
            axis=-1,
        )
        # NOTE(review): concatenating along dim=1 assumes every feature
        # tensor is laid out as (batch, channel, time) -- confirm.
        full_features = F.concat(
            full_time_features, full_observed, repeated_static_feat, dim=1
        )
        # (batch_size, embed_dim, sequence_length)
        # embed the target shifted back by one step (the network predicts
        # step t from targets up to t-1)
        o = self.target_embed(
            F.slice_axis(full_target, begin=0, end=-1, axis=-1)
        ).swapaxes(1, 2)
        o = F.concat(
            o, F.slice_axis(full_features, begin=1, end=None, axis=-1), dim=1
        )
        o = self.conv_project(o)
        skip_outs = []
        for i, d in enumerate(self.dilations):
            skip, o = self.residuals[i](o)
            # align this layer's skip output with the deepest layer's
            skip_trim = F.slice_axis(
                skip, begin=self.trim_lengths[i], end=None, axis=-1
            )
            skip_outs.append(skip_trim)
        y = sum(skip_outs)
        y = self.output_act(y)
        y = self.conv1(y)
        y = self.output_act(y)
        y = self.conv2(y)
        # (batch, time, mu) logits
        unnormalized_output = y.swapaxes(1, 2)
        # labels start once the receptive field is fully populated
        label = F.slice_axis(
            full_target, begin=self.receptive_field, end=None, axis=-1
        )
        # mask unobserved steps out of the loss
        loss_weight = F.slice_axis(
            full_observed, begin=self.receptive_field, end=None, axis=-1
        )
        loss_weight = F.expand_dims(loss_weight, axis=2)
        loss = self.cross_entropy_loss(unnormalized_output, label, loss_weight)
        return loss
class WaveNetSampler(WaveNet):
    """
    Runs Wavenet generation in an auto-regressive manner using caching for
    speedup [PKC+16]_.
    Same arguments as WaveNet. In addition
    Parameters
    ----------
    num_samples
        Number of sample paths to generate in parallel in the graph
    temperature
        If set to 1.0 (default), sample according to estimated probabilities, if set to 0.0
        most likely sample at each step is chosen.
    """
    @validated()
    def __init__(
        self,
        bin_values: List[float],
        num_samples: int,
        temperature: float = 1.0,
        **kwargs,
    ):
        """
        Parameters
        ----------
        bin_values
            Centers of the quantization bins; also used to map sampled bin
            indices back to (scaled) real values via ``post_transform``.
        num_samples
            Number of sample paths to generate in parallel in the graph.
        temperature
            If set to 1.0 (default), sample according to estimated
            probabilities; if set to 0.0 the most likely bin at each step
            is chosen.
        kwargs
            Forwarded to the ``WaveNet`` base class.
        """
        super().__init__(bin_values=bin_values, **kwargs)
        self.num_samples = num_samples
        self.temperature = temperature
        with self.name_scope():
            # Lookup table mapping sampled bin indices back to bin-center values.
            self.post_transform = LookupValues(mx.nd.array(bin_values))
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        scale: Tensor,
    ) -> Tensor:
        """
        Draw auto-regressive sample paths of length ``self.pred_length``.

        Returns a tensor of shape (batch_size, num_samples, pred_length) with
        samples mapped back to real values and multiplied by ``scale``.
        """
        embedded_cat = self.feature_embedder(feat_static_cat)
        static_feat = F.concat(embedded_cat, F.log(scale + 1.0), dim=1)
        # Targets are quantized bin indices, hence integer-typed.
        past_target = past_target.astype("int32")
        def blow_up(u):
            """
            Expand to (batch_size x num_samples)
            """
            return F.repeat(u, repeats=self.num_samples, axis=0)
        def is_last_layer(i):
            return i + 1 == len(self.dilations)
        # Per-dilation-layer caches of past convolution outputs, so each new
        # step only needs to process the most recent inputs.
        queues = []
        full_time_features = F.concat(past_time_feat, future_time_feat, dim=-1)
        # Future values are assumed observed during sampling.
        future_observed_values = F.slice_axis(
            future_time_feat, begin=0, end=1, axis=1
        ).ones_like()
        full_observed = F.concat(
            F.expand_dims(past_observed_values, axis=1),
            future_observed_values,
            dim=-1,
        )
        repeated_static_feat = F.repeat(
            F.expand_dims(static_feat, axis=-1),
            repeats=self.pred_length + self.receptive_field,
            axis=-1,
        )
        # Channel-wise concatenation of time features, observation indicators
        # and (repeated) static features over the full horizon.
        full_features = F.concat(
            full_time_features, full_observed, repeated_static_feat, dim=1
        )
        feature_slice = F.slice_axis(
            full_features,
            begin=-self.pred_length - self.receptive_field + 1,
            end=None,
            axis=-1,
        )
        # Warm-up pass over the receptive field: run the network once and
        # cache each residual layer's outputs in `queues`.
        tmp = F.slice_axis(
            past_target, begin=-self.receptive_field, end=None, axis=-1
        )
        o = self.target_embed(tmp).swapaxes(1, 2)
        o = F.concat(
            o,
            F.slice_axis(
                feature_slice, begin=-self.receptive_field, end=None, axis=-1
            ),
            dim=1,
        )
        o = self.conv_project(o)
        for i, d in enumerate(self.dilations):
            # Number of cached positions this layer needs for one new step.
            sz = 1 if d == 2 ** (self.dilation_depth - 1) else d * 2
            _, o = self.residuals[i](o)
            if not is_last_layer(i):
                o_chunk = F.slice_axis(o, begin=-sz - 1, end=-1, axis=-1)
            else:
                o_chunk = o
            queues.append(blow_up(o_chunk))
        # `res` holds the (blown-up) sequence of sampled bin indices; seed it
        # with the last two observed targets.
        res = F.slice_axis(past_target, begin=-2, end=None, axis=-1)
        res = blow_up(res)
        for n in range(self.pred_length):
            queues_next = []
            o = self.target_embed(
                F.slice_axis(res, begin=-2, end=None, axis=-1)
            ).swapaxes(1, 2)
            b = F.slice_axis(
                full_features,
                begin=self.receptive_field + n - 1,
                end=self.receptive_field + n + 1,
                axis=-1,
            )
            b = blow_up(b)
            o = F.concat(o, b, dim=1)
            o = self.conv_project(o)
            skip_outs = []
            for i, d in enumerate(self.dilations):
                skip, o = self.residuals[i](o)
                skip_outs.append(skip)
                if not is_last_layer(i):
                    # Prepend the cached history, then shift the cache by one.
                    q = queues[i]
                    o = F.concat(q, o, num_args=2, dim=-1)
                    queues_next.append(
                        F.slice_axis(o, begin=1, end=None, axis=-1)
                    )
            queues = queues_next
            y = sum(skip_outs)
            y = self.output_act(y)
            y = self.conv1(y)
            y = self.output_act(y)
            unnormalized_outputs = self.conv2(y)
            if self.temperature > 0:
                # Temperature-scaled softmax sampling over the bins.
                probs = F.softmax(
                    unnormalized_outputs / self.temperature, axis=1
                )
                y = F.sample_multinomial(probs.swapaxes(1, 2))
            else:
                # Greedy decoding: pick the most likely bin.
                y = F.argmax(unnormalized_outputs, axis=1)
                y = y.astype("int32")
            res = F.concat(res, y, num_args=2, dim=-1)
        samples = F.slice_axis(res, begin=-self.pred_length, end=None, axis=-1)
        samples = samples.reshape(
            shape=(-1, self.num_samples, self.pred_length)
        )
        # Map bin indices back to bin-center values and undo the scaling.
        samples = self.post_transform(samples)
        samples = F.broadcast_mul(scale.expand_dims(axis=1), samples)
        return samples
| 14,582 | 31.121145 | 96 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/wavenet/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import logging
import re
from typing import Dict, List, Optional
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts import transform
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.loader import TrainDataLoader, ValidationDataLoader
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.model.wavenet._network import WaveNet, WaveNetSampler
from gluonts.support.util import (
copy_parameters,
get_hybrid_forward_input_names,
)
from gluonts.time_feature import time_features_from_frequency_str
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
DataEntry,
ExpectedNumInstanceSampler,
InstanceSplitter,
SetFieldIfNotPresent,
SimpleTransformation,
VstackFeatures,
)
class QuantizeScaled(SimpleTransformation):
    """
    Rescale and quantize the target variable.

    Requires the ``past_target`` and ``future_target`` fields. The mean
    absolute value of ``past_target`` determines a scale that is applied to
    both targets before they are discretized against ``bin_edges``. The
    computed scale is stored in the entry under a new field (``"scale"`` by
    default).
    """
    @validated()
    def __init__(
        self,
        bin_edges: List[float],
        past_target: str,
        future_target: str,
        scale: str = "scale",
    ):
        self.bin_edges = np.array(bin_edges)
        self.past_target = past_target
        self.future_target = future_target
        self.scale = scale
    def transform(self, data: DataEntry) -> DataEntry:
        # Scale by the mean absolute value of the context window; fall back
        # to 1.0 when the context is all zeros so the division stays safe.
        mean_abs = np.mean(np.abs(data[self.past_target]))
        divisor = mean_abs if mean_abs > 0 else 1.0
        for field in (self.past_target, self.future_target):
            data[field] = np.digitize(
                data[field] / divisor, bins=self.bin_edges, right=False
            )
        data[self.scale] = np.array([divisor])
        return data
def _get_seasonality(freq: str, seasonality_dict: Dict) -> int:
match = re.match(r"(\d*)(\w+)", freq)
assert match, "Cannot match freq regex"
multiple, base_freq = match.groups()
multiple = int(multiple) if multiple else 1
seasonality = seasonality_dict[base_freq]
if seasonality % multiple != 0:
logging.warning(
f"multiple {multiple} does not divide base seasonality {seasonality}."
f"Falling back to seasonality 1"
)
return 1
return seasonality // multiple
class WaveNetEstimator(GluonEstimator):
    """
    Model with Wavenet architecture and quantized target.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    cardinality
        Number of values of the each categorical feature (default: [1])
    embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 5)
    num_bins
        Number of bins used for quantization of signal (default: 1024)
    hybridize_prediction_net
        Boolean (default: False)
    n_residue
        Number of residual channels in wavenet architecture (default: 24)
    n_skip
        Number of skip channels in wavenet architecture (default: 32)
    dilation_depth
        Number of dilation layers in wavenet architecture.
        If set to None (default), dialation_depth is set such that the receptive length is at least
        as long as typical seasonality for the frequency and at least 2 * prediction_length.
    n_stacks
        Number of dilation stacks in wavenet architecture (default: 1)
    temperature
        Temparature used for sampling from softmax distribution.
        For temperature = 1.0 (default) sampling is according to estimated probability.
    act_type
        Activation type used after before output layer (default: "elu").
        Can be any of 'elu', 'relu', 'sigmoid', 'tanh', 'softrelu', 'softsign'.
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 200)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(
            learning_rate=0.01,
            epochs=200,
            num_batches_per_epoch=50,
            hybridize=False,
        ),
        cardinality: List[int] = [1],
        seasonality: Optional[int] = None,
        embedding_dimension: int = 5,
        num_bins: int = 1024,
        hybridize_prediction_net: bool = False,
        n_residue=24,
        n_skip=32,
        dilation_depth: Optional[int] = None,
        n_stacks: int = 1,
        train_window_length: Optional[int] = None,
        temperature: float = 1.0,
        act_type: str = "elu",
        num_parallel_samples: int = 200,
    ) -> None:
        """
        See the class docstring for the meaning of the parameters.

        Additional notes:
        - ``seasonality``: overrides the per-frequency default seasonality
          used to size the receptive field.
        - ``train_window_length``: length of windows used for training;
          should be longer than context + prediction length. Larger values
          allow more efficient reuse of convolution computations
          (default: ``prediction_length``).
        """
        super().__init__(trainer=trainer)
        self.freq = freq
        self.prediction_length = prediction_length
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_bins = num_bins
        self.hybridize_prediction_net = hybridize_prediction_net
        self.n_residue = n_residue
        self.n_skip = n_skip
        self.n_stacks = n_stacks
        self.train_window_length = (
            train_window_length
            if train_window_length is not None
            else prediction_length
        )
        self.temperature = temperature
        self.act_type = act_type
        self.num_parallel_samples = num_parallel_samples
        # Fall back to a per-frequency default seasonality when none is given.
        seasonality = (
            _get_seasonality(
                self.freq,
                {
                    "H": 7 * 24,
                    "D": 7,
                    "W": 52,
                    "M": 12,
                    "B": 7 * 5,
                    "min": 24 * 60,
                },
            )
            if seasonality is None
            else seasonality
        )
        goal_receptive_length = max(
            2 * seasonality, 2 * self.prediction_length
        )
        # Grow the dilation depth until the receptive field covers the goal.
        if dilation_depth is None:
            d = 1
            while (
                WaveNet.get_receptive_field(
                    dilation_depth=d, n_stacks=n_stacks
                )
                < goal_receptive_length
            ):
                d += 1
            self.dilation_depth = d
        else:
            self.dilation_depth = dilation_depth
        self.context_length = WaveNet.get_receptive_field(
            dilation_depth=self.dilation_depth, n_stacks=n_stacks
        )
        self.logger = logging.getLogger(__name__)
        self.logger.info(
            f"Using dilation depth {self.dilation_depth} and receptive field length {self.context_length}"
        )
    def train(
        self, training_data: Dataset, validation_data: Optional[Dataset] = None
    ) -> Predictor:
        """
        Quantize the target, train a WaveNet network on the given data and
        return a predictor wrapping the trained sampling network.
        """
        # Bin centers span [-10, 10] when negative targets occur, [0, 10]
        # otherwise; edges are midpoints with +/-1e20 sentinels at the ends.
        has_negative_data = any(np.any(d["target"] < 0) for d in training_data)
        low = -10.0 if has_negative_data else 0
        high = 10.0
        bin_centers = np.linspace(low, high, self.num_bins)
        bin_edges = np.concatenate(
            [[-1e20], (bin_centers[1:] + bin_centers[:-1]) / 2.0, [1e20]]
        )
        logging.info(
            f"using training windows of length = {self.train_window_length}"
        )
        transformation = self.create_transformation(
            bin_edges, pred_length=self.train_window_length
        )
        transformation.estimate(iter(training_data))
        training_data_loader = TrainDataLoader(
            dataset=training_data,
            transform=transformation,
            batch_size=self.trainer.batch_size,
            num_batches_per_epoch=self.trainer.num_batches_per_epoch,
            ctx=self.trainer.ctx,
        )
        validation_data_loader = None
        if validation_data is not None:
            validation_data_loader = ValidationDataLoader(
                dataset=validation_data,
                transform=transformation,
                batch_size=self.trainer.batch_size,
                ctx=self.trainer.ctx,
                dtype=self.dtype,
            )
        # ensure that the training network is created within the same MXNet
        # context as the one that will be used during training
        with self.trainer.ctx:
            params = self._get_wavenet_args(bin_centers)
            params.update(pred_length=self.train_window_length)
            trained_net = WaveNet(**params)
            self.trainer(
                net=trained_net,
                input_names=get_hybrid_forward_input_names(trained_net),
                train_iter=training_data_loader,
                validation_iter=validation_data_loader,
            )
        # ensure that the prediction network is created within the same MXNet
        # context as the one that was used during training
        with self.trainer.ctx:
            return self.create_predictor(
                transformation, trained_net, bin_centers
            )
    def create_transformation(
        self, bin_edges: np.ndarray, pred_length: int
    ) -> transform.Transformation:
        """
        Build the data pipeline: add observed-value indicators, time and age
        features, split into training instances, then quantize the targets
        against ``bin_edges``.
        """
        return Chain(
            [
                AsNumpyArray(field=FieldName.TARGET, expected_ndim=1),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=time_features_from_frequency_str(self.freq),
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE],
                ),
                SetFieldIfNotPresent(
                    field=FieldName.FEAT_STATIC_CAT, value=[0.0]
                ),
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.context_length,
                    future_length=pred_length,
                    output_NTC=False,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
                QuantizeScaled(
                    bin_edges=bin_edges.tolist(),
                    future_target="future_target",
                    past_target="past_target",
                ),
            ]
        )
    def _get_wavenet_args(self, bin_centers):
        # Keyword arguments shared by the training and sampling networks.
        return dict(
            n_residue=self.n_residue,
            n_skip=self.n_skip,
            dilation_depth=self.dilation_depth,
            n_stacks=self.n_stacks,
            act_type=self.act_type,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            bin_values=bin_centers.tolist(),
            pred_length=self.prediction_length,
        )
    def create_predictor(
        self,
        transformation: transform.Transformation,
        trained_network: mx.gluon.HybridBlock,
        bin_values: np.ndarray,
    ) -> Predictor:
        """
        Wrap the trained parameters into a ``WaveNetSampler``-based predictor.
        """
        prediction_network = WaveNetSampler(
            num_samples=self.num_parallel_samples,
            temperature=self.temperature,
            **self._get_wavenet_args(bin_values),
        )
        # The lookup layer is specific to the sampling network here
        # we make sure it is initialized.
        prediction_network.initialize()
        copy_parameters(
            net_source=trained_network,
            net_dest=prediction_network,
            allow_missing=True,
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 15,319 | 35.563246 | 106 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepstate/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.core.component import validated
from gluonts.distribution.lds import ParameterBounds, LDS, LDSArgsProj
from gluonts.model.deepstate.issm import ISSM
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average, make_nd_diag
class DeepStateNetwork(mx.gluon.HybridBlock):
    """
    Base network for the DeepState model: an RNN whose outputs parametrize a
    linear dynamical system (LDS) at every time step. Subclasses implement
    the training loss and the prediction sampling on top of ``compute_lds``.
    """
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        past_length: int,
        prediction_length: int,
        issm: ISSM,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        scaling: bool = True,
        noise_std_bounds: ParameterBounds = ParameterBounds(1e-6, 1.0),
        prior_cov_bounds: ParameterBounds = ParameterBounds(1e-6, 1.0),
        innovation_bounds: ParameterBounds = ParameterBounds(1e-6, 0.01),
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        # NOTE(review): cell_type is stored but the recurrent stack below
        # always uses LSTMCell — confirm whether other cell types are intended.
        self.cell_type = cell_type
        self.past_length = past_length
        self.prediction_length = prediction_length
        self.issm = issm
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        self.univariate = self.issm.output_dim() == 1
        self.noise_std_bounds = noise_std_bounds
        self.prior_cov_bounds = prior_cov_bounds
        self.innovation_bounds = innovation_bounds
        with self.name_scope():
            # Projections from the first RNN output to the LDS prior state.
            self.prior_mean_model = mx.gluon.nn.Dense(
                units=self.issm.latent_dim(), flatten=False
            )
            self.prior_cov_diag_model = mx.gluon.nn.Dense(
                units=self.issm.latent_dim(),
                activation="sigmoid",
                flatten=False,
            )
            self.lstm = mx.gluon.rnn.HybridSequentialRNNCell()
            self.lds_proj = LDSArgsProj(
                output_dim=self.issm.output_dim(),
                noise_std_bounds=self.noise_std_bounds,
                innovation_bounds=self.innovation_bounds,
            )
            for k in range(num_layers):
                cell = mx.gluon.rnn.LSTMCell(hidden_size=num_cells)
                # Residual connections for all layers above the first.
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.lstm.add(cell)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality, embedding_dims=embedding_dimension
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=False)
            else:
                self.scaler = NOPScaler(keepdims=False)
    def compute_lds(
        self,
        F,
        feat_static_cat: Tensor,
        seasonal_indicators: Tensor,
        time_feat: Tensor,
        length: int,
        prior_mean: Optional[Tensor] = None,
        prior_cov: Optional[Tensor] = None,
        lstm_begin_state: Optional[List[Tensor]] = None,
    ):
        """
        Unroll the RNN over ``length`` steps and assemble the resulting LDS.

        When ``prior_mean``/``prior_cov`` are not given, they are derived
        from the first RNN output; ``lstm_begin_state`` allows continuing
        an earlier unroll (used at prediction time).

        Returns the ``LDS`` instance and the final RNN state.
        """
        # embed categorical features and expand along time axis
        embedded_cat = self.embedder(feat_static_cat)
        repeated_static_features = embedded_cat.expand_dims(axis=1).repeat(
            axis=1, repeats=length
        )
        # construct big features tensor (context)
        features = F.concat(time_feat, repeated_static_features, dim=2)
        output, lstm_final_state = self.lstm.unroll(
            inputs=features,
            begin_state=lstm_begin_state,
            length=length,
            merge_outputs=True,
        )
        if prior_mean is None:
            prior_input = F.slice_axis(output, axis=1, begin=0, end=1).squeeze(
                axis=1
            )
            prior_mean = self.prior_mean_model(prior_input)
            # Sigmoid output rescaled into [lower, upper] of the bounds.
            prior_cov_diag = (
                self.prior_cov_diag_model(prior_input)
                * (self.prior_cov_bounds.upper - self.prior_cov_bounds.lower)
                + self.prior_cov_bounds.lower
            )
            prior_cov = make_nd_diag(F, prior_cov_diag, self.issm.latent_dim())
        (
            emission_coeff,
            transition_coeff,
            innovation_coeff,
        ) = self.issm.get_issm_coeff(seasonal_indicators)
        noise_std, innovation, residuals = self.lds_proj(output)
        lds = LDS(
            emission_coeff=emission_coeff,
            transition_coeff=transition_coeff,
            innovation_coeff=F.broadcast_mul(innovation, innovation_coeff),
            noise_std=noise_std,
            residuals=residuals,
            prior_mean=prior_mean,
            prior_cov=prior_cov,
            latent_dim=self.issm.latent_dim(),
            output_dim=self.issm.output_dim(),
            seq_length=length,
        )
        return lds, lstm_final_state
class DeepStateTrainingNetwork(DeepStateNetwork):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_observed_values: Tensor,
        past_seasonal_indicators: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
    ) -> Tensor:
        """
        Compute the training loss: the negative log-likelihood of the past
        targets under the LDS, weighted by the observed-value indicators and
        averaged over time.
        """
        lds, _ = self.compute_lds(
            F,
            feat_static_cat=feat_static_cat,
            seasonal_indicators=past_seasonal_indicators.slice_axis(
                axis=1, begin=-self.past_length, end=None
            ),
            time_feat=past_time_feat.slice_axis(
                axis=1, begin=-self.past_length, end=None
            ),
            length=self.past_length,
        )
        _, scale = self.scaler(past_target, past_observed_values)
        observed_context = past_observed_values.slice_axis(
            axis=1, begin=-self.past_length, end=None
        )
        ll, _, _ = lds.log_prob(
            x=past_target.slice_axis(
                axis=1, begin=-self.past_length, end=None
            ),
            observed=observed_context.min(axis=-1, keepdims=False),
            scale=scale,
        )
        # Negative log-likelihood, averaged with observation weights.
        return weighted_average(
            F=F, x=-ll, axis=1, weights=observed_context.squeeze(axis=-1)
        )
class DeepStatePredictionNetwork(DeepStateNetwork):
    @validated()
    def __init__(self, num_parallel_samples: int, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Number of sample paths drawn per time series at prediction time.
        self.num_parallel_samples = num_parallel_samples
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_observed_values: Tensor,
        past_seasonal_indicators: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        future_seasonal_indicators: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Filter the LDS over the context window, then sample future paths
        from the LDS conditioned on the final filtered state.

        Returns samples of shape
        (batch_size, num_parallel_samples, prediction_length[, target_dim]);
        the last axis is squeezed in the univariate case.
        """
        lds, lstm_state = self.compute_lds(
            F,
            feat_static_cat=feat_static_cat,
            seasonal_indicators=past_seasonal_indicators.slice_axis(
                axis=1, begin=-self.past_length, end=None
            ),
            time_feat=past_time_feat.slice_axis(
                axis=1, begin=-self.past_length, end=None
            ),
            length=self.past_length,
        )
        _, scale = self.scaler(past_target, past_observed_values)
        observed_context = past_observed_values.slice_axis(
            axis=1, begin=-self.past_length, end=None
        )
        # Filtering pass: only the final posterior mean/cov are kept, to be
        # used as the prior of the prediction-range LDS.
        _, final_mean, final_cov = lds.log_prob(
            x=past_target.slice_axis(
                axis=1, begin=-self.past_length, end=None
            ),
            observed=observed_context.min(axis=-1, keepdims=False),
            scale=scale,
        )
        lds_prediction, _ = self.compute_lds(
            F,
            feat_static_cat=feat_static_cat,
            seasonal_indicators=future_seasonal_indicators,
            time_feat=future_time_feat,
            length=self.prediction_length,
            lstm_begin_state=lstm_state,
            prior_mean=final_mean,
            prior_cov=final_cov,
        )
        samples = lds_prediction.sample(
            num_samples=self.num_parallel_samples, scale=scale
        )
        # convert samples from
        # (num_samples, batch_size, prediction_length, target_dim)
        # to
        # (batch_size, num_samples, prediction_length, target_dim)
        # and squeeze last axis in the univariate case
        if self.univariate:
            return samples.transpose(axes=(1, 0, 2, 3)).squeeze(axis=3)
        else:
            return samples.transpose(axes=(1, 0, 2, 3))
| 9,652 | 33.723022 | 83 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deepstate/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
from pandas.tseries.frequencies import to_offset
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution.lds import ParameterBounds
from gluonts.model.deepstate.issm import ISSM, CompositeISSM
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import TimeFeature, time_features_from_frequency_str
from gluonts.trainer import Trainer
from gluonts.transform import (
AddObservedValuesIndicator,
AddAgeFeature,
AddTimeFeatures,
AsNumpyArray,
Chain,
CanonicalInstanceSplitter,
ExpandDimArray,
RemoveFields,
SetField,
TestSplitSampler,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepStateTrainingNetwork, DeepStatePredictionNetwork
# Field name under which the unnormalized seasonal features are stored.
SEASON_INDICATORS_FIELD = "seasonal_indicators"
# A dictionary mapping granularity to the period length of the longest season
# one can expect given the granularity of the time series.
# This is similar to the frequency value in the R forecast package:
# https://stats.stackexchange.com/questions/120806/frequency-value-for-seconds-minutes-intervals-data-in-r
# This is useful for setting default values for past/context length for models
# that do not do data augmentation and uses a single training example per time series in the dataset.
FREQ_LONGEST_PERIOD_DICT = {
    "M": 12,  # yearly seasonality
    "W-SUN": 52,  # yearly seasonality
    "D": 31,  # monthly seasonality
    "B": 22,  # monthly seasonality
    "H": 168,  # weekly seasonality
    "T": 1440,  # daily seasonality
}
def longest_period_from_frequency_str(freq_str: str) -> int:
    """Return the longest expected period, measured in steps of ``freq_str``."""
    parsed = to_offset(freq_str)
    base_period = FREQ_LONGEST_PERIOD_DICT[parsed.name]
    return base_period // parsed.n
class DeepStateEstimator(GluonEstimator):
"""
Construct a DeepState estimator.
This implements the deep state space model described in
[RSG+18]_.
Parameters
----------
freq
Frequency of the data to train on and predict
prediction_length
Length of the prediction horizon
cardinality
Number of values of each categorical feature.
This must be set by default unless ``use_feat_static_cat``
is set to `False` explicitly (which is NOT recommended).
add_trend
Flag to indicate whether to include trend component in the
state space model
past_length
This is the length of the training time series;
i.e., number of steps to unroll the RNN for before computing
predictions.
Set this to (at most) the length of the shortest time series in the
dataset.
(default: None, in which case the training length is set such that
at least
`num_seasons_to_train` seasons are included in the training.
See `num_seasons_to_train`)
num_periods_to_train
(Used only when `past_length` is not set)
Number of periods to include in the training time series. (default: 4)
Here period corresponds to the longest cycle one can expect given
the granularity of the time series.
See: https://stats.stackexchange.com/questions/120806/frequency
-value-for-seconds-minutes-intervals-data-in-r
trainer
Trainer object to be used (default: Trainer())
num_layers
Number of RNN layers (default: 2)
num_cells
Number of RNN cells for each layer (default: 40)
cell_type
Type of recurrent cells to use (available: 'lstm' or 'gru';
default: 'lstm')
num_parallel_samples
Number of evaluation samples per time series to increase parallelism
during inference.
This is a model optimization that does not affect the accuracy (
default: 100).
dropout_rate
Dropout regularization parameter (default: 0.1)
use_feat_dynamic_real
Whether to use the ``feat_dynamic_real`` field from the data
(default: False)
use_feat_static_cat
Whether to use the ``feat_static_cat`` field from the data
(default: True)
embedding_dimension
Dimension of the embeddings for categorical features
(default: [min(50, (cat+1)//2) for cat in cardinality])
scaling
Whether to automatically scale the target values (default: true)
time_features
Time features to use as inputs of the RNN (default: None, in which
case these are automatically determined based on freq)
noise_std_bounds
Lower and upper bounds for the standard deviation of the observation
noise
prior_cov_bounds
Lower and upper bounds for the diagonal of the prior covariance matrix
innovation_bounds
Lower and upper bounds for the standard deviation of the observation
noise
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
cardinality: List[int],
add_trend: bool = False,
past_length: Optional[int] = None,
num_periods_to_train: int = 4,
trainer: Trainer = Trainer(
epochs=100, num_batches_per_epoch=50, hybridize=False
),
num_layers: int = 2,
num_cells: int = 40,
cell_type: str = "lstm",
num_parallel_samples: int = 100,
dropout_rate: float = 0.1,
use_feat_dynamic_real: bool = False,
use_feat_static_cat: bool = True,
embedding_dimension: Optional[List[int]] = None,
issm: Optional[ISSM] = None,
scaling: bool = True,
time_features: Optional[List[TimeFeature]] = None,
noise_std_bounds: ParameterBounds = ParameterBounds(1e-6, 1.0),
prior_cov_bounds: ParameterBounds = ParameterBounds(1e-6, 1.0),
innovation_bounds: ParameterBounds = ParameterBounds(1e-6, 0.01),
) -> None:
super().__init__(trainer=trainer)
assert (
prediction_length > 0
), "The value of `prediction_length` should be > 0"
assert (
past_length is None or past_length > 0
), "The value of `past_length` should be > 0"
assert num_layers > 0, "The value of `num_layers` should be > 0"
assert num_cells > 0, "The value of `num_cells` should be > 0"
assert (
num_parallel_samples > 0
), "The value of `num_parallel_samples` should be > 0"
assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
assert not use_feat_static_cat or any(c > 1 for c in cardinality), (
f"Cardinality of at least one static categorical feature must be larger than 1 "
f"if `use_feat_static_cat=True`. But cardinality provided is: {cardinality}"
)
assert embedding_dimension is None or all(
e > 0 for e in embedding_dimension
), "Elements of `embedding_dimension` should be > 0"
assert all(
np.isfinite(p.lower) and np.isfinite(p.upper) and p.lower > 0
for p in [noise_std_bounds, prior_cov_bounds, innovation_bounds]
), "All parameter bounds should be finite, and lower bounds should be positive"
self.freq = freq
self.past_length = (
past_length
if past_length is not None
else num_periods_to_train * longest_period_from_frequency_str(freq)
)
self.prediction_length = prediction_length
self.add_trend = add_trend
self.num_layers = num_layers
self.num_cells = num_cells
self.cell_type = cell_type
self.num_parallel_samples = num_parallel_samples
self.scaling = scaling
self.dropout_rate = dropout_rate
self.use_feat_dynamic_real = use_feat_dynamic_real
self.use_feat_static_cat = use_feat_static_cat
self.cardinality = (
cardinality if cardinality and use_feat_static_cat else [1]
)
self.embedding_dimension = (
embedding_dimension
if embedding_dimension is not None
else [min(50, (cat + 1) // 2) for cat in self.cardinality]
)
self.issm = (
issm
if issm is not None
else CompositeISSM.get_from_freq(freq, add_trend)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.noise_std_bounds = noise_std_bounds
self.prior_cov_bounds = prior_cov_bounds
self.innovation_bounds = innovation_bounds
def create_transformation(self) -> Transformation:
remove_field_names = [
FieldName.FEAT_DYNAMIC_CAT,
FieldName.FEAT_STATIC_REAL,
]
if not self.use_feat_dynamic_real:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
return Chain(
[RemoveFields(field_names=remove_field_names)]
+ (
[SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
if not self.use_feat_static_cat
else []
)
+ [
AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
AsNumpyArray(field=FieldName.TARGET, expected_ndim=1),
# gives target the (1, T) layout
ExpandDimArray(field=FieldName.TARGET, axis=0),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
),
# Unnormalized seasonal features
AddTimeFeatures(
time_features=CompositeISSM.seasonal_features(self.freq),
pred_length=self.prediction_length,
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=SEASON_INDICATORS_FIELD,
),
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=self.time_features,
pred_length=self.prediction_length,
),
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
log_scale=True,
),
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
+ (
[FieldName.FEAT_DYNAMIC_REAL]
if self.use_feat_dynamic_real
else []
),
),
CanonicalInstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
instance_sampler=TestSplitSampler(),
time_series_fields=[
FieldName.FEAT_TIME,
SEASON_INDICATORS_FIELD,
FieldName.OBSERVED_VALUES,
],
allow_target_padding=True,
instance_length=self.past_length,
use_prediction_features=True,
prediction_length=self.prediction_length,
),
]
)
def create_training_network(self) -> DeepStateTrainingNetwork:
return DeepStateTrainingNetwork(
num_layers=self.num_layers,
num_cells=self.num_cells,
cell_type=self.cell_type,
past_length=self.past_length,
prediction_length=self.prediction_length,
issm=self.issm,
dropout_rate=self.dropout_rate,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
scaling=self.scaling,
noise_std_bounds=self.noise_std_bounds,
prior_cov_bounds=self.prior_cov_bounds,
innovation_bounds=self.innovation_bounds,
)
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Wrap a trained network into a ready-to-use `Predictor`.

        Parameters
        ----------
        transformation
            Preprocessing pipeline to apply to input data at prediction time.
        trained_network
            Network whose learned parameters are copied into the freshly
            constructed prediction network.

        Returns
        -------
        Predictor
            A `RepresentableBlockPredictor` configured for inference.
        """
        prediction_network = DeepStatePredictionNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            past_length=self.past_length,
            prediction_length=self.prediction_length,
            issm=self.issm,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            scaling=self.scaling,
            num_parallel_samples=self.num_parallel_samples,
            noise_std_bounds=self.noise_std_bounds,
            prior_cov_bounds=self.prior_cov_bounds,
            innovation_bounds=self.innovation_bounds,
            params=trained_network.collect_params(),
        )
        # Transfer the learned weights into the prediction network.
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 14,462 | 38.408719 | 106 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deep_factor/RNNModel.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet.gluon import HybridBlock, nn
# First-party imports
from gluonts.block.rnn import RNN
from gluonts.core.component import validated
class RNNModel(HybridBlock):
    """A recurrent network (configurable cell type) followed by a dense
    projection applied at every time step.
    """

    @validated()
    def __init__(
        self,
        mode,
        num_hidden,
        num_layers,
        num_output,
        bidirectional=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_output = num_output
        with self.name_scope():
            # Recurrent feature extractor; `mode` selects the cell type.
            self.rnn = RNN(
                mode=mode,
                num_hidden=num_hidden,
                num_layers=num_layers,
                bidirectional=bidirectional,
            )
            # Per-time-step projection; flatten=False keeps the time axis.
            self.decoder = nn.Dense(
                num_output, in_units=num_hidden, flatten=False
            )

    def hybrid_forward(self, F, inputs):
        """Run the RNN over `inputs` and project its states to
        `num_output` units per time step."""
        hidden_states = self.rnn(inputs)
        return self.decoder(hidden_states)
| 1,462 | 28.26 | 75 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/deep_factor/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from mxnet.gluon import HybridBlock
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class DeepFactorNetworkBase(HybridBlock):
    """Shared logic for the DeepFactor training/prediction networks.

    Combines a ``global_model`` (producing latent factors common to all
    series from time features) with a ``local_model`` (producing a
    per-series random effect), mixed via factor loadings learned from the
    embedded static categorical features.
    """
    def __init__(
        self,
        global_model: HybridBlock,
        local_model: HybridBlock,
        embedder: FeatureEmbedder,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.global_model = global_model
        self.local_model = local_model
        self.embedder = embedder
        with self.name_scope():
            # Maps category embeddings to per-series factor loadings.
            self.loading = nn.Dense(
                units=global_model.num_output, use_bias=False
            )
    def assemble_features(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> Tensor:  # (batch_size, history_length, num_features)
        """Embed the static categories and tile them along the time axis.

        NOTE(review): despite the ``Tensor`` annotation, this returns the
        pair ``(embedded_cat, input_feat)``.
        """
        # todo: this is shared by more than one places, and should be a general routine
        embedded_cat = self.embedder(
            feat_static_cat
        )  # (batch_size, num_features * embedding_size)
        # a workaround when you wish to repeat without knowing the number
        # of repeats
        helper_ones = F.ones_like(
            F.slice_axis(time_feat, axis=2, begin=-1, end=None)
        )
        # (batch_size, history_length, num_features * embedding_size)
        repeated_cat = F.batch_dot(
            helper_ones, F.expand_dims(embedded_cat, axis=1)
        )
        # putting together all the features
        input_feat = F.concat(repeated_cat, time_feat, dim=2)
        return embedded_cat, input_feat
    def compute_global_local(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> (Tensor, Tensor):  # both of size (batch_size, history_length, 1)
        """Return the (fixed_effect, random_effect) pair for each series.

        The fixed effect is exp(global factors x loadings); the random
        effect is a softplus of the local model's output.
        """
        cat, local_input = self.assemble_features(
            F, feat_static_cat, time_feat
        )
        loadings = self.loading(cat)  # (batch_size, num_factors)
        global_factors = self.global_model(
            time_feat
        )  # (batch_size, history_length, num_factors)
        fixed_effect = F.batch_dot(
            global_factors, loadings.expand_dims(axis=2)
        )  # (batch_size, history_length, 1)
        # softplus written as log(exp(x) + 1) to keep the value positive
        random_effect = F.log(
            F.exp(self.local_model(local_input)) + 1.0
        )  # (batch_size, history_length, 1)
        return F.exp(fixed_effect), random_effect
    def hybrid_forward(self, F, x, *args, **kwargs):
        # Subclasses implement the actual forward pass.
        raise NotImplementedError
    def negative_normal_likelihood(self, F, y, mu, sigma):
        """Element-wise negative log-likelihood of y under N(mu, sigma^2)."""
        return (
            F.log(sigma)
            + 0.5 * math.log(2 * math.pi)
            + 0.5 * F.square((y - mu) / sigma)
        )
class DeepFactorTrainingNetwork(DeepFactorNetworkBase):
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        past_time_feat: Tensor,
        # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length)
    ) -> Tensor:
        """Compute the training loss.

        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        past_target
            Shape: (batch_size, history_length)

        Returns
        -------
        Tensor
            A batch of negative log likelihoods.
        """
        # Fixed effect plays the role of the mean, random effect of the
        # standard deviation of a Gaussian likelihood.
        mean, std = self.compute_global_local(
            F, feat_static_cat, past_time_feat
        )
        observations = past_target.expand_dims(axis=2)
        return self.negative_normal_likelihood(F, observations, mean, std)
class DeepFactorPredictionNetwork(DeepFactorNetworkBase):
    """Prediction-time network: draws Gaussian samples from the
    (fixed_effect, random_effect) parameterization and returns the slice
    covering the prediction range."""
    @validated()
    def __init__(
        self, prediction_len: int, num_parallel_samples: int, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        # Number of future time steps to forecast.
        self.prediction_len = prediction_len
        # Number of sample paths drawn per series.
        self.num_parallel_samples = num_parallel_samples
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        past_target: Tensor,
    ) -> Tensor:
        """
        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        future_time_feat
            Shape: (batch_size, prediction_length, num_features)
        past_target
            Shape: (batch_size, history_length)
        Returns
        -------
        Tensor
            Samples of shape (batch_size, num_samples, prediction_length).
        """
        # Effects are computed over the full (past + future) range at once.
        time_feat = F.concat(past_time_feat, future_time_feat, dim=1)
        fixed_effect, random_effect = self.compute_global_local(
            F, feat_static_cat, time_feat
        )
        # Draw num_parallel_samples independent Gaussian sample paths and
        # stack them along the last axis.
        samples = F.concat(
            *[
                F.sample_normal(fixed_effect, random_effect)
                for _ in range(self.num_parallel_samples)
            ],
            dim=2,
        )  # (batch_size, train_len + prediction_len, num_samples)
        # Keep only the prediction range.
        pred_samples = F.slice_axis(
            samples, axis=1, begin=-self.prediction_len, end=None
        )  # (batch_size, prediction_len, num_samples)
        return pred_samples.swapaxes(1, 2)
| 6,153 | 30.88601 | 87 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/seq2seq/_seq2seq_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts import transform
from gluonts.block.decoder import OneShotDecoder
from gluonts.block.enc2dec import PassThroughEnc2Dec
from gluonts.block.encoder import (
HierarchicalCausalConv1DEncoder,
RNNCovariateEncoder,
MLPEncoder,
Seq2SeqEncoder,
)
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.quantile_output import QuantileOutput
from gluonts.block.scaler import NOPScaler, Scaler
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.model.estimator import GluonEstimator
from gluonts.model.forecast import QuantileForecast, Quantile
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import time_features_from_frequency_str
from gluonts.trainer import Trainer
from gluonts.transform import ExpectedNumInstanceSampler
from gluonts.model.forecast_generator import QuantileForecastGenerator
# Relative imports
from ._seq2seq_network import Seq2SeqPredictionNetwork, Seq2SeqTrainingNetwork
class Seq2SeqEstimator(GluonEstimator):
    """
    Quantile-Regression Sequence-to-Sequence Estimator.

    Parameters
    ----------
    freq
        Frequency of the time series (e.g. "1H").
    prediction_length
        Number of future time steps to predict; must be > 0.
    cardinality
        Number of values each static categorical feature can take.
    embedding_dimension
        Embedding size used for every static categorical feature.
    encoder
        Encoder block mapping the conditioning range to a representation.
    decoder_mlp_layer
        Hidden-layer sizes of the decoder MLP.
    decoder_mlp_static_dim
        Number of static decoder outputs per time step.
    scaler
        Scaler applied to the target before encoding (no-op by default).
    context_length
        Length of the conditioning range; defaults to prediction_length.
    quantiles
        Quantile levels for which predictions are produced.
    trainer
        Trainer configuration.
    num_parallel_samples
        Number of parallel samples at prediction time.
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: List[int],
        embedding_dimension: int,
        encoder: Seq2SeqEncoder,
        decoder_mlp_layer: List[int],
        decoder_mlp_static_dim: int,
        scaler: Scaler = NOPScaler(),
        context_length: Optional[int] = None,
        quantiles: List[float] = [0.1, 0.5, 0.9],
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
    ) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        super().__init__(trainer=trainer)
        # Default the conditioning range to the prediction range.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.freq = freq
        self.quantiles = quantiles
        self.encoder = encoder
        self.decoder_mlp_layer = decoder_mlp_layer
        self.decoder_mlp_static_dim = decoder_mlp_static_dim
        self.scaler = scaler
        # One embedding per categorical feature, all of the same size.
        self.embedder = FeatureEmbedder(
            cardinalities=cardinality,
            embedding_dims=[embedding_dimension for _ in cardinality],
        )
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> transform.Transformation:
        """Build the preprocessing pipeline: time features, a dynamic-real
        feature stack, default static categories, and instance splitting."""
        return transform.Chain(
            trans=[
                transform.AsNumpyArray(
                    field=FieldName.TARGET, expected_ndim=1
                ),
                transform.AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=time_features_from_frequency_str(self.freq),
                    pred_length=self.prediction_length,
                ),
                transform.VstackFeatures(
                    output_field=FieldName.FEAT_DYNAMIC_REAL,
                    input_fields=[FieldName.FEAT_TIME],
                ),
                # Inject a dummy static categorical feature if absent.
                transform.SetFieldIfNotPresent(
                    field=FieldName.FEAT_STATIC_CAT, value=[0.0]
                ),
                transform.AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT, expected_ndim=1
                ),
                transform.InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.context_length,
                    future_length=self.prediction_length,
                    time_series_fields=[FieldName.FEAT_DYNAMIC_REAL],
                ),
            ]
        )
    def create_training_network(self) -> mx.gluon.HybridBlock:
        """Assemble the encoder/enc2dec/decoder/quantile-output training
        network from the configured components."""
        distribution = QuantileOutput(self.quantiles)
        enc2dec = PassThroughEnc2Dec()
        decoder = OneShotDecoder(
            decoder_length=self.prediction_length,
            layer_sizes=self.decoder_mlp_layer,
            static_outputs_per_time_step=self.decoder_mlp_static_dim,
        )
        training_network = Seq2SeqTrainingNetwork(
            embedder=self.embedder,
            scaler=self.scaler,
            encoder=self.encoder,
            enc2dec=enc2dec,
            decoder=decoder,
            quantile_output=distribution,
        )
        return training_network
    def create_predictor(
        self,
        transformation: transform.Transformation,
        trained_network: Seq2SeqTrainingNetwork,
    ) -> Predictor:
        """Wrap the trained network into a quantile-forecast predictor,
        reusing all trained sub-blocks."""
        # todo: this is specific to quantile output
        quantile_strs = [
            Quantile.from_float(quantile).name for quantile in self.quantiles
        ]
        prediction_network = Seq2SeqPredictionNetwork(
            embedder=trained_network.embedder,
            scaler=trained_network.scaler,
            encoder=trained_network.encoder,
            enc2dec=trained_network.enc2dec,
            decoder=trained_network.decoder,
            quantile_output=trained_network.quantile_output,
        )
        # Copy learned weights into the prediction network.
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            forecast_generator=QuantileForecastGenerator(quantile_strs),
        )
class MLP2QRForecaster(Seq2SeqEstimator):
    """Seq2Seq estimator using an MLP encoder and a quantile-regression
    MLP decoder.

    All parameters other than ``encoder_mlp_layer`` (hidden-layer sizes of
    the MLP encoder) are forwarded unchanged to :class:`Seq2SeqEstimator`.
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: List[int],
        embedding_dimension: int,
        encoder_mlp_layer: List[int],
        decoder_mlp_layer: List[int],
        decoder_mlp_static_dim: int,
        # Bug fix: previously defaulted to the NOPScaler *class*; the base
        # class expects a Scaler *instance* (cf. Seq2SeqEstimator's
        # `NOPScaler()` default) and calls `self.scaler(...)` on it.
        scaler: Scaler = NOPScaler(),
        context_length: Optional[int] = None,
        quantiles: List[float] = [0.1, 0.5, 0.9],
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
    ) -> None:
        encoder = MLPEncoder(layer_sizes=encoder_mlp_layer)
        super().__init__(
            freq=freq,
            prediction_length=prediction_length,
            encoder=encoder,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            decoder_mlp_layer=decoder_mlp_layer,
            decoder_mlp_static_dim=decoder_mlp_static_dim,
            context_length=context_length,
            scaler=scaler,
            quantiles=quantiles,
            trainer=trainer,
            num_parallel_samples=num_parallel_samples,
        )
class RNN2QRForecaster(Seq2SeqEstimator):
    """Seq2Seq estimator using an RNN encoder (with covariates) and a
    quantile-regression MLP decoder.

    ``encoder_rnn_model`` selects the cell type (e.g. "lstm"); the
    remaining parameters are forwarded to :class:`Seq2SeqEstimator`.
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: List[int],
        embedding_dimension: int,
        encoder_rnn_layer: int,
        encoder_rnn_num_hidden: int,
        decoder_mlp_layer: List[int],
        decoder_mlp_static_dim: int,
        encoder_rnn_model: str = "lstm",
        encoder_rnn_bidirectional: bool = True,
        # Bug fix: previously defaulted to the NOPScaler *class*; the base
        # class expects a Scaler *instance* (cf. Seq2SeqEstimator's
        # `NOPScaler()` default) and calls `self.scaler(...)` on it.
        scaler: Scaler = NOPScaler(),
        context_length: Optional[int] = None,
        quantiles: List[float] = [0.1, 0.5, 0.9],
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
    ) -> None:
        encoder = RNNCovariateEncoder(
            mode=encoder_rnn_model,
            hidden_size=encoder_rnn_num_hidden,
            num_layers=encoder_rnn_layer,
            bidirectional=encoder_rnn_bidirectional,
        )
        super().__init__(
            freq=freq,
            prediction_length=prediction_length,
            encoder=encoder,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            decoder_mlp_layer=decoder_mlp_layer,
            decoder_mlp_static_dim=decoder_mlp_static_dim,
            context_length=context_length,
            scaler=scaler,
            quantiles=quantiles,
            trainer=trainer,
            num_parallel_samples=num_parallel_samples,
        )
class CNN2QRForecaster(Seq2SeqEstimator):
    """Seq2Seq estimator using a hierarchical causal-convolution encoder
    (dilated Conv1D stack) and a quantile-regression MLP decoder.

    All parameters are forwarded to :class:`Seq2SeqEstimator`; the encoder
    architecture (three 30-channel layers, dilations 1/3/9, kernel size 3)
    is fixed.
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: List[int],
        embedding_dimension: int,
        decoder_mlp_layer: List[int],
        decoder_mlp_static_dim: int,
        # Bug fix: previously defaulted to the NOPScaler *class*; the base
        # class expects a Scaler *instance* (cf. Seq2SeqEstimator's
        # `NOPScaler()` default) and calls `self.scaler(...)` on it.
        scaler: Scaler = NOPScaler(),
        context_length: Optional[int] = None,
        quantiles: List[float] = [0.1, 0.5, 0.9],
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
    ) -> None:
        # Name the channel configuration once instead of repeating the
        # literal list (the original computed len([30, 30, 30]) inline).
        channels_seq = [30, 30, 30]
        encoder = HierarchicalCausalConv1DEncoder(
            dilation_seq=[1, 3, 9],
            kernel_size_seq=[3] * len(channels_seq),
            channels_seq=channels_seq,
            use_residual=True,
            use_covariates=True,
        )
        super().__init__(
            freq=freq,
            prediction_length=prediction_length,
            encoder=encoder,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            decoder_mlp_layer=decoder_mlp_layer,
            decoder_mlp_static_dim=decoder_mlp_static_dim,
            context_length=context_length,
            scaler=scaler,
            quantiles=quantiles,
            trainer=trainer,
            num_parallel_samples=num_parallel_samples,
        )
| 10,621 | 34.644295 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/seq2seq/_forking_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet import gluon, nd
# First-party imports
from gluonts.block.decoder import Seq2SeqDecoder
from gluonts.block.enc2dec import Seq2SeqEnc2Dec
from gluonts.block.encoder import Seq2SeqEncoder
from gluonts.block.quantile_output import QuantileOutput
from gluonts.core.component import validated
from gluonts.model.common import Tensor
# Empty NDArray used as a placeholder wherever a feature input is absent.
nd_None = nd.array([])
class ForkingSeq2SeqNetworkBase(gluon.HybridBlock):
    """
    Base network for the :class:`ForkingSeq2SeqEstimator`.
    Parameters
    ----------
    encoder: Seq2SeqEncoder
        encoder block
    enc2dec: Seq2SeqEnc2Dec
        encoder to decoder mapping block
    decoder: Seq2SeqDecoder
        decoder block
    quantile_output: QuantileOutput
        quantile output block
    kwargs: dict
        dictionary of Gluon HybridBlock parameters
    """
    @validated()
    def __init__(
        self,
        encoder: Seq2SeqEncoder,
        enc2dec: Seq2SeqEnc2Dec,
        decoder: Seq2SeqDecoder,
        quantile_output: QuantileOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.encoder = encoder
        self.enc2dec = enc2dec
        self.decoder = decoder
        self.quantile_output = quantile_output
        with self.name_scope():
            # Projection from decoder outputs to one value per quantile,
            # and the corresponding quantile (pinball) loss.
            self.quantile_proj = quantile_output.get_quantile_proj()
            self.loss = quantile_output.get_loss()
class ForkingSeq2SeqTrainingNetwork(ForkingSeq2SeqNetworkBase):
    # noinspection PyMethodOverriding
    def hybrid_forward(
        self, F, past_target: Tensor, future_target: Tensor
    ) -> Tensor:
        """Compute the quantile loss of the decoder's predictions.

        Parameters
        ----------
        F: mx.symbol or mx.ndarray
            Gluon function space
        past_target: Tensor
            FIXME
        future_target: Tensor
            shape (num_ts, encoder_length, 1) FIXME

        Returns
        -------
        loss with shape (FIXME, FIXME)
        """
        # FIXME: can we factor out a common prefix in the base network?
        # No real-valued covariates are used in this setup: pass the empty
        # placeholder for all three feature arguments.
        empty = nd_None
        static_enc, dynamic_enc = self.encoder(past_target, empty, empty)
        static_dec, dynamic_dec, _ = self.enc2dec(
            static_enc, dynamic_enc, empty
        )
        quantile_preds = self.quantile_proj(
            self.decoder(dynamic_dec, static_dec)
        )
        return self.loss(future_target, quantile_preds).mean(axis=1)
class ForkingSeq2SeqPredictionNetwork(ForkingSeq2SeqNetworkBase):
    # noinspection PyMethodOverriding
    def hybrid_forward(self, F, past_target: Tensor) -> Tensor:
        """
        Parameters
        ----------
        F: mx.symbol or mx.ndarray
            Gluon function space
        past_target: Tensor
            FIXME
        Returns
        -------
        prediction tensor with shape (FIXME, FIXME)
        """
        # FIXME: can we factor out a common prefix in the base network?
        # No real-valued covariates at prediction time either; use the
        # empty placeholder for all three feature arguments.
        feat_static_real = nd_None
        past_feat_dynamic_real = nd_None
        future_feat_dynamic_real = nd_None
        enc_output_static, enc_output_dynamic = self.encoder(
            past_target, feat_static_real, past_feat_dynamic_real
        )
        # Some encoders produce no static output; normalize to the empty
        # placeholder so enc2dec always receives a tensor.
        enc_output_static = (
            nd_None if enc_output_static is None else enc_output_static
        )
        dec_inp_static, dec_inp_dynamic, _ = self.enc2dec(
            enc_output_static, enc_output_dynamic, future_feat_dynamic_real
        )
        dec_output = self.decoder(dec_inp_dynamic, dec_inp_static)
        # Keep only the last fork, i.e. the forecast anchored at the final
        # observed time step.
        fcst_output = F.slice_axis(dec_output, axis=1, begin=-1, end=None)
        fcst_output = F.squeeze(fcst_output, axis=1)
        # Swap the last two axes so quantiles come before time.
        predictions = self.quantile_proj(fcst_output).swapaxes(2, 1)
        return predictions
| 4,527 | 30.013699 | 75 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/seq2seq/_seq2seq_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.decoder import Seq2SeqDecoder
from gluonts.block.enc2dec import Seq2SeqEnc2Dec
from gluonts.block.encoder import Seq2SeqEncoder
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.quantile_output import QuantileOutput
from gluonts.block.scaler import Scaler
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class Seq2SeqNetworkBase(mx.gluon.HybridBlock):
    """
    Base network for the :class:`Seq2SeqEstimator`.
    Parameters
    ----------
    scaler : Scaler
        scale of the target time series, both as input or in the output
        distributions
    encoder : encoder
        see encoder.py for possible choices
    enc2dec : encoder to decoder
        see enc2dec.py for possible choices
    decoder : decoder
        see decoder.py for possible choices
    quantile_output : QuantileOutput
        quantile regression output
    kwargs : dict
        a dict of parameters to be passed to the parent initializer
    """
    @validated()
    def __init__(
        self,
        embedder: FeatureEmbedder,
        scaler: Scaler,
        encoder: Seq2SeqEncoder,
        enc2dec: Seq2SeqEnc2Dec,
        decoder: Seq2SeqDecoder,
        quantile_output: QuantileOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.embedder = embedder
        self.scaler = scaler
        self.encoder = encoder
        self.enc2dec = enc2dec
        self.decoder = decoder
        self.quantile_output = quantile_output
        with self.name_scope():
            # Projection to one output per quantile, plus its loss.
            self.quantile_proj = quantile_output.get_quantile_proj()
            self.loss = quantile_output.get_loss()
    def compute_decoder_outputs(
        self,
        F,
        past_target: Tensor,
        feat_static_cat: Tensor,
        past_feat_dynamic_real: Tensor,
        future_feat_dynamic_real: Tensor,
    ) -> Tensor:
        """Run scaler -> embedder -> encoder -> enc2dec -> decoder and
        rescale the decoder output back to the target's original scale."""
        scaled_target, scale = self.scaler(
            past_target, F.ones_like(past_target)
        )
        embedded_cat = self.embedder(
            feat_static_cat
        )  # (batch_size, num_features * embedding_size)
        encoder_output_static, encoder_output_dynamic = self.encoder(
            scaled_target, embedded_cat, past_feat_dynamic_real
        )
        # NOTE(review): the middle enc2dec output is discarded here and the
        # third is used as the dynamic input — confirm against enc2dec.py.
        decoder_input_static, _, decoder_input_dynamic = self.enc2dec(
            encoder_output_static,
            encoder_output_dynamic,
            future_feat_dynamic_real,
        )
        decoder_output = self.decoder(
            decoder_input_static, decoder_input_dynamic
        )
        # Undo the input scaling on the outputs.
        scaled_decoder_output = F.broadcast_mul(
            decoder_output, scale.expand_dims(-1).expand_dims(-1)
        )
        return scaled_decoder_output
class Seq2SeqTrainingNetwork(Seq2SeqNetworkBase):
    # noinspection PyMethodOverriding
    def hybrid_forward(
        self,
        F,
        past_target: Tensor,
        future_target: Tensor,
        feat_static_cat: Tensor,
        past_feat_dynamic_real: Tensor,
        future_feat_dynamic_real: Tensor,
    ) -> Tensor:
        """
        Computes the quantile loss of the network's predictions against
        the future target.
        Parameters
        ----------
        F: mx.symbol or mx.ndarray
            Gluon function space
        past_target: mx.nd.NDArray or mx.sym.Symbol
            past target
        future_target: mx.nd.NDArray or mx.sym.Symbol
            future target
        feat_static_cat: mx.nd.NDArray or mx.sym.Symbol
            static categorical features
        past_feat_dynamic_real: mx.nd.NDArray or mx.sym.Symbol
            past dynamic real-valued features
        future_feat_dynamic_real: mx.nd.NDArray or mx.sym.Symbol
            future dynamic real-valued features
        Returns
        -------
        mx.nd.NDArray or mx.sym.Symbol
            the computed loss (unreduced; returned as produced by the
            quantile loss)
        """
        scaled_decoder_output = self.compute_decoder_outputs(
            F,
            past_target=past_target,
            feat_static_cat=feat_static_cat,
            past_feat_dynamic_real=past_feat_dynamic_real,
            future_feat_dynamic_real=future_feat_dynamic_real,
        )
        projected = self.quantile_proj(scaled_decoder_output)
        loss = self.loss(future_target, projected)
        # TODO: there used to be "nansum" here, to be fully equivalent we
        # TODO: should have a "nanmean" here
        # TODO: shouldn't we sum and divide by the number of observed values
        # TODO: here?
        return loss
class Seq2SeqPredictionNetwork(Seq2SeqNetworkBase):
    # noinspection PyMethodOverriding
    def hybrid_forward(
        self,
        F,
        past_target: Tensor,
        feat_static_cat: Tensor,
        past_feat_dynamic_real: Tensor,
        future_feat_dynamic_real: Tensor,
    ) -> Tensor:
        """Produce quantile predictions over the forecast horizon.

        Parameters
        ----------
        F: mx.symbol or mx.ndarray
            Gluon function space
        past_target: mx.nd.NDArray or mx.sym.Symbol
            past target
        feat_static_cat: mx.nd.NDArray or mx.sym.Symbol
            static categorical features
        past_feat_dynamic_real: mx.nd.NDArray or mx.sym.Symbol
            past dynamic real-valued features
        future_feat_dynamic_real: mx.nd.NDArray or mx.sym.Symbol
            future dynamic real-valued features

        Returns
        -------
        mx.nd.NDArray or mx.sym.Symbol
            the predicted sequence
        """
        decoder_out = self.compute_decoder_outputs(
            F,
            past_target=past_target,
            feat_static_cat=feat_static_cat,
            past_feat_dynamic_real=past_feat_dynamic_real,
            future_feat_dynamic_real=future_feat_dynamic_real,
        )
        quantile_values = self.quantile_proj(decoder_out)
        # Swap the last two axes so quantiles come before time.
        return quantile_values.swapaxes(2, 1)
| 6,383 | 31.738462 | 78 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/transformer/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    """A single transformer encoder block.

    Consists of an input projection, a multi-head self-attention layer and
    a position-wise feed-forward layer, each wrapped in the configured
    pre/post process blocks (normalization/dropout/residual sequences
    selected by ``config["pre_seq"]`` / ``config["post_seq"]``).
    """
    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        # Length of the conditioning range handled by this encoder.
        self.encoder_length = encoder_length
        with self.name_scope():
            # Projects raw inputs to the model dimension.
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.enc_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postfftransformerprocessblock_",
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.
        """
        # input layer
        inputs = self.enc_input_layer(data)
        # self-attention (no mask at encoding time, hence the None)
        data_self_att, _ = self.enc_self_att(
            self.enc_pre_self_att(inputs, None)
        )
        data = self.enc_post_self_att(data_self_att, inputs)
        # feed-forward
        data_ff = self.enc_ff(data)
        data = self.enc_post_ff(data_ff, data)
        return data
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/transformer/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Returns a tensor with head dimension folded into batch and last dimension divided by the number of heads.
    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads
    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # Expose the head axis: (batch_size, time_length, heads, dim_per_head).
    # The special shape codes keep the batch axis (0) and infer time (-1).
    with_heads = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # Move heads next to batch: (batch_size, heads, time_length, dim_per_head).
    reordered = F.transpose(data=with_heads, axes=(0, 2, 1, 3))
    # Merge batch and heads (-3 collapses the first two axes).
    return F.reshape(data=reordered, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional mask tensor
    dropout
        Dropout rate
    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # Similarity of every query with every key: (n, lq, lk).
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        # Additive mask (e.g. -inf at disallowed positions).
        scores = F.broadcast_add(scores, mask)
    weights = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        weights = F.Dropout(weights, p=dropout)
    # Weighted sum of values: (n, lq, lk) x (n, lk, dv) -> (n, lq, dv).
    return F.batch_dot(lhs=weights, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads
    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # Un-merge batch and heads (-4 splits the first axis):
    # (batch_size, heads, time_length, dim_per_head).
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # (batch_size, time_length, heads, dim_per_head)
    reordered = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # Concatenate the heads back into a single feature axis.
    return F.reshape(reordered, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Implements layer normalization as proposed in [BKH16]_.
    """
    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        # scale_init / shift_init: initializers for the learnable
        # gamma (scale) and beta (shift) parameters; eps is added to the
        # variance for numerical stability.
        super().__init__(**kwargs)
        self.scale_init = scale_init
        self.shift_init = shift_init
        with self.name_scope():
            # Delegates the actual computation to Gluon's LayerNorm over
            # the last axis.
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=self.scale_init,
                beta_initializer=self.shift_init,
                epsilon=eps,
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalizes hidden units of data as follows:
        data = scale * (data - mean) / sqrt(var + eps) + shift
        Normalization is performed over the last dimension of the input data.
        Parameters
        ----------
        data
            Data to normalize of shape (d0, ..., dn, num_hidden)
        Returns
        -------
        Normalized inputs of shape: (d0, ..., dn, num_hidden)
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""Project inputs to the model dimension with a single dense layer:
    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size).
    """

    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            # flatten=False applies the projection independently per
            # time step, preserving the (batch, time, ...) layout.
            self.net = mx.gluon.nn.Dense(
                units=self.model_size, flatten=False
            )

    def hybrid_forward(self, F, data: Tensor, *args):
        projected = self.net(data)
        return projected
class MultiHeadAttentionBase(HybridBlock):
    """
    Shared machinery for the multi-head attention layers: head bookkeeping,
    the scaled dot-product attention itself, and the output projection.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )

        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        self.dim_per_head = att_dim_in // heads

        with self.name_scope():
            # final projection applied after the heads are recombined
            self.dense_att = mx.gluon.nn.Dense(
                units=self.att_dim_out, flatten=False
            )

    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Compute the context vectors of multi-head dot-product attention.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask
            Optional additive mask on the attention logits

        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # scale queries by 1/sqrt(dim_per_head)
        scaled_queries = queries * (self.dim_per_head ** -0.5)

        # fold heads into the batch axis: (batch_size * heads, length, dim_per_head)
        q = split_heads(F, scaled_queries, self.dim_per_head, self.heads)
        k = split_heads(F, keys, self.dim_per_head, self.heads)
        v = split_heads(F, values, self.dim_per_head, self.heads)

        # (batch_size * heads, query_max_length, dim_per_head)
        per_head_contexts = dot_attention(
            F, q, k, v, mask=mask, dropout=self.dropout
        )

        # back to (batch_size, query_max_length, input_dim)
        merged = combine_heads(F, per_head_contexts, self.dim_per_head, self.heads)

        # project to (batch_size, query_max_length, output_dim)
        return self.dense_att(merged)

    def hybrid_forward(self, F, *args, **kwargs):
        # implemented by the concrete subclasses
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention: a single linear layer produces the query, key,
    and value projections of the input, which then attend to each other.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)

        with self.name_scope():
            # one dense layer emitting Q, K and V stacked along the feature axis
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Compute multi-head self-attention on ``inputs`` (used as queries,
        keys, and values simultaneously). An optional mask is added to the
        attention scores; an optional cache of previously computed keys and
        values is extended and returned.

        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values

        Returns
        -------
        Tensor
            A tensor of shape (batch_size, max_length, att_dim_out)
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v
        # (batch_size, max_length, att_dim_in * 3)
        projected = self.dense_pre_satt(inputs)

        # split into queries, keys and values, each (batch_size, max_length, att_dim_in)
        queries, keys, values = F.split(data=projected, num_outputs=3, axis=2)

        if cache is not None:
            # extend the cached keys/values with the new step(s), then store
            if "k" in cache:
                keys = F.concat(cache["k"], keys, dim=1)
            cache["k"] = keys
            if "v" in cache:
                values = F.concat(cache["v"], values, dim=1)
            cache["v"] = values

        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head attention where the queries come from one tensor and the
    keys/values come from a separate memory tensor.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)

        with self.name_scope():
            # separate projections for queries, keys, and values
            self.dense_pre_att_q = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_k = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_v = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Compute multi-head attention of ``queries`` over ``memory``. An
        optional mask is added to the attention scores.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores

        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q; K = V -> K * W_k, V * W_v
        projected_q = self.dense_pre_att_q(queries)  # (batch, query_max_length, att_dim_in)
        projected_k = self.dense_pre_att_k(memory)   # (batch, memory_max_length, att_dim_in)
        projected_v = self.dense_pre_att_v(memory)   # (batch, memory_max_length, att_dim_in)

        return self._attend(F, projected_q, projected_k, projected_v, mask=mask)
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation:

    .. math::
        activation(XW_1 + b_1)W_2 + b_2

    :math:`W_1`: (batch_size, d, inner_dim)
    :math:`W_2`: (batch_size, inner_dim, out_dim)
    """

    def __init__(
        self,
        inner_dim: int = 32,  # W1: (batch_size, d, inner_dim)
        out_dim: int = 32,  # W2: (batch_size, inner_dim, out_dim)
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type

        with self.name_scope():
            layers = mx.gluon.nn.HybridSequential()
            # expansion layer with activation
            layers.add(
                mx.gluon.nn.Dense(
                    units=inner_dim,
                    use_bias=True,
                    activation=act_type,
                    flatten=False,
                )
            )
            if dropout > 0.0:
                layers.add(mx.gluon.nn.Dropout(dropout))
            # contraction layer, deliberately without activation
            layers.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )
            self.mlp = layers

    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Apply the feed-forward network position-wise.

        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)

        Returns
        -------
        Tensor of shape (batch_size, d1, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Pre/post-processing block applied around transformer sub-layers.

    The ``sequence`` string is interpreted left to right, one operation per
    character:

        n: layer normalization
        r: residual connection
        d: dropout
    """

    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        # the normalization child is only created when it is actually used
        self.layer_norm = LayerNormalization() if "n" in sequence else None

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Apply the configured processing sequence to ``data``.

        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden); required
            when the sequence contains a residual connection.

        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data

        if prev is None:
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."

        for op in self.sequence:
            if op == "d":
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            elif op == "n":
                data = self.layer_norm(data)
            elif op == "r":
                data = F.broadcast_add(data, prev)
            else:
                raise ValueError("Unknown step in sequence: %s" % op)

        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/transformer/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    """
    A single transformer decoder block: masked self-attention, followed by
    encoder-decoder attention, followed by a position-wise feed-forward
    network, each wrapped in configurable pre/post processing blocks.

    During inference (``is_train=False``), the keys/values computed by the
    self-attention are kept in ``self.cache`` so that decoding one step at a
    time does not recompute attention over the whole prefix.
    """

    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        # decoder_length: number of time steps the decoder is unrolled over
        # config: hyper-parameter dictionary; keys used here are "model_dim",
        # "pre_seq", "post_seq", "num_heads", "dropout_rate",
        # "inner_ff_dim_scale" and "act_type"
        super().__init__(**kwargs)

        self.decoder_length = decoder_length
        # key/value cache used for step-by-step decoding at inference time
        self.cache = {}

        with self.name_scope():
            # projects raw decoder inputs to the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])

            # --- masked self-attention sub-layer with pre/post processing ---
            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )

            # --- encoder-decoder attention sub-layer ---
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postatttransformerprocessblock_",
            )

            # --- position-wise feed-forward sub-layer ---
            self.dec_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.dec_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postffransformerprocessblock_",
            )

    def cache_reset(self):
        # Drop the key/value cache; called after each full sample path is
        # decoded so the next path starts from an empty prefix.
        self.cache = {}

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        A transformer decoder block: self-attention, encoder-decoder
        attention, and a feed-forward layer, with pre/post process blocks in
        between.

        Parameters
        ----------
        data
            Decoder input.
        enc_out
            Output of the encoder, attended to by the second attention layer.
        mask
            Optional additive mask on the self-attention scores (e.g. a
            causal mask during training).
        is_train
            When False, the self-attention key/value cache is used and
            updated so the decoder can be called one step at a time.
        """
        # embedding
        inputs = self.enc_input_layer(data)

        # self-attention; at inference time pass a copy of the cache so the
        # stored state is only replaced after the call succeeds
        data_att, cache = self.dec_self_att(
            self.dec_pre_self_att(inputs, None),
            mask,
            self.cache.copy() if not is_train else None,
        )
        data = self.dec_post_self_att(data_att, inputs)

        # encoder attention
        data_att = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(data_att, data)

        # feed-forward
        data_ff = self.dec_ff(data)
        data = self.dec_post_ff(data_ff, data)

        if not is_train:
            # persist the grown key/value cache for the next decoding step
            self.cache = cache.copy()

        return data
| 4,259 | 32.543307 | 118 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/transformer/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
# Added to attention logits to effectively forbid attending to a position.
LARGE_NEGATIVE_VALUE = -99999999


def prod(xs):
    """Return the product of the elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class TransformerNetwork(mx.gluon.HybridBlock):
    """
    Components shared by the training and prediction transformer networks:
    encoder, decoder, static-feature embedder, target scaler, and the
    projection onto the output distribution's parameters.
    """

    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output

        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE(review): sorts the caller's list in place — a side effect on
        # the argument; confirm no caller relies on the original order.
        lags_seq.sort()

        self.lags_seq = lags_seq

        # () for univariate targets; non-empty for multivariate outputs
        self.target_shape = distr_output.event_shape

        with self.name_scope():
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            # one embedding per categorical feature, all of the same size
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.

        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.

        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)

        # one slice per lag index; each slice covers the last
        # `subsequences_length` steps shifted back by `lag_index`
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            # end=None is needed for lag 0: a slice ending at -0 is empty
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.

        All tensor arguments should have NTC layout.

        Returns a triple of (network inputs, scale, static features); the
        scale comes from the scaler applied to the last `context_length`
        steps of `past_target`.
        """
        # at prediction time no future information is available, so the
        # inputs only cover the context window
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training: cover context + prediction range
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )

        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )

        embedded_cat = self.embedder(feat_static_cat)

        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )

        # broadcast the static features along the time axis
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))

        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )

        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)

        return inputs, scale, static_feat

    @staticmethod
    def upper_triangular_mask(F, d):
        # Build a (d, d) additive mask: LARGE_NEGATIVE_VALUE strictly above
        # the diagonal, 0 elsewhere. Added to attention logits it prevents a
        # position from attending to later positions.
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE

    def hybrid_forward(self, F, x, *args, **kwargs):
        # implemented by the training/prediction subclasses
        raise NotImplementedError
class TransformerTrainingNetwork(TransformerNetwork):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Compute the training loss of the Transformer. All tensors
        representing time series have NTC layout.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)

        Returns
        -------
        Loss with shape (batch_size, context + prediction_length, 1)
        """
        # assemble inputs covering the context + prediction range
        net_input, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )

        # the first `context_length` steps feed the encoder; the remaining
        # `prediction_length` steps feed the decoder
        enc_input = F.slice_axis(
            net_input, axis=1, begin=0, end=self.context_length
        )
        dec_input = F.slice_axis(
            net_input, axis=1, begin=self.context_length, end=None
        )

        enc_out = self.encoder(enc_input)

        # the causal mask keeps each decoder position from attending to
        # positions after it
        causal_mask = self.upper_triangular_mask(F, self.prediction_length)
        dec_output = self.decoder(dec_input, enc_out, causal_mask)

        # project decoder outputs to distribution parameters and score the
        # future target under the resulting distribution
        distr_args = self.proj_dist_args(dec_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)

        return distr.loss(future_target).mean()
class TransformerPredictionNetwork(TransformerNetwork):
    """
    Prediction-time network: encodes the context once, then decodes
    autoregressively, drawing `num_parallel_samples` sample paths per series.
    """

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        # num_parallel_samples: number of sample paths drawn per time series
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples

        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the decoder one step at a time,
        feeding each drawn sample back as input for the next step.

        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch
            (produced by the scaler with keepdims=True).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)

        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )

        future_samples = []

        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # lags are taken from the (growing) history, which already
            # includes the samples drawn at earlier steps
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )

            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )

            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )

            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )

            # is_train=False: the decoder uses and extends its key/value cache
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)

            distr_args = self.proj_dist_args(dec_output)

            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )

            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()

            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)

        # reset cache of the decoder
        self.decoder.cache_reset()

        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)

        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)

        Returns predicted samples
        -------
        """
        # create the inputs for the encoder; no future information is passed
        # at prediction time
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )

        # pass through encoder
        enc_out = self.encoder(inputs)

        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 16,590 | 33.564583 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/transformer/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from gluonts.model.transformer._network import (
TransformerPredictionNetwork,
TransformerTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerEstimator(GluonEstimator):
"""
Construct a Transformer estimator.
This implements a Transformer model, close to the one described in
[Vaswani2017]_.
.. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
Advances in neural information processing systems. 2017.
Parameters
----------
freq
Frequency of the data to train on and predict
prediction_length
Length of the prediction horizon
context_length
Number of steps to unroll the RNN for before computing predictions
(default: None, in which case context_length = prediction_length)
trainer
Trainer object to be used (default: Trainer())
dropout_rate
Dropout regularization parameter (default: 0.1)
cardinality
        Number of values of each categorical feature (default: [1])
embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 20)
distr_output
Distribution to use to evaluate observations and sample predictions
(default: StudentTOutput())
model_dim
Dimension of the transformer network, i.e., embedding dimension of the input
(default: 32)
inner_ff_dim_scale
Dimension scale of the inner hidden layer of the transformer's
feedforward network (default: 4)
pre_seq
        Sequence that defines operations of the processing block before the main transformer
network. Available operations: 'd' for dropout, 'r' for residual connections
and 'n' for normalization (default: 'dn')
    post_seq
        Sequence that defines operations of the processing block in and after the main
transformer network. Available operations: 'd' for dropout, 'r' for residual connections
and 'n' for normalization (default: 'drn').
act_type
Activation type of the transformer network (default: 'softrelu')
num_heads
Number of heads in the multi-head attention (default: 8)
scaling
Whether to automatically scale the target values (default: true)
lags_seq
Indices of the lagged target values to use as inputs of the RNN
(default: None, in which case these are automatically determined
based on freq)
time_features
Time features to use as inputs of the RNN (default: None, in which
case these are automatically determined based on freq)
num_parallel_samples
Number of evaluation samples per time series to increase parallelism during inference.
This is a model optimization that does not affect the accuracy (default: 100)
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
context_length: Optional[int] = None,
trainer: Trainer = Trainer(),
dropout_rate: float = 0.1,
cardinality: Optional[List[int]] = None,
embedding_dimension: int = 20,
distr_output: DistributionOutput = StudentTOutput(),
model_dim: int = 32,
inner_ff_dim_scale: int = 4,
pre_seq: str = "dn",
post_seq: str = "drn",
act_type: str = "softrelu",
num_heads: int = 8,
scaling: bool = True,
lags_seq: Optional[List[int]] = None,
time_features: Optional[List[TimeFeature]] = None,
use_feat_dynamic_real: bool = False,
use_feat_static_cat: bool = False,
num_parallel_samples: int = 100,
) -> None:
super().__init__(trainer=trainer)
assert (
prediction_length > 0
), "The value of `prediction_length` should be > 0"
assert (
context_length is None or context_length > 0
), "The value of `context_length` should be > 0"
assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
assert (
cardinality is not None or not use_feat_static_cat
), "You must set `cardinality` if `use_feat_static_cat=True`"
assert cardinality is None or all(
[c > 0 for c in cardinality]
), "Elements of `cardinality` should be > 0"
assert (
embedding_dimension > 0
), "The value of `embedding_dimension` should be > 0"
assert (
num_parallel_samples > 0
), "The value of `num_parallel_samples` should be > 0"
self.freq = freq
self.prediction_length = prediction_length
self.context_length = (
context_length if context_length is not None else prediction_length
)
self.distr_output = distr_output
self.dropout_rate = dropout_rate
self.use_feat_dynamic_real = use_feat_dynamic_real
self.use_feat_static_cat = use_feat_static_cat
self.cardinality = cardinality if use_feat_static_cat else [1]
self.embedding_dimension = embedding_dimension
self.num_parallel_samples = num_parallel_samples
self.lags_seq = (
lags_seq
if lags_seq is not None
else get_lags_for_frequency(freq_str=freq)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.history_length = self.context_length + max(self.lags_seq)
self.scaling = scaling
self.config = {
"model_dim": model_dim,
"pre_seq": pre_seq,
"post_seq": post_seq,
"dropout_rate": dropout_rate,
"inner_ff_dim_scale": inner_ff_dim_scale,
"act_type": act_type,
"num_heads": num_heads,
}
self.encoder = TransformerEncoder(
self.context_length, self.config, prefix="enc_"
)
self.decoder = TransformerDecoder(
self.prediction_length, self.config, prefix="dec_"
)
def create_transformation(self) -> Transformation:
remove_field_names = [
FieldName.FEAT_DYNAMIC_CAT,
FieldName.FEAT_STATIC_REAL,
]
if not self.use_feat_dynamic_real:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
return Chain(
[RemoveFields(field_names=remove_field_names)]
+ (
[SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
if not self.use_feat_static_cat
else []
)
+ [
AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
AsNumpyArray(
field=FieldName.TARGET,
# in the following line, we add 1 for the time dimension
expected_ndim=1 + len(self.distr_output.event_shape),
),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
),
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=self.time_features,
pred_length=self.prediction_length,
),
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
log_scale=True,
),
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
+ (
[FieldName.FEAT_DYNAMIC_REAL]
if self.use_feat_dynamic_real
else []
),
),
InstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
train_sampler=ExpectedNumInstanceSampler(num_instances=1),
past_length=self.history_length,
future_length=self.prediction_length,
time_series_fields=[
FieldName.FEAT_TIME,
FieldName.OBSERVED_VALUES,
],
),
]
)
def create_training_network(self) -> TransformerTrainingNetwork:
training_network = TransformerTrainingNetwork(
encoder=self.encoder,
decoder=self.decoder,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=True,
)
return training_network
def create_predictor(
self, transformation: Transformation, trained_network: HybridBlock
) -> Predictor:
prediction_network = TransformerPredictionNetwork(
encoder=self.encoder,
decoder=self.decoder,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=True,
num_parallel_samples=self.num_parallel_samples,
)
copy_parameters(trained_network, prediction_network)
return RepresentableBlockPredictor(
input_transform=transformation,
prediction_net=prediction_network,
batch_size=self.trainer.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
)
| 12,230 | 37.583596 | 100 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/gp_forecaster/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.distribution.distribution import softplus
from gluonts.gp import GaussianProcess
from gluonts.kernels import KernelOutputDict
from gluonts.model.common import Tensor
class GaussianProcessNetworkBase(mx.gluon.HybridBlock):
    """
    Defines a Gluon block used for GP training and predictions.
    """
    # The two subclasses GaussianProcessTrainingNetwork and
    # GaussianProcessPredictionNetwork define how to
    # compute the loss and how to generate predictions, respectively.
    @validated()
    def __init__(
        self,
        prediction_length: int,
        context_length: int,
        cardinality: int,
        kernel_output: KernelOutputDict,
        params_scaling: bool,
        ctx: mx.Context,
        float_type: DType,
        max_iter_jitter: int,
        jitter_method: str,
        **kwargs,
    ) -> None:
        """
        Parameters
        ----------
        prediction_length
            Prediction length.
        context_length
            Training length.
        cardinality
            Number of time series.
        kernel_output
            KernelOutput instance to determine which kernel subclass to be instantiated.
        params_scaling
            Determines whether or not to scale the model parameters.
        ctx
            Determines whether to compute on the cpu or gpu.
        float_type
            Determines whether to use single or double precision.
        max_iter_jitter
            Maximum number of iterations for jitter to iteratively make the matrix positive definite.
        jitter_method
            Iteratively jitter method or use eigenvalue decomposition depending on problem size.
        **kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(**kwargs)
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.kernel_output = kernel_output
        self.params_scaling = params_scaling
        self.float_type = float_type
        self.ctx = ctx
        self.max_iter_jitter = max_iter_jitter
        self.jitter_method = jitter_method
        with self.name_scope():
            # Projection block mapping the raw embedding output to valid
            # kernel arguments.
            self.proj_kernel_args = kernel_output.get_args_proj(
                self.float_type
            )
            self.num_hyperparams = kernel_output.get_num_args()
            # Per-series lookup table: each series index maps to its kernel
            # hyper-parameters plus the noise sigma.
            self.embedding = mx.gluon.nn.Embedding(
                # Noise sigma is additional parameter so add 1 to output dim
                input_dim=self.cardinality,
                output_dim=self.num_hyperparams + 1,
                dtype=self.float_type,
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def get_gp_params(
        self,
        F,
        past_target: Tensor,
        past_time_feat: Tensor,
        feat_static_cat: Tensor,
    ) -> Tuple:
        """
        This function returns the GP hyper-parameters for the model.
        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        past_target
            Training time series values of shape (batch_size, context_length).
        past_time_feat
            Training features of shape (batch_size, context_length, num_features).
        feat_static_cat
            Time series indices of shape (batch_size, 1).
        Returns
        -------
        Tuple
            Tuple of kernel hyper-parameters of length num_hyperparams.
            Each is a Tensor of shape (batch_size, 1, 1).
            Model noise sigma.
            Tensor of shape (batch_size, 1, 1).
        """
        # NOTE(review): squeeze() drops every size-1 axis, so a batch of
        # size 1 yields a scalar index here -- confirm callers never hit
        # that case.
        output = self.embedding(
            feat_static_cat.squeeze()
        )  # Shape (batch_size, num_hyperparams + 1)
        kernel_args = self.proj_kernel_args(output)
        # Softplus keeps the noise level strictly positive.
        sigma = softplus(
            F,
            output.slice_axis(  # sigma is the last hyper-parameter
                axis=1,
                begin=self.num_hyperparams,
                end=self.num_hyperparams + 1,
            ),
        )
        if self.params_scaling:
            scalings = self.kernel_output.gp_params_scaling(
                F, past_target, past_time_feat
            )
            sigma = F.broadcast_mul(sigma, scalings[self.num_hyperparams])
            # Generator expression: evaluated lazily when consumed below.
            kernel_args = (
                F.broadcast_mul(kernel_arg, scaling)
                for kernel_arg, scaling in zip(
                    kernel_args, scalings[0 : self.num_hyperparams]
                )
            )
        # Clip all hyper-parameters to a sane range for numerical
        # stability before they enter the GP computations.
        min_value = 1e-5
        max_value = 1e8
        kernel_args = (
            kernel_arg.clip(min_value, max_value).expand_dims(axis=2)
            for kernel_arg in kernel_args
        )
        sigma = sigma.clip(min_value, max_value).expand_dims(axis=2)
        return kernel_args, sigma
class GaussianProcessTrainingNetwork(GaussianProcessNetworkBase):
    """Training-time GP network: evaluates the log-probability of the
    observed targets under the per-series Gaussian Process, which serves
    as the training loss."""
    # noinspection PyMethodOverriding,PyPep8Naming
    @validated()
    def __init__(self, *args, **kwargs) -> None:
        """Forward all positional and keyword arguments to the base class."""
        super().__init__(*args, **kwargs)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        past_target: Tensor,
        past_time_feat: Tensor,
        feat_static_cat: Tensor,
    ) -> Tensor:
        """
        Compute the GP loss for a batch of series.

        Parameters
        ----------
        F
            Symbol or NDArray API module.
        past_target
            Training time series values of shape (batch_size, context_length).
        past_time_feat
            Training features of shape (batch_size, context_length, num_features).
        feat_static_cat
            Time series indices of shape (batch_size, 1).
        Returns
        -------
        Tensor
            GP loss of shape (batch_size, 1)
        """
        hyperparams, noise_sigma = self.get_gp_params(
            F, past_target, past_time_feat, feat_static_cat
        )
        process = GaussianProcess(
            sigma=noise_sigma,
            kernel=self.kernel_output.kernel(hyperparams),
            context_length=self.context_length,
            ctx=self.ctx,
            float_type=self.float_type,
            max_iter_jitter=self.max_iter_jitter,
            jitter_method=self.jitter_method,
        )
        return process.log_prob(past_time_feat, past_target)
class GaussianProcessPredictionNetwork(GaussianProcessNetworkBase):
    """Prediction-time GP network: draws posterior predictive samples."""
    @validated()
    def __init__(
        self, num_parallel_samples: int, sample_noise: bool, *args, **kwargs
    ) -> None:
        r"""
        Parameters
        ----------
        num_parallel_samples
            Number of samples to be drawn.
        sample_noise
            Boolean to determine whether to add :math:`\sigma^2I` to the
            predictive covariance matrix.
        *args
            Variable length argument list.
        **kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(*args, **kwargs)
        self.sample_noise = sample_noise
        self.num_parallel_samples = num_parallel_samples
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        past_target: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        feat_static_cat: Tensor,
    ) -> Tensor:
        """
        Draw forecast samples from the fitted per-series GPs.

        Parameters
        ----------
        F
            Symbol or NDArray API module.
        past_target
            Training time series values of shape (batch_size, context_length).
        past_time_feat
            Training features of shape (batch_size, context_length, num_features).
        future_time_feat
            Test features of shape (batch_size, prediction_length, num_features).
        feat_static_cat
            Time series indices of shape (batch_size, 1).
        Returns
        -------
        Tensor
            GP samples of shape (batch_size, num_samples, prediction_length).
        """
        hyperparams, noise_sigma = self.get_gp_params(
            F, past_target, past_time_feat, feat_static_cat
        )
        predictive_gp = GaussianProcess(
            sigma=noise_sigma,
            kernel=self.kernel_output.kernel(hyperparams),
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            num_samples=self.num_parallel_samples,
            ctx=self.ctx,
            float_type=self.float_type,
            max_iter_jitter=self.max_iter_jitter,
            jitter_method=self.jitter_method,
            sample_noise=self.sample_noise,
        )
        # Only the samples are needed here; the other two outputs of
        # exact_inference are discarded.
        drawn, _, _ = predictive_gp.exact_inference(
            past_time_feat, past_target, future_time_feat
        )  # Shape (batch_size, prediction_length, num_samples)
        return drawn.swapaxes(1, 2)
| 9,726 | 33.01049 | 102 | py |
rankpredictor | rankpredictor-master/sub/gluonts/model/gp_forecaster/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
import numpy as np
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.kernels import KernelOutput, RBFKernelOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import TimeFeature, time_features_from_frequency_str
from gluonts.trainer import Trainer
from gluonts.transform import (
AddTimeFeatures,
AsNumpyArray,
CanonicalInstanceSplitter,
Chain,
SetFieldIfNotPresent,
TestSplitSampler,
Transformation,
)
# Relative imports
from ._network import (
GaussianProcessPredictionNetwork,
GaussianProcessTrainingNetwork,
)
class GaussianProcessEstimator(GluonEstimator):
    r"""
    GaussianProcessEstimator shows how to build a local time series model using
    Gaussian Processes (GP).
    Each time series has a GP with its own
    hyper-parameters. For the radial basis function (RBF) Kernel, the
    learnable hyper-parameters are the amplitude and lengthscale. The periodic
    kernel has those hyper-parameters with an additional learnable frequency
    parameter. The RBFKernel is the default, but either kernel can be used by
    inputting the desired KernelOutput object. The noise sigma in the model is
    another learnable hyper-parameter for both kernels. These parameters are
    fit using an Embedding of the integer time series indices (each time series
    has its set of hyper-parameter that is static in time). The observations
    are the time series values. In this model, the time features are hour of
    the day and day of the week.
    Parameters
    ----------
    freq
        Time series frequency.
    prediction_length
        Prediction length.
    cardinality
        Number of time series.
    trainer
        Trainer instance to be used for model training (default: Trainer()).
    context_length
        Training length (default: None, in which case context_length = prediction_length).
    kernel_output
        KernelOutput instance to determine which kernel subclass to be
        instantiated (default: RBFKernelOutput()).
    params_scaling
        Determines whether or not to scale the model parameters (default: True).
    dtype
        Determines whether to use single or double precision (default: np.float64).
    max_iter_jitter
        Maximum number of iterations for jitter to iteratively make the matrix positive definite (default: 10).
    jitter_method
        Iteratively jitter method or use eigenvalue decomposition depending on problem size (default: "iter").
    sample_noise
        Boolean to determine whether to add :math:`\sigma^2I` to the predictive covariance matrix (default: True).
    time_features
        Time features to use as inputs of the model (default: None, in which
        case these are automatically determined based on the frequency).
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100).
    """
    # NOTE(review): the `Trainer()` and `RBFKernelOutput()` defaults are
    # evaluated once at import time and shared across estimator instances;
    # confirm both are stateless enough for that to be safe.
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        kernel_output: KernelOutput = RBFKernelOutput(),
        params_scaling: bool = True,
        dtype: DType = np.float64,
        max_iter_jitter: int = 10,
        jitter_method: str = "iter",
        sample_noise: bool = True,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
    ) -> None:
        self.float_type = dtype
        super().__init__(trainer=trainer, dtype=self.float_type)
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert cardinality > 0, "The value of `cardinality` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.prediction_length = prediction_length
        # Fall back to the prediction length when no training length is given.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.cardinality = cardinality
        self.kernel_output = kernel_output
        self.params_scaling = params_scaling
        self.max_iter_jitter = max_iter_jitter
        self.jitter_method = jitter_method
        self.sample_noise = sample_noise
        # Derive default time features from the frequency string if needed.
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> Transformation:
        """Build the preprocessing chain: array coercion, time features,
        a default static-cat field, and canonical instance splitting."""
        return Chain(
            [
                AsNumpyArray(field=FieldName.TARGET, expected_ndim=1),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                SetFieldIfNotPresent(
                    field=FieldName.FEAT_STATIC_CAT, value=[0.0]
                ),
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                CanonicalInstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    instance_sampler=TestSplitSampler(),
                    time_series_fields=[FieldName.FEAT_TIME],
                    instance_length=self.context_length,
                    use_prediction_features=True,
                    prediction_length=self.prediction_length,
                ),
            ]
        )
    def create_training_network(self) -> HybridBlock:
        """Create the network that evaluates the GP training loss."""
        return GaussianProcessTrainingNetwork(
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            cardinality=self.cardinality,
            kernel_output=self.kernel_output,
            params_scaling=self.params_scaling,
            ctx=self.trainer.ctx,
            float_type=self.float_type,
            max_iter_jitter=self.max_iter_jitter,
            jitter_method=self.jitter_method,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Wrap a prediction network (sharing the trained parameters) in a
        ``RepresentableBlockPredictor``."""
        # Parameters are shared at construction via `params=...` and copied
        # again explicitly below.
        prediction_network = GaussianProcessPredictionNetwork(
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            cardinality=self.cardinality,
            num_parallel_samples=self.num_parallel_samples,
            params=trained_network.collect_params(),
            kernel_output=self.kernel_output,
            params_scaling=self.params_scaling,
            ctx=self.trainer.ctx,
            float_type=self.float_type,
            max_iter_jitter=self.max_iter_jitter,
            jitter_method=self.jitter_method,
            sample_noise=self.sample_noise,
        )
        copy_parameters(
            net_source=trained_network, net_dest=prediction_network
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.float_type,
        )
| 8,691 | 38.689498 | 114 | py |
rankpredictor | rankpredictor-master/sub/gluonts/kernels/_kernel_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Tuple
import numpy as np
from mxnet import gluon
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.distribution.distribution_output import ArgProj
from gluonts.model.common import Tensor
# Relative imports
from . import Kernel
class KernelOutput:
    """
    Base interface connecting a network's outputs to a GP kernel.
    """
    def get_args_proj(self, float_type: DType) -> gluon.HybridBlock:
        """Return a block projecting network outputs to kernel arguments."""
        raise NotImplementedError()
    def kernel(self, args) -> Kernel:
        """Instantiate a kernel from projected arguments."""
        raise NotImplementedError()
    # noinspection PyMethodOverriding,PyPep8Naming
    @staticmethod
    def compute_std(F, data: Tensor, axis: int) -> Tensor:
        """
        Compute the standard deviation of the data along a given axis.
        Parameters
        ----------
        F : ModuleType
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        data : Tensor
            Data to be used to compute the standard deviation.
        axis : int
            Axis along which to compute the standard deviation.
        Returns
        -------
        Tensor
            The standard deviation of the given data.
        """
        # sqrt(mean((x - mean(x))^2)) along the requested axis.
        axis_mean = F.mean(data, axis=axis).expand_dims(axis=axis)
        squared_dev = F.broadcast_minus(data, axis_mean) ** 2
        return F.sqrt(F.mean(squared_dev, axis=axis))
class KernelOutputDict(KernelOutput):
    # Mapping from kernel-argument name to its dimension; set by subclasses.
    args_dim: Dict[str, int]
    # Concrete Kernel subclass instantiated by `kernel()`; set by subclasses.
    kernel_cls: type
    @validated()
    def __init__(self) -> None:
        pass
    def get_num_args(self) -> int:
        """Return the number of kernel arguments (one per args_dim entry)."""
        return len(self.args_dim)
    def get_args_proj(self, float_type: DType = np.float32) -> ArgProj:
        """
        This method calls the ArgProj block in distribution_output to project
        from a dense layer to kernel arguments.
        Parameters
        ----------
        float_type : DType
            Determines whether to use single or double precision.
        Returns
        -------
        ArgProj
        """
        return ArgProj(
            args_dim=self.args_dim,
            # domain_map constrains the raw outputs to the kernel's domain.
            domain_map=gluon.nn.HybridLambda(self.domain_map),
            dtype=float_type,
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def gp_params_scaling(
        self, F, past_target: Tensor, past_time_feat: Tensor
    ) -> Tuple[Tensor, Tensor, Tensor]:
        # Subclass hook: per-hyper-parameter scaling factors.
        raise NotImplementedError()
    # noinspection PyMethodOverriding,PyPep8Naming
    def domain_map(self, F, *args: Tensor):
        # Subclass hook: map unconstrained outputs into valid ranges.
        raise NotImplementedError()
    def kernel(self, kernel_args) -> Kernel:
        """
        Parameters
        ----------
        kernel_args
            Variable length argument list.
        Returns
        -------
        Kernel
            Instantiated specified Kernel subclass object.
        """
        return self.kernel_cls(*kernel_args)
| 3,541 | 27.111111 | 79 | py |
malware-uncertainty | malware-uncertainty-master/tools/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
import os
import sys
import shutil
import hashlib
import warnings
import functools
from collections import namedtuple, defaultdict
class ParamWrapper(object):
    """Attribute-style, read-only view over a parameter mapping.

    Accepts either a plain dict or an object exposing ``__dict__`` (e.g.
    an argparse namespace). Missing parameters resolve to ``None`` with a
    deprecation warning instead of raising ``AttributeError``.
    """
    def __init__(self, params):
        # Normalize namespace-like objects to a plain dict.
        self.params = params if isinstance(params, dict) else vars(params)

    def __getattr__(self, name):
        # Only invoked for attributes not found through normal lookup.
        value = self.params.get(name)
        if value is None:
            MSG = "Setting params ({}) is deprecated"
            warnings.warn(MSG.format(name))
        return value
def retrive_files_set(base_dir, dir_ext, file_ext):
    """
    get file paths given the directory
    :param base_dir: basic directory
    :param dir_ext: directory append at the rear of base_dir
    :param file_ext: file extension(s), '|'-separated; None matches files
        without an extension
    :return: list of file paths with duplicates removed, order preserved
    """
    def _iter_matching(root_dir, ext):
        # Walk the tree and yield files whose extension matches `ext`,
        # tolerating extensions given with or without the leading dot.
        for dir_path, _dir_names, file_names in os.walk(root_dir, topdown=True):
            for fname in file_names:
                actual_ext = os.path.splitext(fname)[1]
                if actual_ext == ext:
                    yield os.path.join(dir_path, fname)
                elif '.' not in ext and actual_ext == '.' + ext:
                    yield os.path.join(dir_path, fname)

    exts = file_ext.split("|") if file_ext is not None else ['']
    root = os.path.join(base_dir, dir_ext)
    collected = []
    for ext in exts:
        collected.extend(_iter_matching(root, ext))
    # remove duplicate elements while keeping first-seen order
    from collections import OrderedDict
    return list(OrderedDict.fromkeys(collected))
def get_file_name(path):
    """Return the base name of *path* with its extension stripped."""
    leaf = os.path.basename(path)
    stem, _ext = os.path.splitext(leaf)
    return stem
def get_file_nameext(path):
    """Return the final component of *path*, extension included."""
    _head, leaf = os.path.split(path)
    return leaf
def dump_pickle(data, path):
    """Serialize *data* to *path* with pickle, creating parent dirs.

    Returns
    -------
    bool
        True on success (kept for backward compatibility with callers
        that check the return value).
    """
    import pickle as pkl
    parent = os.path.dirname(path)
    # Fix: guard against an empty dirname (a path in the current
    # directory), which previously crashed the directory-creation step.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, 'wb') as wr:
        pkl.dump(data, wr)
    return True
def read_pickle(path):
    """Deserialize and return the pickled object stored at *path*.

    Raises
    ------
    IOError
        If *path* does not refer to an existing regular file.
    """
    import pickle as pkl
    if not os.path.isfile(path):
        # Fix: previous message had broken grammar ("is not been found").
        raise IOError("The {0} cannot be found.".format(path))
    with open(path, 'rb') as fr:
        return pkl.load(fr)
def dump_joblib(data, path):
    """Serialize *data* to *path* with joblib, creating parent dirs.

    Raises
    ------
    IOError
        If writing fails (original cause is chained).
    """
    parent = os.path.dirname(path)
    # Fix: guard against an empty dirname (path in the current directory),
    # which previously crashed the directory-creation step.
    if parent:
        os.makedirs(parent, exist_ok=True)
    try:
        import joblib
        with open(path, 'wb') as wr:
            joblib.dump(data, wr)
        return
    except IOError as e:
        # Chain the original error so the root cause is not lost.
        raise IOError("Dump data failed.") from e
def read_joblib(path):
    """Load and return a joblib-serialized object stored at *path*."""
    import joblib
    if not os.path.isfile(path):
        raise IOError("The {0} is not a file.".format(path))
    with open(path, 'rb') as fr:
        return joblib.load(fr)
def read_txt(path, mode='r'):
    """Read the file at *path* and return its lines as a list.

    Leading/trailing whitespace of the whole content is stripped before
    splitting into lines.
    """
    if not os.path.isfile(path):
        raise ValueError("{} does not seen like a file path.\n".format(path))
    with open(path, mode) as f_r:
        return f_r.read().strip().splitlines()
def dump_txt(data_str, path, mode='w'):
    """Write the string *data_str* to *path* using *mode* ('w' default)."""
    if not isinstance(data_str, str):
        # Only plain strings are accepted.
        raise TypeError
    with open(path, mode) as sink:
        sink.write(data_str)
def readdata_np(data_path):
    """Load and return a numpy array previously saved with ``np.save``."""
    try:
        with open(data_path, 'rb') as f_r:
            return np.load(f_r)
    except IOError as e:
        raise IOError("Unable to open {0}: {1}.\n".format(data_path, str(e)))
def dumpdata_np(data, data_path):
    """Save *data* to *data_path* with ``np.save``, creating parent dirs.

    Non-ndarray inputs trigger a warning but are still written
    (``np.save`` coerces them).
    """
    if not isinstance(data, np.ndarray):
        warnings.warn("The array is not the numpy.ndarray type.")
    data_dir = os.path.dirname(data_path)
    try:
        # Fix: only create the directory when the path actually has one;
        # os.makedirs('') raises for paths in the current directory.
        if data_dir and not os.path.exists(data_dir):
            os.makedirs(data_dir)
        with open(data_path, 'wb') as f_s:
            np.save(f_s, data)
    except OSError as e:
        # Fix: stderr.write requires a string; the original passed the
        # exception object, raising TypeError inside the handler.
        sys.stderr.write(str(e))
def safe_load_json(json_path):
    """Parse the file at *json_path* with ``yaml.safe_load``.

    YAML is a superset of JSON, so this tolerates slightly malformed
    JSON-ish files; returns whatever structure the file contains.
    """
    try:
        import yaml
        with open(json_path, 'r') as rh:
            return yaml.safe_load(rh)
    except IOError as ex:
        raise IOError(str(ex) + ": Unable to load json file.")
def load_json(json_path):
    """Parse and return the JSON document stored at *json_path*."""
    import json
    try:
        with open(json_path, 'r') as rh:
            return json.load(rh)
    except IOError as ex:
        raise IOError(str(ex) + ": Unable to load json file.")
def dump_json(obj_dict, file_path):
    """Serialize *obj_dict* as JSON to *file_path*, creating parent dirs."""
    import json
    try:
        parent = os.path.dirname(file_path)
        # Fix: skip directory creation for paths in the current directory
        # (empty dirname), which previously failed.
        if parent and not os.path.exists(parent):
            os.makedirs(parent)
        with open(file_path, 'w') as fh:
            json.dump(obj_dict, fh)
    except IOError as ex:
        raise IOError(str(ex) + ": Fail to dump dict using json toolbox")
def mkdir(target):
    """Ensure the directory *target* exists (for a file path, its parent).

    Returns
    -------
    int
        0 on success (kept for backward compatibility).

    Raises
    ------
    Exception
        If the directory cannot be created.
    """
    try:
        if os.path.isfile(target):
            target = os.path.dirname(target)
        # exist_ok removes the check-then-create race of the original;
        # the emptiness guard avoids os.makedirs('') crashing.
        if target:
            os.makedirs(target, exist_ok=True)
        return 0
    except OSError as e:
        raise Exception("Fail to create directory! Error:" + str(e))
def copy_files(src_file_list, dst_dir):
    """Copy every existing file in *src_file_list* into directory *dst_dir*.

    Entries that are not regular files are silently skipped.

    Raises
    ------
    TypeError
        If *src_file_list* is not a list.
    ValueError
        If *dst_dir* is not an existing directory.
    """
    if not isinstance(src_file_list, list):
        raise TypeError
    # Fix: the original raised when dst_dir WAS a directory (inverted
    # test), making every valid call fail.
    if not os.path.isdir(dst_dir):
        raise ValueError
    for src in src_file_list:
        if not os.path.isfile(src):
            continue
        shutil.copy(src, dst_dir)
def get_sha256(file_path):
    """Return the hexadecimal SHA-256 digest of the file at *file_path*."""
    assert os.path.isfile(file_path), 'permit only file path'
    sha256 = hashlib.sha256()
    # 'with' guarantees the handle is closed even if read() raises; the
    # original leaked the handle on error.
    with open(file_path, 'rb') as fh:
        while True:
            data = fh.read(8192)
            if not data:
                break
            sha256.update(data)
    return sha256.hexdigest()
def merge_namedtuples(tp1, tp2):
    """Concatenate two namedtuple instances into a new 'tp12' namedtuple."""
    from collections import namedtuple
    combined_type = namedtuple('tp12', tp1._fields + tp2._fields)
    return combined_type(*(tuple(tp1) + tuple(tp2)))
def expformat(f, pos, prec=0, exp_digits=1, sign='off'):
    """Scientific-format a number with a fixed number of exponent digits.

    :param f: value to format
    :param pos: unused by the implementation (kept for tick-formatter
        style callbacks -- TODO confirm)
    :param prec: mantissa precision
    :param exp_digits: minimum number of digits in the exponent
    :param sign: 'on' keeps the exponent's +/- sign, otherwise '+' is
        dropped
    """
    mantissa, _, exponent = ("%.*e" % (prec, f)).partition('e')
    exp_value = int(exponent)
    if sign == 'on':
        # width gains 1 because one position is taken by the +/- sign
        return "%se%+0*d" % (mantissa, exp_digits + 1, exp_value)
    return "%se%0*d" % (mantissa, exp_digits, exp_value)
def bootstrap(data, fun, n_resamples=1000, alpha=0.05, seed=0):
    """Bootstrap confidence interval for the statistic *fun* over *data*.

    Parameters
    ----------
    data : list
        List of equally long arrays; *fun* receives one resampled row
        from each array per evaluation.
    fun : callable
        Statistic evaluated on each resample.
    n_resamples : int
        Number of bootstrap resamples.
    alpha : float
        Two-sided significance level (0.05 -> 95% interval).
    seed : int
        RNG seed; results are deterministic for a fixed seed.

    Returns
    -------
    tuple
        ``(low, high, mean)`` 'stat' namedtuples with fields ``value``
        and ``index``; ``mean.index`` is ``None``.
    """
    assert isinstance(data, list)
    n_samples = len(data[0])
    np.random.seed(seed)
    resample_idx = np.random.randint(0, n_samples, (n_resamples, n_samples))

    def _resampled(rows):
        # Index every data array with the same resample matrix.
        return [d[rows] for d in data]

    def _statistics(rows):
        picked = _resampled(rows)
        per_resample = [fun(*elems) for elems in zip(*picked)]
        return np.stack(per_resample, axis=0)

    values = _statistics(resample_idx)
    # Stable sort keeps resample indices aligned with their statistics.
    resample_idx = resample_idx[np.argsort(values, axis=0, kind='mergesort')]
    values = np.sort(values, axis=0, kind='mergesort')
    stat = namedtuple('stat', ['value', 'index'])
    lo_pos = int((alpha / 2.0) * n_resamples)
    hi_pos = int((1 - alpha / 2.0) * n_resamples)
    low = stat(value=values[lo_pos], index=resample_idx[lo_pos])
    high = stat(value=values[hi_pos], index=resample_idx[hi_pos])
    mean = stat(value=np.mean(values, axis=0), index=None)
    return low, high, mean
########################################################################################
############################# functions for tf models ##################################
########################################################################################
# Supported ensembling strategies; `produce_layer` below keys off these names.
ensemble_method_scope = ['vanilla', 'mc_dropout', 'deep_ensemble', 'weighted_ensemble', 'bayesian']
class DenseDropout(tf.keras.layers.Layer):
    """Dense layer followed by dropout, with dropout optionally kept active
    at inference time (used for the 'mc_dropout' ensemble method)."""
    def __init__(self, units,
                 dropout_rate,
                 activation=None,
                 use_dropout=True,
                 **kwargs):
        """
        Initialize a dense-dropout layer
        :param units: number of neurons
        :param dropout_rate: a float value between 0 and 1. A portion of activations will be dropped randomly
        :param activation: activation function
        :param use_dropout: performing dropout in both training and testing phases
        :param kwargs: other arguments for tf.keras.layers.Dense
        """
        super(DenseDropout, self).__init__()
        self.units = units
        self.activation = activation
        self.dropout_rate = dropout_rate
        self.use_dropout = use_dropout
        self.kwargs = kwargs
        self.dense_layer = tf.keras.layers.Dense(units, activation=self.activation, **self.kwargs)
        self.dropout_layer = tf.keras.layers.Dropout(rate=dropout_rate)
    def call(self, inputs, training=True):
        # The Keras `training` flag is deliberately ignored: dropout
        # activity is controlled by `use_dropout`, so it can stay on at
        # test time as well.
        return self.dropout_layer(self.dense_layer(inputs), training=self.use_dropout)
class Conv2DDropout(tf.keras.layers.Layer):
    """2D convolution followed by dropout, with dropout optionally kept
    active at inference time."""
    def __init__(self,
                 filters,
                 kernel_size,
                 dropout_rate,
                 activation=None,
                 use_dropout=True,
                 **kwargs):
        """
        Initialize a convolution-dropout layer
        :param filters: Positive integer, number of output channels
        :param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of 2D convolution window
        :param dropout_rate: a float value between 0 and 1. A portion of activations will be dropped randomly
        :param activation: activation function
        :param use_dropout: performing dropout in both training and testing phases
        :param kwargs: other arguments for tf.keras.layers.Conv2D
        """
        super(Conv2DDropout, self).__init__()
        # NOTE(review): `activation` and `kwargs` are forwarded to the
        # Conv2D layer but, unlike DenseDropout, not stored as attributes.
        self.filters = filters
        self.kernel_size = kernel_size
        self.dropout_rate = dropout_rate
        self.use_dropout = use_dropout
        self.conv2d_layer = tf.keras.layers.Conv2D(filters, kernel_size, activation=activation, **kwargs)
        self.dropout_layer = tf.keras.layers.Dropout(rate=dropout_rate)
    def call(self, inputs, training=True):
        # `training` is ignored on purpose; dropout is driven by
        # `use_dropout` so it can remain active at test time.
        return self.dropout_layer(self.conv2d_layer(inputs), training=self.use_dropout)
class LSTMDropout(tf.keras.layers.Layer):
    """LSTM with input dropout that can stay active at inference time."""
    def __init__(self,
                 units,
                 dropout_rate,
                 use_dropout=True,
                 go_backwards=True,
                 return_sequences=True, **kwargs):
        """
        Initialize a LSTM-dropout layer
        :param units: Positive Integer, number of neurons
        :param dropout_rate: a float value between 0 and 1. A portion of activations will be dropped randomly
        :param use_dropout: performing dropout in both training and testing phases
        :param go_backwards: stored and serialized, but see NOTE below
        :param return_sequences: whether to return the full output sequence
        :param kwargs: other arguments for tf.keras.layers.LSTM
        """
        super(LSTMDropout, self).__init__()
        self.units = units
        self.dropout_rate = dropout_rate
        self.use_dropout = use_dropout
        self.go_backwards = go_backwards
        self.return_sequences = return_sequences
        # NOTE(review): `go_backwards` is stored and serialized but never
        # forwarded to the wrapped LSTM; forwarding it now would change the
        # behavior of existing callers (default True), so it is left as-is.
        self.lstm = tf.keras.layers.LSTM(units, dropout=self.dropout_rate, return_sequences=self.return_sequences,
                                         **kwargs)
    def call(self, inputs, training=True):
        # `training` is ignored on purpose; dropout is driven by
        # `use_dropout` so it can remain active at test time.
        return self.lstm(inputs, training=self.use_dropout)
    @property
    def return_state(self):
        """Expose the wrapped LSTM's `return_state` flag."""
        return self.lstm.return_state
    def get_config(self):
        """Return the serializable layer configuration."""
        config = super(LSTMDropout, self).get_config()
        config['dropout_rate'] = self.dropout_rate
        config['units'] = self.units
        config['use_dropout'] = self.use_dropout
        config['go_backwards'] = self.go_backwards
        # Fix: `return_sequences` was missing from the config, so layers
        # rebuilt via `from_config` silently lost that setting.
        config['return_sequences'] = self.return_sequences
        return config
class DropoutDense(tf.keras.layers.Layer):
    """Dropout applied to the *inputs* before a dense layer (the reverse
    order of DenseDropout, which drops the dense layer's outputs)."""
    def __init__(self, units,
                 dropout_rate,
                 activation=None,
                 use_dropout=True,
                 **kwargs):
        """
        Initialize a dropout-dense layer
        :param units: number of neurons
        :param dropout_rate: a float value between 0 and 1. A portion of activations will be dropped randomly
        :param activation: activation function
        :param use_dropout: performing dropout in both training and testing phases
        :param kwargs: other arguments for tf.keras.layers.Dense
        """
        super(DropoutDense, self).__init__()
        self.units = units
        self.activation = activation
        self.dropout_rate = dropout_rate
        self.use_dropout = use_dropout
        self.kwargs = kwargs
        self.dense_layer = tf.keras.layers.Dense(units, activation=self.activation, **self.kwargs)
        self.dropout_layer = tf.keras.layers.Dropout(rate=dropout_rate)
    def call(self, inputs, training=True):
        # `training` is ignored on purpose; dropout is driven by
        # `use_dropout` so it can remain active at test time.
        return self.dense_layer(self.dropout_layer(inputs, training=self.use_dropout))
def dense_dropout(dropout_rate=0.4):
    """Return a DenseDropout factory with `dropout_rate` pre-bound."""
    return functools.partial(DenseDropout, dropout_rate=dropout_rate)
def conv2d_dropout(dropout_rate=0.4):
    """Return a Conv2DDropout factory with `dropout_rate` pre-bound."""
    return functools.partial(Conv2DDropout, dropout_rate=dropout_rate)
def lstm_dropout(dropout_rate=0.4):
    """Return an LSTMDropout factory with `dropout_rate` pre-bound."""
    return functools.partial(LSTMDropout, dropout_rate=dropout_rate)
def dropout_dense(dropout_rate=0.4):
    """Return a DropoutDense factory with `dropout_rate` pre-bound."""
    return functools.partial(DropoutDense, dropout_rate=dropout_rate)
def scaled_reparameterization_layer(tfp_varitional_layer_obj, scale_factor=1. / 10000):
    """Wrap a tfp variational layer class so its KL regularizers are scaled.

    :param tfp_varitional_layer_obj: a tfp.layers variational layer class
    :param scale_factor: multiplier applied to both kernel and bias KL
        divergence terms (presumably 1/num_training_examples -- TODO confirm)
    """
    def scaled_kl_fn(q, p, _):
        # Down-weight the KL term relative to the data likelihood.
        return tfp.distributions.kl_divergence(q, p) * scale_factor
    return functools.partial(tfp_varitional_layer_obj,
                             kernel_divergence_fn=scaled_kl_fn,
                             bias_divergence_fn=scaled_kl_fn)
def customized_reparameterization_dense_layer(scale_factor=1. / 10000):
    """Build a DenseVariational factory with a hand-rolled mean-field posterior.

    :param scale_factor: weight on the KL term (typically 1/dataset_size)
    :return: functools.partial over tfp.layers.DenseVariational
    """
    # code from: https://github.com/tensorflow/probability/issues/409
    # and https://github.com/tensorflow/probability/blob/v0.11.0/tensorflow_probability/python/layers/util.py#L202-L224
    tfd = tfp.distributions

    def _posterior_mean_field(kernel_size, bias_size=0, dtype=None):
        # Fully-factorized Gaussian posterior: the first n variables are the
        # means, the last n parameterize the softplus-transformed scales.
        n = kernel_size + bias_size
        # Shift so that softplus(c + 0) == 1, i.e. initial scale is ~1.
        c = np.log(np.expm1(1.))
        return tf.keras.Sequential([
            tfp.layers.VariableLayer(2 * n, dtype=dtype),
            tfp.layers.DistributionLambda(lambda t: tfd.Independent(
                tfd.Normal(loc=t[..., :n],
                           scale=1e-5 + tf.nn.softplus(c + t[..., n:])),
                reinterpreted_batch_ndims=1)),
        ])

    def _non_trainable_prior_fn(kernel_size, bias_size=0, dtype=None):
        # Fixed standard-normal prior over all weights and biases.
        def _distribution_fn(_):
            return tfd.Independent(tfd.Normal(loc=tf.zeros(kernel_size + bias_size, dtype=dtype),
                                              scale=1.),
                                   reinterpreted_batch_ndims=1)
        return _distribution_fn

    def _trainable_prior_fn(kernel_size, bias_size=0, dtype=None):
        # Alternative prior with a learnable mean; currently unused -- the
        # factory below wires in the non-trainable prior instead.
        return tf.keras.Sequential([
            tfp.layers.VariableLayer(kernel_size + bias_size, dtype=dtype),
            tfp.layers.DistributionLambda(
                lambda mu: tfd.Independent(tfd.Normal(loc=mu, scale=1),
                                           reinterpreted_batch_ndims=1)),
        ])

    return functools.partial(
        tfp.layers.DenseVariational,
        make_posterior_fn=_posterior_mean_field,
        make_prior_fn=_non_trainable_prior_fn,
        kl_weight=scale_factor)
def produce_layer(ensemble_type=None, **kwargs):
    """Return the (Dense, Conv2D, LSTM, last_Dense) layer constructors for
    the requested ensembling strategy.

    :param ensemble_type: one of the names in `ensemble_method_scope`
    :param kwargs: strategy-specific options -- expects 'dropout_rate' for
        'mc_dropout' and 'kl_scaler' for 'bayesian'
    :return: a tuple (Dense, Conv2D, LSTM, last_Dense) of layer factories
    :raises ValueError: if `ensemble_type` is not supported
    """
    # Validation happens in the final `else` branch rather than an assert:
    # the old leading `assert` made the `else: raise ValueError` unreachable
    # and silently disappears under `python -O`.
    if ensemble_type == 'vanilla' or ensemble_type == 'deep_ensemble' or ensemble_type == 'weighted_ensemble':
        Dense = tf.keras.layers.Dense
        Conv2D = tf.keras.layers.Conv2D
        LSTM = tf.keras.layers.LSTM
        last_Dense = tf.keras.layers.Dense
    elif ensemble_type == 'mc_dropout':
        Dense = dense_dropout(kwargs['dropout_rate'])
        Conv2D = conv2d_dropout(kwargs['dropout_rate'])
        LSTM = lstm_dropout(kwargs['dropout_rate'])
        last_Dense = dropout_dense(kwargs['dropout_rate'])
    elif ensemble_type == 'bayesian':
        # KL terms are re-scaled (e.g. by 1/dataset_size) so the prior does
        # not dominate the likelihood.
        Dense = scaled_reparameterization_layer(tfp.layers.DenseReparameterization, kwargs['kl_scaler'])
        Conv2D = scaled_reparameterization_layer(tfp.layers.Convolution2DReparameterization, kwargs['kl_scaler'])
        # No variational LSTM is available; fall back to the standard layer.
        LSTM = tf.keras.layers.LSTM
        last_Dense = scaled_reparameterization_layer(tfp.layers.DenseReparameterization, kwargs['kl_scaler'])
    else:
        raise ValueError('only support ensemble method {}.'.format(','.join(ensemble_method_scope)))
    return Dense, Conv2D, LSTM, last_Dense
##### neural network initialization ###########
def get_fans(shape):
    """Return (fan_in, fan_out) for a weight-tensor shape.

    A 2-D shape is read directly as (fan_in, fan_out); for higher-rank
    (e.g. convolutional) shapes all leading dimensions are folded into
    fan_in and the last dimension is fan_out.

    :param shape: tuple/list describing the weight shape
    :return: (fan_in, fan_out)
    """
    if len(shape) == 2:
        return shape[0], shape[1]
    return np.prod(shape[:-1]), shape[-1]
def glorot_uniform(shape):
    """Sample initial weights with Glorot/Xavier uniform initialization.

    Multi-dimensional shapes are drawn from U(-s, s) with
    s = sqrt(6 / (fan_in + fan_out)); 1-D shapes (bias vectors) are
    zero-filled.

    :param shape: weight shape (tuple or list)
    :return: numpy array of the requested shape
    """
    if len(shape) <= 1:
        # Biases start at zero.
        return np.zeros(shape, dtype=np.float32)
    # fan_in/fan_out computed as in get_fans, inlined here.
    if len(shape) == 2:
        fan_in, fan_out = shape[0], shape[1]
    else:
        fan_in, fan_out = np.prod(shape[:-1]), shape[-1]
    limit = np.sqrt(6. / (fan_in + fan_out))
    # NOTE(review): weights come back float64 while biases are float32 --
    # presumably cast later by the caller; confirm.
    return np.random.uniform(-limit, limit, size=shape)
| 17,663 | 32.904031 | 176 | py |
malware-uncertainty | malware-uncertainty-master/core/feature/feature_extraction.py | import os
import multiprocessing
import collections
import warnings
import tempfile
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from tools import utils
from tools import progressbar_wrapper
from core.ensemble.dataset_lib import build_dataset_from_numerical_data, \
build_dataset_via_generator, \
build_dataset_from_img_generator
from config import logging, ErrorHandler
logger = logging.getLogger('core.feature.feature_extraction')
logger.addHandler(ErrorHandler)
class FeatureExtraction(object):
    """Produce features for ML algorithms.

    Abstract base class: subclasses implement feature_extraction,
    feature_preprocess and feature2ipt for a concrete feature type.
    """

    def __init__(self,
                 naive_data_save_dir,
                 intermediate_save_dir,
                 file_ext=None,
                 update=False,
                 proc_number=2):
        """
        initialization
        :param naive_data_save_dir: a directory for saving intermediates
        :param intermediate_save_dir: a directory for saving meta information
        :param file_ext: file extent
        :param update: boolean indicator for recomputing the naive features
        :param proc_number: process number
        """
        # Directories are created eagerly so subclasses can write to them
        # immediately.
        self.naive_data_save_dir = naive_data_save_dir
        utils.mkdir(self.naive_data_save_dir)
        self.meta_data_save_dir = intermediate_save_dir
        utils.mkdir(self.meta_data_save_dir)
        self.file_ext = file_ext
        self.update = update
        self.proc_number = int(proc_number)

    def feature_extraction(self, sample_dir, use_order_features=False):
        """
        extract the android features from Android packages and save the extractions into designed directory
        :param sample_dir: malicious / benign samples for the subsequent process of feature extraction
        :param use_order_features: following the order of the provided sample paths
        :raises NotImplementedError: must be overridden by subclasses
        """
        raise NotImplementedError

    def feature_preprocess(self, feature_path_list, gt_labels):
        """
        pre-processing the naive data to accommodate the input format of ML algorithms
        :param feature_path_list: feature paths produced by the method of feature_extraction
        :param gt_labels: corresponding ground truth labels
        :raises NotImplementedError: must be overridden by subclasses
        """
        raise NotImplementedError

    def feature2ipt(self, feature_path_list, labels=None, is_training_set=False):
        """
        Mapping features to the input space
        :param feature_path_list, a list of paths point to the features
        :param labels, ground truth labels
        :param is_training_set, boolean type
        :raises NotImplementedError: must be overridden by subclasses
        """
        raise NotImplementedError

    @staticmethod
    def _check(sample_dir):
        """
        check a valid directory and produce a list of file paths

        Accepts a single path (file or directory) or a list of file paths;
        anything else is rejected with ValueError.
        """
        if isinstance(sample_dir, str):
            if not os.path.exists(sample_dir):
                MSG = "No such directory or file {} exists!".format(sample_dir)
                raise ValueError(MSG)
            elif os.path.isfile(sample_dir):
                sample_path_list = [sample_dir]
            elif os.path.isdir(sample_dir):
                # NOTE(review): the filter string ".apk|" is interpreted by
                # the project helper utils.retrive_files_set -- presumably it
                # matches '.apk' files; confirm against its implementation.
                sample_path_list = list(utils.retrive_files_set(sample_dir, "", ".apk|"))
                assert len(sample_path_list) > 0, 'No files'
            else:
                raise ValueError(" No such path {}".format(sample_dir))
        elif isinstance(sample_dir, list):
            # Entries that are not existing files are silently dropped.
            sample_path_list = [path for path in sample_dir if os.path.isfile(path)]
        else:
            MSG = "A directory or a list of paths are allowed!"
            raise ValueError(MSG)
        return sample_path_list
class DrebinFeature(FeatureExtraction):
    """Drebin-style binary feature vectors extracted from Android packages."""

    def __init__(self,
                 naive_data_save_dir,
                 intermediate_save_dir,
                 file_ext='.drebin',
                 update=False,
                 proc_number=2):
        super(DrebinFeature, self).__init__(naive_data_save_dir,
                                            intermediate_save_dir,
                                            file_ext,
                                            update,
                                            proc_number)

    def feature_extraction(self, sample_dir, use_order_features=False):
        """
        drebin features
        :return: 2D list, [[a list of features from an apk],...,[a list of features from an apk]]
        """
        from core.feature.drebin import AxplorerMapping, get_drebin_feature
        sample_path_list = self._check(sample_dir)
        pool = multiprocessing.Pool(self.proc_number)
        pbar = progressbar_wrapper.ProgressBar()
        process_results = []
        tasks = []
        pmap = AxplorerMapping()
        for i, apk_path in enumerate(sample_path_list):
            sha256 = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256 + self.file_ext)
            if os.path.exists(save_path) and (not self.update):
                # cached extraction; skip re-computation
                continue
            tasks.append(apk_path)
            # NOTE(review): only the last AsyncResult is retained here;
            # presumably DisplayProgressBar only polls it for completion --
            # confirm against progressbar_wrapper.
            process_results = pool.apply_async(get_drebin_feature,
                                               args=(apk_path, pmap, save_path),
                                               callback=pbar.CallbackForProgressBar)
        pool.close()
        if process_results:
            pbar.DisplayProgressBar(process_results, len(tasks), type='hour')
        pool.join()
        # collect the paths that were produced successfully
        feature_path_list = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path):
                feature_path_list.append(save_path)
            else:
                warnings.warn("Fail to perform feature extraction for '{}'".format(apk_path))
        return feature_path_list

    def load_features(self, feature_path_list):
        """
        load features
        :param feature_path_list: feature paths produced by the method of feature_extraction
        :return: a list of features
        """
        from core.feature.drebin import wrapper_load_features
        feature_list = []
        n_proc = 1 if multiprocessing.cpu_count() // 2 <= 1 else multiprocessing.cpu_count() // 2
        pool = multiprocessing.Pool(n_proc)
        for res in pool.imap(wrapper_load_features, feature_path_list):
            if not isinstance(res, Exception):
                feature_list.append(res)
            else:
                print(str(res))
        # Bug fix: release the worker processes (the pool was previously
        # never closed/joined, leaking processes on repeated calls); this
        # mirrors MultiModality.feature_mapping.
        pool.close()
        pool.join()
        return feature_list

    def feature_preprocess(self, feature_path_list, gt_labels):
        """
        pre-processing the naive data to accommodate the input format of ML algorithms
        :param feature_path_list: feature paths produced by the method of feature_extraction
        :param gt_labels: corresponding ground truth labels
        """
        vocab_path = os.path.join(self.meta_data_save_dir, 'drebin.vocab')
        if self.update or (not os.path.exists(vocab_path)):
            assert len(feature_path_list) == len(gt_labels)
            features = self.load_features(feature_path_list)
            tmp_vocab = self.get_vocabulary(features)
            # we select 10,000 features
            selected_vocab = self.feature_selection(features, gt_labels, tmp_vocab, dim=10000)
            utils.dump_pickle(selected_vocab, vocab_path)
        return

    def feature2ipt(self, feature_path_list, labels=None, is_training_set=False):
        """
        Mapping features to the input space
        :param feature_path_list: the feature paths produced by the method of feature_extraction
        :param labels: the ground truth labels correspond to features
        :param is_training_set, not used here
        :return tf.data; input dimension of an item of data
        :rtype tf.data.Dataset object; integer
        """
        # load
        vocab_path = os.path.join(self.meta_data_save_dir, 'drebin.vocab')
        if not os.path.exists(vocab_path):
            raise ValueError("A vocabulary is needed.")
        vocab = utils.read_pickle(vocab_path)
        features = self.load_features(feature_path_list)
        dataX_np = self.get_feature_representation(features, vocab)
        input_dim = dataX_np.shape[1]
        if labels is not None:
            return build_dataset_from_numerical_data((dataX_np, labels)), input_dim
        else:
            return build_dataset_from_numerical_data(dataX_np), input_dim

    def feature_selection(self, train_features, train_y, vocab, dim):
        """
        feature selection
        :param train_features: 2D feature
        :type train_features: numpy object
        :param train_y: ground truth labels
        :param vocab: a list of words (i.e., features)
        :param dim: the number of remained words
        :return: chose vocab
        """
        is_malware = (train_y == 1)
        mal_features = np.array(train_features, dtype=object)[is_malware]
        ben_features = np.array(train_features, dtype=object)[~is_malware]
        if (len(mal_features) <= 0) or (len(ben_features) <= 0):
            return vocab
        mal_representations = self.get_feature_representation(mal_features, vocab)
        mal_frequency = np.sum(mal_representations, axis=0) / float(len(mal_features))
        ben_representations = self.get_feature_representation(ben_features, vocab)
        ben_frequency = np.sum(ben_representations, axis=0) / float(len(ben_features))
        # eliminate the words showing zero occurrence in apk files
        # (bug fix: the second operand previously lacked `== 0`, which marked
        # features present in *every* benign app -- instead of features
        # absent from both classes -- as "null")
        is_null_feature = np.all(mal_representations == 0, axis=0) & np.all(ben_representations == 0, axis=0)
        # free the large representation matrices before further processing
        mal_representations, ben_representations = None, None
        vocab_filtered = list(np.array(vocab)[~is_null_feature])
        if len(vocab_filtered) <= dim:
            return vocab_filtered
        else:
            # keep the `dim` words whose malware/benign frequency gap is
            # largest (most discriminative)
            feature_frq_diff = np.abs(mal_frequency[~is_null_feature] - ben_frequency[~is_null_feature])
            position_flag = np.argsort(feature_frq_diff)[::-1][:dim]
            vocab_selected = []
            for p in position_flag:
                vocab_selected.append(vocab_filtered[p])
            return vocab_selected

    def load_vocabulary(self):
        """Load the persisted vocabulary; raises ValueError if missing."""
        vocab_path = os.path.join(self.meta_data_save_dir, 'drebin.vocab')
        if not os.path.exists(vocab_path):
            raise ValueError("A vocabulary is needed.")
        vocab = utils.read_pickle(vocab_path)
        return vocab

    @staticmethod
    def get_vocabulary(feature_list, n=300000):
        """
        obtain the vocabulary based on the feature
        :param feature_list: 2D list of naive feature
        :param n: the number of top frequency items
        :return: feature vocabulary
        """
        c = collections.Counter()
        for features in feature_list:
            for feature in features:
                c[feature] += 1
        vocab, count = zip(*c.most_common(n))
        return list(vocab)

    @staticmethod
    def get_feature_representation(feature_list, vocab):
        """
        mapping feature to numerical representation
        :param feature_list: 2D feature list with shape [number of files, number of feature]
        :param vocab: a list of words
        :return: 2D representation
        :rtype numpy.ndarray
        """
        N = len(feature_list)
        M = len(vocab)
        assert N > 0 and M > 0
        representations = np.zeros((N, M), dtype=np.float32)
        dictionary = dict(zip(vocab, range(len(vocab))))
        for i, features in enumerate(feature_list):
            if len(features) > 0:
                # out-of-vocabulary features are silently ignored
                filled_positions = [idx for idx in list(map(dictionary.get, features)) if idx is not None]
                if len(filled_positions) != 0:
                    representations[i, filled_positions] = 1.
                else:
                    warnings.warn("Produce zero feature vector.")
        return representations
class OpcodeSeq(FeatureExtraction):
    """
    get opcode sequences
    """

    def __init__(self,
                 naive_data_save_dir,
                 intermediate_save_dir=None,
                 file_ext='.opcode',
                 update=False,
                 proc_number=2):
        """
        initialization
        :param naive_data_save_dir: a directory for saving intermediates
        :param intermediate_save_dir: a directory for saving meta information;
            not referenced by this subclass. NOTE(review): the base __init__
            calls utils.mkdir on it -- confirm None is handled there.
        :param file_ext: file extent
        :param update: boolean indicator for recomputing the naive features
        :param proc_number: process number
        """
        super(OpcodeSeq, self).__init__(naive_data_save_dir,
                                        intermediate_save_dir,
                                        file_ext,
                                        update,
                                        proc_number)

    def feature_extraction(self, sample_dir, use_order_features=False):
        """Extract opcode sequences in parallel; return paths of saved features."""
        from core.feature.opcodeseq import feature_extr_wrapper
        sample_path_list = self._check(sample_dir)
        pool = multiprocessing.Pool(self.proc_number)
        pbar = progressbar_wrapper.ProgressBar()
        process_results = []
        tasks = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path) and not self.update:
                # cached extraction; skip re-computation
                continue
            tasks.append(apk_path)
            # NOTE(review): only the last AsyncResult is retained; presumably
            # DisplayProgressBar only polls it for completion -- confirm.
            process_results = pool.apply_async(feature_extr_wrapper,
                                               args=(apk_path, save_path),
                                               callback=pbar.CallbackForProgressBar)
        pool.close()
        if process_results:
            pbar.DisplayProgressBar(process_results, len(tasks), type='hour')
        pool.join()
        # collect paths of the features actually produced
        feature_paths = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path):
                feature_paths.append(save_path)
            else:
                warnings.warn("Fail to perform feature extraction for '{}'".format(apk_path))
        return feature_paths

    def feature_preprocess(self, feature_path_list, gt_labels):
        """
        pre-processing the naive data to accommodate the input format of ML algorithms
        :param feature_path_list: a list of paths directing to saved features
        :param gt_labels: corresponding ground truth labels
        """
        # Intentionally a no-op: opcode sequences need no meta data.
        return

    def feature2ipt(self, feature_path_list, labels=None, is_training_set=False):
        """
        Mapping features to the input space

        :param feature_path_list: paths to '.opcode' feature files
        :param labels: optional ground truth labels
        :param is_training_set: unused here
        :return: (tf.data dataset, None); the None keeps the
            (dataset, input_dim) return convention of the other extractors
        """
        from core.feature.opcodeseq import read_opcode, read_opcode_wrapper
        from core.ensemble.model_hp import text_cnn_hparam

        def padding_opcodes(features_of_an_apk, padding_char=0):
            # Concatenate the per-item opcode sequences, inserting
            # `kernel_size` padding characters between them (presumably so a
            # convolution window never straddles two sequences), then
            # truncate to max_sequence_length.
            padding_seq = []
            padding_chars = [padding_char] * text_cnn_hparam.kernel_size
            for i, seq in enumerate(features_of_an_apk):
                padding_seq.extend(seq)
                padding_seq.extend(padding_chars)
                if len(padding_seq) > text_cnn_hparam.max_sequence_length:
                    break
            return np.array(padding_seq[:text_cnn_hparam.max_sequence_length])

        def generator():
            if labels is not None:
                for path, label in zip(feature_path_list, labels):
                    data_padded = padding_opcodes(read_opcode(path))
                    yield data_padded[:text_cnn_hparam.max_sequence_length], label
            else:
                for path in feature_path_list:
                    data_padded = padding_opcodes(read_opcode(path))
                    yield data_padded[:text_cnn_hparam.max_sequence_length]

        # NOTE(review): the temporary file is deleted as soon as this `with`
        # block exits -- presumably build_dataset_via_generator only needs
        # its unique name; confirm.
        with tempfile.NamedTemporaryFile() as f:
            return build_dataset_via_generator(generator, labels, f.name), None
class MultiModality(FeatureExtraction):
    """Five-modality features (perm/component, string, api, opcode, lib) per apk."""

    def __init__(self,
                 naive_data_save_dir,
                 intermediate_save_dir,
                 use_feature_selection=True,
                 feature_dimension=10000,
                 cluster_centers=100,
                 similar_threshold=0.5,
                 file_ext='.multimod',
                 update=False,
                 proc_number=2
                 ):
        """
        initialization
        :param naive_data_save_dir: a directory for saving intermediates
        :param intermediate_save_dir: a directory for saving meta information
        :param use_feature_selection: select features with top frequencies
        :param feature_dimension: the number of selected features, default 10,000
        :param cluster_centers: the number of cluster centers, default 100
        :param similar_threshold: cut-off used to binarize the cluster
            similarity features (see _get_similarity)
        :param file_ext: file extent
        :param update: boolean indicator for recomputing the naive features
        :param proc_number: process number
        """
        super(MultiModality, self).__init__(naive_data_save_dir,
                                            intermediate_save_dir,
                                            file_ext,
                                            update,
                                            proc_number
                                            )
        self.use_feature_selection = use_feature_selection
        self.feature_dimension = feature_dimension
        self.cluster_centers = cluster_centers
        self.similar_threshold = similar_threshold

    def feature_extraction(self, sample_dir, use_order_features=False):
        """
        extract the android features from Android packages and save the extractions into designed directory
        """
        from core.feature.multimodality import API_LIST, get_multimod_feature
        sample_path_list = self._check(sample_dir)
        pool = multiprocessing.Pool(self.proc_number)
        pbar = progressbar_wrapper.ProgressBar()
        process_results = []
        tasks = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path) and (not self.update):
                # cached extraction; skip re-computation
                continue
            tasks.append(apk_path)
            # NOTE(review): only the last AsyncResult is retained; presumably
            # DisplayProgressBar only polls it for completion -- confirm.
            process_results = pool.apply_async(get_multimod_feature,
                                               args=(apk_path, API_LIST, save_path),
                                               callback=pbar.CallbackForProgressBar)
        pool.close()
        if process_results:
            pbar.DisplayProgressBar(process_results, len(tasks), type='hour')
        pool.join()
        # collect paths of the features actually produced
        feature_paths = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path):
                feature_paths.append(save_path)
            else:
                warnings.warn("Fail to perform feature extraction for '{}'".format(apk_path))
        return feature_paths

    def feature_preprocess(self, feature_path_list, gt_labels):
        """
        pre-processing the naive data to accommodate the input format of ML algorithms
        :param feature_path_list: a list of paths directing to save features. For each apk,
        features produced by the method of feature_extraction, 2D list [[feature type 1,...,feature type 5],...,]
        :param gt_labels: corresponding ground truth labels
        """
        assert len(feature_path_list) == len(gt_labels), 'inconsistent dataset'
        vocab_path = os.path.join(self.meta_data_save_dir, 'multimodality.vocab')
        if os.path.exists(vocab_path) and not self.update:
            vocab_list = self.load_meta_info(vocab_path)
        else:
            vocab_list = self.get_vocab(feature_path_list,
                                        gt_labels,
                                        self.use_feature_selection,
                                        self.feature_dimension)
            # saving
            self.save_meta_info(vocab_list, vocab_path)
        dataX_list = self.feature_mapping(feature_path_list, vocab_list)
        # further processing
        scaler_path = os.path.join(self.meta_data_save_dir, 'multimodality.scaler')
        scaled_dataX_list = self.data_scaling(dataX_list, scaler_path)
        # clustering for last three types of features
        cluster_center_path = os.path.join(self.meta_data_save_dir, 'multimodality.center')
        if not os.path.exists(cluster_center_path) or self.update:
            cluster_centers = []
            for i, dataX in enumerate(scaled_dataX_list[2:]):  # produce the last three similarity-based feature
                center_vec = self.k_means_clustering(dataX,
                                                     self.cluster_centers)
                cluster_centers.append(center_vec)
            # saving
            self.save_meta_info(cluster_centers, cluster_center_path)
        return

    def feature2ipt(self, feature_path_list, labels=None, is_training_set=False):
        """
        Mapping features to the input space

        :param feature_path_list: paths of saved multi-modality features
        :param labels: optional ground truth labels
        :param is_training_set: unused here
        :return: (zipped tf.data dataset, per-modality input dims) when
            labels are given, otherwise (list of numpy arrays, dims)
        """
        vocab_path = os.path.join(self.meta_data_save_dir, 'multimodality.vocab')
        vocab_list = self.load_meta_info(vocab_path)
        scaler_path = os.path.join(self.meta_data_save_dir, 'multimodality.scaler')
        scalers = self.load_meta_info(scaler_path)
        cluster_center_path = os.path.join(self.meta_data_save_dir, 'multimodality.center')
        centers = self.load_meta_info(cluster_center_path)
        dataX_list = self.feature_mapping(feature_path_list, vocab_list)
        for i, dataX in enumerate(dataX_list):
            dataX_list[i] = scalers[i].transform(dataX)
        # the last three modalities are replaced by similarity-to-center bits
        for i, center in enumerate(centers):
            dataX_list[2 + i] = self._get_similarity(dataX_list[2 + i], center, self.similar_threshold)
        input_dim = []
        for x in dataX_list:
            input_dim.append(x.shape[1])
        # build dataset
        if labels is not None:
            data_tf = build_dataset_from_numerical_data(tuple(dataX_list))
            y = build_dataset_from_numerical_data(labels)
            from tensorflow import data
            return data.Dataset.zip((data_tf, y)), input_dim
        else:
            return dataX_list, input_dim

    def save_meta_info(self, data, path):
        # Only persist when updating or when nothing is saved yet.
        if self.update or (not os.path.exists(path)):
            utils.dump_joblib(data, path)
        return

    @staticmethod
    def load_meta_info(path):
        if os.path.exists(path):
            return utils.read_joblib(path)
        else:
            raise ValueError("No such data.")

    @staticmethod
    def get_vocab(feature_path_list, gt_labels=None, use_feature_selection=False, dim=10000):
        """
        build vocabulary for five kinds of feature, including permission/component/environment, string, method api,
        method opcodes, shared library, each of which are presented in the 'collections.defaultdict' format
        :param feature_path_list: a list of paths redirecting to save features
        :param gt_labels: ground truth labels (optional)
        :param use_feature_selection: conducting feature extraction or not (False means no, and True means yes)
        :param dim: the number of selected feature (optional)
        :return: list of vocabularies corresponding to five kinds of feature
        """
        from core.feature.multimodality import wrapper_load_features
        feature_list = []
        n_proc = 1 if multiprocessing.cpu_count() // 2 <= 1 else multiprocessing.cpu_count() // 2
        pool = multiprocessing.Pool(n_proc)
        for res in pool.imap(wrapper_load_features, feature_path_list):
            if not isinstance(res, Exception):
                feature_list.append(res)
            else:
                print(str(res))
        # Bug fix: release the worker processes (the pool was previously
        # never closed/joined, leaking processes); mirrors feature_mapping.
        pool.close()
        pool.join()
        assert isinstance(feature_list, list) and len(feature_list) > 0, 'Type: {} and length: {}'.format(
            type(feature_list), len(feature_list))
        number_of_types = len(feature_list[0])
        number_of_samples = len(feature_list)
        vocabulary_list = []
        for t in range(number_of_types):
            # accumulate per-word frequencies across all samples
            c = collections.Counter()
            for j in range(number_of_samples):
                feature_dict = feature_list[j][t]
                for k, v in feature_dict.items():
                    c[k] += v
            if not use_feature_selection:
                if len(c) > 0:
                    vocab, count = zip(*c.items())
                else:
                    vocab = []
            else:
                if len(c) > 0:
                    vocab, count = zip(*c.most_common(dim))  # filter out words with low frequency
                else:
                    vocab = []
            vocabulary_list.append(list(vocab))
        return vocabulary_list

    @staticmethod
    def feature_mapping(feature_path_list, vocab_list):
        """
        mapping feature to numerical representation
        :param feature_path_list: a list of paths redirecting to saved features
        :param vocab_list: several lists of words
        :return: 2D representation
        :rtype numpy.ndarray
        """
        from core.feature.multimodality import wrapper_load_features
        feature_list = []
        n_proc = 1 if multiprocessing.cpu_count() // 2 <= 1 else multiprocessing.cpu_count() // 2
        pool = multiprocessing.Pool(n_proc)
        for res in pool.imap(wrapper_load_features, feature_path_list):
            if not isinstance(res, Exception):
                feature_list.append(res)
            else:
                print(str(res))
        pool.close()
        pool.join()
        assert len(feature_list[0]) == len(vocab_list)
        number_of_feature_types = len(vocab_list)
        representation_list = []
        for t in range(number_of_feature_types):
            number_of_samples = len(feature_list)
            vocab = vocab_list[t]
            M = len(list(vocab))
            representation = np.zeros((number_of_samples, M), dtype=np.float32)
            dictionary = dict(zip(vocab, range(M)))
            for j in range(number_of_samples):
                feature_dict = feature_list[j][t]
                if len(feature_dict) > 0:
                    # out-of-vocabulary words are ignored; in-vocabulary words
                    # carry their counts as values
                    filled_positions = [idx for idx in list(map(dictionary.get, list(feature_dict.keys()))) if
                                        idx is not None]
                    filled_values = [feature_dict.get(key) for key in list(feature_dict.keys()) if
                                     dictionary.get(key) is not None]
                    if len(filled_positions) != 0:
                        representation[j, filled_positions] = filled_values[:]
                    else:
                        warnings.warn("Produce zero feature vector.")
            representation_list.append(representation)
        return representation_list

    def data_scaling(self, data_x_list, scalar_saving_path=None):
        """
        minmax scaling for numerical feature representations
        :param data_x_list: a list of un-normalized feature representation
        :param scalar_saving_path: path for caching the fitted scalers.
            NOTE(review): the default None would crash os.path.exists --
            callers always pass a path; confirm before relying on the default.
        :return: scaled feature representation
        :rtype : list of 2d numpy.ndarray
        """
        if os.path.exists(scalar_saving_path) and not self.update:
            scalers = self.load_meta_info(scalar_saving_path)
        else:
            scalers = []
            for i, dataX in enumerate(data_x_list):
                scaler = MinMaxScaler()
                scaler.fit(dataX)
                scalers.append(scaler)
            self.save_meta_info(scalers, scalar_saving_path)
        for i, dataX in enumerate(data_x_list):
            data_x_list[i] = scalers[i].transform(dataX)
        return data_x_list

    @staticmethod
    def k_means_clustering(data_x, number_of_cluster_centers=100):
        # Fall back to N//2 clusters when there are fewer samples than the
        # requested number of centers.
        N = data_x.shape[0]
        n_clusters = number_of_cluster_centers if number_of_cluster_centers < N else N // 2
        kmeans = KMeans(n_clusters=n_clusters,
                        random_state=0).fit(data_x)
        return kmeans.cluster_centers_

    @staticmethod
    def _get_similarity(data_x, anchor, threshold=0.5):
        """
        get similarity matrix
        :return: similarity-based feature representation
        """
        # The following method of calculating the similarity matrix might be different to the proposal
        # in the paper (i.e., Algorithm 2), which is confusing to us.
        similar_mat = 1. / np.column_stack(
            [np.max(np.square(data_x - center) ** 0.5 + 1, axis=-1) for center in anchor])
        return np.greater(similar_mat, threshold).astype(np.float32)

    @staticmethod
    def _check_features(features):
        """
        check the completeness
        :param features: a list of features, each item presented in the 'collections.defaultdict' format
        :return: True or False
        """
        return (isinstance(features, list)) and (len(features) > 0) and (
            isinstance(features[0][0], dict))
class DexToImage(FeatureExtraction):
    """
    Convert the dex files to a RGB image
    """

    def __init__(self,
                 naive_data_save_dir,
                 intermediate_save_dir,
                 file_ext='.jpg',
                 update=False,
                 proc_number=2
                 ):
        super(DexToImage, self).__init__(naive_data_save_dir,
                                         intermediate_save_dir,
                                         file_ext,
                                         update,
                                         proc_number)

    def feature_extraction(self, sample_dir, use_order_features=False):
        """Convert each apk's dex to an image in parallel; return image paths."""
        from core.feature.dex2img import dex2img
        sample_path_list = self._check(sample_dir)
        pool = multiprocessing.Pool(self.proc_number)
        pbar = progressbar_wrapper.ProgressBar()
        process_results = []
        tasks = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path) and (not self.update):
                # cached conversion; skip re-computation
                continue
            tasks.append(apk_path)
            # NOTE(review): only the last AsyncResult is retained; presumably
            # DisplayProgressBar only polls it for completion -- confirm.
            process_results = pool.apply_async(dex2img,
                                               args=(apk_path, save_path),
                                               callback=pbar.CallbackForProgressBar)
        pool.close()
        if process_results:
            pbar.DisplayProgressBar(process_results, len(tasks), type='hour')
        pool.join()
        # collect paths of the images actually produced
        result_paths = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            res_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(res_path):
                result_paths.append(res_path)
            else:
                warnings.warn("Fail to perform feature extraction for '{}'".format(apk_path))
        return result_paths

    def feature_preprocess(self, feature_path_list, gt_labels):
        """
        pre-processing the naive data to accommodate the input format of ML algorithms
        :param feature_path_list: a list of paths directing to image files
        :param gt_labels: corresponding ground truth labels
        """
        # Intentionally a no-op: images need no meta data.
        return

    def feature2ipt(self, feature_path_list, labels=None, is_training_set=False, image_size=(500, 500)):
        """
        Mapping features to the input space

        :param feature_path_list: paths of image files produced by feature_extraction
        :param labels: optional ground truth labels
        :param is_training_set: enables shuffling and class weighting
        :param image_size: (height, width) the images are resized to;
            a tuple default avoids the shared mutable-default pitfall
            (callers may still pass a list)
        :return: (tf.data dataset, input dimension [height, width, 3])
        """
        image_names = [os.path.basename(path) for path in feature_path_list]
        from tensorflow.keras.preprocessing.image import ImageDataGenerator
        from core.ensemble.model_hp import train_hparam
        if len(image_names) % train_hparam.batch_size == 0:
            batches = len(image_names) // train_hparam.batch_size
        else:
            batches = len(image_names) // train_hparam.batch_size + 1
        # Bug fix: the guard previously read `batches >= 0`, which is always
        # true; it must reject the empty-input case (batches == 0).
        assert batches > 0, 'No data. Exit'
        img_generator_obj = ImageDataGenerator(rescale=1. / 255)
        if is_training_set and labels is not None:
            labels_string = [(lambda x: 'malware' if x else 'benware')(label) for label in labels]
            # up-weight the minority (malware) class by the benign/malware ratio
            w = float(len(labels) - np.sum(labels)) / np.sum(labels)
            w = 1. if w <= 1. else w
            weights = [(lambda x: w if x else 1.)(label) for label in labels]
            img_pd = pd.DataFrame({'image': image_names, 'labels': labels_string, 'sample_weights': weights})
            img_generator = img_generator_obj.flow_from_dataframe(img_pd,
                                                                  directory=self.naive_data_save_dir,
                                                                  x_col='image',
                                                                  y_col='labels',
                                                                  weight_col='sample_weights',
                                                                  classes=['benware', 'malware'],
                                                                  target_size=image_size,
                                                                  class_mode='binary',
                                                                  shuffle=True,
                                                                  batch_size=train_hparam.batch_size
                                                                  )
        elif not is_training_set and labels is not None:
            labels_string = [(lambda x: 'malware' if x else 'benware')(label) for label in labels]
            img_pd = pd.DataFrame({'image': image_names, 'labels': labels_string})
            img_generator = img_generator_obj.flow_from_dataframe(img_pd,
                                                                  directory=self.naive_data_save_dir,
                                                                  x_col='image',
                                                                  y_col='labels',
                                                                  classes=['benware', 'malware'],
                                                                  target_size=image_size,
                                                                  class_mode='binary',
                                                                  shuffle=False,
                                                                  batch_size=train_hparam.batch_size
                                                                  )
        else:
            img_pd = pd.DataFrame({'image': image_names})
            img_generator = img_generator_obj.flow_from_dataframe(img_pd,
                                                                  directory=self.naive_data_save_dir,
                                                                  x_col='image',
                                                                  target_size=image_size,
                                                                  shuffle=False,
                                                                  class_mode=None,
                                                                  batch_size=train_hparam.batch_size
                                                                  )

        def generator():
            # Yield exactly one epoch's worth of batches.
            for _ in range(batches):
                yield next(img_generator)

        return (build_dataset_from_img_generator(generator,
                                                 input_dim=[*image_size, 3],
                                                 y=labels,
                                                 is_training=is_training_set), [*image_size, 3])
class APISequence(FeatureExtraction):
"""Obtain api sequences based on the function call graph"""
    def __init__(self,
                 naive_data_save_dir,
                 intermediate_save_dir,
                 use_feature_selection=True,
                 ratio=0.25,
                 file_ext='.seq',
                 update=False,
                 proc_number=2
                 ):
        """
        initialization
        :param naive_data_save_dir: a directory for saving intermediates
        :param intermediate_save_dir: a directory for saving meta information
        :param use_feature_selection: use feature selection to filtering out entities with high frequencies
        :param ratio: resides the range of [0, 1] and denotes a portion of features will be neglected
        :param file_ext: file extent
        :param update: boolean indicator for recomputing the naive features
        :param proc_number: process number
        """
        super(APISequence, self).__init__(naive_data_save_dir,
                                          intermediate_save_dir,
                                          file_ext,
                                          update,
                                          proc_number)
        self.use_feature_selection = use_feature_selection
        self.ratio = ratio
        from core.ensemble.model_hp import droidetec_hparam
        # cap on the dictionary size used when indexing api sequences
        self.maximum_vocab_size = droidetec_hparam.vocab_size
    def feature_extraction(self, sample_dir, use_order_features=False):
        """ save the android features and return the saved paths """
        from core.feature.apiseq import get_api_sequence
        sample_path_list = self._check(sample_dir)
        pool = multiprocessing.Pool(self.proc_number)
        pbar = progressbar_wrapper.ProgressBar()
        process_results = []
        tasks = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path) and (not self.update):
                # cached extraction; skip re-computation
                continue
            tasks.append(apk_path)
            # NOTE(review): only the last AsyncResult is retained; presumably
            # DisplayProgressBar only polls it for completion -- confirm.
            process_results = pool.apply_async(get_api_sequence,
                                               args=(apk_path, save_path),
                                               callback=pbar.CallbackForProgressBar)
        pool.close()
        if process_results:
            pbar.DisplayProgressBar(process_results, len(tasks), type='hour')
        pool.join()
        # collect paths of the features actually produced
        feature_paths = []
        for i, apk_path in enumerate(sample_path_list):
            sha256_code = os.path.splitext(os.path.basename(apk_path))[0]  # utils.get_sha256(apk_path)
            save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)
            if os.path.exists(save_path):
                feature_paths.append(save_path)
            else:
                warnings.warn("Fail to perform feature extraction for '{}'".format(apk_path))
        return feature_paths
def feature_preprocess(self, feature_path_list, gt_labels):
"""
pre-processing the naive data to accommodate the input format of ML algorithms
"""
dict_saving_path = os.path.join(self.meta_data_save_dir, 'apiseq.dict')
if not os.path.exists(dict_saving_path) or self.update:
vocab = self.get_vocab(feature_path_list, gt_labels)
dictionary = dict(zip(vocab, range(len(vocab))))
# saving
utils.dump_joblib(dictionary, dict_saving_path)
return
def feature2ipt(self, feature_path_list, labels=None, is_training_set=False):
"""
Mapping features to the input space
"""
dict_saving_path = os.path.join(self.meta_data_save_dir, 'apiseq.dict')
if os.path.exists(dict_saving_path):
dictionary = utils.read_joblib(dict_saving_path)
else:
raise ValueError("No meta data")
from core.ensemble.model_hp import droidetec_hparam
def generator():
if labels is not None:
discrete_features = self.feature_mapping(feature_path_list, dictionary)
assert len(discrete_features) == len(labels), 'inconsistent data vs. corresponding label'
for data, label in zip(*(discrete_features, labels)):
yield data[:droidetec_hparam.max_sequence_length], label
else:
from core.feature.apiseq import wrapper_mapping
for feature_path in feature_path_list:
data = wrapper_mapping([feature_path, dictionary])
if isinstance(data, Exception):
raise ValueError("Cannot load the specified feature:{}".format(
feature_path
))
yield data[:droidetec_hparam.max_sequence_length]
with tempfile.NamedTemporaryFile() as f:
return build_dataset_via_generator(generator, labels, f.name), None
def get_vocab(self, feature_path_list, gt_labels):
"""
create vocabulary based on a list of feature
:param feature_path_list: a list of feature paths
:param gt_labels: ground truth labels
:return: vocabulary
:rtype: list
"""
if self.use_feature_selection:
assert 0. < self.ratio <= 1., 'the ratio should be (0,1]'
from core.feature.apiseq import wrapper_load_feature
c_mal = collections.Counter()
c_ben = collections.Counter()
# has side-effect, consuming lots of local disk
n_proc = 1 if multiprocessing.cpu_count() // 2 <= 1 else multiprocessing.cpu_count() // 2
pool = multiprocessing.Pool(n_proc)
for res, label in zip(pool.imap(wrapper_load_feature, feature_path_list), gt_labels):
if isinstance(res, list):
if label:
c_mal.update(res)
else:
c_ben.update(res)
elif isinstance(res, Exception):
print(str(res))
else:
raise ValueError
pool.close()
pool.join()
if not self.use_feature_selection:
c_mal.update(c_ben)
# c_all = c_mal
else:
api_num_mal = len(c_mal)
api_hf_mal = dict(c_mal.most_common(int(api_num_mal * self.ratio))).keys()
api_num_ben = len(c_ben)
api_hf_ben = dict(c_ben.most_common(int(api_num_ben * self.ratio))).keys()
common_apis = [e for e in api_hf_mal if e in api_hf_ben]
for api in common_apis:
c_mal[api] = 0
c_ben[api] = 0
c_mal.update(c_ben)
logger.info('Filtering out {} apis.'.format(len(common_apis)))
c_all = dict(c_mal.most_common(self.maximum_vocab_size - 1)) # saving a slot for null features
c_all['sos'] = 1
vocab, count = zip(*c_all.items())
return list(vocab)
def feature_mapping(self, feature_path_list, dictionary):
"""
mapping feature to numerical representation
:param feature_path_list: a list of feature paths
:param dictionary: vocabulary -> index
:return: 2D representation
:rtype numpy.ndarray
"""
numerical_features = []
from core.feature.apiseq import wrapper_mapping
from core.ensemble.model_hp import droidetec_hparam
n_proc = 1 if multiprocessing.cpu_count() // 2 <= 1 else multiprocessing.cpu_count() // 2
pool = multiprocessing.Pool(n_proc)
pargs = [(path, dictionary) for path in feature_path_list]
for res, path in zip(pool.imap(wrapper_mapping, pargs), feature_path_list):
if not isinstance(res, Exception):
numerical_features.append(res[:droidetec_hparam.max_sequence_length])
else:
warnings.warn(str(res) + ': ' + path)
pool.close()
pool.join()
return numerical_features
| 44,809 | 43.322453 | 115 | py |
malware-uncertainty | malware-uncertainty-master/core/ensemble/deep_ensemble.py | from os import path
import time
import numpy as np
import tensorflow as tf
from core.ensemble.vanilla import Vanilla
from core.ensemble.model_hp import train_hparam
from core.ensemble.dataset_lib import build_dataset_from_numerical_data
from tools import utils
from config import logging, ErrorHandler
logger = logging.getLogger('ensemble.deep_ensemble')
logger.addHandler(ErrorHandler)
class DeepEnsemble(Vanilla):
    """A deep ensemble of several independently trained members.

    All training/prediction machinery is inherited from ``Vanilla``; this
    subclass only tags the ensemble type and keeps the shared training
    hyper-parameters.
    """

    def __init__(self, architecture_type='dnn', base_model=None, n_members=5,
                 model_directory=None, name='DEEPENSEMBLE'):
        super().__init__(architecture_type, base_model, n_members,
                         model_directory, name)
        self.hparam = train_hparam
        self.ensemble_type = 'deep_ensemble'
class WeightedDeepEnsemble(Vanilla):
    """
    Deep ensemble whose member predictions are combined by a learned convex
    combination: a Dense(1) layer whose kernel is constrained to the probability
    simplex (softmax over the kernel) weights the members' output probabilities.

    Fixes over the previous revision:
    - ``load_ensemble_weights`` logged "File not found: " without the ``{}``
      placeholder, so the offending path was silently dropped from the message.
    - the weight-modular progress message printed a 0-based epoch while every
      other message is 1-based.
    """

    def __init__(self,
                 architecture_type='dnn',
                 base_model=None,
                 n_members=5,
                 model_directory=None,
                 name='WEIGTHEDDEEPENSEMBLE'  # misspelling kept: it is part of the on-disk save path
                 ):
        """
        initialization
        :param architecture_type: the type of base model
        :param base_model: an object of base model
        :param n_members: number of base models
        :param model_directory: a folder for saving ensemble weights
        :param name: model name, lower-cased into the save sub-directory
        """
        super(WeightedDeepEnsemble, self).__init__(architecture_type,
                                                   base_model,
                                                   n_members,
                                                   model_directory,
                                                   name)
        self.hparam = train_hparam
        self.ensemble_type = 'deep_ensemble'
        # small model mapping n_members member probabilities to one weighted probability
        self.weight_modular = None

    def get_weight_modular(self):
        """Build the weighting model: Dense(1), no bias, kernel on the simplex."""
        class Simplex(tf.keras.constraints.Constraint):
            def __call__(self, w):
                # project the kernel onto the probability simplex (non-negative,
                # sums to one); subtracting the max stabilises the softmax
                return tf.math.softmax(w - tf.math.reduce_max(w), axis=0)
        inputs = tf.keras.Input(shape=(self.n_members,))
        outs = tf.keras.layers.Dense(1, use_bias=False, activation=None, kernel_constraint=Simplex(), name='simplex')(
            inputs)
        return tf.keras.Model(inputs=inputs, outputs=outs)

    def predict(self, x, use_prob=False):
        """
        conduct prediction
        :param x: tf.data.Dataset, numpy array or list of model inputs
        :param use_prob: when True return the weighted probability; otherwise
            return (stacked member outputs, simplex kernel weights)
        """
        # force a clean reload of all members and the weight modular from disk
        self.base_model = None
        self.weight_modular = None
        self.weights_list = []
        self._optimizers_dict = []
        self.load_ensemble_weights()
        output_list = []
        start_time = time.time()
        for base_model in self.model_generator():
            if isinstance(x, tf.data.Dataset):
                output_list.append(base_model.predict(x, verbose=1))
            elif isinstance(x, (np.ndarray, list)):
                output_list.append(base_model.predict(x, batch_size=self.hparam.batch_size, verbose=1))
            else:
                raise ValueError
        total_time = time.time() - start_time
        logger.info('Inference costs {} seconds.'.format(total_time))
        assert self.weight_modular is not None
        # weighted combination of the members' probabilities
        output = self.weight_modular(np.hstack(output_list)).numpy()
        if not use_prob:
            return np.stack(output_list, axis=1), self.weight_modular.get_layer('simplex').get_weights()
        else:
            return output

    def fit(self, train_set, validation_set=None, input_dim=None, **kwargs):
        """
        fit the ensemble by producing a lists of model weights
        :param train_set: tf.data.Dataset, the type shall accommodate to the input format of Tensorflow models
        :param validation_set: validation data, optional
        :param input_dim: integer or list, input dimension except for the batch size
        """
        # training preparation
        if self.base_model is None:
            self.build_model(input_dim=input_dim)
        if self.weight_modular is None:
            self.weight_modular = self.get_weight_modular()
        self.base_model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.hparam.learning_rate,
                                               clipvalue=self.hparam.clipvalue),
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[tf.keras.metrics.BinaryAccuracy()],
        )
        self.weight_modular.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.hparam.learning_rate,
                                               clipvalue=self.hparam.clipvalue),
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[tf.keras.metrics.BinaryAccuracy()],
        )
        # training
        logger.info("hyper-parameters:")
        logger.info(dict(self.hparam._asdict()))
        logger.info("...training start!")
        best_val_accuracy = 0.
        total_time = 0.
        for epoch in range(self.hparam.n_epochs):
            # one epoch per member: restore the member's weights (and optimizer
            # state), train one epoch, stash the updated weights back
            for member_idx in range(self.n_members):
                if member_idx < len(self.weights_list):  # loading former weights
                    self.base_model.set_weights(self.weights_list[member_idx])
                    self.base_model.optimizer.set_weights(self._optimizers_dict[member_idx])
                elif member_idx == 0:
                    pass  # do nothing
                else:
                    # fresh random initialisation for a brand-new member
                    self.reinitialize_base_model()
                msg = 'Epoch {}/{}, member {}/{}, and {} member(s) in list'.format(epoch + 1,
                                                                                   self.hparam.n_epochs, member_idx + 1,
                                                                                   self.n_members,
                                                                                   len(self.weights_list))
                print(msg)
                start_time = time.time()
                self.base_model.fit(train_set,
                                    epochs=epoch + 1,
                                    initial_epoch=epoch,
                                    validation_data=validation_set
                                    )
                self.update_weights(member_idx,
                                    self.base_model.get_weights(),
                                    self.base_model.optimizer.get_weights())
                end_time = time.time()
                total_time += end_time - start_time
            # training weight modular
            msg = "train the weight modular at epoch {}/{}"
            # fix: report the epoch 1-based, consistent with the other messages
            print(msg.format(epoch + 1, self.hparam.n_epochs))
            start_time = time.time()
            history = self.fit_weight_modular(train_set, validation_set, epoch)
            end_time = time.time()
            total_time += end_time - start_time
            # saving
            logger.info('Training ensemble costs {} in total (including validation).'.format(total_time))
            train_acc = history.history['binary_accuracy'][0]
            val_acc = history.history['val_binary_accuracy'][0]
            msg = 'Epoch {}/{}: training accuracy {:.5f}, validation accuracy {:.5f}.'.format(
                epoch + 1, self.hparam.n_epochs, train_acc, val_acc
            )
            logger.info(msg)
            if (epoch + 1) % self.hparam.interval == 0:
                # checkpoint only when the validation accuracy improves
                if val_acc > best_val_accuracy:
                    self.save_ensemble_weights()
                    best_val_accuracy = val_acc
                    msg = '\t The best validation accuracy is {:.5f}, obtained at epoch {}/{}'.format(
                        best_val_accuracy, epoch + 1, self.hparam.n_epochs
                    )
                    logger.info(msg)
        return

    def fit_weight_modular(self, train_set, validation_set, epoch):
        """
        fit weight modular
        :param train_set: training set
        :param validation_set: validation set
        :param epoch: integer, training epoch
        :return: the Keras History object of the weight-modular fit
        """
        # obtain data: run every member over the set and stack their outputs as
        # the n_members-dimensional input of the weight modular
        def get_data(x_y_set):
            tsf_x = []
            tsf_y = []
            for _x, _y in x_y_set:
                _x_list = []
                for base_model in self.model_generator():
                    _x_pred = base_model(_x)
                    _x_list.append(_x_pred)
                tsf_x.append(np.hstack(_x_list))
                tsf_y.append(_y)
            return np.vstack(tsf_x), np.concatenate(tsf_y)
        transform_train_set = build_dataset_from_numerical_data(get_data(train_set))
        transform_val_set = build_dataset_from_numerical_data(get_data(validation_set))
        history = self.weight_modular.fit(transform_train_set,
                                          epochs=epoch + 1,
                                          initial_epoch=epoch,
                                          validation_data=transform_val_set
                                          )
        return history

    def save_ensemble_weights(self):
        """Persist base-model config, member weights, optimizer state and the weight modular."""
        if not path.exists(self.save_dir):
            utils.mkdir(self.save_dir)
        # save model configuration
        try:
            config = self.base_model.to_json()
            utils.dump_json(config, path.join(self.save_dir,
                                              self.architecture_type + '.json'))  # lightweight method for saving model configurature
        except Exception:
            # best effort: subclassed models cannot always be serialised to JSON;
            # the SavedModel export below always runs
            pass
        finally:
            if not path.exists(path.join(self.save_dir, self.architecture_type)):
                utils.mkdir(path.join(self.save_dir, self.architecture_type))
            self.base_model.save(path.join(self.save_dir, self.architecture_type))
            print("Save the model configuration to directory {}".format(self.save_dir))
        # save model weights
        utils.dump_joblib(self.weights_list, path.join(self.save_dir, self.architecture_type + '.model'))
        utils.dump_joblib(self._optimizers_dict, path.join(self.save_dir, self.architecture_type + '.model.metadata'))
        print("Save the model weights to directory {}".format(self.save_dir))
        # save weight modular
        self.weight_modular.save(path.join(self.save_dir, self.architecture_type + '_weight_modular'))
        print("Save the weight modular weights to directory {}".format(self.save_dir))
        return

    def load_ensemble_weights(self):
        """Restore base-model config, member weights, optimizer state and the weight modular.

        :raises FileNotFoundError: when the model config or weights are missing.
        """
        if path.exists(path.join(self.save_dir, self.architecture_type + '.json')):
            config = utils.load_json(path.join(self.save_dir, self.architecture_type + '.json'))
            self.base_model = tf.keras.models.model_from_json(config)
        elif path.exists(path.join(self.save_dir, self.architecture_type)):
            self.base_model = tf.keras.models.load_model(path.join(self.save_dir, self.architecture_type))
        else:
            # fix: the format string previously lacked the {} placeholder, so the path was never logged
            logger.error("File not found: {}".format(path.join(self.save_dir, self.architecture_type + '.json')))
            raise FileNotFoundError
        print("Load model config from {}.".format(self.save_dir))
        if path.exists(path.join(self.save_dir, self.architecture_type + '.model')):
            self.weights_list = utils.read_joblib(path.join(self.save_dir, self.architecture_type + '.model'))
        else:
            logger.error("File not found: {}".format(path.join(self.save_dir, self.architecture_type + '.model')))
            raise FileNotFoundError
        print("Load model weights from {}.".format(self.save_dir))
        if path.exists(path.join(self.save_dir, self.architecture_type + '.model.metadata')):
            self._optimizers_dict = utils.read_joblib(
                path.join(self.save_dir, self.architecture_type + '.model.metadata'))
        else:
            # optimizer state is optional: fall back to fresh optimizers per member
            self._optimizers_dict = [None] * len(self.weights_list)
        if path.exists(path.join(self.save_dir, self.architecture_type + '_weight_modular')):
            self.weight_modular = tf.keras.models.load_model(
                path.join(self.save_dir, self.architecture_type + '_weight_modular'))
        return
| 11,862 | 44.98062 | 133 | py |
malware-uncertainty | malware-uncertainty-master/core/ensemble/anchor_ensemble.py | import os.path as path
import time
import tensorflow as tf
import numpy as np
from core.ensemble.vanilla import Vanilla
from core.ensemble.model_hp import train_hparam, anchor_hparam
from core.ensemble.model_lib import model_builder
from tools import utils
from config import logging
logger = logging.getLogger('ensemble.vanilla')
class AnchorEnsemble(Vanilla):
    """Ensemble whose members are trained independently, each built with its own
    random seed drawn from the configured seed pool (anchored layers are produced
    by ``utils.produce_layer`` with a ``scale`` hyper-parameter -- presumably the
    anchored-ensembling/randomised-MAP scheme; confirm against ``produce_layer``)."""

    def __init__(self,
                 architecture_type='dnn',
                 base_model=None,
                 n_members=2,
                 model_directory=None,
                 name='ANCHOR'):
        """
        initialization
        :param architecture_type: the type of base model
        :param base_model: an object of base model
        :param n_members: number of base models
        :param model_directory: a folder for saving ensemble weights
        """
        super(AnchorEnsemble, self).__init__(architecture_type, base_model, n_members, model_directory)
        # anchor-specific hyper-parameters merged on top of the common training ones
        self.hparam = utils.merge_namedtuples(train_hparam, anchor_hparam)
        self.ensemble_type = 'anchor'
        self.name = name.lower()
        self.save_dir = path.join(self.model_directory, self.name)

    def build_model(self, input_dim=None):
        """
        Build an ensemble model -- only the homogeneous structure is considered
        :param input_dim: integer or list, input dimension shall be set in some cases under eager mode
        """
        callable_graph = model_builder(self.architecture_type)

        @callable_graph(input_dim)
        def _builder():
            # each member samples its own seed so that the anchor draws differ
            seed = np.random.choice(self.hparam.random_seed)
            return utils.produce_layer(self.ensemble_type,
                                       scale=self.hparam.scale,
                                       batch_size=self.hparam.batch_size,
                                       seed=seed)
        self.base_model = _builder()

    def model_generator(self):
        """Yield each member model, reloading its SavedModel from disk in turn."""
        try:
            for m in range(self.n_members):
                self.base_model = None
                self.load_ensemble_weights(m)
                yield self.base_model
        except Exception as e:
            raise Exception("Cannot load model:{}.".format(str(e)))

    def fit(self, train_set, validation_set=None, input_dim=None, **kwargs):
        """
        fit the ensemble by producing a lists of model weights
        :param train_set: tf.data.Dataset, the type shall accommodate to the input format of Tensorflow models
        :param validation_set: validation data, optional
        :param input_dim: integer or list, input dimension except for the batch size
        """
        # training
        logger.info("hyper-parameters:")
        logger.info(dict(self.hparam._asdict()))
        logger.info("...training start!")
        # NOTE(review): random_seed is passed both to np.random.seed here and to
        # np.random.choice in build_model, so it is presumably a sequence -- confirm
        np.random.seed(self.hparam.random_seed)
        train_set = train_set.shuffle(buffer_size=100, reshuffle_each_iteration=True)
        # members are trained fully one after another (unlike Vanilla's interleaving)
        for member_idx in range(self.n_members):
            self.base_model = None
            self.build_model(input_dim=input_dim)
            self.base_model.compile(
                optimizer=tf.keras.optimizers.Adam(learning_rate=self.hparam.learning_rate),
                loss=tf.keras.losses.BinaryCrossentropy(),
                metrics=[tf.keras.metrics.BinaryAccuracy()],
            )
            for epoch in range(self.hparam.n_epochs):
                total_time = 0.
                msg = 'Epoch {}/{}, and member {}/{}'.format(epoch + 1,
                                                             self.hparam.n_epochs, member_idx + 1,
                                                             self.n_members)
                print(msg)
                start_time = time.time()
                self.base_model.fit(train_set,
                                    epochs=epoch + 1,
                                    initial_epoch=epoch,
                                    validation_data=validation_set
                                    )
                end_time = time.time()
                total_time += end_time - start_time
                # saving
                logger.info('Training ensemble costs {} seconds at this epoch'.format(total_time))
                if (epoch + 1) % self.hparam.interval == 0:
                    self.save_ensemble_weights(member_idx)

    def save_ensemble_weights(self, member_idx=0):
        # each member is persisted as its own SavedModel directory, suffixed by index
        if not path.exists(path.join(self.save_dir, self.architecture_type + '_{}'.format(member_idx))):
            utils.mkdir(path.join(self.save_dir, self.architecture_type + '_{}'.format(member_idx)))
        # save model configuration
        self.base_model.save(path.join(self.save_dir, self.architecture_type + '_{}'.format(member_idx)))
        print("Save the model to directory {}".format(self.save_dir))

    def load_ensemble_weights(self, member_idx=0):
        # silently leaves self.base_model unchanged when the member's directory is absent
        if path.exists(path.join(self.save_dir, self.architecture_type + '_{}'.format(member_idx))):
            self.base_model = tf.keras.models.load_model(
                path.join(self.save_dir, self.architecture_type + '_{}'.format(member_idx)))

    def get_n_members(self):
        # members live in separate SavedModels, so the configured count is authoritative
        return self.n_members

    def update_weights(self, member_idx, model_weights, optimizer_weights=None):
        # not applicable: members are trained and saved independently, never stashed in-memory
        raise NotImplementedError
| 5,238 | 42.658333 | 110 | py |
malware-uncertainty | malware-uncertainty-master/core/ensemble/vanilla.py | import os.path as path
import time
import tensorflow as tf
import numpy as np
from core.ensemble.ensemble import Ensemble
from core.ensemble.model_hp import train_hparam, finetuning_hparam
from core.ensemble.model_lib import model_builder
from tools import utils
from config import logging, ErrorHandler
logger = logging.getLogger('ensemble.vanilla')
logger.addHandler(ErrorHandler)
class Vanilla(Ensemble):
    """ vanilla model, i.e., the so-called ensemble just has a single model

    Serves as the base class for the other ensembles: members share one Keras
    model object whose weights are swapped in/out of ``self.weights_list``.

    Fixes over the previous revision:
    - ``load_ensemble_weights`` logged "File not found: " without the ``{}``
      placeholder (the offending path was dropped), and named a ``.json`` file
      that is not what was actually checked.
    - ``gradient_loss_wrt_input`` unconditionally overwrote its ``y`` argument;
      caller-supplied labels are now honoured, defaulting to all-ones otherwise.
    """

    def __init__(self, architecture_type='dnn', base_model=None, n_members=1, model_directory=None, name='VANILLA'):
        """
        initialization
        :param architecture_type: the type of base model
        :param base_model: an object of base model
        :param n_members: number of base models
        :param model_directory: a folder for saving ensemble weights
        :param name: model name, lower-cased into the save sub-directory
        """
        super(Vanilla, self).__init__(architecture_type, base_model, n_members, model_directory)
        self.hparam = train_hparam
        self.ensemble_type = 'vanilla'
        self.name = name.lower()
        self.save_dir = path.join(self.model_directory, self.name)

    def build_model(self, input_dim=None):
        """
        Build an ensemble model -- only the homogeneous structure is considered
        :param input_dim: integer or list, input dimension shall be set in some cases under eager mode
        """
        callable_graph = model_builder(self.architecture_type)

        @callable_graph(input_dim)
        def _builder():
            return utils.produce_layer(self.ensemble_type)
        self.base_model = _builder()
        return

    def predict(self, x, use_prob=False):
        """
        conduct prediction
        :param x: tf.data.Dataset, numpy array or list of model inputs
        :param use_prob: when True return the member-averaged probability;
            otherwise the per-member outputs stacked along axis 1
        """
        # force a clean reload of all member weights from disk
        self.base_model = None
        self.weights_list = []
        self._optimizers_dict = []
        self.load_ensemble_weights()
        output_list = []
        start_time = time.time()
        for base_model in self.model_generator():
            if isinstance(x, tf.data.Dataset):
                output_list.append(base_model.predict(x, verbose=1))
            elif isinstance(x, (np.ndarray, list)):
                output_list.append(base_model.predict(x, verbose=1, batch_size=self.hparam.batch_size))
            else:
                raise ValueError
        total_time = time.time() - start_time
        logger.info('Inference costs {} seconds.'.format(total_time))
        if not use_prob:
            return np.stack(output_list, axis=1)
        else:
            return np.mean(np.stack(output_list, axis=1), axis=1)

    def evaluate(self, x, gt_labels, threshold=0.5, name='test'):
        """
        get some statistical values
        :param x: tf.data.Dataset object
        :param gt_labels: ground truth labels
        :param threshold: float value between 0 and 1, to decide the predicted label
        :param name: dataset name used in the log messages
        :return: None
        """
        x_prob = self.predict(x, use_prob=True)
        x_pred = (x_prob >= threshold).astype(np.int32)
        # metrics
        from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score
        accuracy = accuracy_score(gt_labels, x_pred)
        b_accuracy = balanced_accuracy_score(gt_labels, x_pred)
        MSG = "The accuracy on the {} dataset is {:.5f}%"
        logger.info(MSG.format(name, accuracy * 100))
        MSG = "The balanced accuracy on the {} dataset is {:.5f}%"
        logger.info(MSG.format(name, b_accuracy * 100))
        # FNR/FPR/F1 are undefined when only one class is present
        is_single_class = False
        if np.all(gt_labels == 1.) or np.all(gt_labels == 0.):
            is_single_class = True
        if not is_single_class:
            tn, fp, fn, tp = confusion_matrix(gt_labels, x_pred).ravel()
            fpr = fp / float(tn + fp)
            fnr = fn / float(tp + fn)
            f1 = f1_score(gt_labels, x_pred, average='binary')
            print("Other evaluation metrics we may need:")
            MSG = "False Negative Rate (FNR) is {:.5f}%, False Positive Rate (FPR) is {:.5f}%, F1 score is {:.5f}%"
            logger.info(MSG.format(fnr * 100, fpr * 100, f1 * 100))
        return

    def model_generator(self):
        """Yield the shared base model once per member, with that member's weights loaded."""
        try:
            if len(self.weights_list) <= 0:
                self.load_ensemble_weights()
        except Exception as e:
            raise Exception("Cannot load model weights:{}.".format(str(e)))
        for i, weights in enumerate(self.weights_list):
            self.base_model.set_weights(weights=weights)
            # if i in self._optimizers_dict and self.base_model.optimizer is not None:
            #     self.base_model.optimizer.set_weights(self._optimizers_dict[i])
            yield self.base_model

    def fit(self, train_set, validation_set=None, input_dim=None, **kwargs):
        """
        fit the ensemble by producing a lists of model weights
        :param train_set: tf.data.Dataset, the type shall accommodate to the input format of Tensorflow models
        :param validation_set: validation data, optional
        :param input_dim: integer or list, input dimension except for the batch size
        """
        # training preparation
        if self.base_model is None:
            self.build_model(input_dim=input_dim)
        self.base_model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.hparam.learning_rate,
                                               clipvalue=self.hparam.clipvalue),
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[tf.keras.metrics.BinaryAccuracy()],
        )
        # training
        logger.info("hyper-parameters:")
        logger.info(dict(self.hparam._asdict()))
        logger.info("The number of trainable variables: {}".format(len(self.base_model.trainable_variables)))
        logger.info("...training start!")
        best_val_accuracy = 0.
        total_time = 0.
        for epoch in range(self.hparam.n_epochs):
            train_acc = 0.
            val_acc = 0.
            # one epoch per member: restore, train, stash weights + optimizer state
            for member_idx in range(self.n_members):
                if member_idx < len(self.weights_list):  # loading former weights
                    self.base_model.set_weights(self.weights_list[member_idx])
                    self.base_model.optimizer.set_weights(self._optimizers_dict[member_idx])
                elif member_idx == 0:
                    pass  # do nothing
                else:
                    # fresh random initialisation for a brand-new member
                    self.reinitialize_base_model()
                msg = 'Epoch {}/{}, member {}/{}, and {} member(s) in list'.format(epoch + 1,
                                                                                   self.hparam.n_epochs, member_idx + 1,
                                                                                   self.n_members,
                                                                                   len(self.weights_list))
                print(msg)
                start_time = time.time()
                history = self.base_model.fit(train_set,
                                              epochs=epoch + 1,
                                              initial_epoch=epoch,
                                              validation_data=validation_set
                                              )
                train_acc += history.history['binary_accuracy'][0]
                val_acc += history.history['val_binary_accuracy'][0]
                self.update_weights(member_idx,
                                    self.base_model.get_weights(),
                                    self.base_model.optimizer.get_weights())
                end_time = time.time()
                total_time += end_time - start_time
            # saving
            logger.info('Training ensemble costs {} seconds in total (including validation).'.format(total_time))
            train_acc = train_acc / self.n_members
            val_acc = val_acc / self.n_members
            msg = 'Epoch {}/{}: training accuracy {:.5f}, validation accuracy {:.5f}.'.format(
                epoch + 1, self.hparam.n_epochs, train_acc, val_acc
            )
            logger.info(msg)
            if (epoch + 1) % self.hparam.interval == 0:
                # checkpoint only when the mean validation accuracy improves
                if val_acc > best_val_accuracy:
                    self.save_ensemble_weights()
                    best_val_accuracy = val_acc
                    msg = '\t The best validation accuracy is {:.5f}, obtained at epoch {}/{}'.format(
                        best_val_accuracy, epoch + 1, self.hparam.n_epochs
                    )
                    logger.info(msg)
        return

    def fit_finetuning(self, train_set, validation_set=None, input_dim=None, **kwargs):
        """
        just for experiments of r2d2 model
        :param train_set: tf.data.Dataset, the type shall accommodate to the input format of Tensorflow models
        :param validation_set: validation data, optional
        :param input_dim: integer or list, input dimension except for the batch size
        """
        # training preparation
        if self.base_model is None:
            self.build_model(input_dim=input_dim)
        # training
        logger.info("hyper-parameters:")
        logger.info(dict(finetuning_hparam._asdict()))
        logger.info("...training start!")

        def train(n_epochs, learning_rate, ft=False):
            # shared inner loop: initial training (Adam) vs. fine-tuning (RMSprop,
            # resuming from the previously saved ensemble weights)
            logger.info("The number of trainable variables: {}".format(len(self.base_model.trainable_variables)))
            optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, clipvalue=self.hparam.clipvalue)
            if ft:
                optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate, clipvalue=self.hparam.clipvalue)
            self.base_model.compile(
                optimizer=optimizer,
                loss=tf.keras.losses.BinaryCrossentropy(),
                metrics=[tf.keras.metrics.BinaryAccuracy()],
            )
            if ft:
                self.load_ensemble_weights()
            best_val_accuracy = 0.
            total_time = 0.
            for epoch in range(n_epochs):
                train_acc = 0.
                val_acc = 0.
                for member_idx in range(self.n_members):
                    if member_idx < len(self.weights_list):  # loading former weights
                        self.base_model.set_weights(self.weights_list[member_idx])
                        if epoch > 0:
                            # the freshly compiled optimizer has no slots before the first step
                            self.base_model.optimizer.set_weights(self._optimizers_dict[member_idx])
                    elif member_idx == 0:
                        pass  # do nothing
                    else:
                        self.reinitialize_base_model()
                    msg = 'Epoch {}/{}, member {}/{}, and {} member(s) in list'.format(epoch + 1,
                                                                                       n_epochs, member_idx + 1,
                                                                                       self.n_members,
                                                                                       len(self.weights_list))
                    print(msg)
                    start_time = time.time()
                    history = self.base_model.fit(train_set,
                                                  epochs=epoch + 1,
                                                  initial_epoch=epoch,
                                                  validation_data=validation_set
                                                  )
                    train_acc += history.history['binary_accuracy'][0]
                    val_acc += history.history['val_binary_accuracy'][0]
                    self.update_weights(member_idx,
                                        self.base_model.get_weights(),
                                        self.base_model.optimizer.get_weights())
                    end_time = time.time()
                    total_time += end_time - start_time
                # saving
                logger.info('Training ensemble costs {} in total (including validation).'.format(total_time))
                train_acc = train_acc / self.n_members
                val_acc = val_acc / self.n_members
                msg = 'Epoch {}/{}: training accuracy {:.5f}, validation accuracy {:.5f}.'.format(
                    epoch + 1, self.hparam.n_epochs, train_acc, val_acc
                )
                logger.info(msg)
                if (epoch + 1) % self.hparam.interval == 0:
                    if val_acc > best_val_accuracy:
                        self.save_ensemble_weights()
                        best_val_accuracy = val_acc
                        msg = '\t The best validation accuracy is {:.5f}, obtained at epoch {}/{}'.format(
                            best_val_accuracy, epoch + 1, self.hparam.n_epochs
                        )
                        logger.info(msg)
        train(finetuning_hparam.n_epochs, finetuning_hparam.learning_rate)
        logger.info("...training finished!")
        # finetuning: unfreeze the top layers and continue with a smaller LR
        for layer in self.base_model.layers[-finetuning_hparam.unfreezed_layers:]:
            layer.trainable = True
        logger.info("...fine-tuning start!")
        train(finetuning_hparam.n_epochs_ft, finetuning_hparam.learning_rate_ft, True)
        logger.info("...fine-tuning finished!")
        return

    def update_weights(self, member_idx, model_weights, optimizer_weights=None):
        """Store (or overwrite) a member's model weights and optimizer state."""
        if member_idx < len(self.weights_list):
            self.weights_list[member_idx] = model_weights
            self._optimizers_dict[member_idx] = optimizer_weights
        else:
            # append the weights at the rear of list
            # NOTE(review): the subsequent indexed assignment assumes _optimizers_dict
            # grows with weights_list (dict-like semantics from the base class?) --
            # with a plain list this would raise IndexError; verify Ensemble.__init__
            assert len(self.weights_list) == len(self._optimizers_dict)
            self.weights_list.append(model_weights)
            set_idx = len(self.weights_list) - 1
            self._optimizers_dict[set_idx] = optimizer_weights
        return

    def save_ensemble_weights(self):
        """Persist the base model (SavedModel), member weights and optimizer state."""
        if not path.exists(path.join(self.save_dir, self.architecture_type)):
            utils.mkdir(path.join(self.save_dir, self.architecture_type))
        self.base_model.save(path.join(self.save_dir, self.architecture_type))
        print("Save the model configuration to directory {}".format(self.save_dir))
        # save model weights
        utils.dump_joblib(self.weights_list, path.join(self.save_dir, self.architecture_type + '.model'))
        utils.dump_joblib(self._optimizers_dict, path.join(self.save_dir, self.architecture_type + '.model.metadata'))
        print("Save the model weights to directory {}".format(self.save_dir))
        return

    def load_ensemble_weights(self):
        """Restore the base model, member weights and optional optimizer state.

        :raises FileNotFoundError: when the SavedModel directory or the weights file is missing.
        """
        if path.exists(path.join(self.save_dir, self.architecture_type)):
            self.base_model = tf.keras.models.load_model(path.join(self.save_dir, self.architecture_type))
        else:
            # fix: add the missing {} placeholder and log the path actually checked
            logger.error("File not found: {}".format(path.join(self.save_dir, self.architecture_type)))
            raise FileNotFoundError
        print("Load model config from {}.".format(self.save_dir))
        if path.exists(path.join(self.save_dir, self.architecture_type + '.model')):
            self.weights_list = utils.read_joblib(path.join(self.save_dir, self.architecture_type + '.model'))
        else:
            logger.error("File not found: {}".format(path.join(self.save_dir, self.architecture_type + '.model')))
            raise FileNotFoundError
        print("Load model weights from {}.".format(self.save_dir))
        if path.exists(path.join(self.save_dir, self.architecture_type + '.model.metadata')):
            self._optimizers_dict = utils.read_joblib(
                path.join(self.save_dir, self.architecture_type + '.model.metadata'))
        else:
            # optimizer state is optional: fall back to fresh optimizers per member
            self._optimizers_dict = [None] * len(self.weights_list)
        return

    def get_n_members(self):
        """Number of members actually held in memory (may differ from self.n_members before training)."""
        return len(self.weights_list)

    def reinitialize_base_model(self):
        """Re-draw fresh random values for every trainable weight of the shared base model."""
        new_weights = []
        for w in self.base_model.weights:
            if w.trainable:
                if '/kernel' in w.name:  # default name
                    new_w = utils.glorot_uniform(w.numpy().shape)
                elif '/recurrent_kernel' in w.name:
                    # recurrent kernels are conventionally initialised orthogonally
                    initializer = tf.keras.initializers.Orthogonal()
                    new_w = initializer(w.numpy().shape).numpy()
                elif '/bias' in w.name:
                    # NOTE(review): glorot on biases mirrors the fallback branch;
                    # zeros would be the conventional choice -- kept for behaviour parity
                    new_w = utils.glorot_uniform(w.numpy().shape)
                else:
                    new_w = utils.glorot_uniform(w.numpy().shape)
            else:
                new_w = w.numpy()
            new_weights.append(new_w)
        self.base_model.set_weights(new_weights)
        return

    def gradient_loss_wrt_input(self, x, y=None):
        """
        Gradient of the binary cross-entropy loss w.r.t. the input, summed over members.
        :param x: input tensor; watched by the gradient tape
        :param y: ground-truth labels; defaults to all-ones when omitted
        :return: accumulated gradient tensor
        """
        if self.base_model is None:
            raise ValueError("A learned model is expected. Please try load_ensemble_weights() first")
        # fix: only default y when the caller did not supply labels (previously the
        # argument was unconditionally overwritten with ones)
        if y is None:
            y = np.ones(shape=x.shape[0], dtype=np.int64)
        binary_ce = tf.losses.binary_crossentropy
        grad = 0.
        for model_fn in self.model_generator():
            with tf.GradientTape() as g:
                g.watch(x)
                loss = binary_ce(y, model_fn(x))
            grad += g.gradient(loss, x)
        return grad
| 17,864 | 46.387268 | 135 | py |
malware-uncertainty | malware-uncertainty-master/core/ensemble/bayesian_ensemble.py | import time
import tensorflow as tf
from core.ensemble.vanilla import model_builder
from core.ensemble.mc_dropout import MCDropout
from core.ensemble.model_hp import train_hparam, bayesian_ensemble_hparam
from config import logging, ErrorHandler
from tools import utils
logger = logging.getLogger('ensemble.bayesian_ensemble')
logger.addHandler(ErrorHandler)
class BayesianEnsemble(MCDropout):
    """Bayesian (variational) ensemble for malware detection.

    Each member is a Bayesian neural network whose layers come from
    ``utils.produce_layer('bayesian', ...)``; all members share one base model
    whose weights (and optimizer state) are swapped in and out between updates.
    """

    def __init__(self,
                 architecture_type='dnn',
                 base_model=None,
                 n_members=1,
                 model_directory=None,
                 name='BAYESIAN_ENSEMBLE'
                 ):
        # Reuse the MCDropout machinery (weight lists, member generator, saving).
        super(BayesianEnsemble, self).__init__(architecture_type,
                                               base_model,
                                               n_members,
                                               model_directory,
                                               name)
        # Combine generic training hyper-parameters with ensemble-specific ones.
        self.hparam = utils.merge_namedtuples(train_hparam, bayesian_ensemble_hparam)
        self.ensemble_type = 'bayesian'

    def build_model(self, input_dim=None, scaler=1. / 10000):
        """
        Build an ensemble model -- only the homogeneous structure is considered
        :param input_dim: integer or list, input dimension shall be set in some cases under eager mode
        :param scaler: float value in the range of [0, 1], weight of the KL divergence term
        """
        callable_graph = model_builder(self.architecture_type)

        @callable_graph(input_dim)
        def _builder():
            # Bayesian layers carry a KL regularizer scaled by ``scaler``.
            return utils.produce_layer(self.ensemble_type, kl_scaler=scaler)

        self.base_model = _builder()
        return

    def fit(self, train_set, validation_set=None, input_dim=None, **kwargs):
        """
        fit the ensemble by producing a lists of model weights
        :param train_set: tf.data.Dataset, the type shall accommodate to the input format of Tensorflow models
        :param validation_set: validation data, optional
        :param input_dim: integer or list, input dimension except for the batch size
        """
        # training preparation
        if self.base_model is None:
            # scaler = 1. / (len(list(train_set)) * self.hparam.batch_size) # time-consuming
            # NOTE(review): KL weight hard-coded to 1/50000, i.e. an assumed
            # training-set size of ~50k samples -- confirm for other datasets.
            scaler = 1. / 50000.
            self.build_model(input_dim=input_dim, scaler=scaler)
        self.base_model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.hparam.learning_rate,
                                               clipvalue=self.hparam.clipvalue),
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[tf.keras.metrics.BinaryAccuracy()],
            experimental_run_tf_function=False
        )

        # training
        logger.info("hyper-parameters:")
        logger.info(dict(self.hparam._asdict()))
        logger.info("...training start!")
        best_val_accuracy = 0.
        total_time = 0.
        for epoch in range(self.hparam.n_epochs):
            train_acc = 0.
            val_acc = 0.
            # Train every member for one epoch, swapping its weights (and
            # optimizer state) in and out of the single shared base model.
            for member_idx in range(self.n_members):
                if member_idx < len(self.weights_list):  # loading former weights
                    self.base_model.set_weights(self.weights_list[member_idx])
                    self.base_model.optimizer.set_weights(self._optimizers_dict[member_idx])
                elif member_idx == 0:
                    pass  # do nothing
                else:
                    # Fresh members start from re-initialized weights.
                    self.reinitialize_base_model()
                msg = 'Epoch {}/{}, member {}/{}, and {} member(s) in list'.format(epoch + 1,
                                                                                  self.hparam.n_epochs,
                                                                                  member_idx + 1,
                                                                                  self.n_members,
                                                                                  len(self.weights_list))
                print(msg)
                start_time = time.time()
                # Run exactly one epoch (initial_epoch -> epochs) for this member.
                history = self.base_model.fit(train_set,
                                              epochs=epoch + 1,
                                              initial_epoch=epoch,
                                              validation_data=validation_set
                                              )
                train_acc += history.history['binary_accuracy'][0]
                val_acc += history.history['val_binary_accuracy'][0]
                self.update_weights(member_idx,
                                    self.base_model.get_weights(),
                                    self.base_model.optimizer.get_weights())
                end_time = time.time()
                total_time += end_time - start_time

            # saving
            logger.info('Training ensemble costs {} in total (including validation).'.format(total_time))
            # Report member-averaged accuracies for this epoch.
            train_acc = train_acc / self.n_members
            val_acc = val_acc / self.n_members
            msg = 'Epoch {}/{}: training accuracy {:.5f}, validation accuracy {:.5f}.'.format(
                epoch + 1, self.hparam.n_epochs, train_acc, val_acc
            )
            logger.info(msg)
            if (epoch + 1) % self.hparam.interval == 0:
                # Checkpoint only when validation accuracy does not degrade.
                if val_acc >= best_val_accuracy:
                    self.save_ensemble_weights()
                    best_val_accuracy = val_acc
                    msg = '\t The best validation accuracy is {:.5f}, obtained at epoch {}/{}'.format(
                        best_val_accuracy, epoch + 1, self.hparam.n_epochs
                    )
                    logger.info(msg)
        return
| 5,667 | 44.709677 | 110 | py |
malware-uncertainty | malware-uncertainty-master/core/ensemble/model_lib.py | """ This script is for building model graph"""
import tensorflow as tf
from tools import utils
from config import logging, ErrorHandler
logger = logging.getLogger('core.ensemble.model_lib')
logger.addHandler(ErrorHandler)
def model_builder(architecture_type='dnn'):
    """Return the graph-builder factory registered under ``architecture_type``.

    Raises AssertionError for unknown names, listing the valid choices.
    """
    supported = ','.join(model_name_type_dict.keys())
    assert architecture_type in model_name_type_dict, 'models are {}'.format(supported)
    return model_name_type_dict[architecture_type]
def _change_scaler_to_list(scaler):
if not isinstance(scaler, (list, tuple)):
return [scaler]
else:
return scaler
def _dnn_graph(input_dim=None, use_mc_dropout=False):
    """
    The deep neural network based malware detector.
    The implement is based on the paper, entitled ``Adversarial Examples for Malware Detection'',
    which can be found here: http://patrickmcdaniel.org/pubs/esorics17.pdf

    We slightly change the model architecture by reducing the number of neurons at the last layer to one.

    :param input_dim: scalar or one-element list, length of the feature vector
    :param use_mc_dropout: if True, a plain deterministic output head is used and the
        explicit Dropout layer is skipped (the stochastic Dense layers are assumed to
        come from the decorated layer factory -- TODO confirm)
    :return: a decorator turning a layer factory into a graph-building function
    """
    input_dim = _change_scaler_to_list(input_dim)
    from core.ensemble.model_hp import dnn_hparam
    logger.info(dict(dnn_hparam._asdict()))

    def wrapper(func):
        def graph():
            # ``func`` supplies the layer classes; only Dense is used here.
            Dense, _1, _2, _3 = func()
            model = tf.keras.Sequential()
            model.add(tf.keras.layers.InputLayer(input_shape=(input_dim[0],)))
            for units in dnn_hparam.hidden_units:
                model.add(Dense(units, activation=dnn_hparam.activation))
            if use_mc_dropout:
                model.add(tf.keras.layers.Dense(dnn_hparam.output_dim, activation=tf.nn.sigmoid))
            else:
                model.add(tf.keras.layers.Dropout(dnn_hparam.dropout_rate))
                model.add(Dense(dnn_hparam.output_dim, activation=tf.nn.sigmoid))
            return model
        return graph
    return wrapper
def _text_cnn_graph(input_dim=None, use_mc_dropout=False):
    """
    deep android malware detection
    The implement is based on the paper, entitled ``Deep Android Malware Detection'',
    which can be found here: https://dl.acm.org/doi/10.1145/3029806.3029823

    :param input_dim: input dimension(s); a dynamical input shape is permitted
    :param use_mc_dropout: if True, use a plain deterministic output head and skip
        the post-dense Dropout layers
    :return: a decorator turning a layer factory into a graph-building function
    """
    input_dim = _change_scaler_to_list(input_dim)  # dynamical input shape is permitted
    from core.ensemble.model_hp import text_cnn_hparam
    logger.info(dict(text_cnn_hparam._asdict()))

    def wrapper(func):
        def graph():
            # ``func`` supplies the layer classes; Dense and Conv2D are used here.
            Dense, Conv2D, _1, _2 = func()

            class TextCNN(tf.keras.models.Model):
                def __init__(self):
                    super(TextCNN, self).__init__()
                    self.embedding = tf.keras.layers.Embedding(text_cnn_hparam.vocab_size,
                                                               text_cnn_hparam.n_embedding_dim)
                    self.spatial_dropout = tf.keras.layers.SpatialDropout2D(rate=text_cnn_hparam.dropout_rate)
                    self.conv = Conv2D(text_cnn_hparam.n_conv_filters, text_cnn_hparam.kernel_size,
                                       activation=text_cnn_hparam.activation)
                    self.conv_dropout = tf.keras.layers.Dropout(rate=text_cnn_hparam.dropout_rate)
                    self.pooling = tf.keras.layers.GlobalMaxPool2D()  # produce a fixed length vector
                    self.denses = [Dense(neurons, activation='relu') for neurons in text_cnn_hparam.hidden_units]
                    self.dropout = tf.keras.layers.Dropout(text_cnn_hparam.dropout_rate)
                    if use_mc_dropout:
                        self.d_out = tf.keras.layers.Dense(text_cnn_hparam.output_dim, activation=tf.nn.sigmoid)
                    else:
                        self.d_out = Dense(text_cnn_hparam.output_dim, activation=tf.nn.sigmoid)

                def call(self, x, training=False):
                    embed_code = self.embedding(x)
                    # batch_size, seq_length, embedding_dim, 1. Note: seq_length >= conv_kernel_size
                    embed_code = tf.expand_dims(embed_code, axis=-1)
                    if text_cnn_hparam.use_spatial_dropout:
                        embed_code = self.spatial_dropout(embed_code, training=training)
                    conv_x = self.conv(embed_code)
                    if text_cnn_hparam.use_conv_dropout:
                        conv_x = self.conv_dropout(conv_x)
                    # global max pooling yields a fixed-length representation
                    flatten_x = self.pooling(conv_x)
                    for i, dense in enumerate(self.denses):
                        flatten_x = dense(flatten_x)
                        if not use_mc_dropout:
                            flatten_x = self.dropout(flatten_x, training=training)
                    return self.d_out(flatten_x)
            return TextCNN()
        return graph
    return wrapper
def _multimodalitynn(input_dim=None, use_mc_dropout=False):
    """
    A Multimodal Deep Learning Method for Android Malware Detection Using Various Features
    The implement is based on our understanding of the paper, entitled
    ``A Multimodal Deep Learning Method for Android Malware Detection Using Various Features'':
    @ARTICLE{8443370,
    author={T. {Kim} and B. {Kang} and M. {Rho} and S. {Sezer} and E. G. {Im}},
    journal={IEEE Transactions on Information Forensics and Security},
    title={A Multimodal Deep Learning Method for Android Malware Detection Using Various Features},
    year={2019},
    volume={14},
    number={3},
    pages={773-788},}

    :param input_dim: list of input dimensions, one per modality
    :param use_mc_dropout: if True, use a plain deterministic output head and skip
        the pre-output Dropout layer
    :return: a decorator turning a layer factory into a graph-building function
    """
    input_dim = _change_scaler_to_list(input_dim)
    assert isinstance(input_dim, (list, tuple)), 'a list of input dimensions are mandatory.'
    from core.ensemble.model_hp import multimodalitynn_hparam
    assert len(input_dim) == multimodalitynn_hparam.n_modalities, 'Expected input number {}, but got {}'.format(
        multimodalitynn_hparam.n_modalities, len(input_dim))
    logger.info(dict(multimodalitynn_hparam._asdict()))

    def wrapper(func):
        def graph():
            input_layers = []
            Dense, _1, _2, _3 = func()
            # one Keras input head per modality
            for idx, header in enumerate(range(multimodalitynn_hparam.n_modalities)):
                input_layers.append(
                    tf.keras.Input(input_dim[idx], name='HEADER_{}'.format(idx + 1))
                )
            # per-modality sub-networks
            x_initial_out = []
            for x in input_layers:
                for units in multimodalitynn_hparam.initial_hidden_units:
                    x = Dense(units, activation=multimodalitynn_hparam.activation)(x)
                x_initial_out.append(x)
            # fuse the modalities, then apply the shared trunk
            x_out = tf.keras.layers.concatenate(x_initial_out)
            for units in multimodalitynn_hparam.hidden_units:
                x_out = Dense(units, activation=multimodalitynn_hparam.activation)(x_out)
            if use_mc_dropout:
                out = tf.keras.layers.Dense(multimodalitynn_hparam.output_dim, activation=tf.nn.sigmoid)(x_out)
            else:
                out = tf.keras.layers.Dropout(rate=multimodalitynn_hparam.dropout_rate)(x_out)
                out = Dense(multimodalitynn_hparam.output_dim, activation=tf.nn.sigmoid)(out)
            return tf.keras.Model(inputs=input_layers, outputs=out)
        return graph
    return wrapper
def _r2d2(input_dim=None, use_mc_dropout=False):
    """
    R2-D2: ColoR-inspired Convolutional NeuRal Network (CNN)-based AndroiD Malware Detections
    The implement is based on our understanding of the paper, entitled
    ``R2-D2: ColoR-inspired Convolutional NeuRal Network (CNN)-based AndroiD Malware Detections'':
    @INPROCEEDINGS{8622324,
    author={T. H. {Huang} and H. {Kao}},
    booktitle={2018 IEEE International Conference on Big Data (Big Data)},
    title={R2-D2: ColoR-inspired Convolutional NeuRal Network (CNN)-based AndroiD Malware Detections},
    year={2018},
    volume={},
    number={},
    pages={2633-2642},}

    :param input_dim: image input shape for MobileNetV2 -- presumably (H, W, 3); TODO confirm
    :param use_mc_dropout: if True, use the factory's final Dense head without an
        explicit Dropout layer
    :return: a decorator turning a layer factory into a graph-building function
    """
    input_dim = _change_scaler_to_list(input_dim)
    from core.ensemble.model_hp import r2d2_hparam
    logger.info(dict(r2d2_hparam._asdict()))

    def wrapper(func):
        def graph():
            Dense, _1, _2, last_Dense = func()
            # transfer learning from an ImageNet-pretrained MobileNetV2 backbone
            base_model = tf.keras.applications.MobileNetV2(input_shape=input_dim,
                                                           include_top=False,
                                                           weights='imagenet')
            base_model.trainable = False
            # fine-tune only the last ``unfreezed_layers`` layers of the backbone
            for layer in base_model.layers[-r2d2_hparam.unfreezed_layers:]:
                layer.trainable = True
            x_new = base_model.layers[-1].output
            x_new = tf.keras.layers.GlobalAveragePooling2D()(x_new)
            if use_mc_dropout:
                # x_new = tf.nn.dropout(x_new, rate=mc_droput_rate)
                # out = tf.keras.layers.Dense(r2d2_hparam.output_dim, activation=tf.nn.sigmoid)(x_new)
                out = last_Dense(r2d2_hparam.output_dim, activation=tf.nn.sigmoid)(x_new)
            else:
                x_new = tf.keras.layers.Dropout(r2d2_hparam.dropout_rate)(x_new)
                out = Dense(r2d2_hparam.output_dim, activation=tf.nn.sigmoid)(x_new)
            return tf.keras.Model(inputs=base_model.input, outputs=out)
        return graph
    return wrapper
def _droidectc_graph(input_dim=None, use_mc_dropout=False):
    """
    DROIDETEC: Android Malware Detection and Malicious Code Localization through Deep Learning
    The implement is based on our understanding of the paper, entitled
    ``DROIDETEC: Android Malware Detection and Malicious Code Localization through Deep Learning'':
    @article{ma2020droidetec,
      title={Droidetec: Android malware detection and malicious code localization through deep learning},
      author={Ma, Zhuo and Ge, Haoran and Wang, Zhuzhu and Liu, Yang and Liu, Ximeng},
      journal={arXiv preprint arXiv:2002.03594},
      year={2020}
    }

    :param input_dim: input dimension(s); a dynamic input shape is permitted
    :param use_mc_dropout: if True, use the factory's final Dense head
    :return: a decorator turning a layer factory into a graph-building function
    """
    input_dim = _change_scaler_to_list(input_dim)  # dynamic input shape is permitted
    from core.ensemble.model_hp import droidetec_hparam
    logger.info(dict(droidetec_hparam._asdict()))

    def wrapper(func):
        def graph():
            Dense, _1, LSTM, last_Dense = func()

            class BiLSTMAttention(tf.keras.models.Model):
                def __init__(self):
                    super(BiLSTMAttention, self).__init__()
                    self.embedding = tf.keras.layers.Embedding(droidetec_hparam.vocab_size,
                                                               droidetec_hparam.n_embedding_dim)
                    # summed bidirectional LSTM over the token sequence
                    self.bi_lstm = tf.keras.layers.Bidirectional(LSTM(droidetec_hparam.lstm_units,
                                                                      return_sequences=True),
                                                                 merge_mode='sum'
                                                                 )
                    self.dense_layer = tf.keras.layers.Dense(droidetec_hparam.lstm_units, use_bias=False)
                    # for units in droidetec_hparam.hidden_units:
                    #     self.dense_layers.append(Dense(droidetec_hparam.hidden_units, use_bias=False))
                    if use_mc_dropout:
                        self.output_layer = last_Dense(droidetec_hparam.output_dim, activation=tf.nn.sigmoid)
                    else:
                        self.output_layer = Dense(droidetec_hparam.output_dim, activation=tf.nn.sigmoid)

                def call(self, x, training=False):
                    embed_x = self.embedding(x)
                    # if use_mc_dropout:
                    #     stateful_x = self.bi_lstm(embed_x, training=True)
                    # else:
                    stateful_x = self.bi_lstm(embed_x)
                    # additive attention over timesteps (softmax along the time axis)
                    alpha_wights = tf.nn.softmax(self.dense_layer(tf.nn.tanh(stateful_x)), axis=1)
                    attn_x = tf.reduce_sum(alpha_wights * stateful_x, axis=1)
                    # if use_mc_dropout:
                    #     attn_x = tf.nn.dropout(attn_x, rate=mc_dropout_rate)
                    return self.output_layer(attn_x)
            return BiLSTMAttention()
        return graph
    return wrapper
# Registry mapping each supported architecture name to its graph-builder factory.
model_name_type_dict = {
    'dnn': _dnn_graph,
    'text_cnn': _text_cnn_graph,
    'multimodalitynn': _multimodalitynn,
    'r2d2': _r2d2,
    'droidectc': _droidectc_graph
}
def build_models(input_x, architecture_type, ensemble_type='vanilla', input_dim=None, use_mc_dropout=False):
    """Build a model of the requested architecture and apply it to ``input_x``.

    :param input_x: input tensor fed to the freshly built model
    :param architecture_type: key into ``model_name_type_dict``
    :param ensemble_type: which layer family ``utils.produce_layer`` should emit
    :param input_dim: input dimension(s), forwarded to the graph builder
    :param use_mc_dropout: whether dropout stays active at inference time
    :return: the model's output for ``input_x``
    """
    decorate = model_builder(architecture_type)(input_dim, use_mc_dropout)

    def layer_provider():
        return utils.produce_layer(ensemble_type, dropout_rate=0.4)

    model = decorate(layer_provider)()
    return model(input_x)
| 12,509 | 42.741259 | 115 | py |
diaparser | diaparser-master/setup.py | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Distribution metadata and dependencies for the diaparser package.
setup(
    name='diaparser',
    version='1.1.3',
    author='Yu Zhang, Giuseppe Attardi',
    author_email='yzhang.cs@outlook.com, attardi@di.unipi.it',
    description='Direct Attentive Dependency Parser',
    long_description=open('README.md', 'r').read(),
    long_description_content_type='text/markdown',
    url='https://github.com/Unipisa/diaparser',
    packages=find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Text Processing :: Linguistic'
    ],
    setup_requires=[
        'setuptools>=18.0',
    ],
    # stanza 1.3 has incompatible changes (attribute feat_dropout instead of dropout)
    install_requires=['torch>=2.0', 'transformers', 'nltk', 'stanza', 'numpy'],
    # console entry point: the ``diaparser`` command
    entry_points={
        'console_scripts': [
            'diaparser=diaparser.cmds.biaffine_dependency:main',
        ]
    },
    python_requires='>=3.6',
    zip_safe=False
)
| 1,148 | 31.828571 | 85 | py |
diaparser | diaparser-master/diaparser/modules/bert.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from transformers import AutoModel, AutoConfig
from .scalar_mix import ScalarMix
from .dropout import TokenDropout
# from torch.cuda import memory_allocated
class BertEmbedding(nn.Module):
    r"""
    A module that directly utilizes the pretrained models in `transformers`_ to produce BERT representations.
    While mainly tailored to provide input preparation and post-processing for the BERT model,
    it is also compatible with other pretrained language models like XLNet, RoBERTa and ELECTRA, etc.

    Args:
        model (str):
            Path or name of the pretrained models registered in `transformers`_, e.g., ``'bert-base-cased'``.
        n_layers (int):
            The number of layers from the model to use.
            If 0, uses all layers.
        n_out (int):
            The requested size of the embeddings.
            If 0, uses the size of the pretrained embedding model.
        stride (int):
            A sequence longer than the limited max length will be splitted into several small pieces
            with a window size of ``stride``. Default: 5.
        pad_index (int):
            The index of the padding token in the BERT vocabulary. Default: 0.
        dropout (float):
            The dropout ratio of BERT layers. Default: 0.
            This value will be passed into the :class:`ScalarMix` layer.
        requires_grad (bool):
            If ``True``, the model parameters will be updated together with the downstream task.
            Default: ``False``.

    .. _transformers:
        https://github.com/huggingface/transformers
    """

    def __init__(self, model, n_layers, n_out, stride=5, pad_index=0, dropout=0, requires_grad=False,
                 mask_token_id=0, token_dropout=0.0, mix_dropout=0.0,
                 use_hidden_states=True, use_attentions=False,
                 attention_head=0, attention_layer=8):
        r"""
        :param model (str): path or name of the pretrained model.
        :param n_layers (int): number of layers from the model to use.
            If 0, use all layers.
        :param n_out (int): the requested size of the embeddings.
            If 0, use the size of the pretrained embedding model.
        :param requires_grad (bool): whether to fine tune the embeddings.
        :param mask_token_id (int): the value of the [MASK] token to use for dropped tokens.
        :param dropout (float): drop layers with this probability when computing their
            weighted average with ScalarMix.
        :param use_hidden_states (bool): use the output hidden states from bert if True, or else
            the outputs.
        :param use_attentions (bool): whether to use attention weights.
        :param attention_head (int): which attention head to use.
        :param attention_layer (int): which attention layer to use.
        """
        super().__init__()

        config = AutoConfig.from_pretrained(model, output_hidden_states=True,
                                            output_attentions=use_attentions)
        self.bert = AutoModel.from_pretrained(model, config=config)
        self.bert.requires_grad_(requires_grad)

        self.model = model
        self.n_layers = n_layers or self.bert.config.num_hidden_layers
        self.hidden_size = self.bert.config.hidden_size
        self.n_out = n_out or self.hidden_size
        self.stride = stride
        # NOTE(review): the ``pad_index`` argument is ignored here; the value
        # always comes from the model config -- confirm this is intended.
        self.pad_index = self.bert.config.pad_token_id
        self.mix_dropout = mix_dropout
        # NOTE(review): this float is overwritten by the TokenDropout module
        # assignment a few lines below.
        self.token_dropout = token_dropout
        self.requires_grad = requires_grad
        self.max_len = self.bert.config.max_position_embeddings
        self.use_hidden_states = use_hidden_states
        self.mask_token_id = mask_token_id
        self.use_attentions = use_attentions
        self.head = attention_head
        self.attention_layer = attention_layer

        self.token_dropout = TokenDropout(token_dropout, mask_token_id) if token_dropout else None
        self.scalar_mix = ScalarMix(self.n_layers, mix_dropout)
        if self.hidden_size != self.n_out:
            self.projection = nn.Linear(self.hidden_size, self.n_out, False)

    def __repr__(self):
        s = f"{self.model}, n_layers={self.n_layers}, n_out={self.n_out}"
        s += f", pad_index={self.pad_index}"
        s += f", max_len={self.max_len}"
        if self.mix_dropout > 0:
            s += f", mix_dropout={self.mix_dropout}"
        if self.use_attentions:
            s += f", use_attentions={self.use_attentions}"
        if self.hidden_size != self.n_out:
            s += f", projection=({self.hidden_size} x {self.n_out})"
        s += f", mask_token_id={self.mask_token_id}"
        if self.requires_grad:
            s += f", requires_grad={self.requires_grad}"

        return f"{self.__class__.__name__}({s})"

    def forward(self, subwords):
        r"""
        Args:
            subwords (~torch.Tensor): ``[batch_size, seq_len, fix_len]``.

        Returns:
            ~torch.Tensor:
                BERT embeddings of shape ``[batch_size, seq_len, n_out]``.
        """
        batch_size, seq_len, fix_len = subwords.shape
        if self.token_dropout:
            subwords = self.token_dropout(subwords)
        mask = subwords.ne(self.pad_index)
        # number of non-pad subwords per sentence
        lens = mask.sum((1, 2))
        if not self.requires_grad:
            self.bert.eval()  # CHECK_ME (supar does not do)
        # flatten each sentence's subwords into a single row
        # [batch_size, n_subwords]
        subwords = pad_sequence(subwords[mask].split(lens.tolist()), True)
        bert_mask = pad_sequence(mask[mask].split(lens.tolist()), True)
        # Outputs from the transformer:
        # - last_hidden_state: [batch, seq_len, hidden_size]
        # - pooler_output: [batch, hidden_size],
        # - hidden_states (optional): [[batch_size, seq_length, hidden_size]] * (1 + layers)
        # - attentions (optional): [[batch_size, num_heads, seq_length, seq_length]] * layers
        # print('<BERT, GPU MiB:', memory_allocated() // (1024*1024)) # DEBUG
        outputs = self.bert(subwords[:, :self.max_len], attention_mask=bert_mask[:, :self.max_len].float())
        # print('BERT>, GPU MiB:', memory_allocated() // (1024*1024)) # DEBUG
        if self.use_hidden_states:
            # hidden_states precede attentions in the output tuple
            bert_idx = -2 if self.use_attentions else -1
            bert = outputs[bert_idx]
            # [n_layers, batch_size, n_subwords, hidden_size]
            bert = bert[-self.n_layers:]
            # [batch_size, n_subwords, hidden_size]
            bert = self.scalar_mix(bert)
            # Sequences longer than max_len are processed in overlapping windows
            # of ``stride`` and the new tails are stitched onto the result.
            for i in range(self.stride, (subwords.shape[1]-self.max_len+self.stride-1)//self.stride*self.stride+1, self.stride):
                part = self.bert(subwords[:, i:i+self.max_len], attention_mask=bert_mask[:, i:i+self.max_len].float())[bert_idx]
                bert = torch.cat((bert, self.scalar_mix(part[-self.n_layers:])[:, self.max_len-self.stride:]), 1)
        else:
            bert = outputs[0]

        # [batch_size, n_subwords]
        bert_lens = mask.sum(-1)
        bert_lens = bert_lens.masked_fill_(bert_lens.eq(0), 1)
        # scatter the flat subword vectors back to token positions
        # [batch_size, seq_len, fix_len, hidden_size]
        embed = bert.new_zeros(*mask.shape, self.hidden_size)
        embed = embed.masked_scatter_(mask.unsqueeze(-1), bert[bert_mask])
        # average the wordpieces of each token
        # [batch_size, seq_len, hidden_size]
        embed = embed.sum(2) / bert_lens.unsqueeze(-1)  # sum wordpieces
        seq_attn = None
        if self.use_attentions:
            # (a list of layers) = [ [batch, num_heads, sent_len, sent_len] ]
            attns = outputs[-1]
            # [batch, n_subwords, n_subwords]
            attn = attns[self.attention_layer][:,self.head,:,:]  # layer 9 represents syntax
            # squeeze out multiword tokens
            mask2 = ~mask
            mask2[:,:,0] = True  # keep first column
            sub_masks = pad_sequence(mask2[mask].split(lens.tolist()), True)
            seq_mask = torch.einsum('bi,bj->bij', sub_masks, sub_masks)  # outer product
            seq_lens = seq_mask.sum((1,2))
            # [batch_size, seq_len, seq_len]
            sub_attn = attn[seq_mask].split(seq_lens.tolist())
            # fill a tensor [batch_size, seq_len, seq_len]
            seq_attn = attn.new_zeros(batch_size, seq_len, seq_len)
            for i, attn_i in enumerate(sub_attn):
                size = sub_masks[i].sum(0)
                attn_i = attn_i.view(size, size)
                seq_attn[i,:size,:size] = attn_i
        if hasattr(self, 'projection'):
            embed = self.projection(embed)

        return embed, seq_attn
| 8,608 | 46.563536 | 128 | py |
diaparser | diaparser-master/diaparser/modules/lstm.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from .dropout import SharedDropout
from torch.nn.modules.rnn import apply_permutation
from torch.nn.utils.rnn import PackedSequence
class LSTM(nn.Module):
    r"""
    LSTM is an variant of the vanilla bidirectional LSTM adopted by Biaffine Parser
    with the only difference of the dropout strategy.
    It drops nodes in the LSTM layers (input and recurrent connections)
    and applies the same dropout mask at every recurrent timesteps.

    APIs are roughly the same as :class:`~torch.nn.LSTM` except that we only allows
    :class:`~torch.nn.utils.rnn.PackedSequence` as input.

    References:
        - Timothy Dozat and Christopher D. Manning. 2017.
          `Deep Biaffine Attention for Neural Dependency Parsing`_.

    Args:
        input_size (int):
            The number of expected features in the input.
        hidden_size (int):
            The number of features in the hidden state `h`.
        num_layers (int):
            The number of recurrent layers. Default: 1.
        bidirectional (bool):
            If ``True``, becomes a bidirectional LSTM. Default: ``False``
        dropout (float):
            If non-zero, introduces a :class:`SharedDropout` layer on the outputs of each LSTM layer except the last layer.
            Default: 0.

    .. _Deep Biaffine Attention for Neural Dependency Parsing:
        https://openreview.net/forum?id=Hk95PK9le
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bidirectional=False, dropout=0):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.dropout = dropout

        # One LSTMCell per layer (and direction): the cells are stepped manually
        # in ``layer_forward`` so that one dropout mask can be shared across
        # every timestep of a layer.
        self.f_cells = nn.ModuleList()
        if bidirectional:
            self.b_cells = nn.ModuleList()
        for _ in range(self.num_layers):
            self.f_cells.append(nn.LSTMCell(input_size=input_size,
                                            hidden_size=hidden_size))
            if bidirectional:
                self.b_cells.append(nn.LSTMCell(input_size=input_size,
                                                hidden_size=hidden_size))
            # upper layers consume the (possibly direction-concatenated) outputs
            input_size = hidden_size * (1 + self.bidirectional)

        self.reset_parameters()

    def __repr__(self):
        s = f"{self.input_size}, {self.hidden_size}"
        if self.num_layers > 1:
            s += f", num_layers={self.num_layers}"
        if self.bidirectional:
            s += f", bidirectional={self.bidirectional}"
        if self.dropout > 0:
            s += f", dropout={self.dropout}"

        return f"{self.__class__.__name__}({s})"

    def reset_parameters(self):
        for param in self.parameters():
            # apply orthogonal_ to weight
            if len(param.shape) > 1:
                nn.init.orthogonal_(param)
            # apply zeros_ to bias
            else:
                nn.init.zeros_(param)

    def permute_hidden(self, hx, permutation):
        # Reorder the hidden/cell states to match a PackedSequence's
        # (un)sorting permutation; no-op when no permutation is given.
        if permutation is None:
            return hx
        h = apply_permutation(hx[0], permutation)
        c = apply_permutation(hx[1], permutation)

        return h, c

    def layer_forward(self, x, hx, cell, batch_sizes, reverse=False):
        # Run one direction of one layer over the timestep-split input ``x``,
        # growing/shrinking the active batch as the packed batch sizes change.
        hx_0 = hx_i = hx
        hx_n, output = [], []
        steps = reversed(range(len(x))) if reverse else range(len(x))
        if self.training:
            # one recurrent dropout mask for the whole sequence
            hid_mask = SharedDropout.get_mask(hx_0[0], self.dropout)

        for t in steps:
            last_batch_size, batch_size = len(hx_i[0]), batch_sizes[t]
            if last_batch_size < batch_size:
                # sequences (re)entering the batch start from the initial state
                hx_i = [torch.cat((h, ih[last_batch_size:batch_size]))
                        for h, ih in zip(hx_i, hx_0)]
            else:
                # sequences that just ended: stash their final states
                hx_n.append([h[batch_size:] for h in hx_i])
                hx_i = [h[:batch_size] for h in hx_i]
            hx_i = [h for h in cell(x[t], hx_i)]
            output.append(hx_i[0])
            if self.training:
                hx_i[0] = hx_i[0] * hid_mask[:batch_size]
        if reverse:
            hx_n = hx_i
            output.reverse()
        else:
            hx_n.append(hx_i)
            hx_n = [torch.cat(h) for h in zip(*reversed(hx_n))]
        output = torch.cat(output)

        return output, hx_n

    def forward(self, sequence, hx=None):
        r"""
        Args:
            sequence (~torch.nn.utils.rnn.PackedSequence):
                A packed variable length sequence.
            hx (~torch.Tensor, ~torch.Tensor):
                A tuple composed of two tensors `h` and `c`.
                `h` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the initial hidden state
                for each element in the batch.
                `c` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the initial cell state
                for each element in the batch.
                If `hx` is not provided, both `h` and `c` default to zero.
                Default: ``None``.

        Returns:
            ~torch.nn.utils.rnn.PackedSequence, (~torch.Tensor, ~torch.Tensor):
                The first is a packed variable length sequence.
                The second is a tuple of tensors `h` and `c`.
                `h` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the hidden state for `t=seq_len`.
                Like output, the layers can be separated using ``h.view(num_layers, 2, batch_size, hidden_size)``
                and similarly for c.
                `c` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the cell state for `t=seq_len`.
        """
        x, batch_sizes = sequence.data, sequence.batch_sizes.tolist()
        batch_size = batch_sizes[0]
        h_n, c_n = [], []

        if hx is None:
            ih = x.new_zeros(self.num_layers * 2, batch_size, self.hidden_size)
            h, c = ih, ih
        else:
            h, c = self.permute_hidden(hx, sequence.sorted_indices)
        h = h.view(self.num_layers, 2, batch_size, self.hidden_size)
        c = c.view(self.num_layers, 2, batch_size, self.hidden_size)

        for i in range(self.num_layers):
            x = torch.split(x, batch_sizes)
            if self.training:
                # shared input dropout mask for this layer
                mask = SharedDropout.get_mask(x[0], self.dropout)
                x = [i * mask[:len(i)] for i in x]
            x_i, (h_i, c_i) = self.layer_forward(x=x,
                                                 hx=(h[i, 0], c[i, 0]),
                                                 cell=self.f_cells[i],
                                                 batch_sizes=batch_sizes)
            if self.bidirectional:
                x_b, (h_b, c_b) = self.layer_forward(x=x,
                                                     hx=(h[i, 1], c[i, 1]),
                                                     cell=self.b_cells[i],
                                                     batch_sizes=batch_sizes,
                                                     reverse=True)
                x_i = torch.cat((x_i, x_b), -1)
                h_i = torch.stack((h_i, h_b))
                c_i = torch.stack((c_i, c_b))
            x = x_i
            h_n.append(h_i)
            # Fix: the cell-state list previously collected ``h_i`` again,
            # so the returned ``c`` silently duplicated the hidden states.
            c_n.append(c_i)

        x = PackedSequence(x,
                           sequence.batch_sizes,
                           sequence.sorted_indices,
                           sequence.unsorted_indices)
        hx = torch.cat(h_n, 0), torch.cat(c_n, 0)
        hx = self.permute_hidden(hx, sequence.unsorted_indices)

        return x, hx
| 7,586 | 39.790323 | 125 | py |
diaparser | diaparser-master/diaparser/modules/mlp.py | # -*- coding: utf-8 -*-
import torch.nn as nn
from ..modules.dropout import SharedDropout
class MLP(nn.Module):
    r"""
    A single feed-forward layer: linear projection, :class:`~torch.nn.LeakyReLU`
    activation (negative slope 0.1), then :class:`SharedDropout`, i.e.
    :math:`y = \mathrm{Dropout}(\mathrm{LeakyReLU}(x A^T + b))`.

    Args:
        n_in (~torch.Tensor):
            The size of each input feature.
        n_out (~torch.Tensor):
            The size of each output feature.
        dropout (float):
            If non-zero, the dropout ratio of the trailing :class:`SharedDropout`. Default: 0.
    """

    def __init__(self, n_in, n_out, dropout=0):
        super().__init__()

        self.n_in = n_in
        self.n_out = n_out
        # Submodule names are kept stable: they are part of the state_dict keys.
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.LeakyReLU(negative_slope=0.1)
        self.dropout = SharedDropout(p=dropout)

        self.reset_parameters()

    def __repr__(self):
        parts = [f"n_in={self.n_in}", f"n_out={self.n_out}"]
        if self.dropout.p > 0:
            parts.append(f"dropout={self.dropout.p}")
        return f"{self.__class__.__name__}({', '.join(parts)})"

    def reset_parameters(self):
        # Orthogonal weight matrix, zero bias.
        nn.init.orthogonal_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

    def forward(self, x):
        r"""
        Args:
            x (~torch.Tensor):
                The size of each input feature is `n_in`.

        Returns:
            A tensor with the size of each output feature `n_out`.
        """
        return self.dropout(self.activation(self.linear(x)))
| 1,581 | 26.275862 | 116 | py |
diaparser | diaparser-master/diaparser/modules/affine.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class Biaffine(nn.Module):
    """Biaffine attention scorer (Dozat & Manning, 2017).

    Scores every pair of positions ``(x_i, y_j)`` with ``x_i^T W y_j`` for each
    of ``n_out`` output channels, optionally augmenting ``x`` and/or ``y`` with
    a constant bias feature.
    """

    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        super(Biaffine, self).__init__()

        self.n_in = n_in
        self.n_out = n_out
        self.bias_x = bias_x
        self.bias_y = bias_y
        # [n_out, n_in (+1 if bias_x), n_in (+1 if bias_y)]
        weight_shape = (n_out, n_in + bias_x, n_in + bias_y)
        self.weight = nn.Parameter(torch.Tensor(*weight_shape))

        self.reset_parameters()

    def extra_repr(self):
        parts = [f"n_in={self.n_in}", f"n_out={self.n_out}"]
        if self.bias_x:
            parts.append(f"bias_x={self.bias_x}")
        if self.bias_y:
            parts.append(f"bias_y={self.bias_y}")
        return ", ".join(parts)

    def reset_parameters(self):
        # Start from an all-zero bilinear map.
        nn.init.zeros_(self.weight)

    def forward(self, x, y):
        """Return pairwise scores of shape ``[batch, n_out, len_x, len_y]``;
        the channel dimension is squeezed out when ``n_out == 1``."""
        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        # [batch_size, n_out, seq_len, seq_len]
        scores = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
        # remove dim 1 if n_out == 1
        return scores.squeeze(1)
| 1,220 | 26.75 | 64 | py |
diaparser | diaparser-master/diaparser/modules/char_lstm.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
class CharLSTM(nn.Module):
    r"""
    CharLSTM aims to generate character-level embeddings for tokens.
    It summarizes the information of characters in each token to an embedding using a LSTM layer.

    Args:
        n_chars (int):
            The number of characters.
        n_word_embed (int):
            The size of each embedding vector as input to LSTM.
        n_out (int):
            The size of each output vector.
        pad_index (int):
            The index of the padding token in the vocabulary. Default: 0.
    """

    def __init__(self, n_chars, n_word_embed, n_out, pad_index=0):
        super().__init__()

        self.n_chars = n_chars
        self.n_word_embed = n_word_embed
        self.n_out = n_out
        self.pad_index = pad_index

        # the embedding layer
        self.embed = nn.Embedding(num_embeddings=n_chars,
                                  embedding_dim=n_word_embed)
        # the lstm layer: bidirectional, so each direction gets n_out//2 units
        self.lstm = nn.LSTM(input_size=n_word_embed,
                            hidden_size=n_out//2,
                            batch_first=True,
                            bidirectional=True)

    def __repr__(self):
        # Fix: the attribute is ``n_word_embed``; the previous reference to the
        # non-existent ``self.n_embed`` made repr() raise AttributeError.
        return f"{self.__class__.__name__}({self.n_chars}, {self.n_word_embed}, n_out={self.n_out}, pad_index={self.pad_index})"

    def forward(self, x):
        r"""
        Args:
            x (~torch.Tensor): ``[batch_size, seq_len, fix_len]``.
                Characters of all tokens.
                Each token holds no more than `fix_len` characters, and the excess is cut off directly.
        Returns:
            ~torch.Tensor:
                The embeddings of shape ``[batch_size, seq_len, n_out]`` derived from the characters.
        """
        # [batch_size, seq_len, fix_len]
        mask = x.ne(self.pad_index)
        # [batch_size, seq_len]: number of characters per token
        lens = mask.sum(-1)
        # tokens that contain at least one non-pad character
        char_mask = lens.gt(0)

        # [n, fix_len, n_word_embed]
        x = self.embed(x[char_mask])
        # NOTE(review): recent torch requires the lengths passed to
        # pack_padded_sequence to live on CPU -- confirm if inputs can be on GPU.
        x = pack_padded_sequence(x, lens[char_mask], True, False)
        x, (h, _) = self.lstm(x)
        # [n, n_out]: concatenate the final states of both directions
        h = torch.cat(torch.unbind(h), dim=-1)
        # [batch_size, seq_len, n_out]: padding tokens keep zero vectors
        embed = h.new_zeros(*lens.shape, self.n_out)
        embed = embed.masked_scatter_(char_mask.unsqueeze(-1), h)

        return embed
| 2,424 | 32.680556 | 123 | py |
diaparser | diaparser-master/diaparser/modules/dropout.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class SharedDropout(nn.Module):
    r"""
    Dropout variant that samples a single mask and reuses it along one
    dimension (the sequence dimension), instead of sampling independently
    at every position.

    Args:
        p (float):
            The probability of an element to be zeroed. Default: 0.5.
        batch_first (bool):
            If ``True``, inputs are expected as ``[batch_size, seq_len, *]``.
            Default: ``True``.

    Examples:
        >>> x = torch.ones(1, 3, 5)
        >>> SharedDropout()(x)  # doctest: +SKIP
        tensor([[[2., 0., 2., 0., 2.],
                 [2., 0., 2., 0., 2.],
                 [2., 0., 2., 0., 2.]]])
    """

    def __init__(self, p=0.5, batch_first=True):
        super().__init__()

        self.p = p
        self.batch_first = batch_first

    def __repr__(self):
        parts = [f"p={self.p}"]
        if self.batch_first:
            parts.append(f"batch_first={self.batch_first}")
        return f"{self.__class__.__name__}({', '.join(parts)})"

    def forward(self, x):
        r"""
        Apply the shared mask in training mode; identity in eval mode.

        Args:
            x (~torch.Tensor):
                A tensor of any shape.
        Returns:
            The returned tensor is of the same shape as `x`.
        """
        if not self.training:
            return x
        # Sample the mask from a single slice, then broadcast it over the
        # remaining (sequence) dimension. The scaling is applied in place.
        if self.batch_first:
            mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
        else:
            mask = self.get_mask(x[0], self.p)
        x *= mask
        return x

    @staticmethod
    def get_mask(x, p):
        # Inverted-dropout mask: keep with prob 1-p, rescale survivors by 1/(1-p).
        return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class IndependentDropout(nn.Module):
    r"""
    Applies independent dropout masks to :math:`N` tensors.

    When :math:`N-M` of them are dropped at a position, the :math:`M`
    survivors are scaled by :math:`N/M` to compensate; if all are dropped,
    zeros are returned.

    Args:
        p (float):
            The probability of an element to be zeroed. Default: 0.5.

    Examples:
        >>> x, y = torch.ones(1, 3, 5), torch.ones(1, 3, 5)
        >>> x, y = IndependentDropout()(x, y)
        >>> x
        tensor([[[1., 1., 1., 1., 1.],
                 [0., 0., 0., 0., 0.],
                 [2., 2., 2., 2., 2.]]])
        >>> y
        tensor([[[1., 1., 1., 1., 1.],
                 [2., 2., 2., 2., 2.],
                 [0., 0., 0., 0., 0.]]])
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def __repr__(self):
        return f"{self.__class__.__name__}(p={self.p})"

    def forward(self, *items):
        r"""
        Args:
            items (list[~torch.Tensor]):
                Tensors sharing all dimensions except the last.
        Returns:
            Tensors of the same shapes as `items`.
        """
        if not self.training:
            return items
        # one Bernoulli keep-mask per tensor, over the first two dimensions
        masks = [item.new_empty(item.shape[:2]).bernoulli_(1 - self.p)
                 for item in items]
        kept = sum(masks)
        # rescale survivors by N/M; the max with ones avoids division by zero
        scale = len(items) / kept.max(torch.ones_like(kept))
        return [item * (mask * scale).unsqueeze(dim=-1)
                for item, mask in zip(items, masks)]
class TokenDropout(nn.Module):
    r"""
    Randomly overwrites input tokens with a fixed value during training.

    Args:
        p (float):
            The probability of a token to be replaced. Default: 0.5.
        value (int):
            The replacement value written over dropped tokens. Default: 0.
    """

    def __init__(self, p=0.5, value=0):
        super().__init__()
        self.p = p
        self.value = value

    def extra_repr(self):
        return f"p={self.p}, value={self.value}"

    def forward(self, x):
        r"""Replaces each token of `x` with `value` with probability `p`
        (training mode only); the replacement happens in place."""
        if not self.training:
            return x
        drop = torch.rand_like(x, dtype=torch.float) < self.p
        x.masked_fill_(drop, self.value)
        return x
| 3,804 | 26.977941 | 123 | py |
diaparser | diaparser-master/diaparser/modules/matrix_tree_theorem.py | # -*- coding: utf-8 -*-
import torch
import torch.autograd as autograd
import torch.nn as nn
class MatrixTreeTheorem(nn.Module):
    r"""
    Computes marginal probabilities of dependency arcs (and, given a gold
    target, the training loss) via the Matrix-Tree Theorem: the partition
    function over non-projective trees is the determinant of a minor of the
    graph Laplacian, and arc marginals are obtained as the gradient of its
    log w.r.t. the arc scores.
    """

    def __init__(self, *args, **kwargs):
        super(MatrixTreeTheorem, self).__init__(*args, **kwargs)

    @torch.enable_grad()
    def forward(self, scores, mask, target=None):
        r"""
        Args:
            scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
                Log-potentials of all dependent-head pairs.
            mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
                Mask covering the unpadded tokens; position 0 is assumed to
                be the pseudo root.
            target (~torch.LongTensor): ``[batch_size, seq_len]``.
                Gold-standard head indices; if given, the loss is returned
                along with the marginals.

        Returns:
            Arc marginal probabilities, or ``(loss, probs)`` when a target
            is provided.
        """
        # double precision keeps exp/log-determinant numerically stable
        scores = scores.double()
        # temporarily turn on position 0 (the pseudo root) in the mask
        mask = mask.index_fill(1, mask.new_tensor(0).long(), 1)
        # A holds the (masked) arc potentials; grad is tracked so that
        # marginals can be read off d(logZ)/d(scores) below
        A = scores.requires_grad_().exp()
        A = A * mask.unsqueeze(1) * mask.unsqueeze(-1)
        batch_size, seq_len, _ = A.shape
        # D is the weighted degree matrix
        D = torch.zeros_like(A)
        D.diagonal(0, 1, 2).copy_(A.sum(-1))
        # Laplacian matrix; padded rows keep the identity so that the
        # determinant of the minor is unaffected by padding
        L = nn.init.eye_(torch.empty_like(A[0])).repeat(batch_size, 1, 1)
        L[mask] = (D - A)[mask]
        # calculate the partition (a.k.a normalization) term:
        # logZ = log det of the Laplacian minor without row/column 0
        logZ = L[:, 1:, 1:].slogdet()[1].sum()
        # restore the original mask (root position off again)
        mask = mask.index_fill(1, mask.new_tensor(0).long(), 0)
        # calculate the marginal probabilities
        probs, = autograd.grad(logZ, scores, retain_graph=scores.requires_grad)
        probs = probs.float()
        if target is None:
            return probs
        # loss = logZ - score(gold tree), i.e. the tree-CRF negative
        # log-likelihood summed over the batch
        score = scores.gather(-1, target.unsqueeze(-1)).squeeze(-1)[mask].sum()
        loss = (logZ - score).float()
        return loss, probs
| 1,329 | 33.102564 | 79 | py |
diaparser | diaparser-master/diaparser/modules/scalar_mix.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class ScalarMix(nn.Module):
    r"""
    Computes a parameterised scalar mixture of :math:`N` tensors,
    :math:`mixture = \gamma * \sum_{k}(s_k * tensor_k)`,
    where :math:`s = \mathrm{softmax}(w)` and both :math:`w` and
    :math:`\gamma` are trainable scalar parameters.

    Args:
        n_layers (int):
            The number of layers to be mixed, i.e., :math:`N`.
        dropout (float):
            The dropout ratio of the layer weights.
            If dropout > 0, each softmaxed weight may be zeroed with the
            dropout probability, which redistributes the dropped
            probability mass to the remaining weights. Default: 0.
    """

    def __init__(self, n_layers: int, dropout: float = 0.0):
        super().__init__()
        self.n_layers = n_layers
        self.weights = nn.Parameter(torch.zeros(n_layers))
        self.gamma = nn.Parameter(torch.tensor([1.0]))
        self.dropout = nn.Dropout(dropout)

    def __repr__(self):
        params = [f"n_layers={self.n_layers}"]
        if self.dropout.p > 0:
            params.append(f"dropout={self.dropout.p}")
        return f"{self.__class__.__name__}({', '.join(params)})"

    def forward(self, tensors):
        r"""
        Args:
            tensors (list[~torch.Tensor]):
                :math:`N` tensors to be mixed.
        Returns:
            The mixture of :math:`N` tensors.
        """
        weights = self.dropout(self.weights.softmax(-1))
        mixture = sum(w * t for w, t in zip(weights, tensors))
        return self.gamma * mixture
| 1,679 | 30.698113 | 117 | py |
diaparser | diaparser-master/diaparser/models/dependency.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from ..modules import MLP, BertEmbedding, Biaffine, LSTM, CharLSTM
from ..modules.dropout import IndependentDropout, SharedDropout
from ..utils.config import Config
from ..utils.alg import eisner, mst
from ..utils.transform import CoNLL
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from typing import Tuple
class BiaffineDependencyModel(nn.Module):
    r"""
    The implementation of Biaffine Dependency Parser.

    References:
        - Timothy Dozat and Christopher D. Manning. 2017.
          `Deep Biaffine Attention for Neural Dependency Parsing`_.

    Args:
        n_words (int):
            The size of the word vocabulary.
        n_feats (int):
            The size of the feat vocabulary.
        n_rels (int):
            The number of labels in the treebank.
        feat (str):
            Specifies which type of additional feature to use: ``'char'`` | ``'bert'`` | ``'tag'``.
            ``'char'``: Character-level representations extracted by CharLSTM.
            ``'bert'``: BERT representations, other pretrained language models like XLNet are also feasible.
            ``'tag'``: POS tag embeddings.
            Default: ``'char'``.
        n_word_embed (int):
            The size of word embeddings. Default: 100.
        n_feat_embed (int):
            The size of feature representations. Default: 100.
        n_char_embed (int):
            The size of character embeddings serving as inputs of CharLSTM, required if ``feat='char'``. Default: 50.
        bert (str):
            Specifies which kind of language model to use, e.g., ``'bert-base-cased'`` and ``'xlnet-base-cased'``.
            This is required if ``feat='bert'``. The full list can be found in `transformers`_.
            Default: ``None``.
        n_bert_layers (int):
            Specifies how many last layers to use. Required if ``feat='bert'``.
            The final outputs would be the weight sum of the hidden states of these layers.
            Default: 4.
        bert_fine_tune (bool):
            Whether to fine tune the BERT model.
            Default: False.
        mix_dropout (float):
            The dropout ratio of BERT layers. Required if ``feat='bert'``. Default: .0.
        token_dropout (float):
            The dropout ratio of tokens. Default: .0.
        embed_dropout (float):
            The dropout ratio of input embeddings. Default: .33.
        n_lstm_hidden (int):
            The size of LSTM hidden states. Default: 400.
        n_lstm_layers (int):
            The number of LSTM layers. Default: 3.
        lstm_dropout (float):
            The dropout ratio of LSTM. Default: .33.
        n_mlp_arc (int):
            Arc MLP size. Default: 500.
        n_mlp_rel (int):
            Label MLP size. Default: 100.
        mlp_dropout (float):
            The dropout ratio of MLP layers. Default: .33.
        use_hidden_states (bool):
            Whether to use hidden states rather than outputs from BERT.
            Default: True.
        use_attentions (bool):
            Whether to use attention heads from BERT.
            Default: False.
        attention_head (int):
            Which attention head from BERT to use. Default: 0.
        attention_layer (int):
            Which attention layer from BERT to use; use all if 0. Default: 6.
        feat_pad_index (int):
            The index of the padding token in the feat vocabulary. Default: 0.
        pad_index (int):
            The index of the padding token in the word vocabulary. Default: 0.
        unk_index (int):
            The index of the unknown token in the word vocabulary. Default: 1.

    .. _Deep Biaffine Attention for Neural Dependency Parsing:
        https://openreview.net/forum?id=Hk95PK9le
    .. _transformers:
        https://github.com/huggingface/transformers
    """

    def __init__(self,
                 n_words,
                 n_feats,
                 n_rels,
                 feat='char',
                 n_word_embed=100,
                 n_feat_embed=100,
                 n_char_embed=50,
                 bert=None,
                 n_bert_layers=4,
                 bert_fine_tune=False,
                 mix_dropout=.0,
                 token_dropout=.0,
                 embed_dropout=.33,
                 n_lstm_hidden=400,
                 n_lstm_layers=3,
                 lstm_dropout=.33,
                 n_mlp_arc=500,
                 n_mlp_rel=100,
                 mask_token_id=.0,
                 mlp_dropout=.33,
                 use_hidden_states=True,
                 use_attentions=False,
                 attention_head=0,
                 attention_layer=6,
                 feat_pad_index=0,
                 pad_index=0,
                 unk_index=1,
                 **kwargs):
        super().__init__()
        # cant use Config(**locals()) because it includes self
        self.args = Config().update(locals())
        args = self.args
        if args.n_word_embed:
            # the embedding layer
            self.word_embed = nn.Embedding(num_embeddings=args.n_words,
                                           embedding_dim=args.n_word_embed)
            self.unk_index = args.unk_index
        else:
            # no trainable word embeddings: rely on feature embeddings alone
            self.word_embed = None
        if args.feat == 'char':
            self.feat_embed = CharLSTM(n_chars=args.n_feats,
                                       n_word_embed=args.n_char_embed,
                                       n_out=args.n_feat_embed,
                                       pad_index=args.feat_pad_index)
        elif args.feat == 'bert':
            self.feat_embed = BertEmbedding(model=args.bert,
                                            n_layers=args.n_bert_layers,
                                            n_out=args.n_feat_embed,
                                            requires_grad=args.bert_fine_tune,
                                            mask_token_id=args.mask_token_id,
                                            token_dropout=args.token_dropout,
                                            mix_dropout=args.mix_dropout,
                                            use_hidden_states=args.use_hidden_states,
                                            use_attentions=args.use_attentions,
                                            attention_layer=args.attention_layer)
            # Setting this requires rebuilding models:
            # args.n_mlp_arc = self.feat_embed.bert.config.max_position_embeddings
            args.n_feat_embed = self.feat_embed.n_out  # taken from the model
            args.n_bert_layers = self.feat_embed.n_layers  # taken from the model
        elif args.feat == 'tag':
            self.feat_embed = nn.Embedding(num_embeddings=args.n_feats,
                                           embedding_dim=args.n_feat_embed)
        else:
            raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
        self.embed_dropout = IndependentDropout(p=args.embed_dropout)
        if args.n_lstm_layers:
            # the lstm layer
            self.lstm = LSTM(input_size=args.n_word_embed+args.n_feat_embed,
                             hidden_size=args.n_lstm_hidden,
                             num_layers=args.n_lstm_layers,
                             bidirectional=True,
                             dropout=args.lstm_dropout)
            self.lstm_dropout = SharedDropout(p=args.lstm_dropout)
            mlp_input_size = args.n_lstm_hidden*2
        else:
            # feed embeddings straight to the MLPs (e.g. pure-BERT setups)
            self.lstm = None
            mlp_input_size = args.n_word_embed + args.n_feat_embed
        # the MLP layers
        self.mlp_arc_d = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_arc,
                             dropout=args.mlp_dropout)
        self.mlp_arc_h = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_arc,
                             dropout=args.mlp_dropout)
        self.mlp_rel_d = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_rel,
                             dropout=args.mlp_dropout)
        self.mlp_rel_h = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_rel,
                             dropout=args.mlp_dropout)
        # the Biaffine layers
        self.arc_attn = Biaffine(n_in=args.n_mlp_arc,
                                 bias_x=True,
                                 bias_y=False)
        self.rel_attn = Biaffine(n_in=args.n_mlp_rel,
                                 n_out=args.n_rels,
                                 bias_x=True,
                                 bias_y=True)
        # transformer attention: learnable scalar for mixing BERT attention
        # into the arc scores (see forward)
        if args.use_attentions:
            self.attn_mix = nn.Parameter(torch.randn(1))
        self.criterion = nn.CrossEntropyLoss()

    def extra_repr(self):
        """Reports parameter counts and feature vocabulary size in repr()."""
        total_params = sum(p.numel() for p in self.parameters())
        trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        return f"Total parameters: {total_params}\n" \
            f"Trainable parameters: {trainable_params}\n" \
            f"Features: {self.args.n_feats}"

    def load_pretrained(self, embed=None):
        """Attaches pretrained word embeddings as a frozen lookup and zeroes
        the trainable word embedding, so that forward() sums the two."""
        if embed is not None:
            self.pretrained = nn.Embedding.from_pretrained(embed)
            nn.init.zeros_(self.word_embed.weight)
        return self

    def forward(self, words: torch.Tensor,
                feats: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Args:
            words (~torch.LongTensor): ``[batch_size, seq_len]``.
                Word indices.
            feats (~torch.LongTensor):
                Feat indices.
                If feat is ``'char'`` or ``'bert'``, the size of feats should be ``[batch_size, seq_len, fix_len]``.
                if ``'tag'``, the size is ``[batch_size, seq_len]``.

        Returns:
            ~torch.Tensor, ~torch.Tensor:
                The first tensor of shape ``[batch_size, seq_len, seq_len]`` holds scores of all possible arcs.
                The second of shape ``[batch_size, seq_len, seq_len, n_labels]`` holds
                scores of all possible labels on each arc.
        """
        # words, feats are the first two items in the batch from DataLoader.__iter__()
        whole_words = feats[:, :, 0]  # drop subpiece dimension
        batch_size, seq_len = whole_words.shape
        # get the mask and lengths of given batch
        # NOTE(review): self.feat_embed.pad_index presumes feat is
        # 'char'/'bert'; a plain nn.Embedding ('tag') has no pad_index —
        # confirm against callers
        mask = whole_words.ne(self.feat_embed.pad_index)
        lens = mask.sum(dim=1).cpu()  # BUG fix: https://github.com/pytorch/pytorch/issues/43227
        # feat_embed: [batch_size, seq_len, n_feat_embed]
        # attn: [batch_size, seq_len, seq_len]
        feat_embed, attn = self.feat_embed(feats)
        if self.word_embed:
            ext_words = words
            # set the indices larger than num_embeddings to unk_index
            if hasattr(self, 'pretrained'):
                ext_mask = words.ge(self.word_embed.num_embeddings)
                ext_words = words.masked_fill(ext_mask, self.unk_index)
            # get outputs from embedding layers
            word_embed = self.word_embed(ext_words)
            if hasattr(self, 'pretrained'):
                word_embed += self.pretrained(words)
            word_embed, feat_embed = self.embed_dropout(word_embed, feat_embed)
            # concatenate the word and feat representations
            embed = torch.cat((word_embed, feat_embed), dim=-1)
        else:
            embed = self.embed_dropout(feat_embed)[0]
        if self.lstm:
            x = pack_padded_sequence(embed, lens, True, False)
            x, _ = self.lstm(x)
            x, _ = pad_packed_sequence(x, True, total_length=seq_len)
            x = self.lstm_dropout(x)
        else:
            x = embed
        # apply MLPs to the BiLSTM output states
        arc_d = self.mlp_arc_d(x)
        arc_h = self.mlp_arc_h(x)
        rel_d = self.mlp_rel_d(x)
        rel_h = self.mlp_rel_h(x)
        # [batch_size, seq_len, seq_len]
        s_arc = self.arc_attn(arc_d, arc_h)
        # [batch_size, seq_len, seq_len, n_rels]
        s_rel = self.rel_attn(rel_d, rel_h).permute(0, 2, 3, 1)
        # mix bert attentions into the arc scores with a learnt scalar
        if attn is not None:
            s_arc += self.attn_mix * attn
        # set the scores that exceed the length of each sentence to -inf
        s_arc.masked_fill_(~mask.unsqueeze(1), float('-inf'))
        # Lower the diagonal, because the head of a word can't be itself.
        s_arc += torch.diag(s_arc.new(seq_len).fill_(float('-inf')))
        return s_arc, s_rel

    def loss(self, s_arc: torch.Tensor, s_rel: torch.Tensor,
             arcs: torch.Tensor, rels: torch.Tensor,
             mask: torch.Tensor, partial: bool = False) -> torch.Tensor:
        r"""
        Computes the arc and tag loss for a sequence given gold heads and tags.

        Args:
            s_arc (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
                Scores of all possible arcs.
            s_rel (~torch.Tensor): ``[batch_size, seq_len, seq_len, n_labels]``.
                Scores of all possible labels on each arc.
            arcs (~torch.LongTensor): ``[batch_size, seq_len]``.
                The tensor of gold-standard arcs.
            rels (~torch.LongTensor): ``[batch_size, seq_len]``.
                The tensor of gold-standard labels.
            mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
                The mask for covering the unpadded tokens.
            partial (bool):
                ``True`` denotes the trees are partially annotated. Default: ``False``.

        Returns:
            ~torch.Tensor:
                The training loss.
        """
        if partial:
            # unannotated positions carry a negative head index: exclude them
            mask = mask & arcs.ge(0)
        s_arc, arcs = s_arc[mask], arcs[mask]
        s_rel, rels = s_rel[mask], rels[mask]
        # select the predicted relations towards the correct heads
        s_rel = s_rel[torch.arange(len(arcs)), arcs]
        arc_loss = self.criterion(s_arc, arcs)
        rel_loss = self.criterion(s_rel, rels)
        return arc_loss + rel_loss

    def decode(self, s_arc: torch.Tensor, s_rel: torch.Tensor,
               mask: torch.Tensor,
               tree: bool = False, proj: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Args:
            s_arc (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
                Scores of all possible arcs.
            s_rel (~torch.Tensor): ``[batch_size, seq_len, seq_len, n_labels]``.
                Scores of all possible labels on each arc.
            mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
                The mask for covering the unpadded tokens.
            tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``False``.
            proj (bool):
                If ``True``, ensures to output projective trees. Default: ``False``.

        Returns:
            ~torch.Tensor, ~torch.Tensor:
                Predicted arcs and labels of shape ``[batch_size, seq_len]``.
        """
        lens = mask.sum(1)
        # prevent self-loops
        s_arc.diagonal(0, 1, 2).fill_(float('-inf'))
        # select the most likely arcs
        arc_preds = s_arc.argmax(-1)
        if tree:
            # ensure the arcs form a tree; only re-decode the sentences
            # whose greedy prediction is not already a (projective) tree
            bad = [not CoNLL.istree(seq[1:i+1], proj)
                   for i, seq in zip(lens.tolist(), arc_preds.tolist())]
            if any(bad):
                alg = eisner if proj else mst
                arc_preds[bad] = alg(s_arc[bad], mask[bad])
        # select the most likely rels
        rel_preds = s_rel.argmax(-1)
        # choose those corresponding to the predicted arcs
        rel_preds = rel_preds.gather(-1, arc_preds.unsqueeze(-1)).squeeze(-1)
        return arc_preds, rel_preds
| 15,848 | 42.541209 | 117 | py |
diaparser | diaparser-master/diaparser/utils/embedding.py | # -*- coding: utf-8 -*-
import torch
class Embedding():
    r"""
    Container for pretrained token embeddings.

    Args:
        tokens (list[str]):
            Tokens of the pretrained vocabulary.
        vectors (list[list[float]]):
            Embedding vectors, one per token.
        unk (str):
            The token standing for OOV words, if any. Default: ``None``.
    """

    def __init__(self, tokens, vectors, unk=None):
        self.tokens = tokens
        self.vectors = torch.tensor(vectors)
        # token -> vector mapping for O(1) membership tests and lookups
        self.pretrained = {w: v for w, v in zip(tokens, vectors)}
        self.unk = unk

    def __len__(self):
        return len(self.tokens)

    def __contains__(self, token):
        return token in self.pretrained

    @property
    def dim(self):
        """Dimensionality of the embedding vectors."""
        return self.vectors.size(1)

    @property
    def unk_index(self):
        """Index of the `unk` token; raises AttributeError if `unk` is unset."""
        if self.unk is not None:
            return self.tokens.index(self.unk)
        raise AttributeError

    @classmethod
    def load(cls, path, unk=None):
        """
        Reads a whitespace-separated embedding file (one token followed by
        its vector components per line) and returns an :class:`Embedding`.

        Args:
            path (str): path to the embedding file.
            unk (str): the OOV token of the pretrained vocab. Default: ``None``.
        """
        # explicit encoding: embedding files are conventionally UTF-8 and
        # the platform default may differ; stream the file instead of
        # materializing the raw lines first
        with open(path, 'r', encoding='utf-8') as f:
            splits = [line.split() for line in f]
        tokens, vectors = zip(*[(s[0], list(map(float, s[1:])))
                                for s in splits])
        return cls(tokens, vectors, unk=unk)
| 982 | 23.575 | 65 | py |
diaparser | diaparser-master/diaparser/utils/field.py | # -*- coding: utf-8 -*-
from collections import Counter
from ..utils.fn import pad
from ..utils.vocab import Vocab, FieldVocab
import torch
from typing import List
class RawField():
    r"""
    A plain datatype with no assumptions about its content.

    A :class:`RawField` object only records how examples of the datatype
    should be preprocessed; no vocabulary or numericalization is involved.

    Args:
        name (str):
            The name of the field.
        fn (function):
            The function used for preprocessing the examples. Default: ``None``.
    """

    def __init__(self, name, fn=None):
        self.name = name
        self.fn = fn

    def __repr__(self):
        return f"({self.name}): {self.__class__.__name__}()"

    def preprocess(self, sequence):
        """Applies `fn` to `sequence` when given; otherwise a no-op."""
        if self.fn is None:
            return sequence
        return self.fn(sequence)

    def transform(self, sequences):
        """Preprocesses every sequence in `sequences`."""
        return [self.preprocess(sequence) for sequence in sequences]

    def compose(self, sequences):
        """Raw fields need no batching: returns `sequences` unchanged."""
        return sequences
class Field(RawField):
    r"""
    A datatype that can be converted to :class:`~torch.Tensor` objects.

    A :class:`Field` holds a :class:`Vocab` object defining the set of
    possible values for its elements and their numerical representations,
    along with instructions for numericalization such as a tokenization
    method.

    Args:
        name (str):
            The name of the field.
        pad_token (str):
            The string token used as padding. Default: ``None``.
        unk_token (str):
            The string token used to represent OOV words. Default: ``None``.
        bos_token (str):
            A token prepended to every example using this field, or ``None``
            for no `bos_token`. Default: ``None``.
        eos_token (str):
            A token appended to every example using this field, or ``None``
            for no `eos_token`.
        lower (bool):
            Whether to lowercase the text in this field. Default: ``False``.
        use_vocab (bool):
            Whether to use a :class:`Vocab` object. If ``False``, the data in
            this field should already be numerical. Default: ``True``.
        tokenize (function):
            The function used to tokenize strings using this field into
            sequential examples. Default: ``None``.
        fn (function):
            The function used for preprocessing the examples. Default: ``None``.
    """

    def __init__(self, name, pad=None, unk=None, bos=None, eos=None,
                 lower=False, use_vocab=True, tokenize=None, fn=None, mask_token_id=0):
        self.name = name
        self.pad = pad
        self.unk = unk
        self.bos = bos
        self.eos = eos
        self.lower = lower
        self.use_vocab = use_vocab
        self.tokenize = tokenize
        self.fn = fn
        self.mask_token_id = mask_token_id
        # special tokens, in a fixed order, skipping the undefined ones
        self.specials = [token for token in (pad, unk, bos, eos)
                         if token is not None]

    def __repr__(self):
        params = []
        if self.pad is not None:
            params.append(f"pad={self.pad}")
        if self.unk is not None:
            params.append(f"unk={self.unk}")
        if self.bos is not None:
            params.append(f"bos={self.bos}")
        if self.eos is not None:
            params.append(f"eos={self.eos}")
        if self.lower:
            params.append(f"lower={self.lower}")
        if not self.use_vocab:
            params.append(f"use_vocab={self.use_vocab}")
        return f"({self.name}): {self.__class__.__name__}({', '.join(params)})"

    @property
    def pad_index(self):
        if self.pad is None:
            return 0
        if hasattr(self, 'vocab'):
            return self.vocab[self.pad]
        return self.specials.index(self.pad)

    @property
    def unk_index(self):
        if self.unk is None:
            return 0
        if hasattr(self, 'vocab'):
            return self.vocab[self.unk]
        return self.specials.index(self.unk)

    @property
    def bos_index(self):
        if not self.bos:
            return 0
        if hasattr(self, 'vocab'):
            return self.vocab[self.bos]
        return self.specials.index(self.bos)

    @property
    def eos_index(self):
        if not self.eos:
            return 0
        if hasattr(self, 'vocab'):
            return self.vocab[self.eos]
        return self.specials.index(self.eos)

    @property
    def device(self):
        return 'cuda' if torch.cuda.is_available() else 'cpu'

    def preprocess(self, sequence):
        r"""
        Loads a single example using this field, tokenizing if necessary.

        The sequence is first passed through ``fn`` if available, then
        tokenized if ``tokenize`` is set, and finally lowercased if
        requested.

        Args:
            sequence (list):
                The sequence to be preprocessed.

        Returns:
            A list of preprocessed sequence.
        """
        if self.fn is not None:
            sequence = self.fn(sequence)
        if self.tokenize is not None:
            sequence = self.tokenize(sequence)
        if self.lower:
            sequence = [str.lower(tok) for tok in sequence]
        return sequence

    def build(self, dataset, min_freq=1, embed=None):
        r"""
        Constructs a :class:`Vocab` object for this field from the dataset.
        Has no effect if the vocabulary already exists.

        Args:
            dataset (Dataset):
                A :class:`Dataset` object. One of its attributes should be
                named after this field.
            min_freq (int):
                The minimum frequency needed to include a token in the
                vocabulary. Default: 1.
            embed (Embedding):
                An Embedding object whose words will be extended to the
                vocabulary. Default: ``None``.
        """
        if hasattr(self, 'vocab'):
            return
        counter = Counter(token
                          for seq in getattr(dataset, self.name)
                          for token in self.preprocess(seq))
        self.vocab = Vocab(counter, min_freq, self.specials, self.unk_index)
        if not embed:
            self.embed = None
            return
        tokens = self.preprocess(embed.tokens)
        # if the `unk` token was present in the pretrained,
        # replace it with the field-level one
        if embed.unk:
            tokens[embed.unk_index] = self.unk
        self.vocab.extend(tokens)
        self.embed = torch.zeros(len(self.vocab), embed.dim)
        self.embed[self.vocab[tokens]] = embed.vectors
        self.embed /= torch.std(self.embed)

    def transform(self, sequences: List[List[str]]) -> List[torch.Tensor]:
        r"""
        Turns a list of sequences using this field into tensors.

        Each sequence is preprocessed and then numericalized if needed.

        Args:
            sequences (list[list[str]]):
                A list of sequences.

        Returns:
            A list of tensors transformed from the input sequences.
        """
        seqs = [self.preprocess(seq) for seq in sequences]
        if self.use_vocab:
            seqs = [self.vocab[seq] for seq in seqs]
        # optional boundary tokens
        prefix = [self.bos_index] if self.bos else []
        suffix = [self.eos_index] if self.eos else []
        return [torch.tensor(prefix + seq + suffix) for seq in seqs]

    def compose(self, sequences):
        r"""
        Composes a batch of sequences into a padded tensor.

        Args:
            sequences (list[~torch.Tensor]):
                A list of tensors.

        Returns:
            A padded tensor converted to the proper device.
        """
        return pad(sequences, self.pad_index).to(self.device)
class SubwordField(Field):
    r"""
    A field that tokenizes and numericalizes each token rather than the
    whole sequence, as needed by models consuming character/subword-level
    inputs such as CharLSTM and BERT.

    Args:
        fix_len (int):
            A fixed length that all subword pieces will be padded to; pieces
            exceeding it are truncated. To save memory, the final length is
            the smaller of this value and the longest piece sequence in a
            batch.

    Examples:
        >>> from transformers import AutoTokenizer
        >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
        >>> field = SubwordField('bert',
                                 pad=tokenizer.pad_token,
                                 unk=tokenizer.unk_token,
                                 bos=tokenizer.cls_token,
                                 eos=tokenizer.sep_token,
                                 fix_len=20,
                                 tokenize=tokenizer.tokenize)
        >>> field.vocab = tokenizer.get_vocab()  # no need to re-build the vocab
        >>> field.transform([['This', 'field', 'performs', 'token-level', 'tokenization']])[0]
        tensor([[  101,     0,     0],
                [ 1188,     0,     0],
                [ 1768,     0,     0],
                [10383,     0,     0],
                [22559,   118,  1634],
                [22559,  2734,     0],
                [  102,     0,     0]])
    """

    def __init__(self, *args, fix_len=0, **kwargs):
        self.fix_len = fix_len
        super().__init__(*args, **kwargs)

    def build(self, dataset, min_freq=1, embed=None):
        sequences = getattr(dataset, self.name)
        # count individual subword pieces rather than whole tokens
        counter = Counter(piece
                          for seq in sequences
                          for token in seq
                          for piece in self.preprocess(token))
        self.vocab = Vocab(counter, min_freq, self.specials, self.unk_index)
        if not embed:
            self.embed = None
            return
        tokens = self.preprocess(embed.tokens)
        # if the `unk` token existed in the pretrained,
        # replace it with the field-level one
        if embed.unk:
            tokens[embed.unk_index] = self.unk
        self.vocab.extend(tokens)
        self.embed = torch.zeros(len(self.vocab), embed.dim)
        self.embed[self.vocab[tokens]] = embed.vectors

    def transform(self, sequences):
        seqs = [[self.preprocess(token) for token in seq]
                for seq in sequences]
        if self.fix_len <= 0:
            # infer the length cap from the longest token in the data
            self.fix_len = max(len(token)
                               for seq in seqs
                               for token in seq)
        if self.use_vocab:
            seqs = [[[self.vocab[piece] for piece in token] for token in seq]
                    for seq in seqs]
        if self.bos:
            seqs = [[[self.bos_index], *seq] for seq in seqs]
        if self.eos:
            seqs = [[*seq, [self.eos_index]] for seq in seqs]
        batch = []
        for seq in seqs:
            # truncate pieces to fix_len and pad each token to the same width
            width = min(self.fix_len, max(len(ids) for ids in seq))
            batch.append(pad([torch.tensor(ids[:width]) for ids in seq],
                             self.pad_index, width))
        return batch
class BertField(SubwordField):
    r"""
    A field handled by a transformer tokenizer.

    Args:
        name (str): name of the field.
        tokenizer (AutoTokenizer): the tokenizer for the transformer.
        fix_len (int):
            A fixed length that all subword pieces will be padded to; pieces
            exceeding it are truncated. To save memory, the final length is
            the smaller of this value and the longest piece sequence in a
            batch.

    Examples:
        >>> tokenizer = BertField.tokenizer('bert-base-cased')
        >>> field = BertField('bert',
                              tokenizer,
                              fix_len=20)
        >>> field.transform([['This', 'field', 'performs', 'token-level', 'tokenization']])[0]
        tensor([[  101,     0,     0],
                [ 1188,     0,     0],
                [ 1768,     0,     0],
                [10383,     0,     0],
                [22559,   118,  1634],
                [22559,  2734,     0],
                [  102,     0,     0]])
    """

    def __init__(self, name, tokenizer, **kwargs):
        if hasattr(tokenizer, 'vocab'):
            self.vocab = tokenizer.get_vocab()
        else:
            # build an explicit token -> id mapping for tokenizers
            # lacking a `vocab` attribute
            token2id = {tokenizer._convert_id_to_token(i): i
                        for i in range(len(tokenizer))}
            self.vocab = FieldVocab(tokenizer.unk_token_id, token2id)
        super().__init__(name,
                         pad=tokenizer.pad_token,
                         unk=tokenizer.unk_token,
                         bos=tokenizer.bos_token or tokenizer.cls_token,
                         mask_token_id=tokenizer.mask_token_id,
                         tokenize=tokenizer.tokenize,
                         **kwargs)

    def build(self, dataset):
        """
        The vocab comes from the pretrained tokenizer: nothing to build.
        """
        return

    @classmethod
    def tokenizer(cls, name):
        """
        Create an instance of tokenizer from either path or name.
        :param name: path or name of tokenizer.
        """
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(name)
        # fall back on CLS/SEP when the model defines no BOS/EOS tokens
        tokenizer.bos_token = tokenizer.bos_token or tokenizer.cls_token
        tokenizer.eos_token = tokenizer.eos_token or tokenizer.sep_token
        return tokenizer
class ChartField(Field):
    r"""
    Field dealing with constituency trees.

    Receives sequences of binarized trees factorized in pre-order and
    returns charts filled with the label of each constituent (or -1 for
    non-constituent cells).

    Examples:
        >>> sequence = [(0, 5, 'S'), (0, 4, 'S|<>'), (0, 1, 'NP'), (1, 4, 'VP'), (1, 2, 'VP|<>'),
                        (2, 4, 'S+VP'), (2, 3, 'VP|<>'), (3, 4, 'NP'), (4, 5, 'S|<>')]
        >>> field.transform([sequence])[0]
        tensor([[ -1,  37,  -1,  -1, 107,  79],
                [ -1,  -1, 120,  -1, 112,  -1],
                [ -1,  -1,  -1, 120,  86,  -1],
                [ -1,  -1,  -1,  -1,  37,  -1],
                [ -1,  -1,  -1,  -1,  -1, 107],
                [ -1,  -1,  -1,  -1,  -1,  -1]])
    """

    def build(self, dataset, min_freq=1):
        labels = Counter(label
                         for seq in getattr(dataset, self.name)
                         for i, j, label in self.preprocess(seq))
        self.vocab = Vocab(labels, min_freq, self.specials, self.unk_index)

    def transform(self, sequences):
        charts = []
        for sequence in sequences:
            spans = self.preprocess(sequence)
            # the first span covers the whole sentence, so its right
            # boundary fixes the chart size
            n = spans[0][1] + 1
            chart = torch.full((n, n), -1, dtype=torch.long)
            for left, right, label in spans:
                chart[left, right] = self.vocab[label]
            charts.append(chart)
        return charts
| 15,427 | 35.733333 | 117 | py |
diaparser | diaparser-master/diaparser/utils/alg.py | # -*- coding: utf-8 -*-
import torch
from ..utils.fn import pad, stripe
def kmeans(x, k, max_it=32):
    r"""
    KMeans algorithm for clustering the sentences by length.

    Args:
        x (list[int]):
            The list of sentence lengths.
        k (int):
            The number of clusters; this is an approximate value, the final
            number of clusters can be less or equal to `k`.
        max_it (int):
            Maximum number of iterations. The algorithm stops early once the
            centroids converge.

    Returns:
        list[float], list[list[int]]:
            The average length of the sentences in each cluster, and the
            clusters as lists of datapoint indices.

    Examples:
        >>> x = torch.randint(10,20,(10,)).tolist()
        >>> x
        [15, 10, 17, 11, 18, 13, 17, 19, 18, 14]
        >>> centroids, clusters = kmeans(x, 3)
        >>> centroids
        [10.5, 14.0, 17.799999237060547]
        >>> clusters
        [[1, 3], [0, 5, 9], [2, 4, 6, 7, 8]]
    """
    # k may not exceed the number of datapoints
    x, k = torch.tensor(x, dtype=torch.float), min(len(x), k)
    uniq = x.unique()
    # initialize the centroids with k distinct datapoints, chosen at random
    c = uniq[torch.randperm(len(uniq))[:k]]
    # initial assignment: every datapoint joins its nearest centroid
    dists, y = torch.abs_(x.unsqueeze(-1) - c).min(-1)

    for _ in range(max_it):
        # if a cluster went empty, repopulate it with the datapoint
        # farthest from the centroid of the biggest cluster
        mask = torch.arange(k).unsqueeze(-1).eq(y)
        empty = torch.where(~mask.any(-1))[0].tolist()
        while empty:
            for i in empty:
                # the biggest cluster
                biggest = torch.where(mask[mask.sum(-1).argmax()])[0]
                # its farthest member
                farthest = dists[biggest].argmax()
                # move that member into the empty cluster i
                y[biggest[farthest]] = i
                mask = torch.arange(k).unsqueeze(-1).eq(y)
            empty = torch.where(~mask.any(-1))[0].tolist()
        # move each centroid to the mean of its cluster
        c, old = (x * mask).sum(-1) / mask.sum(-1), c
        # re-assign every datapoint to the nearest new centroid
        dists, y = torch.abs_(x.unsqueeze(-1) - c).min(-1)
        # stop once the centroids no longer move
        if c.equal(old):
            break

    # keep only the clusters that ended up non-empty
    survivors = y.unique().tolist()
    centroids = c[survivors].tolist()
    clusters = [torch.where(y.eq(i))[0].tolist() for i in survivors]
    return centroids, clusters
def tarjan(sequence):
    r"""
    Tarjan's algorithm for finding the Strongly Connected Components (SCCs)
    of the graph encoded by a list of head indices.

    Args:
        sequence (list):
            List of head indices.

    Yields:
        Lists of node indices, one per SCC. All self-loops are ignored.

    Examples:
        >>> next(tarjan([2, 5, 0, 3, 1]))  # (1 -> 5 -> 2 -> 1) is a cycle
        [2, 5, 1]
    """
    # prepend a pseudo head for the root position
    heads = [-1] + sequence
    n = len(heads)
    # discovery timestep of each node
    index = [-1] * n
    # smallest timestep reachable within the node's SCC
    lowlink = [-1] * n
    stack, onstack = [], [False] * n

    def visit(v, clock):
        index[v] = lowlink[v] = clock[0]
        clock[0] += 1
        stack.append(v)
        onstack[v] = True
        # explore every dependent of v
        for u, head in enumerate(heads):
            if head != v:
                continue
            if index[u] == -1:
                yield from visit(u, clock)
                lowlink[v] = min(lowlink[v], lowlink[u])
            elif onstack[u]:
                lowlink[v] = min(lowlink[v], index[u])
        # v is the root of an SCC: pop its members off the stack
        if lowlink[v] == index[v]:
            scc = [stack.pop()]
            while scc[-1] != v:
                onstack[scc[-1]] = False
                scc.append(stack.pop())
            onstack[v] = False
            # singleton SCCs (self-loops) are not reported
            if len(scc) > 1:
                yield scc

    clock = [0]
    for v in range(n):
        if index[v] == -1:
            yield from visit(v, clock)
def chuliu_edmonds(s):
    r"""
    ChuLiu/Edmonds algorithm for non-projective decoding.
    Some code is borrowed from `tdozat's implementation`_.
    Descriptions of notations and formulas can be found in
    `Non-projective Dependency Parsing using Spanning Tree Algorithms`_.
    Notes:
        The algorithm does not guarantee to parse a single-root tree.
        The input scores are modified in place.
    References:
        - Ryan McDonald, Fernando Pereira, Kiril Ribarov and Jan Hajic. 2005.
          `Non-projective Dependency Parsing using Spanning Tree Algorithms`_.
    Args:
        s (~torch.Tensor): ``[seq_len, seq_len]``.
            Scores of all dependent-head pairs.
    Returns:
        ~torch.Tensor:
            A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.
    .. _tdozat's implementation:
        https://github.com/tdozat/Parser-v3
    .. _Non-projective Dependency Parsing using Spanning Tree Algorithms:
        https://www.aclweb.org/anthology/H05-1066/
    """
    # the pseudo root (position 0) cannot be a dependent of any word
    s[0, 1:] = float('-inf')
    # prevent self-loops
    s.diagonal()[1:].fill_(float('-inf'))
    # select heads with highest scores
    tree = s.argmax(-1)
    # return the cycle found by tarjan algorithm lazily
    cycle = next(tarjan(tree.tolist()[1:]), None)
    # if the tree has no cycles, then it is a MST
    if not cycle:
        return tree
    # indices of cycle in the original tree
    cycle = torch.tensor(cycle)
    # indices of noncycle in the original tree
    noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
    noncycle = torch.where(noncycle.gt(0))[0]
    def contract(s):
        # heads of cycle in original tree
        cycle_heads = tree[cycle]
        # scores of cycle in original tree
        s_cycle = s[cycle, cycle_heads]
        # calculate the scores of cycle's potential dependents
        # s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
        s_dep = s[noncycle][:, cycle]
        # find the best cycle head for each noncycle dependent
        deps = s_dep.argmax(1)
        # calculate the scores of cycle's potential heads
        # s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle
        #           a(v) is the predecessor of v in cycle
        #           s(cycle) = sum(s(a(v)->v))
        s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
        # find the best noncycle head for each cycle dependent
        heads = s_head.argmax(0)
        # the contracted graph: all noncycle nodes plus one extra node (index -1)
        # standing for the whole cycle
        contracted = torch.cat((noncycle, torch.tensor([-1])))
        # calculate the scores of contracted graph
        s = s[contracted][:, contracted]
        # set the contracted graph scores of cycle's potential dependents
        s[:-1, -1] = s_dep[range(len(deps)), deps]
        # set the contracted graph scores of cycle's potential heads
        s[-1, :-1] = s_head[heads, range(len(heads))]
        return s, heads, deps
    # keep track of the endpoints of the edges into and out of cycle for reconstruction later
    s, heads, deps = contract(s)
    # y is the contracted tree, solved recursively on the smaller graph
    y = chuliu_edmonds(s)
    # exclude head of cycle from y
    y, cycle_head = y[:-1], y[-1]
    # fix the subtree with no heads coming from the cycle
    # len(y) denotes heads coming from the cycle
    subtree = y < len(y)
    # add the nodes to the new tree
    tree[noncycle[subtree]] = noncycle[y[subtree]]
    # fix the subtree with heads coming from the cycle
    subtree = ~subtree
    # add the nodes to the tree
    tree[noncycle[subtree]] = cycle[deps[subtree]]
    # fix the root of the cycle
    cycle_root = heads[cycle_head]
    # break the cycle and add the root of the cycle to the tree
    tree[cycle[cycle_root]] = noncycle[cycle_head]
    return tree
def mst(scores, mask, multiroot=False):
    r"""
    MST algorithm for decoding non-projective trees.
    This is a wrapper for ChuLiu/Edmonds algorithm.
    The algorithm first runs ChuLiu/Edmonds to parse a tree and then have a check of multi-roots,
    If ``multiroot=True`` and there indeed exist multi-roots, the algorithm seeks to find
    best single-root trees by iterating all possible single-root trees parsed by ChuLiu/Edmonds.
    Otherwise the resulting trees are directly taken as the final outputs.
    Args:
        scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
            Scores of all dependent-head pairs.
        mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
            The mask to avoid parsing over padding tokens.
            The first column serving as pseudo words for roots should be ``False``.
        multiroot (bool):
            Ensures to parse a single-root tree If ``False``.
    Returns:
        ~torch.Tensor:
            A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.
    Examples:
        >>> scores = torch.tensor([[[-11.9436, -13.1464,  -6.4789, -13.8917],
                                    [-60.6957, -60.2866, -48.6457, -63.8125],
                                    [-38.1747, -49.9296, -45.2733, -49.5571],
                                    [-19.7504, -23.9066,  -9.9139, -16.2088]]])
        >>> scores[:, 0, 1:] = float('-inf')
        >>> scores.diagonal(0, 1, 2)[1:].fill_(float('-inf'))
        >>> mask = torch.tensor([[False,  True,  True,  True]])
        >>> mst(scores, mask)
        tensor([[0, 2, 0, 2]])
    """
    batch_size, seq_len, _ = scores.shape
    # decode on CPU, sentence by sentence
    scores = scores.cpu().unbind()
    preds = []
    for i, length in enumerate(mask.sum(1).tolist()):
        # include the pseudo root, hence length+1
        s = scores[i][:length+1, :length+1]
        tree = chuliu_edmonds(s)
        # all positions attached directly to the pseudo root
        roots = torch.where(tree[1:].eq(0))[0] + 1
        if not multiroot and len(roots) > 1:
            # try each candidate root in turn and keep the highest-scoring single-root tree
            s_root = s[:, 0]
            s_best = float('-inf')
            s = s.index_fill(1, torch.tensor(0), float('-inf'))
            for root in roots:
                # allow only `root` to attach to the pseudo root
                s[:, 0] = float('-inf')
                s[root, 0] = s_root[root]
                t = chuliu_edmonds(s)
                s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
                if s_tree > s_best:
                    s_best, tree = s_tree, t
        preds.append(tree)
    return pad(preds, total_length=seq_len).to(mask.device)
def eisner(scores, mask):
    r"""
    First-order Eisner algorithm for projective decoding.
    References:
        - Ryan McDonald, Koby Crammer and Fernando Pereira. 2005.
          `Online Large-Margin Training of Dependency Parsers`_.
    Args:
        scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
            Scores of all dependent-head pairs.
        mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
            The mask to avoid parsing over padding tokens.
            The first column serving as pseudo words for roots should be ``False``.
    Returns:
        ~torch.Tensor:
            A tensor with shape ``[batch_size, seq_len]`` for the resulting projective parse trees.
    Examples:
        >>> scores = torch.tensor([[[-13.5026, -18.3700, -13.0033, -16.6809],
                                    [-36.5235, -28.6344, -28.4696, -31.6750],
                                    [ -2.9084,  -7.4825,  -1.4861,  -6.8709],
                                    [-29.4880, -27.6905, -26.1498, -27.0233]]])
        >>> mask = torch.tensor([[False,  True,  True,  True]])
        >>> eisner(scores, mask)
        tensor([[0, 2, 0, 2]])
    .. _Online Large-Margin Training of Dependency Parsers:
        https://www.aclweb.org/anthology/P05-1012/
    """
    lens = mask.sum(1)
    batch_size, seq_len, _ = scores.shape
    # rearrange to [head, dependent, batch] so diagonals index spans
    scores = scores.permute(2, 1, 0)
    # score charts for incomplete (s_i) and complete (s_c) spans
    s_i = torch.full_like(scores, float('-inf'))
    s_c = torch.full_like(scores, float('-inf'))
    # backpointers of the split positions for each chart
    p_i = scores.new_zeros(seq_len, seq_len, batch_size).long()
    p_c = scores.new_zeros(seq_len, seq_len, batch_size).long()
    s_c.diagonal().fill_(0)
    # bottom-up over span widths
    for w in range(1, seq_len):
        n = seq_len - w
        starts = p_i.new_tensor(range(n)).unsqueeze(0)
        # ilr = C(i->r) + C(j->r+1)
        ilr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1))
        # [batch_size, n, w]
        il = ir = ilr.permute(2, 0, 1)
        # I(j->i) = max(C(i->r) + C(j->r+1) + s(j->i)), i <= r < j
        il_span, il_path = il.max(-1)
        s_i.diagonal(-w).copy_(il_span + scores.diagonal(-w))
        p_i.diagonal(-w).copy_(il_path + starts)
        # I(i->j) = max(C(i->r) + C(j->r+1) + s(i->j)), i <= r < j
        ir_span, ir_path = ir.max(-1)
        s_i.diagonal(w).copy_(ir_span + scores.diagonal(w))
        p_i.diagonal(w).copy_(ir_path + starts)
        # C(j->i) = max(C(r->i) + I(j->r)), i <= r < j
        cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0))
        cl_span, cl_path = cl.permute(2, 0, 1).max(-1)
        s_c.diagonal(-w).copy_(cl_span)
        p_c.diagonal(-w).copy_(cl_path + starts)
        # C(i->j) = max(I(i->r) + C(r->j)), i < r <= j
        cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0)
        cr_span, cr_path = cr.permute(2, 0, 1).max(-1)
        s_c.diagonal(w).copy_(cr_span)
        # only sentences of exactly length w may complete the span (0, w) from the root
        s_c[0, w][lens.ne(w)] = float('-inf')
        p_c.diagonal(w).copy_(cr_path + starts + 1)
    def backtrack(p_i, p_c, heads, i, j, complete):
        # recover heads by following the stored split points
        if i == j:
            return
        if complete:
            r = p_c[i, j]
            backtrack(p_i, p_c, heads, i, r, False)
            backtrack(p_i, p_c, heads, r, j, True)
        else:
            r, heads[j] = p_i[i, j], i
            i, j = sorted((i, j))
            backtrack(p_i, p_c, heads, i, r, True)
            backtrack(p_i, p_c, heads, j, r + 1, True)
    preds = []
    p_c = p_c.permute(2, 0, 1).cpu()
    p_i = p_i.permute(2, 0, 1).cpu()
    for i, length in enumerate(lens.tolist()):
        heads = p_c.new_zeros(length + 1, dtype=torch.long)
        backtrack(p_i[i], p_c[i], heads, 0, length, True)
        preds.append(heads.to(mask.device))
    return pad(preds, total_length=seq_len).to(mask.device)
def eisner2o(scores, mask):
    r"""
    Second-order Eisner algorithm for projective decoding.
    This is an extension of the first-order one that further incorporates sibling scores into tree scoring.
    References:
        - Ryan McDonald and Fernando Pereira. 2006.
          `Online Learning of Approximate Dependency Parsing Algorithms`_.
    Args:
        scores (~torch.Tensor, ~torch.Tensor):
            A tuple of two tensors representing the first-order and second-order scores respectively.
            The first (``[batch_size, seq_len, seq_len]``) holds scores of all dependent-head pairs.
            The second (``[batch_size, seq_len, seq_len, seq_len]``) holds scores of all dependent-head-sibling triples.
        mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
            The mask to avoid parsing over padding tokens.
            The first column serving as pseudo words for roots should be ``False``.
    Returns:
        ~torch.Tensor:
            A tensor with shape ``[batch_size, seq_len]`` for the resulting projective parse trees.
    Examples:
        >>> s_arc = torch.tensor([[[ -2.8092,  -7.9104,  -0.9414,  -5.4360],
                                   [-10.3494,  -7.9298,  -3.6929,  -7.3985],
                                   [  1.1815,  -3.8291,   2.3166,  -2.7183],
                                   [ -3.9776,  -3.9063,  -1.6762,  -3.1861]]])
        >>> s_sib = torch.tensor([[[[ 0.4719,  0.4154,  1.1333,  0.6946],
                                    [ 1.1252,  1.3043,  2.1128,  1.4621],
                                    [ 0.5974,  0.5635,  1.0115,  0.7550],
                                    [ 1.1174,  1.3794,  2.2567,  1.4043]],
                                   [[-2.1480, -4.1830, -2.5519, -1.8020],
                                    [-1.2496, -1.7859, -0.0665, -0.4938],
                                    [-2.6171, -4.0142, -2.9428, -2.2121],
                                    [-0.5166, -1.0925,  0.5190,  0.1371]],
                                   [[ 0.5827, -1.2499, -0.0648, -0.0497],
                                    [ 1.4695,  0.3522,  1.5614,  1.0236],
                                    [ 0.4647, -0.7996, -0.3801,  0.0046],
                                    [ 1.5611,  0.3875,  1.8285,  1.0766]],
                                   [[-1.3053, -2.9423, -1.5779, -1.2142],
                                    [-0.1908, -0.9699,  0.3085,  0.1061],
                                    [-1.6783, -2.8199, -1.8853, -1.5653],
                                    [ 0.3629, -0.3488,  0.9011,  0.5674]]]])
        >>> mask = torch.tensor([[False,  True,  True,  True]])
        >>> eisner2o((s_arc, s_sib), mask)
        tensor([[0, 2, 0, 2]])
    .. _Online Learning of Approximate Dependency Parsing Algorithms:
        https://www.aclweb.org/anthology/E06-1011/
    """
    # the end position of each sentence in a batch
    lens = mask.sum(1)
    s_arc, s_sib = scores
    batch_size, seq_len, _ = s_arc.shape
    # [seq_len, seq_len, batch_size]
    s_arc = s_arc.permute(2, 1, 0)
    # [seq_len, seq_len, seq_len, batch_size]
    s_sib = s_sib.permute(2, 1, 3, 0)
    # score charts: incomplete spans (s_i), sibling spans (s_s), complete spans (s_c)
    s_i = torch.full_like(s_arc, float('-inf'))
    s_s = torch.full_like(s_arc, float('-inf'))
    s_c = torch.full_like(s_arc, float('-inf'))
    # backpointers of the split positions for each chart
    p_i = s_arc.new_zeros(seq_len, seq_len, batch_size).long()
    p_s = s_arc.new_zeros(seq_len, seq_len, batch_size).long()
    p_c = s_arc.new_zeros(seq_len, seq_len, batch_size).long()
    s_c.diagonal().fill_(0)
    for w in range(1, seq_len):
        # n denotes the number of spans to iterate,
        # from span (0, w) to span (n, n+w) given width w
        n = seq_len - w
        starts = p_i.new_tensor(range(n)).unsqueeze(0)
        # I(j->i) = max(I(j->r) + S(j->r, i)), i < r < j |
        #               C(j->j) + C(i->j-1))
        #           + s(j->i)
        # [n, w, batch_size]
        il = stripe(s_i, n, w, (w, 1)) + stripe(s_s, n, w, (1, 0), 0)
        il += stripe(s_sib[range(w, n+w), range(n)], n, w, (0, 1))
        # [n, 1, batch_size]
        il0 = stripe(s_c, n, 1, (w, w)) + stripe(s_c, n, 1, (0, w - 1))
        # il0[0] are set to zeros since the scores of the complete spans starting from 0 are always -inf
        il[:, -1] = il0.index_fill_(0, lens.new_tensor(0), 0).squeeze(1)
        il_span, il_path = il.permute(2, 0, 1).max(-1)
        s_i.diagonal(-w).copy_(il_span + s_arc.diagonal(-w))
        p_i.diagonal(-w).copy_(il_path + starts + 1)
        # I(i->j) = max(I(i->r) + S(i->r, j), i < r < j |
        #               C(i->i) + C(j->i+1))
        #           + s(i->j)
        # [n, w, batch_size]
        ir = stripe(s_i, n, w) + stripe(s_s, n, w, (0, w), 0)
        ir += stripe(s_sib[range(n), range(w, n+w)], n, w)
        ir[0] = float('-inf')
        # [n, 1, batch_size]
        ir0 = stripe(s_c, n, 1) + stripe(s_c, n, 1, (w, 1))
        ir[:, 0] = ir0.squeeze(1)
        ir_span, ir_path = ir.permute(2, 0, 1).max(-1)
        s_i.diagonal(w).copy_(ir_span + s_arc.diagonal(w))
        p_i.diagonal(w).copy_(ir_path + starts)
        # [n, w, batch_size]
        slr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1))
        slr_span, slr_path = slr.permute(2, 0, 1).max(-1)
        # S(j, i) = max(C(i->r) + C(j->r+1)), i <= r < j
        s_s.diagonal(-w).copy_(slr_span)
        p_s.diagonal(-w).copy_(slr_path + starts)
        # S(i, j) = max(C(i->r) + C(j->r+1)), i <= r < j
        s_s.diagonal(w).copy_(slr_span)
        p_s.diagonal(w).copy_(slr_path + starts)
        # C(j->i) = max(C(r->i) + I(j->r)), i <= r < j
        cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0))
        cl_span, cl_path = cl.permute(2, 0, 1).max(-1)
        s_c.diagonal(-w).copy_(cl_span)
        p_c.diagonal(-w).copy_(cl_path + starts)
        # C(i->j) = max(I(i->r) + C(r->j)), i < r <= j
        cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0)
        cr_span, cr_path = cr.permute(2, 0, 1).max(-1)
        s_c.diagonal(w).copy_(cr_span)
        # disable multi words to modify the root
        s_c[0, w][lens.ne(w)] = float('-inf')
        p_c.diagonal(w).copy_(cr_path + starts + 1)
    def backtrack(p_i, p_s, p_c, heads, i, j, flag):
        # recover heads by following the stored split points;
        # flag selects the chart: 'c'omplete, 's'ibling or 'i'ncomplete span
        if i == j:
            return
        if flag == 'c':
            r = p_c[i, j]
            backtrack(p_i, p_s, p_c, heads, i, r, 'i')
            backtrack(p_i, p_s, p_c, heads, r, j, 'c')
        elif flag == 's':
            r = p_s[i, j]
            i, j = sorted((i, j))
            backtrack(p_i, p_s, p_c, heads, i, r, 'c')
            backtrack(p_i, p_s, p_c, heads, j, r + 1, 'c')
        elif flag == 'i':
            r, heads[j] = p_i[i, j], i
            if r == i:
                r = i + 1 if i < j else i - 1
                backtrack(p_i, p_s, p_c, heads, j, r, 'c')
            else:
                backtrack(p_i, p_s, p_c, heads, i, r, 'i')
                backtrack(p_i, p_s, p_c, heads, r, j, 's')
    preds = []
    p_i = p_i.permute(2, 0, 1).cpu()
    p_s = p_s.permute(2, 0, 1).cpu()
    p_c = p_c.permute(2, 0, 1).cpu()
    for i, length in enumerate(lens.tolist()):
        heads = p_c.new_zeros(length + 1, dtype=torch.long)
        backtrack(p_i[i], p_s[i], p_c[i], heads, 0, length, 'c')
        preds.append(heads.to(mask.device))
    return pad(preds, total_length=seq_len).to(mask.device)
def cky(scores, mask):
    r"""
    The implementation of `Cocke-Kasami-Younger`_ (CKY) algorithm to parse constituency trees.
    References:
        - Yu Zhang, Houquan Zhou and Zhenghua Li. 2020.
          `Fast and Accurate Neural CRF Constituency Parsing`_.
    Args:
        scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
            Scores of all candidate constituents.
        mask (~torch.BoolTensor): ``[batch_size, seq_len, seq_len]``.
            The mask to avoid parsing over padding tokens.
            For each square matrix in a batch, the positions except upper triangular part should be masked out.
    Returns:
        Sequences of factorized predicted bracketed trees that are traversed in pre-order.
    Examples:
        >>> scores = torch.tensor([[[ 2.5659,  1.4253, -2.5272,  3.3011],
                                    [ 1.3687, -0.5869,  1.0011,  3.3020],
                                    [ 1.2297,  0.4862,  1.1975,  2.5387],
                                    [-0.0511, -1.2541, -0.7577,  0.2659]]])
        >>> mask = torch.tensor([[[False,  True,  True,  True],
                                  [False, False,  True,  True],
                                  [False, False, False,  True],
                                  [False, False, False, False]]])
        >>> cky(scores, mask)
        [[(0, 3), (0, 1), (1, 3), (1, 2), (2, 3)]]
    .. _Cocke-Kasami-Younger:
        https://en.wikipedia.org/wiki/CYK_algorithm
    .. _Fast and Accurate Neural CRF Constituency Parsing:
        https://www.ijcai.org/Proceedings/2020/560/
    """
    # number of valid positions in the first row gives each sentence length
    lens = mask[:, 0].sum(-1)
    # [left, right, batch_size]
    scores = scores.permute(1, 2, 0)
    # NOTE: seq_len is deliberately unpacked twice since the matrix is square
    seq_len, seq_len, batch_size = scores.shape
    # chart of best span scores and split-point backpointers
    s = scores.new_zeros(seq_len, seq_len, batch_size)
    p = scores.new_zeros(seq_len, seq_len, batch_size).long()
    for w in range(1, seq_len):
        n = seq_len - w
        starts = p.new_tensor(range(n)).unsqueeze(0)
        if w == 1:
            # base case: single-word spans take their scores directly
            s.diagonal(w).copy_(scores.diagonal(w))
            continue
        # combine left sub-spans with right sub-spans (widths 1..w-1)
        # [n, w-1, batch_size]
        s_span = stripe(s, n, w-1, (0, 1)) + stripe(s, n, w-1, (1, w), 0)
        # [batch_size, n, w-1]
        s_span = s_span.permute(2, 0, 1)
        # [batch_size, n]
        s_span, p_span = s_span.max(-1)
        s.diagonal(w).copy_(s_span + scores.diagonal(w))
        p.diagonal(w).copy_(p_span + starts + 1)
    def backtrack(p, i, j):
        # recover the span factorization in pre-order from the backpointers
        if j == i + 1:
            return [(i, j)]
        split = p[i][j]
        ltree = backtrack(p, i, split)
        rtree = backtrack(p, split, j)
        return [(i, j)] + ltree + rtree
    p = p.permute(2, 0, 1).tolist()
    trees = [backtrack(p[i], 0, length) for i, length in enumerate(lens.tolist())]
    return trees
| 24,571 | 39.150327 | 120 | py |
diaparser | diaparser-master/diaparser/utils/fn.py | # -*- coding: utf-8 -*-
import unicodedata
def ispunct(token):
    """Return ``True`` if every character of ``token`` is Unicode punctuation (category ``P*``)."""
    for ch in token:
        if not unicodedata.category(ch).startswith('P'):
            return False
    return True
def isfullwidth(token):
    """Return ``True`` if every character of ``token`` is East Asian wide, fullwidth or ambiguous."""
    wide = {'W', 'F', 'A'}
    return all(unicodedata.east_asian_width(ch) in wide for ch in token)
def islatin(token):
    """Return ``True`` if the Unicode name of every character of ``token`` mentions LATIN."""
    for ch in token:
        if 'LATIN' not in unicodedata.name(ch):
            return False
    return True
def isdigit(token):
    """Return ``True`` if the Unicode name of every character of ``token`` mentions DIGIT."""
    for ch in token:
        if 'DIGIT' not in unicodedata.name(ch):
            return False
    return True
def tohalfwidth(token):
    """Map fullwidth characters of ``token`` to their halfwidth equivalents via NFKC normalization."""
    normalized = unicodedata.normalize('NFKC', token)
    return normalized
def isprojective(sequence):
    """
    Check whether the arcs induced by head ``sequence`` form a projective tree,
    i.e., no two arcs cross and no arc spans the head of another arc it shares an endpoint with.
    """
    heads = [0] + list(sequence)
    # (head, dependent) pairs, 1-based dependent positions; negative heads are skipped
    arcs = [(head, dep) for dep, head in enumerate(heads[1:], 1) if head >= 0]
    for k, (hi, di) in enumerate(arcs):
        li, ri = min(hi, di), max(hi, di)
        for hj, dj in arcs[k + 1:]:
            lj, rj = min(hj, dj), max(hj, dj)
            # one arc's span covers the other's head while ending at its own head
            if (li <= hj <= ri and hi == dj) or (lj <= hi <= rj and hj == di):
                return False
            # the two spans properly cross each other
            if (li < lj < ri or li < rj < ri) and (li - lj) * (ri - rj) > 0:
                return False
    return True
def istree(sequence, proj=False, multiroot=False):
    """
    Check whether the head ``sequence`` encodes a well-formed dependency tree:
    optionally projective (``proj``), optionally single-rooted (``not multiroot``),
    and in all cases acyclic with at least one root.
    """
    from ..utils.alg import tarjan
    if proj and not isprojective(sequence):
        return False
    n_roots = sum(1 for head in sequence[1:] if head == 0)
    if n_roots == 0:
        return False
    if n_roots > 1 and not multiroot:
        return False
    # a well-formed tree must contain no cycles
    return next(tarjan(sequence), None) is None
def numericalize(sequence):
    """Convert every item of ``sequence`` to an ``int``."""
    return list(map(int, sequence))
def stripe(x, n, w, offset=(0, 0), dim=1):
    r"""
    Returns a diagonal stripe of the tensor as a zero-copy strided view.

    Args:
        x (~torch.Tensor): the input tensor with 2 or more dims.
        n (int): the length of the stripe.
        w (int): the width of the stripe.
        offset (tuple): the offset of the first two dims.
        dim (int): 1 if returns a horizontal stripe; 0 otherwise.

    Returns:
        a diagonal stripe of the tensor.

    Examples:
        >>> x = torch.arange(25).view(5, 5)
        >>> x
        tensor([[ 0,  1,  2,  3,  4],
                [ 5,  6,  7,  8,  9],
                [10, 11, 12, 13, 14],
                [15, 16, 17, 18, 19],
                [20, 21, 22, 23, 24]])
        >>> stripe(x, 2, 3)
        tensor([[0, 1, 2],
                [6, 7, 8]])
        >>> stripe(x, 2, 3, (1, 1))
        tensor([[ 6,  7,  8],
                [12, 13, 14]])
        >>> stripe(x, 2, 3, (1, 1), 0)
        tensor([[ 6, 11, 16],
                [12, 17, 22]])
    """
    x = x.contiguous()
    seq_len = x.size(1)
    # number of elements in one (i, j) cell, 1 for 2-dim tensors
    numel = x[0, 0].numel()
    strides = list(x.stride())
    # consecutive stripe rows move one step down the diagonal
    strides[0] = (seq_len + 1) * numel
    # horizontal stripes advance along columns, vertical ones along rows
    strides[1] = numel if dim == 1 else seq_len * numel
    begin = (offset[0] * seq_len + offset[1]) * numel
    return x.as_strided(size=(n, w, *x.shape[2:]), stride=strides, storage_offset=begin)
def pad(tensors, padding_value=0, total_length=None):
    """
    Stack a list of tensors into one tensor, padding every dim to the largest size
    found across the list; ``total_length``, if given, fixes the size of the first
    padded dim (and must be at least as large as the longest tensor).
    """
    ndims = len(tensors[0].size())
    size = [len(tensors)]
    size += [max(t.size(d) for t in tensors) for d in range(ndims)]
    if total_length is not None:
        assert total_length >= size[1]
        size[1] = total_length
    padded = tensors[0].data.new(*size).fill_(padding_value)
    for i, t in enumerate(tensors):
        # copy each tensor into the top-left corner of its padded slot
        padded[i][tuple(slice(0, d) for d in t.size())] = t
    return padded
| 3,344 | 29.135135 | 78 | py |
diaparser | diaparser-master/diaparser/utils/data.py | # -*- coding: utf-8 -*-
from collections import namedtuple
import torch
import torch.distributed as dist
from ..utils.alg import kmeans
class Dataset(torch.utils.data.Dataset):
    r"""
    Dataset that is compatible with :class:`torch.utils.data.Dataset`.
    This serves as a wrapper for manipulating all data fields
    with the operating behaviours defined in :class:`Transform`.
    The data fields of all the instantiated sentences can be accessed as an attribute of the dataset.
    Args:
        transform (Transform):
            An instance of :class:`Transform` and its derivations.
            The instance holds a series of loading and processing behaviours with regard to the specfic data format.
        data (list[list] or str):
            A list of list of strings or a filename.
            This will be passed into :meth:`transform.load`.
        kwargs (dict):
            Keyword arguments that will be passed into :meth:`transform.load` together with `data`
            to control the loading behaviour.
    Attributes:
        transform (Transform):
            An instance of :class:`Transform`.
        sentences (list[Sentence]):
            A list of sentences loaded from the data.
            Each sentence includes fields obeying the data format defined in ``transform``.
    """
    def __init__(self, transform, data, **kwargs):
        super(Dataset, self).__init__()
        self.transform = transform
        self.sentences = transform.load(data, **kwargs)
    def __repr__(self):
        s = f"{self.__class__.__name__}("
        s += f"n_sentences={len(self.sentences)}"
        # loader/buckets only exist after build() has been called
        if hasattr(self, 'loader'):
            s += f", n_batches={len(self.loader)}"
        if hasattr(self, 'buckets'):
            s += f", n_buckets={len(self.buckets)}"
        s += ")"
        return s
    def __len__(self):
        return len(self.sentences)
    def __getitem__(self, index):
        # NOTE: this is a generator, lazily yielding the numericalized value of
        # each field for the sentence at `index`
        if not hasattr(self, 'fields'):
            raise RuntimeError("The fields are not numericalized. Please build the dataset first.")
        for d in self.fields.values():
            yield d[index]
    def __getattr__(self, name):
        # fall back to collecting the attribute from every sentence
        if name in self.__dict__:
            return self.__dict__[name]
        return [getattr(sentence, name) for sentence in self.sentences]
    def __setattr__(self, name, value):
        # setting a sentence-level attribute distributes `value` back onto the
        # sentences, undoing the bucket permutation first
        if 'sentences' in self.__dict__ and name in self.sentences[0]:
            # restore the order of sequences in the buckets
            indices = torch.tensor([i
                                    for bucket in self.buckets.values()
                                    for i in bucket]).argsort()
            for index, sentence in zip(indices, self.sentences):
                setattr(sentence, name, value[index])
        else:
            self.__dict__[name] = value
    def __getstate__(self):
        # only pickle the Transform object and sentences
        return {'transform': self.transform, 'sentences': self.sentences}
    def __setstate__(self, state):
        self.__dict__.update(state)
    def collate_fn(self, batch):
        # regroup per-sentence field tuples into one per-field tuple each,
        # keyed by the Field objects themselves
        return {f: d for f, d in zip(self.fields.keys(), zip(*batch))}
    def build(self, batch_size, n_buckets=1, shuffle=False, distributed=False):
        # numericalize all fields
        self.fields = self.transform(self.sentences)
        # NOTE: the final bucket count is roughly equal to n_buckets
        # sentence lengths are taken from the first numericalized field
        self.lengths = [len(i) for i in self.fields[next(iter(self.fields))]]
        self.buckets = dict(zip(*kmeans(self.lengths, n_buckets)))
        self.loader = DataLoader(dataset=self,
                                 batch_sampler=Sampler(buckets=self.buckets,
                                                       batch_size=batch_size,
                                                       shuffle=shuffle,
                                                       distributed=distributed),
                                 collate_fn=self.collate_fn)
class DataLoader(torch.utils.data.DataLoader):
    r"""
    A :class:`torch.utils.data.DataLoader` matching with :class:`Dataset`,
    which composes each raw batch into a named tuple of field values.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __iter__(self):
        for batch in super().__iter__():
            fields = list(batch.keys())
            # one named-tuple attribute per field, holding the composed data
            Batch = namedtuple('Batch', [f.name for f in fields])
            yield Batch(*[f.compose(batch[f]) for f in fields])
class Sampler(torch.utils.data.Sampler):
    r"""
    Sampler that supports for bucketization and token-level batchification.
    Args:
        buckets (dict):
            A dict that maps each centroid to indices of clustered sentences.
            The centroid corresponds to the average length of all sentences in the bucket.
        batch_size (int):
            Token-level batch size. The resulting batch contains roughly the same number of tokens as ``batch_size``.
        shuffle (bool):
            If ``True``, the sampler will shuffle both buckets and samples in each bucket. Default: ``False``.
        distributed (bool):
            If ``True``, the sampler will be used in conjunction with :class:`torch.nn.parallel.DistributedDataParallel`
            that restricts data loading to a subset of the dataset.
            Default: ``False``.
    """
    def __init__(self, buckets, batch_size, shuffle=False, distributed=False):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.sizes, self.buckets = zip(*[(size, bucket) for size, bucket in buckets.items()])
        # number of chunks in each bucket, clipped by range [1, len(bucket)]
        # size * len(bucket) approximates the token count of the whole bucket
        self.chunks = [min(len(bucket), max(round(size * len(bucket) / batch_size), 1))
                       for size, bucket in zip(self.sizes, self.buckets)]
        # in the distributed setting, each replica consumes every `replicas`-th batch
        self.rank = dist.get_rank() if distributed else 0
        self.replicas = dist.get_world_size() if distributed else 1
        self.samples = sum(self.chunks) // self.replicas
        self.epoch = 0
    def __iter__(self):
        g = torch.Generator()
        # seed with the epoch so every process draws the identical permutation
        g.manual_seed(self.epoch)
        range_fn = torch.arange
        # if `shuffle=True`, shuffle both the buckets and samples in each bucket
        # for distributed training, make sure each process generates the same random sequence at each epoch
        if self.shuffle:
            def range_fn(x):
                return torch.randperm(x, generator=g)
        total, count = 0, 0
        # TODO: more elegant way to deal with uneven data, which we directly discard right now
        for i in range_fn(len(self.buckets)).tolist():
            # distribute the bucket's sentences as evenly as possible over its chunks
            split_sizes = [(len(self.buckets[i]) - j - 1) // self.chunks[i] + 1
                           for j in range(self.chunks[i])]
            # DON'T use `torch.chunk` which may return wrong number of chunks
            for batch in range_fn(len(self.buckets[i])).split(split_sizes):
                if count == self.samples:
                    break
                # round-robin assignment of batches to replicas
                if total % self.replicas == self.rank:
                    count += 1
                    yield [self.buckets[i][j] for j in batch.tolist()]
                total += 1
        # advance the epoch so the next pass reshuffles differently
        self.epoch += 1
    def __len__(self):
        return self.samples
| 7,112 | 40.354651 | 120 | py |
diaparser | diaparser-master/diaparser/utils/parallel.py | # -*- coding: utf-8 -*-
import os
from random import Random
import torch
import torch.distributed as dist
import torch.nn as nn
class DistributedDataParallel(nn.parallel.DistributedDataParallel):
    """DDP wrapper that transparently exposes the attributes of the wrapped module."""

    def __init__(self, module, **kwargs):
        super().__init__(module, **kwargs)

    def __getattr__(self, name):
        # nn.Module routes attribute lookup through __getattr__; prefer the
        # attributes of the wrapped module over those of the DDP wrapper
        inner = super().__getattr__('module')
        if hasattr(inner, name):
            return getattr(inner, name)
        return super().__getattr__(name)
def init_device(device, backend='nccl', host=None, port=None):
    """
    Restrict visible CUDA devices to ``device`` and, when more than one GPU is
    available, initialize the distributed process group.

    Args:
        device (str): value for ``CUDA_VISIBLE_DEVICES``, e.g. ``'0'`` or ``'0,1'``.
        backend (str): distributed backend passed to ``init_process_group``. Default: ``'nccl'``.
        host (str): master address; falls back to ``MASTER_ADDR`` or ``'localhost'``.
        port (str): master port; falls back to ``MASTER_PORT`` or a seeded random value
            (seeded so every process picks the same port).
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    if torch.cuda.device_count() > 1:
        host = host or os.environ.get('MASTER_ADDR', 'localhost')
        port = port or os.environ.get('MASTER_PORT', str(Random(0).randint(10000, 20000)))
        os.environ['MASTER_ADDR'] = host
        os.environ['MASTER_PORT'] = port
        dist.init_process_group(backend)
        # bind this process to the GPU matching its rank
        torch.cuda.set_device(dist.get_rank())
def is_master():
    """Return ``True`` when not running distributed, or when this is the rank-0 process."""
    if not dist.is_available():
        return True
    if not dist.is_initialized():
        return True
    return dist.get_rank() == 0
| 1,071 | 28.777778 | 90 | py |
diaparser | diaparser-master/diaparser/parsers/parser.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime, timedelta
import torch
import torch.distributed as dist
from .. import parsers
from tokenizer.tokenizer import Tokenizer
from ..catalog import select
from ..utils import Config, Dataset
from ..utils.field import Field, BertField
from ..utils.logging import init_logger, logger
from ..utils.metric import Metric
from ..utils.parallel import DistributedDataParallel as DDP
from ..utils.parallel import is_master
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
def conll_format(path):
    """
    Check whether the file at ``path`` contains data in CoNLL-U format.

    The decision is based on the first non-empty, non-comment line:
    CoNLL-U rows consist of exactly 10 tab-separated columns.

    Args:
        path (str): path to the file to inspect.

    Returns:
        bool: ``True`` if the first data line has 10 tab-separated fields;
        ``False`` if it does not, if the file holds only comments/blank lines,
        or if the file cannot be read as text (missing, unreadable or binary).
    """
    try:
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#'):
                    # CoNLL-U format has 10 tsv:
                    return len(line.split('\t')) == 10
        return False
    except (OSError, UnicodeDecodeError):
        # missing/unreadable or binary files are clearly not CoNLL-U;
        # the original bare `except:` also swallowed KeyboardInterrupt etc.
        return False
class Parser():
MODEL = None
def __init__(self, args, model, transform):
self.args = args
self.model = model
self.transform = transform
    def train(self, train, dev, test,
              buckets=32,
              batch_size=5000,
              lr=2e-3,
              mu=.9,
              nu=.9,
              epsilon=1e-12,
              clip=5.0,
              decay=.75,
              decay_steps=5000,
              epochs=5000,
              patience=100,
              verbose=True,
              **kwargs):
        r"""
        Trains the parser, keeping the checkpoint that scores best on ``dev``.

        Args:
            train, dev, test: datasets (paths or token lists) for training,
                validation and optional final evaluation (``test`` may be falsy).
            lr (float): learning rate of adam optimizer. Default: 2e-3.
            mu (float): beta1 of adam optimizer. Default: .9.
            nu (float): beta2 of adam optimizer. Default: .9.
            epsilon (float): epsilon of adam optimizer. Default: 1e-12.
            clip (float): gradient clipping threshold; consumed by the subclass
                ``_train`` implementation — TODO confirm.
            decay (float): LR decay factor applied over ``decay_steps`` steps.
            decay_steps (int): number of steps over which ``decay`` is spread.
            buckets (int): number of buckets. Default: 32.
            epochs (int): number of epochs to train: Default: 5000.
            patience (int): early stop after these many epochs. Default: 100.
            verbose (bool): controls logging verbosity. Default: ``True``.
        """
        # locals() snapshots all hyperparameters into the config
        args = self.args.update(locals())
        init_logger(logger, verbose=args.verbose)
        self.transform.train()
        # split the token-level batch size evenly across distributed workers
        if dist.is_initialized():
            args.batch_size = args.batch_size // dist.get_world_size()
        logger.info(f"Load the datasets\n"
                    f"{'train:':6} {train}\n"
                    f"{'dev:':6} {dev}\n")
        train = Dataset(self.transform, args.train, **args)
        train.build(args.batch_size, args.buckets, True, dist.is_initialized())
        logger.info(f"{'train:':6} {len(train):5} sentences, "
                    f"{len(train.loader):3} batches, "
                    f"{len(train.buckets)} buckets")
        dev = Dataset(self.transform, args.dev)
        dev.build(args.batch_size, args.buckets)
        logger.info(f"{'dev:':6} {len(dev):5} sentences, "
                    f"{len(dev.loader):3} batches, "
                    f"{len(train.buckets)} buckets")
        if args.test:
            test = Dataset(self.transform, args.test)
            test.build(args.batch_size, args.buckets)
            logger.info(f"{'test:':6} {len(test):5} sentences, "
                        f"{len(test.loader):3} batches, "
                        f"{len(train.buckets)} buckets")
        else:
            test = None
        logger.info(f"Model\n{self.model}\n")
        if dist.is_initialized():
            self.model = DDP(self.model,
                             device_ids=[dist.get_rank()],
                             find_unused_parameters=True)
        self.optimizer = Adam(self.model.parameters(),
                              args.lr,
                              (args.mu, args.nu),
                              args.epsilon)
        # exponential LR schedule: multiply by `decay` every `decay_steps` steps
        self.scheduler = ExponentialLR(self.optimizer, args.decay**(1/args.decay_steps))
        elapsed = timedelta()
        best_e, best_metric = 1, Metric()
        for epoch in range(1, args.epochs + 1):
            start = datetime.now()
            logger.info(f"Epoch {epoch} / {args.epochs}:")
            self._train(train.loader)
            loss, dev_metric = self._evaluate(dev.loader)
            logger.info(f"{'dev:':6} - loss: {loss:.4f} - {dev_metric}")
            if test:
                loss, test_metric = self._evaluate(test.loader)
                logger.info(f"{'test:':6} - loss: {loss:.4f} - {test_metric}")
            t = datetime.now() - start
            # save the model if it is the best so far
            if dev_metric > best_metric:
                best_e, best_metric = epoch, dev_metric
                # only the master process writes checkpoints
                if is_master():
                    self.save(args.path)
                logger.info(f"{t}s elapsed (saved)\n")
            else:
                logger.info(f"{t}s elapsed\n")
            elapsed += t
            # early stopping once dev performance stops improving
            if epoch - best_e >= args.patience:
                break
        logger.info(f"Epoch {best_e} saved")
        logger.info(f"{'dev:':6} - {best_metric}")
        if test:
            # re-load the best checkpoint before the final test evaluation
            loss, metric = self.load(args.path)._evaluate(test.loader)
            logger.info(f"{'test:':6} - {metric}")
        logger.info(f"{elapsed}s elapsed, {elapsed / epoch}s/epoch")
def evaluate(self, data, buckets=8, batch_size=5000, **kwargs):
args = self.args.update(locals())
init_logger(logger, verbose=args.verbose)
self.transform.train()
logger.info("Loading the data")
dataset = Dataset(self.transform, data)
dataset.build(args.batch_size, args.buckets)
logger.info(f"\n{dataset}")
logger.info("Evaluating the dataset")
start = datetime.now()
loss, metric = self._evaluate(dataset.loader)
elapsed = datetime.now() - start
logger.info(f"loss: {loss:.4f} - {metric}")
logger.info(f"{elapsed}s elapsed, {len(dataset)/elapsed.total_seconds():.2f} Sents/s")
return loss, metric
    def predict(self, data, pred=None, buckets=8, batch_size=5000, prob=False, **kwargs):
        r"""
        Parses the data and produces a parse tree for each sentence.
        Args:
            data (str or list[list[str]]): input to be parsed: either
              - a str, that will be tokenized first with the tokenizer for the parser language
              - a path to a file to be read, either in CoNLL-U format or in plain text if :param text: is supplied.
              - a list of lists of tokens
            text (str): optional, specifies that the input data is in plain text in the specified language code.
            pred (str or file): a path to a file where to write the parsed input in CoNLL-U format.
            buckets (int): the number of buckets used to group sentences to parallelize matrix computations.
            batch_size (int): group sentences in batches.
            prob (bool): whether to return also probabilities for each arc.
        Returns:
            a Dataset containing the parsed sentence trees.
        """
        args = self.args.update(locals())
        init_logger(logger, verbose=args.verbose)
        self.transform.eval()
        if args.prob:
            # add an extra field to carry per-arc probabilities
            self.transform.append(Field('probs'))
        # plain text input (or explicit `text` language) goes through the tokenizer first
        if isinstance(data, str) and (not conll_format(data) or args.text):
            self.transform.reader = Tokenizer(args.text, dir=args.cache_dir, verbose=args.verbose).reader()
        logger.info("Loading the data")
        dataset = Dataset(self.transform, data)
        dataset.build(args.batch_size, args.buckets)
        logger.info(f"\n{dataset}")
        logger.info("Making predictions on the dataset")
        start = datetime.now()
        preds = self._predict(dataset.loader)
        elapsed = datetime.now() - start
        # attach each predicted field back onto the dataset's sentences
        for name, value in preds.items():
            setattr(dataset, name, value)
        if pred is not None and is_master():
            logger.info(f"Saving predicted results to {pred}")
            self.transform.save(pred, dataset.sentences)
        logger.info(f"{elapsed}s elapsed, {len(dataset) / elapsed.total_seconds():.2f} Sents/s")
        return dataset
    def _train(self, loader):
        # one training pass over `loader`; implemented by concrete parser subclasses
        raise NotImplementedError
    @torch.no_grad()
    def _evaluate(self, loader):
        # evaluation pass over `loader`; implemented by concrete parser subclasses
        raise NotImplementedError
    @torch.no_grad()
    def _predict(self, loader):
        # prediction pass over `loader`; implemented by concrete parser subclasses
        raise NotImplementedError
    @classmethod
    def build(cls, path, **kwargs):
        """Construct a brand-new parser (fields, vocabularies, model). Must be implemented by subclasses."""
        raise NotImplementedError
    @classmethod
    def load(cls, name_or_path='', lang='en', cache_dir=os.path.expanduser('~/.cache/diaparser'), **kwargs):
        r"""
        Loads a parser from a pretrained model.
        Args:
            name_or_path (str):
                - a string with the shortcut name of a pretrained parser listed in ``resource.json``
                  to load from cache or download, e.g., ``'en_ptb.electra-base'``.
                - a path to a directory containing a pre-trained parser, e.g., `./<path>/model`.
            lang (str):
                A language code, used in alternative to ``name_or_path`` to load the default model
                for the given language.
            cache_dir (str):
                Directory where to cache models. The default value is `~/.cache/diaparser`.
            kwargs (dict):
                A dict holding the unconsumed arguments that can be used to update the configurations and initiate the model.
        Raises:
            Exception: if no pretrained model matches ``name_or_path``/``lang``.
        Examples:
            >>> parser = Parser.load('en_ewt.electra-base')
            >>> parser = Parser.load(lang='en')
            >>> parser = Parser.load('./ptb.biaffine.dependency.char')
        """
        args = Config(**locals())
        args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        if os.path.exists(name_or_path):
            # Local checkpoint: load the pickled state directly.
            # NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
            state = torch.load(name_or_path)
        else:
            # Otherwise resolve the name/language to a download URL in the catalog.
            url = select(name=name_or_path, lang=lang, **kwargs)
            if url is None:
                raise Exception(f'Could not find a model matching name {name_or_path}')
            verbose = kwargs.get('verbose', True)
            state = torch.hub.load_state_dict_from_url(url, model_dir=cache_dir,
                                                       progress=verbose)
        # Rebind cls to the concrete parser subclass recorded in the checkpoint.
        cls = getattr(parsers, state['name'])
        # Saved args take the stored configuration, overridden by the current call's args.
        args = state['args'].update(args)
        model = cls.MODEL(**args)
        model.load_pretrained(state['pretrained'])
        # strict=False: tolerate missing/unexpected keys (e.g. the popped pretrained weights).
        model.load_state_dict(state['state_dict'], False)
        model.to(args.device)
        transform = state['transform']
        if args.feat == 'bert':
            # The tokenizer is not pickled with the transform (see save()); restore it here.
            tokenizer = BertField.tokenizer(args.bert)
            transform.FORM[1].tokenize = tokenizer.tokenize
        return cls(args, model, transform)
    def save(self, path):
        """Serialize the parser (config, weights, transform) to ``path`` with torch.save.

        The pretrained embedding matrix is stored separately from the rest of the
        state dict, and any BERT tokenizer function is detached before pickling
        (it is not picklable) and restored afterwards; load() re-attaches it.
        """
        model = self.model
        if hasattr(model, 'module'):
            # Unwrap DataParallel/DistributedDataParallel.
            model = self.model.module
        args = model.args
        args.pop('Parser')  # drop the parser class object: not picklable / not needed
        # Move all tensors to CPU so the checkpoint loads on machines without a GPU.
        state_dict = {k: v.cpu() for k, v in model.state_dict().items()}
        pretrained = state_dict.pop('pretrained.weight', None)
        if args.feat == 'bert':
            tokenize = self.transform.FORM[1].tokenize  # stash the unpicklable tokenizer fn
            self.transform.FORM[1].tokenize = None
        state = {'name': type(self).__name__,
                 'args': args,
                 'state_dict': state_dict,
                 'pretrained': pretrained,
                 'transform': self.transform}
        torch.save(state, path)
        if args.feat == 'bert':
            self.transform.FORM[1].tokenize = tokenize  # restore on the live object
| 11,478 | 38.582759 | 125 | py |
diaparser | diaparser-master/diaparser/parsers/biaffine_dependency.py | # -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
from ..models import BiaffineDependencyModel
from .parser import Parser
from ..utils import Config, Dataset, Embedding
from ..utils.common import bos, pad, unk
from ..utils.field import Field, SubwordField, BertField
from ..utils.fn import ispunct
from ..utils.logging import get_logger, progress_bar
from ..utils.metric import AttachmentMetric
from ..utils.transform import CoNLL
from tokenizer.tokenizer import Tokenizer
logger = get_logger(__name__)
class BiaffineDependencyParser(Parser):
    r"""
    The implementation of Biaffine Dependency Parser.
    References:
        - Timothy Dozat and Christopher D. Manning. 2017.
          `Deep Biaffine Attention for Neural Dependency Parsing`_.
    .. _Deep Biaffine Attention for Neural Dependency Parsing:
        https://openreview.net/forum?id=Hk95PK9le
    """
    MODEL = BiaffineDependencyModel
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # With char/bert features, FORM is a (word, feat) pair; otherwise the
        # feature field is the coarse POS column.
        if self.args.feat in ('char', 'bert'):
            self.WORD, self.FEAT = self.transform.FORM
        else:
            self.WORD, self.FEAT = self.transform.FORM, self.transform.CPOS
        self.ARC, self.REL = self.transform.HEAD, self.transform.DEPREL
        # Vocabulary ids of punctuation tokens, used to mask them out of metrics.
        self.puncts = torch.tensor([i
                                    for s, i in self.WORD.vocab.stoi.items()
                                    if ispunct(s)]).to(self.args.device)
    def train(self, train, dev, test, buckets=32, batch_size=5000,
              punct=False, tree=False, proj=False, verbose=True, **kwargs):
        r"""
        Args:
            train/dev/test (list[list] or str):
                Filenames of the train/dev/test datasets.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 32.
            batch_size (int):
                The number of tokens in each batch. Default: 5000.
            punct (bool):
                If ``False``, ignores the punctuations during evaluation. Default: ``False``.
            tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``False``.
            proj (bool):
                If ``True``, ensures to output projective trees. Default: ``False``.
            partial (bool):
                ``True`` denotes the trees are partially annotated. Default: ``False``.
                NOTE(review): accepted via **kwargs, not a named parameter — confirm callers.
            verbose (bool):
                If ``True``, increases the output verbosity. Default: ``True``.
            kwargs (dict):
                A dict holding the unconsumed arguments that can be used to update the configurations for training.
        """
        return super().train(**Config().update(locals()))
    def evaluate(self, data, buckets=8, batch_size=5000,
                 punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):
        r"""
        Args:
            data (str):
                The data for evaluation, both list of instances and filename are allowed.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
            batch_size (int):
                The number of tokens in each batch. Default: 5000.
            punct (bool):
                If ``False``, ignores the punctuations during evaluation. Default: ``False``.
            tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
            proj (bool):
                If ``True``, ensures to output projective trees. Default: ``False``.
            partial (bool):
                ``True`` denotes the trees are partially annotated. Default: ``False``.
            verbose (bool):
                If ``True``, increases the output verbosity. Default: ``True``.
            kwargs (dict):
                A dict holding the unconsumed arguments that can be used to update the configurations for evaluation.
        Returns:
            The loss scalar and evaluation results.
        """
        return super().evaluate(**Config().update(locals()))
    def predict(self, data, pred=None, buckets=8, batch_size=5000,
                prob=False, tree=True, proj=False, verbose=False, **kwargs):
        r"""
        Args:
            data (list[list] or str):
                The data for prediction, both a list of instances and filename are allowed.
            pred (str):
                If specified, the predicted results will be saved to the file. Default: ``None``.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
            batch_size (int):
                The number of tokens in each batch. Default: 5000.
            prob (bool):
                If ``True``, outputs the probabilities. Default: ``False``.
            tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
            proj (bool):
                If ``True``, ensures to output projective trees. Default: ``False``.
            verbose (bool):
                If ``True``, increases the output verbosity. Default: ``False``.
            kwargs (dict):
                A dict holding the unconsumed arguments that can be used to update the configurations for prediction.
        Returns:
            A :class:`~diaparser.utils.Dataset` object that stores the predicted results.
        """
        return super().predict(**Config().update(locals()))
    def _train(self, loader):
        """One training epoch: forward, loss, clipped backward, optimizer + scheduler step."""
        self.model.train()
        bar, metric = progress_bar(loader), AttachmentMetric()
        for words, feats, arcs, rels in bar:
            self.optimizer.zero_grad()
            mask = words.ne(self.WORD.pad_index)
            # ignore the first token of each sentence (the BOS placeholder)
            mask[:, 0] = 0
            s_arc, s_rel = self.model(words, feats)
            loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
            self.optimizer.step()
            self.scheduler.step()
            arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
            if self.args.partial:
                # only score tokens that actually carry a gold head
                mask &= arcs.ge(0)
            # ignore all punctuation if not specified
            if not self.args.punct:
                mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)
            metric(arc_preds, rel_preds, arcs, rels, mask)
            bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
    @torch.no_grad()
    def _evaluate(self, loader):
        """Compute mean loss and attachment metric over ``loader``."""
        self.model.eval()
        total_loss, metric = 0, AttachmentMetric()
        for words, feats, arcs, rels in loader:
            mask = words.ne(self.WORD.pad_index)
            # ignore the first token of each sentence
            mask[:, 0] = 0
            s_arc, s_rel = self.model(words, feats)
            loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
            arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask,
                                                     self.args.tree,
                                                     self.args.proj)
            if self.args.partial:
                mask &= arcs.ge(0)
            # ignore all punctuation if not specified
            if not self.args.punct:
                mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)
            total_loss += loss.item()
            metric(arc_preds, rel_preds, arcs, rels, mask)
        total_loss /= len(loader)
        return total_loss, metric
    @torch.no_grad()
    def _predict(self, loader):
        """Decode arcs/relations (and optionally arc probabilities) for every sentence."""
        self.model.eval()
        preds = {}
        arcs, rels, probs = [], [], []
        for words, feats in progress_bar(loader):
            mask = words.ne(self.WORD.pad_index)
            # ignore the first token of each sentence
            mask[:, 0] = 0
            lens = mask.sum(1).tolist()
            s_arc, s_rel = self.model(words, feats)
            arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask,
                                                     self.args.tree,
                                                     self.args.proj)
            # split the flattened batch back into per-sentence sequences
            arcs.extend(arc_preds[mask].split(lens))
            rels.extend(rel_preds[mask].split(lens))
            if self.args.prob:
                arc_probs = s_arc.softmax(-1)
                probs.extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
        arcs = [seq.tolist() for seq in arcs]
        rels = [self.REL.vocab[seq.tolist()] for seq in rels]
        preds = {'arcs': arcs, 'rels': rels}
        if self.args.prob:
            preds['probs'] = probs
        return preds
    @classmethod
    def build(cls, path, min_freq=2, fix_len=20, **kwargs):
        r"""
        Build a brand-new Parser, including initialization of all data fields and model parameters.
        Args:
            path (str):
                The path of the model to be saved.
            min_freq (str):
                The minimum frequency needed to include a token in the vocabulary. Default: 2.
            fix_len (int):
                The max length of all subword pieces. The excess part of each piece will be truncated.
                Required if using CharLSTM/BERT.
                Default: 20.
            kwargs (dict):
                A dict holding the unconsumed arguments.
        """
        args = Config(**locals())
        args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        os.makedirs(os.path.dirname(path), exist_ok=True)
        if os.path.exists(path) and not args.build:
            # Reuse an existing checkpoint instead of rebuilding from scratch.
            parser = cls.load(**args)
            parser.model = cls.MODEL(**parser.args)
            parser.model.load_pretrained(parser.WORD.embed).to(args.device)
            return parser
        logger.info("Building the fields")
        WORD = Field('words', pad=pad, unk=unk, bos=bos, lower=True)
        if args.feat == 'char':
            FEAT = SubwordField('chars', pad=pad, unk=unk, bos=bos, fix_len=args.fix_len)
        elif args.feat == 'bert':
            tokenizer = BertField.tokenizer(args.bert)
            args.max_len = min(args.max_len or tokenizer.max_len, tokenizer.max_len)
            FEAT = BertField('bert', tokenizer, fix_len=args.fix_len)
            WORD.bos = FEAT.bos # ensure representations have the same length
        else:
            FEAT = Field('tags', bos=bos)
        ARC = Field('arcs', bos=bos, use_vocab=False, fn=CoNLL.get_arcs)
        REL = Field('rels', bos=bos)
        if args.feat in ('char', 'bert'):
            transform = CoNLL(FORM=(WORD, FEAT), HEAD=ARC, DEPREL=REL)
        else:
            transform = CoNLL(FORM=WORD, CPOS=FEAT, HEAD=ARC, DEPREL=REL)
        train = Dataset(transform, args.train)
        WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
        FEAT.build(train)
        REL.build(train)
        # set parameters from data:
        args.update({
            'n_words': WORD.vocab.n_init,
            'pad_index': WORD.pad_index,
            'unk_index': WORD.unk_index,
            'bos_index': WORD.bos_index,
            'n_feats': len(FEAT.vocab),
            'n_rels': len(REL.vocab),
            'feat_pad_index': FEAT.pad_index,
        })
        logger.info("Features:")
        logger.info(f"  {WORD}")
        logger.info(f"  {FEAT}\n  {ARC}\n  {REL}")
        model = cls.MODEL(**args)
        model.load_pretrained(WORD.embed).to(args.device)
        return cls(args, model, transform)
| 11,647 | 40.6 | 117 | py |
diaparser | diaparser-master/diaparser/cmds/cmd.py | # -*- coding: utf-8 -*-
import torch
from ..utils import Config
from ..utils.logging import init_logger, logger
from ..utils.parallel import init_device
from ..parsers.biaffine_dependency import BiaffineDependencyParser as Parser
def parse(argparser):
    """Command-line entry point shared by the train/evaluate/predict sub-commands.

    Adds the common options to ``argparser``, parses them (expects ``mode`` and
    any mode-specific options to be pre-registered or passed through), sets up
    the device/seed/logging, and dispatches to the corresponding Parser method.
    """
    argparser.add_argument('--conf', '-c', help='path to config file')
    argparser.add_argument('--path', '-p', help='model name or path to model file')
    argparser.add_argument('--device', '-d', default='-1', help='ID of GPU to use')
    argparser.add_argument('--seed', '-s', default=1, type=int, help='seed for generating random numbers')
    argparser.add_argument('--threads', '-t', default=16, type=int, help='max num of threads')
    argparser.add_argument('--batch-size', default=5000, type=int, help='batch size')
    argparser.add_argument("--local_rank", type=int, default=-1, help='node rank for distributed training')
    argparser.add_argument('--quiet', '-q', dest='verbose', action='store_false',
                           help='suppress verbose logs')
    # Two-pass parsing: the second pass re-feeds unrecognized tokens so that
    # options registered by sub-commands are also consumed.
    args, unknown = argparser.parse_known_args()
    args, _ = argparser.parse_known_args(unknown, args)
    args = Config(**vars(args))
    torch.set_num_threads(args.threads)
    torch.manual_seed(args.seed)
    init_device(args.device, args.local_rank)
    init_logger(logger, f"{args.path}.{args.mode}.log", verbose=args.verbose)
    logger.info('Configuration:\n' + str(args))
    if args.mode == 'train':
        parser = Parser.build(**args)
        parser.train(**args)
    elif args.mode == 'evaluate':
        parser = Parser.load(args.path)
        parser.evaluate(**args)
    elif args.mode == 'predict':
        parser = Parser.load(args.path, **args)
        parser.predict(**args)
| 1,727 | 43.307692 | 107 | py |
diaparser | diaparser-master/tokenizer/tokenizer.py |
import stanza
import torch
import json
import os
from contextlib import contextmanager
from diaparser.catalog import available_processors, download_processors
# reference https://github.com/stanfordnlp/stanza/blob/master/stanza/utils/prepare_tokenizer_data.py
class Tokenizer:
    """
    Interface to Stanza tokenizers.

    Downloads (and caches) the tokenization — and, when available, multi-word
    token expansion — processors for a language, then wraps them in a
    :class:`stanza.Pipeline`.

    Args:
        lang (str): conventional language identifier.
        dir (str): directory for caching models.
        verbose (bool): print download progress.
    """
    def __init__(self, lang, dir=os.path.expanduser('~/.cache/diaparser'), verbose=True):
        dir += '/tokenizer'
        # check for custom processors published in the diaparser catalog
        avail_processors = available_processors(lang, dir=dir)
        avail_preprocessors = avail_processors.keys() & ('tokenize', 'mwt')
        if avail_preprocessors:
            processors = {p: avail_processors[p] for p in avail_preprocessors}
            cached_paths = download_processors(lang, processors, dir)
            processors = ','.join(avail_preprocessors)
            try:
                # get Stanza resource.json which is needed by stanza.Pipeline().
                stanza.download(lang='', model_dir=dir, verbose=verbose)
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C/SystemExit still propagate;
                # failures for the dummy lang='' download are expected and ignored.
                pass
        else:
            cached_paths = {}
            processors = 'tokenize'
            stanza.download(lang, model_dir=dir, processors=processors, verbose=verbose)
            try:
                # mwt is only available for some languages; try it, fall back silently.
                stanza.download(lang, model_dir=dir, processors='mwt', verbose=verbose)
                processors += ',mwt'
            except Exception:
                pass
        use_gpu = torch.cuda.is_available()
        self.pipeline = stanza.Pipeline(lang, dir=dir, processors=processors, verbose=verbose,
                                        use_gpu=use_gpu, **cached_paths)
    def predict(self, text):
        """Tokenize ``text`` and return the list of Stanza sentence objects."""
        return self.pipeline(text).sentences
    def format(self, sentences):
        """
        Convert sentences to CoNLL format, yielding one line at a time.

        Multi-word tokens produce a range line (``2-3``) followed by one line
        per word; all non-form columns are left unspecified (``_``).
        """
        empty_fields = '\t_' * 8
        for i, sentence in enumerate(sentences):
            yield f'# sent_id = {i+1}'
            # the raw text may span lines; CoNLL-U comments must be single-line
            sent_text = sentence.text.replace("\n", " ")
            yield f'# text = {sent_text}'
            for token in sentence.tokens:
                # multiword token: emit the range line, then its component words
                if len(token.words) > 1:
                    token_range = f'{token.id[0]}-{token.id[-1]}'
                    yield f'{token_range}\t{token.text + empty_fields}'
                    for word in token.words:
                        yield f'{word.id}\t{word.text + empty_fields}'
                else:
                    yield f'{token.id[0]}\t{token.text + empty_fields}'
            yield ''
    def reader(self):
        """
        Reading function that returns a generator of CoNLL-U sentences.
        """
        @contextmanager
        def generator(data):
            """
            Args:
                data (str): could be a filename or the text to tokenize.
            Returns:
                a context manager that can be used in a `with` construct,
                yielding each line of the tokenized `data`.
            """
            if not os.path.exists(data):
                yield self.format(self.predict(data))
            else:
                with open(data) as f:
                    yield self.format(self.predict(f.read()))
        return generator
# Smoke test: tokenize a text snippet and print it in CoNLL-U format.
# Usage: python tokenizer.py <lang> <text>
if __name__ == '__main__':
    import sys
    tokenizer = Tokenizer(sys.argv[1])  # language code, e.g. 'it'
    sentences = tokenizer.predict(sys.argv[2])  # text to tokenize
    print('\n'.join(tokenizer.format(sentences)))
| 3,617 | 37.084211 | 100 | py |
diaparser | diaparser-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import diaparser
# -- Project information -----------------------------------------------------
project = 'DiaParser'
copyright = '2020, Yu Zhang, Giuseppe Attardi'
author = 'Yu Zhang, Giuseppe Attardi'
# The short X.Y version
version = diaparser.__version__
# The full version, including alpha/beta/rc tags
release = diaparser.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.autosummary',
              'sphinx.ext.intersphinx',
              'sphinx.ext.mathjax',
              'sphinx.ext.napoleon',
              'sphinx.ext.todo',
              'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on None and expect e.g. 'en'.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'nltk': ('https://www.nltk.org', None),
    'numpy': ('https://numpy.org/doc/stable', None),
    'python': ('https://docs.python.org/3', None),
    'torch': ('https://pytorch.org/docs/stable', None)
}
# Document members in source order rather than alphabetically.
autodoc_member_order = 'bysource'
| 3,158 | 33.336957 | 79 | py |
EditNTS | EditNTS-master/main.py | #!/usr/bin/env python
# coding:utf8
from __future__ import print_function
import argparse
import collections
import logging
import numpy as np
import torch
import torch.nn as nn
import data
from checkpoint import Checkpoint
from editnts import EditNTS
from evaluator import Evaluator
# Special symbols of the edit vocabulary and their fixed ids.
PAD = 'PAD' # This has a vocab id, which is used for padding sequences [0]
UNK = 'UNK' # This has a vocab id, which is used to represent out-of-vocabulary words [1]
KEEP = 'KEEP' # This has a vocab id, which is used for copying from the source [2]
DEL = 'DEL' # This has a vocab id, which is used for deleting the corresponding word [3]
START = 'START' # this has a vocab id, which is used for indicating start of the sentence for decoding [4]
STOP = 'STOP' # This has a vocab id, which is used to stop decoding [5]
PAD_ID = 0 # vocab id used for padding sequences [0]
UNK_ID = 1 # vocab id used to represent out-of-vocabulary words [1]
KEEP_ID = 2 # vocab id used for copying from the source [2]
DEL_ID = 3 # vocab id used for deleting the corresponding word [3]
START_ID = 4 # vocab id used for indicating start of the sentence for decoding [4]
STOP_ID = 5 # vocab id used to stop decoding [5]
def sort_by_lens(seq, seq_lengths):
    """Reorder a padded batch by sequence length, longest first.

    Args:
        seq: tensor of shape (batch, ...) to reorder along dim 0.
        seq_lengths: 1-D tensor with one length per batch row.
    Returns:
        (seq reordered, lengths in descending order, the permutation indices).
    """
    lengths_desc, order = seq_lengths.sort(descending=True)
    reordered = seq[order]
    return reordered, lengths_desc, order
def reweigh_batch_loss(target_id_bath):
    """Build per-class NLL weights from the edit-label counts of one batch.

    Each special operation (UNK/KEEP/DEL/STOP) gets an inverse-frequency weight
    with add-one smoothing; all regular vocabulary words share the weight derived
    from their pooled count; PAD is weighted 0 so it never contributes to the loss.

    Args:
        target_id_bath: iterable of id sequences (the batch of edit-label rows).
    Returns:
        torch.FloatTensor of shape (30006,) on the GPU (requires CUDA),
        suitable as the ``weight`` argument of ``nn.NLLLoss``.
    """
    pad_c = 0
    unk_c = 0
    keep_c = 0
    del_c = 0
    start_c = 0
    stop_c = 0
    other_c = 0
    for sent in target_id_bath:
        for ed in sent:
            if ed == PAD_ID:
                pad_c += 1
            elif ed == UNK_ID:
                unk_c += 1
            elif ed == KEEP_ID:
                keep_c += 1
            elif ed == DEL_ID:
                del_c += 1
            elif ed == START_ID:
                start_c += 1
            elif ed == STOP_ID:
                stop_c += 1
            else:
                other_c += 1
    # BUG FIX: the original `1. / x+1` parsed as (1/x)+1, which both skewed the
    # weights and raised ZeroDivisionError whenever a class was absent from the
    # batch; the intended add-one smoothing is 1/(x+1).
    NLL_weight = np.zeros(30006) + 1.0 / (other_c + 1)
    NLL_weight[PAD_ID] = 0  # padding never contributes to the loss
    NLL_weight[UNK_ID] = 1.0 / (unk_c + 1)
    NLL_weight[KEEP_ID] = 1.0 / (keep_c + 1)
    NLL_weight[DEL_ID] = 1.0 / (del_c + 1)
    NLL_weight[STOP_ID] = 1.0 / (stop_c + 1)
    # NOTE: START_ID keeps the shared base weight, as in the original.
    NLL_weight_t = torch.from_numpy(NLL_weight).float().cuda()
    return NLL_weight_t
def reweight_global_loss(w_add, w_keep, w_del, vocab_size=30006):
    """Build fixed per-class NLL weights for the edit vocabulary.

    Every regular word (the ADD operations) gets ``1 + w_add``; KEEP and DEL get
    their own weights; PAD is weighted 0 so it never contributes to the loss.

    Args:
        w_add (float): additive weight applied to every ordinary vocabulary word.
        w_keep (float): weight of the KEEP operation.
        w_del (float): weight of the DEL operation.
        vocab_size (int): size of the weight vector; previously hard-coded,
            kept as a default for backward compatibility. Default: 30006.
    Returns:
        np.ndarray of shape (vocab_size,) usable as an ``nn.NLLLoss`` weight.
    """
    NLL_weight = np.ones(vocab_size) + w_add
    NLL_weight[PAD_ID] = 0  # pad contributes nothing to the loss
    NLL_weight[KEEP_ID] = w_keep
    NLL_weight[DEL_ID] = w_del
    return NLL_weight
def training(edit_net, nepochs, args, vocab, print_every=100, check_every=500):
    """Train an EditNTS network with teacher forcing and periodic checkpointing.

    Args:
        edit_net: the EditNTS model (on GPU).
        nepochs (int): number of epochs to run.
        args: parsed command-line arguments (data_path, batch_size, max_seq_len,
            store_dir, ...).
        vocab: word vocabulary providing ``w2i`` and ``count``.
        print_every (int): steps between loss log lines.
        check_every (int): steps between dev-set evaluations / checkpoints.
    Returns:
        The trained network.
    """
    # FIX: the module only imports os under `if __name__ == '__main__'`; import it
    # locally so this function also works when the module is imported.
    import os

    eval_dataset = data.Dataset(args.data_path + 'val.df.filtered.pos')  # held-out dev set
    evaluator = Evaluator(loss=nn.NLLLoss(ignore_index=vocab.w2i['PAD'], reduction='none'))
    editnet_optimizer = torch.optim.Adam(edit_net.parameters(),
                                         lr=1e-3, weight_decay=1e-6)
    # To re-weight the edit operations in the loss, swap in:
    #   NLL_weight = reweight_global_loss(args.w_add, args.w_keep, args.w_del)
    #   NLL_weight_t = torch.from_numpy(NLL_weight).float().cuda()
    #   editnet_criterion = nn.NLLLoss(weight=NLL_weight_t, ignore_index=vocab.w2i['PAD'], reduce=False)
    editnet_criterion = nn.NLLLoss(ignore_index=vocab.w2i['PAD'], reduction='none')

    # BUG FIX: this was initialised to 0., so `val_loss < best_eval_loss` was
    # never true and no checkpoint was ever written; start from +inf instead.
    best_eval_loss = float('inf')
    print_loss = []  # running losses, reset every `print_every` steps
    for epoch in range(nepochs):
        # reload the training data every epoch (single file, or chunked iterator)
        if os.path.isfile(args.data_path + 'train.df.filtered.pos'):
            train_dataset = data.Dataset(args.data_path + 'train.df.filtered.pos')
        else:
            train_dataset = data.Datachunk(args.data_path + 'train.df.filtered.pos')
        for i, batch_df in train_dataset.batch_generator(batch_size=args.batch_size, shuffle=True):
            prepared_batch, syn_tokens_list = data.prepare_batch(batch_df, vocab, args.max_seq_len)  # comp, scpn, simp
            # complex-sentence token ids, sorted by length (descending) for packing
            org_ids = prepared_batch[0]
            org_lens = org_ids.ne(0).sum(1)
            org = sort_by_lens(org_ids, org_lens)  # (sorted, lengths, order)
            # POS-tag ids for the complex sentences, sorted the same way
            org_pos_ids = prepared_batch[1]
            org_pos_lens = org_pos_ids.ne(0).sum(1)
            org_pos = sort_by_lens(org_pos_ids, org_pos_lens)
            out = prepared_batch[2][:, :]   # decoder inputs (teacher forcing)
            tar = prepared_batch[2][:, 1:]  # targets: inputs shifted by one
            simp_ids = prepared_batch[3]
            editnet_optimizer.zero_grad()
            output = edit_net(org, out, org_ids, org_pos, simp_ids)
            # per-token NLL, masked and length-normalised per sentence
            tar_lens = tar.ne(0).sum(1).float()
            tar_flat = tar.contiguous().view(-1)
            loss = editnet_criterion(output.contiguous().view(-1, vocab.count), tar_flat).contiguous()
            loss[tar_flat == 1] = 0  # drop the loss on UNK targets
            loss = loss.view(tar.size())
            loss = loss.sum(1).float()
            loss = loss / tar_lens
            loss = loss.mean()
            print_loss.append(loss.item())
            loss.backward()
            torch.nn.utils.clip_grad_norm_(edit_net.parameters(), 1.)
            editnet_optimizer.step()
            if i % print_every == 0:
                log_msg = 'Epoch: %d, Step: %d, Loss: %.4f' % (
                    epoch, i, np.mean(print_loss))
                print_loss = []
                print(log_msg)
            # periodic dev evaluation + checkpoint on improvement
            if i % check_every == 0:
                edit_net.eval()
                val_loss, bleu_score, sari, sys_out = evaluator.evaluate(eval_dataset, vocab, edit_net, args)
                log_msg = "epoch %d, step %d, Dev loss: %.4f, Bleu score: %.4f, Sari: %.4f \n" % (epoch, i, val_loss, bleu_score, sari)
                print(log_msg)
                if val_loss < best_eval_loss:
                    best_eval_loss = val_loss
                    Checkpoint(model=edit_net,
                               opt=editnet_optimizer,
                               epoch=epoch, step=i,
                               ).save(args.store_dir)
                    print("checked after %d steps" % i)
                edit_net.train()
    return edit_net
# Corpus name used to build the default data/storage paths below.
dataset='newsela'
def main():
    """Entry point: parse CLI options, build vocabularies, init EditNTS, train."""
    torch.manual_seed(233)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s [INFO] %(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str,dest='data_path',
                        default='/home/ml/ydong26/data/EditNTS_data/editnet_data/%s/'%dataset,
                        help='Path to train vocab_data')
    parser.add_argument('--store_dir', action='store', dest='store_dir',
                        default='/home/ml/ydong26/tmp_store/editNTS_%s'%dataset,
                        help='Path to exp storage directory.')
    parser.add_argument('--vocab_path', type=str, dest='vocab_path',
                        default='../vocab_data/',
                        help='Path contains vocab, embedding, postag_set')
    parser.add_argument('--load_model', type=str, dest='load_model',
                        default=None,
                        help='Path for loading pre-trained model for further training')
    parser.add_argument('--vocab_size', dest='vocab_size', default=30000, type=int)
    parser.add_argument('--batch_size', dest='batch_size', default=32, type=int)
    parser.add_argument('--max_seq_len', dest='max_seq_len', default=100)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--hidden', type=int, default=200)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--device', type=int, default=1,
                        help='select GPU')
    #train_file = '/media/vocab_data/yue/TS/editnet_data/%s/train.df.filtered.pos'%dataset
    # test='/media/vocab_data/yue/TS/editnet_data/%s/test.df.pos' % args.dataset
    args = parser.parse_args()
    torch.cuda.set_device(args.device)
    # load vocab-related files and init vocab
    print('*'*10)
    vocab = data.Vocab()
    vocab.add_vocab_from_file(args.vocab_path+'vocab.txt', args.vocab_size)
    vocab.add_embedding(gloveFile=args.vocab_path+'glove.6B.100d.txt')
    pos_vocab = data.POSvocab(args.vocab_path) # load pos-tag embeddings
    print('*' * 10)
    print(args)
    print("generating config")
    # Bundle the hyper-parameters into an immutable namedtuple passed to the model.
    hyperparams=collections.namedtuple(
        'hps', # hyper-parameters
        ['vocab_size', 'embedding_dim',
         'word_hidden_units', 'sent_hidden_units',
         'pretrained_embedding', 'word2id', 'id2word',
         'pos_vocab_size', 'pos_embedding_dim']
    )
    hps = hyperparams(
        vocab_size=vocab.count,
        embedding_dim=100,
        word_hidden_units=args.hidden,
        sent_hidden_units=args.hidden,
        pretrained_embedding=vocab.embedding,
        word2id=vocab.w2i,
        id2word=vocab.i2w,
        pos_vocab_size=pos_vocab.count,
        pos_embedding_dim=30
    )
    print('init editNTS model')
    edit_net = EditNTS(hps, n_layers=1)
    edit_net.cuda()
    if args.load_model is not None:
        # Resume training from a saved checkpoint instead of a fresh model.
        print("load edit_net for further training")
        ckpt_path = args.load_model
        ckpt = Checkpoint.load(ckpt_path)
        edit_net = ckpt.model
        edit_net.cuda()
        edit_net.train()
    training(edit_net, args.epochs, args, vocab)
if __name__ == '__main__':
    # NOTE(review): `os` is imported here into module globals, so code that uses
    # `os` (e.g. training()) only works when this file is run as a script.
    import os
    cwd = os.getcwd()
    print(cwd)
    main()
| 10,186 | 38.792969 | 135 | py |
EditNTS | EditNTS-master/checkpoint.py | from __future__ import print_function
import os
import time
import shutil
import torch
class Checkpoint(object):
"""
The Checkpoint class manages the saving and loading of a model during training. It allows training to be suspended
and resumed at a later time (e.g. when running on a cluster using sequential jobs).
To make a checkpoint, initialize a Checkpoint object with the following args; then call that object's save() method
to write parameters to disk.
Args:
model (seq2seq): seq2seq model being trained
optimizer (Optimizer): stores the state of the optimizer
epoch (int): current epoch (an epoch is a loop through the full training vocab_data)
step (int): number of examples seen within the current epoch
input_vocab (Vocabulary): vocabulary for the input language
output_vocab (Vocabulary): vocabulary for the output language
Attributes:
CHECKPOINT_DIR_NAME (str): name of the checkpoint directory
TRAINER_STATE_NAME (str): name of the file storing trainer states
MODEL_NAME (str): name of the file storing model
INPUT_VOCAB_FILE (str): name of the input vocab file
OUTPUT_VOCAB_FILE (str): name of the output vocab file
"""
CHECKPOINT_DIR_NAME = 'checkpoints'
TRAINER_STATE_NAME = 'trainer_states.pt'
MODEL_NAME = 'model.pt'
def __init__(self, model, opt, epoch, step, path=None):
self.model = model
self.opt = opt
self.epoch = epoch
self.step = step
self._path = path
@property
def path(self):
if self._path is None:
raise LookupError("The checkpoint has not been saved.")
return self._path
def save(self, experiment_dir):
"""
Saves the current model and related training parameters into a subdirectory of the checkpoint directory.
The name of the subdirectory is the current local time in Y_M_D_H_M_S format.
Args:
experiment_dir (str): path to the experiment root directory
Returns:
str: path to the saved checkpoint subdirectory
"""
date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
self._path = os.path.join(experiment_dir, self.CHECKPOINT_DIR_NAME, date_time)
path = self._path
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
torch.save({'epoch': self.epoch,
'step': self.step,
'opt': self.opt
},
os.path.join(path, self.TRAINER_STATE_NAME))
torch.save(self.model, os.path.join(path, self.MODEL_NAME))
#with open(os.path.join(path, self.INPUT_VOCAB_FILE), 'wb') as fout:
# dill.dump(self.input_vocab, fout)
#with open(os.path.join(path, self.OUTPUT_VOCAB_FILE), 'wb') as fout:
# dill.dump(self.output_vocab, fout)
return path
@classmethod
def load(cls, path):
"""
Loads a Checkpoint object that was previously saved to disk.
Args:
path (str): path to the checkpoint subdirectory
Returns:
checkpoint (Checkpoint): checkpoint object with fields copied from those stored on disk
"""
if torch.cuda.is_available():
resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))
model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)
model.cuda()
# # Load all tensors onto the CPU
# torch.load('tensors.pt', map_location=lambda storage, loc: storage)
# # Map tensors from GPU 1 to GPU 0
# torch.load('tensors.pt', map_location={'cuda:1': 'cuda:%d'%gpu})
else:
resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)
model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)
#model.flatten_parameters() # make RNN parameters contiguous
#with open(os.path.join(path, cls.INPUT_VOCAB_FILE), 'rb') as fin:
# input_vocab = dill.load(fin)
#with open(os.path.join(path, cls.OUTPUT_VOCAB_FILE), 'rb') as fin:
# output_vocab = dill.load(fin)
opt = resume_checkpoint['opt']
print('the fking model is,', type(model))
return Checkpoint(model=model,
opt=opt,
epoch=resume_checkpoint['epoch'],
step=resume_checkpoint['step'],
path=path)
@classmethod
def get_latest_checkpoint(cls, experiment_path):
"""
Given the path to an experiment directory, returns the path to the last saved checkpoint's subdirectory.
Precondition: at least one checkpoint has been made (i.e., latest checkpoint subdirectory exists).
Args:
experiment_path (str): path to the experiment directory
Returns:
str: path to the last saved checkpoint's subdirectory
"""
checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)
all_times = sorted(os.listdir(checkpoints_path), reverse=True)
return os.path.join(checkpoints_path, all_times[0])
    @classmethod
    def get_all_checkpoints(cls, experiment_path):
        """
        Given the path to an experiment directory, returns the paths to ALL
        saved checkpoint subdirectories, ordered from oldest to newest.
        Precondition: at least one checkpoint has been made (i.e., at least
        one checkpoint subdirectory exists).
        Args:
            experiment_path (str): path to the experiment directory
        Returns:
            list of str: paths to all saved checkpoint subdirectories, oldest first
        """
        checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)
        # subdirectory names sort lexicographically by save time
        all_times = sorted(os.listdir(checkpoints_path))
        return [os.path.join(checkpoints_path, ckpt) for ckpt in all_times]
| 6,074 | 42.705036 | 129 | py |
EditNTS | EditNTS-master/data.py | from collections import Counter
import glob
import random
import struct
import csv
import pandas as pd
import numpy as np
import os
import torch
from torch.autograd import Variable
import random
import pickle
# <s> and </s> are used in the vocab_data files to segment the abstracts into sentences. They don't receive vocab ids.
# Special token strings and their fixed vocabulary ids.
PAD = 'PAD' # padding token, id 0
UNK = 'UNK' # unknown / out-of-vocabulary token, id 1
KEEP = 'KEEP' # edit action: copy the current source word, id 2
DEL = 'DEL' # edit action: delete the current source word, id 3
START = 'START' # start-of-sequence marker used when decoding, id 4
STOP = 'STOP' # end-of-sequence marker that stops decoding, id 5
PAD_ID = 0 # vocab id of PAD
UNK_ID = 1 # vocab id of UNK
KEEP_ID = 2 # vocab id of KEEP
DEL_ID = 3 # vocab id of DEL
START_ID = 4 # vocab id of START
STOP_ID = 5 # vocab id of STOP
def sent2id(sent, vocab):
    """
    Map a tokenized sentence onto vocabulary ids.

    :param sent: sentence as a list of token strings
    :param vocab: vocab object exposing a ``w2i`` dict
    :return: numpy array of shape (1, len(sent)); unknown tokens map to UNK
    """
    ids = [vocab.w2i[tok] if tok in vocab.w2i else vocab.w2i[UNK] for tok in sent]
    return np.array([ids])
def id2edits(ids, vocab):
    """
    Translate a sequence of edit ids back into edit-action strings.

    :param ids: iterable of integer edit ids
    :param vocab: vocab object exposing an ``i2w`` dict
    :return: list of edit-action strings
    """
    return [vocab.i2w[idx] for idx in ids]
def batchify(data, max_len=100):  # max_len cutout defined by human
    """
    Right-pad a list of id sequences into one (bsz, maxlen) GPU batch.

    Sequences longer than ``max_len`` are truncated; shorter ones are padded
    with PAD_ID (0).

    :param data: list of 1-d numpy arrays (or plain lists) of token ids
    :param max_len: hard cut-off on sequence length
    :return: LongTensor Variable of shape (bsz, maxlen) on the GPU
    """
    def _length(s):
        # numpy arrays carry .shape; plain python lists do not
        return s.shape[0] if hasattr(s, 'shape') else len(s)

    bsz = len(data)
    maxlen = min(max(_length(s) for s in data), max_len)
    # dtype=int replaces the np.int alias, which was removed in numpy >= 1.24;
    # both denote the platform default integer type.
    batch = np.zeros((bsz, maxlen), dtype=int)
    for i, s in enumerate(data):
        n = min(_length(s), maxlen)
        batch[i, :n] = s[:n]
    return Variable(torch.from_numpy(batch)).cuda()
def batchify_start_stop(data, max_len=100, start_id=4, stop_id=5):  # max_len cutout defined by human
    """
    Prepend START and append STOP to every sequence, then pad into a batch.

    :param data: list of 1-d numpy arrays of token ids
    :param max_len: hard cut-off on sequence length (after adding the markers)
    :param start_id: id inserted at position 0 of every sequence
    :param stop_id: id appended at the end of every sequence
    :return: LongTensor Variable of shape (bsz, maxlen) on the GPU
    """
    data = [np.append(s, [stop_id]) for s in data]
    data = [np.insert(s, 0, start_id) for s in data]
    bsz = len(data)
    maxlen = min(max(s.shape[0] for s in data), max_len)
    # dtype=int replaces the np.int alias removed in numpy >= 1.24
    batch = np.zeros((bsz, maxlen), dtype=int)
    for i, s in enumerate(data):
        n = min(s.shape[0], maxlen)
        batch[i, :n] = s[:n]
    return Variable(torch.from_numpy(batch)).cuda()
def batchify_stop(data, max_len=100, start_id=4, stop_id=5):  # max_len cutout defined by human
    """
    Append STOP to every sequence, then pad into a batch.

    :param data: list of 1-d numpy arrays of token ids
    :param max_len: hard cut-off on sequence length (after appending STOP)
    :param start_id: unused; kept for signature compatibility with callers
    :param stop_id: id appended at the end of every sequence
    :return: LongTensor Variable of shape (bsz, maxlen) on the GPU
    """
    data = [np.append(s, [stop_id]) for s in data]
    bsz = len(data)
    maxlen = min(max(s.shape[0] for s in data), max_len)
    # dtype=int replaces the np.int alias removed in numpy >= 1.24
    batch = np.zeros((bsz, maxlen), dtype=int)
    for i, s in enumerate(data):
        n = min(s.shape[0], maxlen)
        batch[i, :n] = s[:n]
    return Variable(torch.from_numpy(batch)).cuda()
class Vocab():
    """Word vocabulary: token list, word<->id maps and optional GloVe matrix.

    Slots 0-5 are always the special tokens PAD/UNK/KEEP/DEL/START/STOP.
    """
    def __init__(self):
        self.word_list = [PAD, UNK, KEEP, DEL, START, STOP]
        self.w2i = {}
        self.i2w = {}
        self.count = 0
        self.embedding = None

    def add_vocab_from_file(self, vocab_file="../vocab_data/vocab.txt", vocab_size=30000):
        """Read up to ``vocab_size`` lines of "word count" pairs and index them.

        NOTE(review): the file is opened in binary mode, so the stored words
        are bytes under Python 3 — confirm against how the vocab file is used.
        """
        with open(vocab_file, "rb") as f:
            for line_no, line in enumerate(f):
                if line_no >= vocab_size:
                    break
                # only want the word, not the count
                self.word_list.append(line.split()[0])
        print("read %d words from vocab file" % len(self.word_list))
        for word in self.word_list:
            self.w2i[word] = self.count
            self.i2w[self.count] = word
            self.count += 1

    def add_embedding(self, gloveFile="path_for_glove_embedding", embed_size=100):
        """Fill ``self.embedding`` with GloVe vectors for known words.

        Words absent from the GloVe file keep an all-zero row.
        """
        print("Loading Glove embeddings")
        with open(gloveFile, 'r') as f:
            found = {}
            wanted = set(self.word_list)
            embedding_matrix = np.zeros(shape=(len(self.word_list), embed_size))
            for line in f:
                fields = line.split()
                word = fields[0]
                if word in wanted:  # only extract embeddings in the word_list
                    vec = np.array([float(val) for val in fields[1:]])
                    found[word] = vec
                    embedding_matrix[self.w2i[word]] = vec
            self.embedding = embedding_matrix
            print("%d words out of %d has embeddings in the glove file" % (len(found), len(self.word_list)))
class POSvocab():
    """Vocabulary over POS tags, loaded from a pickled NLTK tag set.

    Slots 0-3 are the special tokens PAD/UNK/START/STOP; the remaining ids
    are assigned to the tags found in ``<vocab_path>postag_set.p``.
    """
    def __init__(self, vocab_path):
        self.word_list = [PAD, UNK, START, STOP]
        self.w2i = {}
        self.i2w = {}
        self.count = 0
        self.embedding = None
        # pickle files must be opened in binary mode: text mode ('r') raises
        # a decoding error under Python 3.
        with open(vocab_path + 'postag_set.p', 'rb') as f:
            # postag_set is from NLTK
            tagdict = pickle.load(f)
        for w in self.word_list:
            self.w2i[w] = self.count
            self.i2w[self.count] = w
            self.count += 1
        for w in tagdict:
            self.w2i[w] = self.count
            self.i2w[self.count] = w
            self.count += 1
class Datachunk():
    """Streams rows or batches out of a directory of pickled dataframe chunks.

    Chunk files are visited in random order; each file is removed from the
    pending list as it is consumed, so the generators below run until the
    directory is exhausted.
    """
    def __init__(self, data_path):
        self.data_path = data_path
        self.listdir = os.listdir(self.data_path)
        random.shuffle(self.listdir)
        self.idx_count = 0

    def example_generator(self, shuffle=True):
        """Yield (running_index, row) pairs, one pickled chunk at a time."""
        while self.listdir:
            print("reading a new chunk with %d chunks remaining" % len(self.listdir))
            chunk_df = pd.read_pickle(self.data_path + self.listdir.pop())
            if shuffle:
                chunk_df = chunk_df.sample(frac=1).reset_index(drop=True)
                print('shuffling the df')
            for _, row in chunk_df.iterrows():
                self.idx_count += 1
                yield self.idx_count, row

    def batch_generator(self, batch_size=1, shuffle=True):
        """Yield (running_index, dataframe_slice) batches of ``batch_size`` rows."""
        while self.listdir:
            chunk_df = pd.read_pickle(self.data_path + self.listdir.pop())
            if shuffle:
                chunk_df = chunk_df.sample(frac=1).reset_index(drop=True)
            slices = [chunk_df[i:i + batch_size] for i in range(0, chunk_df.shape[0], batch_size)]
            for piece in slices:
                self.idx_count += 1
                yield self.idx_count, piece
class Dataset():
    """Wraps one pickled dataframe and provides example/batch iteration."""
    def __init__(self, data_path):
        self.df = pd.read_pickle(data_path)
        self.idx_count = 0

    def example_generator(self):
        """Yield (index, row) pairs over the underlying dataframe."""
        for idx, row in self.df.iterrows():
            yield idx, row

    def batch_generator(self, batch_size=64, shuffle=True):
        """Yield (running_count, dataframe_slice) batches of ``batch_size`` rows."""
        if shuffle:
            # reshuffle the rows before slicing into consecutive batches
            self.df = self.df.sample(frac=1).reset_index(drop=True)
        slices = [self.df[i:i + batch_size] for i in range(0, self.df.shape[0], batch_size)]
        for batch in slices:
            self.idx_count += 1
            yield self.idx_count, batch
def prepare_batch(batch_df, vocab, max_length=100):
    """
    Turn one dataframe batch into padded GPU tensors for the model.

    :param batch_df: pandas dataframe batch with fields ['comp_tokens', 'simp_tokens',
        'comp_ids', 'simp_id', 'comp_pos_ids', 'edit_labels', 'new_edit_ids']
    :param vocab: vocab object (kept for interface compatibility; not used here)
    :param max_length: hard cut-off applied to every sequence
    :return: ([inp, inp_pos, tgt, inp_simp], batch_df['comp_tokens']) where
        inp: complex-sentence ids (STOP appended),
        inp_pos: POS-tag ids for the complex sentence (STOP appended),
        tgt: target edit-label ids (START prepended, STOP appended),
        inp_simp: simple-sentence ids (START prepended, STOP appended),
        and batch_df['comp_tokens'] holds the raw complex tokens
    """
    inp = batchify_stop(batch_df['comp_ids'], max_len=max_length)
    inp_pos = batchify_stop(batch_df['comp_pos_ids'], max_len=max_length)
    inp_simp = batchify_start_stop(batch_df['simp_id'], max_len=max_length)
    # 'new_edit_ids' is used instead of 'edit_ids' because it does not stop early
    tgt = batchify_start_stop(batch_df['new_edit_ids'], max_len=max_length)
    return [inp, inp_pos, tgt, inp_simp], batch_df['comp_tokens']
| 9,193 | 38.459227 | 160 | py |
EditNTS | EditNTS-master/evaluator.py | import numpy as np
import torch
from nltk.translate.bleu_score import *
smooth = SmoothingFunction()
from SARI import SARIsent
import nltk
import data
nltk.data.path.append("/media/nvme/nltk_data")
from label_edits import edit2sent
def sort_by_lens(seq, seq_lengths):
    """Sort a batch by sequence length, longest first.

    Returns the reordered batch, the sorted lengths, and the permutation
    that was applied (needed later to undo the sort).
    """
    sorted_lengths, permutation = torch.sort(seq_lengths, descending=True)
    reordered = seq.index_select(0, permutation)
    return reordered, sorted_lengths, permutation
import nltk
def cal_bleu_score(decoded, target):
    """Sentence-level BLEU of a decoded token list against a single reference,
    smoothed with NLTK's method1."""
    smoothing = nltk.translate.bleu_score.SmoothingFunction().method1
    return nltk.translate.bleu_score.sentence_bleu(
        [target], decoded, smoothing_function=smoothing)
class Evaluator():
    """Tokenized evaluation of an EditNTS model.

    Reports the teacher-forced validation loss plus BLEU and SARI computed
    from greedy (non-teacher-forced) decoding.
    """
    def __init__(self, loss, batch_size=64):
        # loss must be an element-wise criterion: compute_loss below masks
        # individual token losses and re-normalizes by sequence length.
        self.loss = loss
        self.batch_size = batch_size
    def evaluate(self, dataset, vocab, model, args, max_edit_steps=50):
        """ Evaluate a model on given dataset and return performance during training
        Args:
            dataset: an object of data.Dataset()
            model (editNTS model): model to evaluate
            vocab: an object containing data.Vocab()
            args: args from the main methods
        Returns:
            loss (float): loss of the given model on the given dataset evaluated with teacher forcing
            bleu: mean sentence-level BLEU over the dataset
            sari: computed based on python script
            sys_out: list of generated output sentences (one string per example)
        """
        print_loss, print_loss_tf = [], []
        bleu_list = []
        ter = 0.
        sari_list = []
        sys_out=[]
        print('Doing tokenized evaluation')
        # batch_size=1: each dataframe batch holds exactly one example
        for i, batch_df in dataset.batch_generator(batch_size=1, shuffle=False):
            model.eval()
            prepared_batch, syn_tokens_list = data.prepare_batch(batch_df, vocab, args.max_seq_len) # comp,scpn,simp
            org_ids = prepared_batch[0]
            org_lens = org_ids.ne(0).sum(1)
            org = sort_by_lens(org_ids, org_lens) # inp=[inp_sorted, inp_lengths_sorted, inp_sort_order]
            org_pos_ids = prepared_batch[1]
            org_pos_lens = org_pos_ids.ne(0).sum(1)
            org_pos = sort_by_lens(org_pos_ids, org_pos_lens) # inp=[inp_sorted, inp_lengths_sorted, inp_sort_order]
            out = prepared_batch[2][:, :]
            tar = prepared_batch[2][:, 1:]  # target is the edit sequence shifted left by one (drop START)
            simp_ids = prepared_batch[3]
            # best_seq_list = model.beamsearch(org, out,simp_ids, org_ids, org_pos, 5)
            output_without_teacher_forcing = model(org, out, org_ids, org_pos, simp_ids,0.0) #can't compute loss for this one, can only do teacher forcing
            output_teacher_forcing = model(org, out, org_ids, org_pos,simp_ids, 1.0)
            if True: # the loss on validation is computed based on teacher forcing
                ##################calculate loss
                tar_lens = tar.ne(0).sum(1).float()
                tar_flat = tar.contiguous().view(-1)
                def compute_loss(output,tar_flat): #this function computes the loss based on model outputs and target in flat
                    loss = self.loss(output.contiguous().view(-1, vocab.count), tar_flat).contiguous()
                    loss[tar_flat == 1] = 0 # remove loss for UNK
                    loss = loss.view(tar.size())
                    # average per-token loss within each sequence, then over the batch
                    loss = loss.sum(1).float()
                    loss = loss / tar_lens
                    loss = loss.mean()
                    return loss
                loss_tf = compute_loss(output_teacher_forcing,tar_flat)
                print_loss_tf.append(loss_tf.item())
            # the SARI and BLUE is computed based on model.eval without teacher forcing
            for j in range(output_without_teacher_forcing.size()[0]):
                ## write beam search here
                # try:
                if True:
                    example = batch_df.iloc[j]
                    example_out = output_without_teacher_forcing[j, :, :]
                    ##GREEDY: pick the argmax edit action at every step
                    pred_action = torch.argmax(example_out, dim=1).view(-1).data.cpu().numpy()
                    edit_list_in_tokens = data.id2edits(pred_action, vocab)
                    # ###BEST BEAM
                    # edit_list_in_tokens = vocab_data.id2edits(best_seq_list[0][1:], vocab)
                    # apply the predicted edit programme to the complex sentence,
                    # then truncate at the first STOP token
                    greedy_decoded_tokens = ' '.join(edit2sent(example['comp_tokens'], edit_list_in_tokens))
                    greedy_decoded_tokens = greedy_decoded_tokens.split('STOP')[0].split(' ')
                    # tgt_tokens_translated = [vocab.i2w[i] for i in example['simp_ids']]
                    sys_out.append(' '.join(greedy_decoded_tokens))
                    # prt = True if random.random() < 0.01 else False
                    # if prt:
                    #     print('*' * 30)
                    #     # print('tgt_in_tokens_translated', ' '.join(tgt_tokens_translated))
                    #     print('ORG', ' '.join(example['comp_tokens']))
                    #     print('GEN', ' '.join(greedy_decoded_tokens))
                    #     print('TGT', ' '.join(example['simp_tokens']))
                    #     print('edit_list_in_tokens',edit_list_in_tokens)
                    #     print('gold labels', ' '.join(example['edit_labels']))
                    bleu_list.append(cal_bleu_score(greedy_decoded_tokens, example['simp_tokens']))
                    # calculate sari
                    comp_string = ' '.join(example['comp_tokens'])
                    simp_string = ' '.join(example['simp_tokens'])
                    gen_string = ' '.join(greedy_decoded_tokens)
                    sari_list.append(SARIsent(comp_string, gen_string, [simp_string]))
        print('loss_with_teacher_forcing', np.mean(print_loss_tf))
        return np.mean(print_loss_tf), np.mean(bleu_list), np.mean(sari_list), sys_out
EditNTS | EditNTS-master/editnts.py | from __future__ import unicode_literals, print_function, division
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
use_cuda = torch.cuda.is_available()
import data
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
MAX_LEN =100  # upper bound on the number of decoding steps
# Special token strings and their fixed vocabulary ids (shared with data.py).
PAD = 'PAD' # padding token, id 0
UNK = 'UNK' # unknown / out-of-vocabulary token, id 1
KEEP = 'KEEP' # edit action: copy the current source word, id 2
DEL = 'DEL' # edit action: delete the current source word, id 3
START = 'START' # start-of-sequence marker used when decoding, id 4
STOP = 'STOP' # end-of-sequence marker that stops decoding, id 5
PAD_ID = 0 # vocab id of PAD
UNK_ID = 1 # vocab id of UNK
KEEP_ID = 2 # vocab id of KEEP
DEL_ID = 3 # vocab id of DEL
START_ID = 4 # vocab id of START
STOP_ID = 5 # vocab id of STOP
def unsort(x_sorted, sorted_order):
    """Undo a batch sort along dimension 1.

    ``sorted_order[i]`` is the original position of slice ``x_sorted[:, i, :]``,
    so scattering each slice back to that position restores the original order.
    """
    restored = torch.zeros_like(x_sorted)
    restored[:, sorted_order, :] = x_sorted
    return restored
class EncoderRNN(nn.Module):
    """Bi-directional LSTM encoder over concatenated word + POS-tag embeddings.

    Inputs are given pre-sorted by length (see sort_by_lens); outputs are
    restored to the original batch order via unsort before being returned.
    """
    def __init__(self, vocab_size, embedding_dim, pos_vocab_size, pos_embedding_dim,hidden_size, n_layers=1, embedding=None, embeddingPOS=None,dropout=0.3):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        # word embedding: share an externally supplied table if given
        if embedding is None:
            self.embedding = nn.Embedding(vocab_size, embedding_dim)
        else:
            self.embedding = embedding
        # POS-tag embedding: share an externally supplied table if given
        if embeddingPOS is None:
            self.embeddingPOS = nn.Embedding(pos_vocab_size, pos_embedding_dim)
        else:
            self.embeddingPOS = embeddingPOS
        self.rnn = nn.LSTM(embedding_dim+pos_embedding_dim, hidden_size, num_layers=n_layers, batch_first=True, bidirectional=True)
        self.drop = nn.Dropout(dropout)
    def forward(self, inp, inp_pos, hidden):
        """Encode a sorted batch.

        :param inp: tuple (inp_sorted, inp_lengths_sorted, inp_sort_order)
        :param inp_pos: tuple (pos_sorted, pos_lengths_sorted, pos_sort_order)
        :param hidden: initial (h, c) state from initHidden
        :return: (memory_bank, (h, c)) with memory_bank of shape
            (batch, seq, 2*hidden_size), all in original batch order
        """
        #inp and inp pose should be both sorted
        inp_sorted=inp[0]
        inp_lengths_sorted=inp[1]
        inp_sort_order=inp[2]
        inp_pos_sorted = inp_pos[0]
        inp_pos_lengths_sorted = inp_pos[1]
        inp_pos_sort_order = inp_pos[2]
        emb = self.embedding(inp_sorted)
        emb_pos = self.embeddingPOS(inp_pos_sorted)
        # concatenate word and POS embeddings along the feature dimension
        embed_cat = torch.cat((emb,emb_pos),dim=2)
        packed_emb = pack(embed_cat, inp_lengths_sorted,batch_first=True)
        memory_bank, encoder_final = self.rnn(packed_emb, hidden)
        # unpack gives (seq, batch, 2*hidden); undo the length-sort on the batch dim
        memory_bank = unpack(memory_bank)[0]
        memory_bank = unsort(memory_bank, inp_sort_order)
        h_unsorted=unsort(encoder_final[0], inp_sort_order)
        c_unsorted=unsort(encoder_final[1], inp_sort_order)
        # transpose back to batch-first
        return memory_bank.transpose(0,1), (h_unsorted,c_unsorted)
    def initHidden(self, bsz):
        """Return zeroed (h, c) states shaped for the bidirectional LSTM."""
        weight = next(self.parameters()).data
        return Variable(weight.new(self.n_layers * 2, bsz, self.hidden_size).zero_()), \
               Variable(weight.new(self.n_layers * 2, bsz, self.hidden_size).zero_())
class EditDecoderRNN(nn.Module):
    """Decoder that predicts a sequence of edit actions (KEEP/DEL/insert-word/STOP).

    Two LSTMs are maintained: ``rnn_edits`` runs over the edit-action sequence,
    while ``rnn_words`` acts as a language model over the partially built
    simplified sentence (updated per predicted action in ``execute``).
    """
    def __init__(self, vocab_size, embedding_dim, hidden_size, n_layers=1, embedding=None):
        super(EditDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding_dim = embedding_dim
        self.vocab_size = vocab_size
        self.n_layers = n_layers
        if embedding is None:
            self.embedding = nn.Embedding(vocab_size, embedding_dim)
        else:
            self.embedding = embedding
        self.rnn_edits = nn.LSTM(embedding_dim, hidden_size, num_layers=n_layers, batch_first=True)
        self.rnn_words = nn.LSTM(embedding_dim, hidden_size, num_layers=n_layers, batch_first=True)
        self.attn_Projection_org = nn.Linear(hidden_size, hidden_size, bias=False)
        # self.attn_Projection_scpn = nn.Linear(hidden_size, hidden_size, bias=False) #hard attention here
        self.attn_MLP = nn.Sequential(nn.Linear(hidden_size * 4, embedding_dim),
                                      nn.Tanh())
        self.out = nn.Linear(embedding_dim, self.vocab_size)
        # tie the output projection to the first vocab_size rows of the embedding matrix
        self.out.weight.data = self.embedding.weight.data[:self.vocab_size]
    def execute(self, symbol, input, lm_state):
        """
        :param symbol: token_id for predicted edit action (in teacher forcing mode, give the true one)
        :param input: the word_id being editted currently
        :param lm_state: last lstm state
        :return: updated language-model state after applying the edit action
        """
        # predicted_symbol = KEEP -> feed input to RNN_LM
        # predicted_symbol = DEL -> do nothing, return current lm_state
        # predicted_symbol = new word -> feed that word to RNN_LM
        is_keep = torch.eq(symbol, data.KEEP_ID)
        is_del = torch.eq(symbol, data.DEL_ID)
        if is_del:
            return lm_state
        elif is_keep: # return lstm with kept word learned in lstm
            _, new_lm_state = self.rnn_words(self.embedding(input.view(-1, 1)), lm_state)
        else: #consider as insert here
            # print(symbol.item())
            input = self.embedding(symbol.view(-1,1))
            _, new_lm_state = self.rnn_words(input,lm_state)
        return new_lm_state
    def execute_batch(self, batch_symbol, batch_input, batch_lm_state):
        """Apply ``execute`` element-wise over a batch by unbinding it,
        updating each example's LM state independently, and re-stacking."""
        batch_h = batch_lm_state[0]
        batch_c = batch_lm_state[1]
        bsz = batch_symbol.size(0)
        unbind_new_h = []
        unbind_new_c = []
        # unbind all batch inputs
        unbind_symbol = torch.unbind(batch_symbol,dim=0)
        unbind_input = torch.unbind(batch_input,dim=0)
        unbind_h = torch.unbind(batch_h,dim=1)
        unbind_c = torch.unbind(batch_c,dim=1)
        for i in range(bsz):
            elem=self.execute(unbind_symbol[i], unbind_input[i], (unbind_h[i].view(1,1,-1), unbind_c[i].view(1,1,-1)))
            unbind_new_h.append(elem[0])
            unbind_new_c.append(elem[1])
        new_batch_lm_h = torch.cat(unbind_new_h,dim=1)
        new_batch_lm_c = torch.cat(unbind_new_c,dim=1)
        return (new_batch_lm_h,new_batch_lm_c)
    def forward(self, input_edits, hidden_org,encoder_outputs_org, org_ids, simp_sent,teacher_forcing_ratio=1.):
        """Decode an edit-action sequence over the encoded complex sentence.

        With probability ``teacher_forcing_ratio`` the gold edit labels drive
        the recurrence; otherwise the decoder feeds back its own argmax
        predictions. Returns (per-step log-probs over the vocab, final
        edit-LSTM hidden state).
        """
        #input_edits and simp_sent need to be padded with START
        bsz, nsteps = input_edits.size()
        # revisit each word and then decide the action, for each action, do the modification and calculate rouge difference
        use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
        decoder_out = []
        # pointers into the source sentence (advanced on KEEP/DEL) and the
        # simplified sentence (advanced on anything that emits a word)
        counter_for_keep_del = np.zeros(bsz, dtype=int)
        counter_for_keep_ins =np.zeros(bsz, dtype=int)
        # decoder in the training:
        if use_teacher_forcing:
            embedded_edits = self.embedding(input_edits)
            output_edits, hidden_edits = self.rnn_edits(embedded_edits, hidden_org)
            embedded_words = self.embedding(simp_sent)
            output_words, hidden_words = self.rnn_words(embedded_words, hidden_org)
            # soft attention over the encoder outputs, computed for all steps at once
            key_org = self.attn_Projection_org(output_edits)  # bsz x nsteps x nhid MIGHT USE WORD HERE
            logits_org = torch.bmm(key_org, encoder_outputs_org.transpose(1, 2))  # bsz x nsteps x encsteps
            attn_weights_org = F.softmax(logits_org, dim=-1)  # bsz x nsteps x encsteps
            attn_applied_org = torch.bmm(attn_weights_org, encoder_outputs_org)  # bsz x nsteps x nhid
            for t in range(nsteps-1):
                # print(t)
                decoder_output_t = output_edits[:, t:t + 1, :]
                attn_applied_org_t = attn_applied_org[:, t:t + 1, :]
                ## find current word
                inds = torch.LongTensor(counter_for_keep_del)
                dummy = inds.view(-1, 1, 1)
                dummy = dummy.expand(dummy.size(0), dummy.size(1), encoder_outputs_org.size(2)).cuda()
                c = encoder_outputs_org.gather(1, dummy)
                inds = torch.LongTensor(counter_for_keep_ins)
                dummy = inds.view(-1, 1, 1)
                dummy = dummy.expand(dummy.size(0), dummy.size(1), output_words.size(2)).cuda()
                c_word = output_words.gather(1, dummy)
                output_t = torch.cat((decoder_output_t, attn_applied_org_t, c,c_word),
                                     2)  # bsz*nsteps x nhid*2
                output_t = self.attn_MLP(output_t)
                output_t = F.log_softmax(self.out(output_t), dim=-1)
                decoder_out.append(output_t)
                # interpreter's output from lm
                gold_action = input_edits[:, t + 1].data.cpu().numpy() # might need to realign here because start added
                # advance source pointer on KEEP(2) / DEL(3)
                counter_for_keep_del = [i[0] + 1 if i[1] == 2 or i[1] == 3 else i[0]
                                        for i in zip(counter_for_keep_del, gold_action)]
                # advance simplified-sentence pointer on every word-emitting action
                counter_for_keep_ins = [i[0] + 1 if i[1] != DEL_ID and i[1] != STOP_ID and i[1] != PAD_ID else i[0]
                                        for i in zip(counter_for_keep_ins, gold_action)]
                check1 = sum([x >= org_ids.size(1) for x in counter_for_keep_del])
                check2 = sum([x >= simp_sent.size(1) for x in counter_for_keep_ins])
                if check1 or check2:
                    # print(org_ids.size(1))
                    # print(counter_for_keep_del)
                    break
        else:  # no teacher forcing
            decoder_input_edit = input_edits[:, :1]
            decoder_input_word=simp_sent[:,:1]
            t, tt = 0, max(MAX_LEN,input_edits.size(1)-1)
            # initialize
            embedded_edits = self.embedding(decoder_input_edit)
            output_edits, hidden_edits = self.rnn_edits(embedded_edits, hidden_org)
            embedded_words = self.embedding(decoder_input_word)
            output_words, hidden_words = self.rnn_words(embedded_words, hidden_org)
            #
            # # give previous word from tgt simp_sent
            # inds = torch.LongTensor(counter_for_keep_ins)
            # dummy = inds.view(-1, 1, 1)
            # dummy = dummy.expand(dummy.size(0), dummy.size(1), output_words.size(2)).cuda()
            # c_word = output_words.gather(1, dummy)
            while t < tt:
                if t>0:
                    # feed back the previous predicted edit action
                    embedded_edits = self.embedding(decoder_input_edit)
                    output_edits, hidden_edits = self.rnn_edits(embedded_edits, hidden_edits)
                key_org = self.attn_Projection_org(output_edits)  # bsz x nsteps x nhid
                logits_org = torch.bmm(key_org, encoder_outputs_org.transpose(1, 2))  # bsz x nsteps x encsteps
                attn_weights_org_t = F.softmax(logits_org, dim=-1)  # bsz x nsteps x encsteps
                attn_applied_org_t = torch.bmm(attn_weights_org_t, encoder_outputs_org)  # bsz x nsteps x nhid
                ## find current word
                inds = torch.LongTensor(counter_for_keep_del)
                dummy = inds.view(-1, 1, 1)
                dummy = dummy.expand(dummy.size(0), dummy.size(1), encoder_outputs_org.size(2)).cuda()
                c = encoder_outputs_org.gather(1, dummy)
                output_t = torch.cat((output_edits, attn_applied_org_t, c, hidden_words[0]),
                                     2)  # bsz*nsteps x nhid*2
                output_t = self.attn_MLP(output_t)
                output_t = F.log_softmax(self.out(output_t), dim=-1)
                decoder_out.append(output_t)
                decoder_input_edit=torch.argmax(output_t,dim=2)
                # gold_action = input[:, t + 1].vocab_data.cpu().numpy() # might need to realign here because start added
                pred_action= torch.argmax(output_t,dim=2)
                # advance source pointer on KEEP(2) / DEL(3) / STOP(5)
                counter_for_keep_del = [i[0] + 1 if i[1] == 2 or i[1] == 3 or i[1] == 5 else i[0]
                                        for i in zip(counter_for_keep_del, pred_action)]
                # update rnn_words
                # find previous generated word
                # give previous word from tgt simp_sent
                dummy_2 = inds.view(-1, 1).cuda()
                org_t = org_ids.gather(1, dummy_2)
                hidden_words = self.execute_batch(pred_action, org_t, hidden_words)  # we give the editted subsequence
                # hidden_words = self.execute_batch(pred_action, org_t, hidden_org) #here we only give the word
                t += 1
                check = sum([x >= org_ids.size(1) for x in counter_for_keep_del])
                if check:
                    break
        return torch.cat(decoder_out, dim=1), hidden_edits
    def initHidden(self, bsz):
        """Return zeroed (h, c) states shaped for the (unidirectional) decoder LSTMs."""
        weight = next(self.parameters()).data
        return Variable(weight.new(self.n_layers, bsz, self.hidden_size).zero_()), \
               Variable(weight.new(self.n_layers, bsz, self.hidden_size).zero_())
    def beam_forwad_step(self,decoder_input_edits,hidden_edits,hidden_words, org_ids,encoder_outputs_org,counter_for_keep_del,beam_size=5):
        """One beam-search expansion step (name keeps the original 'forwad' typo
        for API compatibility): advance the decoder by one step and return the
        top ``beam_size`` candidate continuations with their updated states."""
        #buffers: each with k elements for next step
        decoder_input_k=[]
        hidden_edits_k=[]
        counter_for_keep_del_k=[]
        prob_k=[]
        hidden_words_k=[]
        # given decoder hidden, forward one step
        embedded = self.embedding(decoder_input_edits)
        decoder_output_t, hidden_edits = self.rnn_edits(embedded, hidden_edits)
        key_org = self.attn_Projection_org(decoder_output_t)  # bsz x nsteps x nhid
        logits_org = torch.bmm(key_org, encoder_outputs_org.transpose(1, 2))  # bsz x nsteps x encsteps
        attn_weights_org_t = F.softmax(logits_org, dim=-1)  # bsz x nsteps x encsteps
        attn_applied_org_t = torch.bmm(attn_weights_org_t, encoder_outputs_org)  # bsz x nsteps x nhid
        ## find current word
        inds = torch.LongTensor(counter_for_keep_del)
        dummy = inds.view(-1, 1, 1)
        dummy = dummy.expand(dummy.size(0), dummy.size(1), encoder_outputs_org.size(2)).cuda()
        c = encoder_outputs_org.gather(1, dummy)
        output_t = torch.cat((decoder_output_t, attn_applied_org_t, c, hidden_words[0]),
                             2)  # bsz*nsteps x nhid*2
        output_t = self.attn_MLP(output_t)
        output_t = F.log_softmax(self.out(output_t), dim=-1)
        # update rnn_words
        # find previous generated word
        # give previous word from tgt simp_sent
        topv, topi = torch.topk(output_t,beam_size, dim=2)
        for b in range(beam_size):
            prob_t_k=topv[:,:,b]
            out_id_t_k=topi[:,:,b]
            counter_for_keep_del = [i[0] + 1 if i[1] == 2 or i[1] == 3 or i[1] == 5 else i[0]
                                    for i in zip(counter_for_keep_del, out_id_t_k)]
            dummy_2 = inds.view(-1, 1).cuda()
            org_t = org_ids.gather(1, dummy_2)
            hidden_words = self.execute_batch(out_id_t_k, org_t, hidden_words)  # input[:, t + 1]=gold action,
            decoder_input_k.append(out_id_t_k)
            hidden_edits_k.append(hidden_edits)
            prob_k.append(prob_t_k)
            hidden_words_k.append(hidden_words)
            counter_for_keep_del_k.append(counter_for_keep_del)
        return decoder_input_k,hidden_edits_k,hidden_words_k,prob_k,counter_for_keep_del_k
    def beam_forward(self, input_edits, simp_sent, hidden_org,encoder_outputs_org, org_ids, beam_size=5):
        """Full beam-search decoding loop (assumes batch size 1).

        Maintains ``beam_size`` partial hypotheses with their log-prob sums,
        decoder states and source pointers, expanding each non-finished
        hypothesis per step and keeping the overall best ``beam_size``.
        """
        # initialize for beam search
        bsz, nsteps = input_edits.size()
        # decoder_out = []
        counter_for_keep_del = np.zeros(bsz, dtype=int)
        # decoder_input = input[:, :1]
        t, tt = 0, max(MAX_LEN, input_edits.size(1) - 1)
        # embedded = self.embedding(decoder_input)
        # output, hidden = self.rnn(embedded, hidden_org)
        # initialize for beam list
        best_k_seqs = [[input_edits[:, :1]]]
        best_k_probs = [0.]
        best_k_hidden_edits = [hidden_org]
        best_k_hidden_words=[hidden_org]
        best_k_counters =[counter_for_keep_del]
        while t < tt:
            # print(t)
            next_best_k_squared_seq = []
            next_best_k_squared_probs = []
            next_best_k_squared_counters = []
            next_best_k_squared_hidden_edits = []
            next_best_k_squared_hidden_words = []
            for b in range(len(best_k_seqs)):
                seq = best_k_seqs[b]
                prob = best_k_probs[b]
                counter = best_k_counters[b]
                hidden_edits = best_k_hidden_edits[b]
                hidden_words = best_k_hidden_words[b]
                check = sum([x >= org_ids.size(1) for x in counter])
                if seq[-1].item() == STOP_ID or check:
                    # if end of token, make sure no children
                    next_best_k_squared_seq.append(seq)
                    next_best_k_squared_probs.append(prob)
                    next_best_k_squared_counters.append(counter)
                    next_best_k_squared_hidden_edits.append(hidden_edits)
                    next_best_k_squared_hidden_words.append(hidden_words)
                else:
                    # append the top k children
                    decoder_input_k, hidden_edits_k,hidden_words_k, prob_k, counter_for_keep_del_k=self.beam_forwad_step(seq[-1],
                                            hidden_edits,hidden_words,org_ids,encoder_outputs_org,counter,beam_size)
                    for i in range(beam_size):
                        next_seq = seq[:]
                        next_seq.append(decoder_input_k[i])
                        next_best_k_squared_seq.append(next_seq)
                        next_best_k_squared_probs.append(prob + prob_k[i].item())
                        next_best_k_squared_counters.append(counter_for_keep_del_k[i])
                        next_best_k_squared_hidden_edits.append(hidden_edits_k[i])
                        next_best_k_squared_hidden_words.append(hidden_words_k[i])
            # contract to the best k
            indexs = np.argsort(next_best_k_squared_probs)[::-1][:beam_size]
            best_k_seqs = [next_best_k_squared_seq[i] for i in indexs]
            best_k_probs = [next_best_k_squared_probs[i] for i in indexs]
            best_k_counters = [next_best_k_squared_counters[i] for i in indexs]
            best_k_hidden_edits = [next_best_k_squared_hidden_edits[i] for i in indexs]
            best_k_hidden_words = [next_best_k_squared_hidden_words[i] for i in indexs]
            t +=1
        return best_k_seqs, best_k_probs, best_k_hidden_edits,best_k_hidden_words,best_k_counters
class EditNTS(nn.Module):
    """End-to-end EditNTS network: a bidirectional LSTM encoder over the
    complex sentence (word + POS embeddings) followed by the edit-programme
    decoder (EditDecoderRNN)."""
    def __init__(self, config, n_layers=2):
        super(EditNTS, self).__init__()
        self.embedding = nn.Embedding(config.vocab_size, config.embedding_dim)
        if not(config.pretrained_embedding is None):
            print('load pre-trained embeddings')
            self.embedding.weight.data.copy_(torch.from_numpy(config.pretrained_embedding))
        self.embeddingPOS = nn.Embedding(config.pos_vocab_size, config.pos_embedding_dim)
        # encoder and decoder share the word-embedding table
        self.encoder1 = EncoderRNN(config.vocab_size, config.embedding_dim,
                                   config.pos_vocab_size, config.pos_embedding_dim,
                                   config.word_hidden_units,
                                   n_layers,
                                   self.embedding, self.embeddingPOS)
        # decoder hidden size is doubled because the encoder is bidirectional
        self.decoder = EditDecoderRNN(config.vocab_size, config.embedding_dim, config.word_hidden_units * 2,
                                      n_layers, self.embedding)
    def forward(self,org,output,org_ids,org_pos,simp_sent,teacher_forcing_ratio=1.0):
        """Encode the complex sentence and decode an edit-label sequence;
        returns per-step log-probs over the edit vocabulary."""
        def transform_hidden(hidden): #for bidirectional encoders
            # concatenate the forward/backward final states into a single layer
            h, c = hidden
            h = torch.cat([h[0], h[1]], dim=1)[None, :, :]
            c = torch.cat([c[0], c[1]], dim=1)[None, :, :]
            hidden = (h, c)
            return hidden
        hidden_org = self.encoder1.initHidden(org[0].size(0))
        encoder_outputs_org, hidden_org = self.encoder1(org,org_pos,hidden_org)
        hidden_org = transform_hidden(hidden_org)
        logp, _ = self.decoder(output, hidden_org, encoder_outputs_org,org_ids,simp_sent,teacher_forcing_ratio)
        return logp
    def beamsearch(self, org, input_edits,simp_sent,org_ids,org_pos, beam_size=5):
        """Encode the complex sentence and run beam-search decoding; returns
        the beam's candidate edit sequences as lists of ids."""
        def transform_hidden(hidden): #for bidirectional encoders
            # concatenate the forward/backward final states into a single layer
            h, c = hidden
            h = torch.cat([h[0], h[1]], dim=1)[None, :, :]
            c = torch.cat([c[0], c[1]], dim=1)[None, :, :]
            hidden = (h, c)
            return hidden
        hidden_org = self.encoder1.initHidden(org[0].size(0))
        encoder_outputs_org, hidden_org = self.encoder1(org,org_pos,hidden_org)
        hidden_org = transform_hidden(hidden_org)
        best_k_seqs, best_k_probs, best_k_hidden_edits, best_k_hidden_words, best_k_counters =\
            self.decoder.beam_forward(input_edits,simp_sent, hidden_org, encoder_outputs_org,org_ids,beam_size)
        best_seq_list=[]
        for sq in best_k_seqs:
            best_seq_list.append([i.item() for i in sq])
        # find final best output
        index = np.argsort(best_k_probs)[::-1][0]
        best_seq = best_k_seqs[index]
        best_seq_np=[i.item() for i in best_seq]
        return best_seq_list
| 21,683 | 46.344978 | 156 | py |
patchNR | patchNR-master/train_patchNR.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
# The script trains the patchNR
import torch
from torch import nn
import FrEIA.framework as Ff
import FrEIA.modules as Fm
import numpy as np
import model
from tqdm import tqdm
import utils
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Choose the image class to train on out of image_classes; this selects the
# training image(s) (example_img) and the validation image (val_img).
image_classes = ['material', 'lung', 'lentils']
image_class = image_classes[2]
print('Image class:' + image_class)
if image_class == 'material':
    example_img = utils.imread('input_imgs/img_learn_material.png')
    val_img = utils.imread('input_imgs/img_val_material.png')
elif image_class == 'lung':
    # LoDoPaB-CT dataset via the dival package; training uses six ground-truth
    # slices, validation uses one.
    from dival import get_standard_dataset
    dataset = get_standard_dataset('lodopab', impl='astra_cuda')
    train = dataset.create_torch_dataset(part='train',
                            reshape=((1,1,) + dataset.space[0].shape,
                            (1,1,) + dataset.space[1].shape))
    val_set = dataset.create_torch_dataset(part='validation',
                            reshape=((1,1,) + dataset.space[0].shape,
                            (1,1,) + dataset.space[1].shape))
    # index [1] selects the ground-truth image of each (observation, gt) pair
    example_img = torch.cat([train[3][1],train[5][1],train[8][1],
                        train[11][1],train[37][1],train[75][1]]).to(DEVICE)
    val_img = val_set[1][1].to(DEVICE)
elif image_class == 'lentils':
    example_img = utils.imread('input_imgs/img_learn_lentils.png')
    val_img = utils.imread('input_imgs/img_val_lentils.png')
else:
    print('Image class is not known')
    exit()
if __name__ == '__main__':
    # Train the patch normalizing flow (patchNR) by maximum likelihood on
    # random patches extracted from the example image(s).
    patch_size = 6
    num_layers = 5
    subnet_nodes = 512
    patchNR = model.create_NF(num_layers, subnet_nodes, dimension=patch_size**2)
    batch_size = 32
    optimizer_steps = 750000
    optimizer = torch.optim.Adam(patchNR.parameters(), lr = 1e-4)
    im2patch = utils.patch_extractor(patch_size=patch_size)
    for k in tqdm(range(optimizer_steps)):
        # extract a random batch of patches from a randomly chosen training image
        idx = np.random.randint(0,example_img.shape[0])
        patch_example = im2patch(example_img[idx].unsqueeze(0),batch_size)
        # negative log-likelihood loss under a standard normal latent:
        # 0.5*||z||^2 minus the log-determinant of the inverse map
        loss = 0
        invs, jac_inv = patchNR(patch_example, rev = True)
        loss += torch.mean(0.5 * torch.sum(invs**2, dim=1) - jac_inv)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # validation step: report NLL on patches of the held-out image
        if k%1000 ==0:
            with torch.no_grad():
                val_patch = im2patch(val_img,batch_size)
                invs, jac_inv = patchNR(val_patch, rev = True)
                val_loss = torch.mean(0.5 * torch.sum(invs**2, dim=1) - jac_inv).item()
                print(k)
                print(loss.item())
                print(val_loss)
        # save weights every 50k steps (overwrites the same checkpoint file)
        if (k+1) % 50000 == 0:
            it = int((k+1)/1000)
            #torch.save({'net_state_dict': patchNR.state_dict()}, 'patchNR_weights/weights_'+image_class + '_'+str(it) + '.pth')
            torch.save({'net_state_dict': patchNR.state_dict()}, 'patchNR_weights/weights_'+image_class + '.pth')
| 3,300 | 36.089888 | 137 | py |
patchNR | patchNR-master/patchNR_CT.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
#
# The script reproduces the numerical example CT in the paper.
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
import model
from tqdm import tqdm
import utils
import dival
from dival import get_standard_dataset
import odl
from odl.contrib.torch import OperatorModule
from dival.util.torch_losses import poisson_loss
from functools import partial
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(DEVICE)
def patchNR(img, lam, patch_size, n_patches_out, patchNR, n_iter_max, operator):
    """
    Reconstruct a CT image from the observation ``img`` by minimizing
    Poisson data-fidelity + lam * patchNR regularizer with Adam.

    Parameters
    ----------
    img : observed sinogram
    lam : regularization parameter
    patch_size : side length of the quadratic patches
    n_patches_out : number of patches sampled per iteration
    patchNR : trained normalizing flow acting on flattened patches
    n_iter_max : number of Adam iterations
    operator : forward (ray transform) operator

    Returns the optimized image tensor (requires_grad is still set).
    """
    obs = img.to(DEVICE)
    center = False
    # initialization: filtered back-projection of the observation
    # (``fbp`` is the module-level operator defined in the __main__ block)
    init = fbp(obs)

    pad_size = 4  # pad the image before extracting patches to avoid boundary effects
    pad = [pad_size] * 4
    input_im2pat = utils.patch_extractor(patch_size, pad=False, center=center)

    # optimize over the image; clone().detach() keeps ``fake_img`` a leaf
    # tensor (torch.tensor(tensor) copies but raises a UserWarning)
    fake_img = init.clone().detach().to(dtype=torch.float, device=DEVICE).requires_grad_(True)
    optim_img = torch.optim.Adam([fake_img], lr=0.005)

    # Poisson log-likelihood for the low-dose CT noise model
    photons_per_pixel = 4096
    mu_max = 81.35858
    criterion = partial(poisson_loss, photons_per_pixel=photons_per_pixel, mu_max=mu_max)

    # Main loop
    for it in tqdm(range(n_iter_max)):
        optim_img.zero_grad()
        # reflection padding avoids boundary artifacts of the patch extractor
        tmp = nn.functional.pad(fake_img, pad, mode='reflect')
        fake_data = input_im2pat(tmp, n_patches_out)
        # patchNR regularizer: negative log-likelihood of the patches
        pred_inv, log_det_inv = patchNR(fake_data, rev=True)
        reg = torch.mean(torch.sum(pred_inv**2, dim=1) / 2) - torch.mean(log_det_inv)
        # data fidelity
        data_fid = criterion(operator(fake_img), obs)
        loss = data_fid + lam * reg
        loss.backward()
        optim_img.step()
    return fake_img
if __name__ == '__main__':
    # choose between a full-angle and a limited-angle scan
    angle_types = ['full', 'limited']
    angle_type = angle_types[1]

    # patchNR architecture
    patch_size = 6
    num_layers = 5
    subnet_nodes = 512

    # load the trained patchNR
    net = model.create_NF(num_layers, subnet_nodes, dimension=patch_size**2)
    weights = torch.load('patchNR_weights/weights_lung.pth')
    net.load_state_dict(weights['net_state_dict'])

    # load the test data
    dataset = get_standard_dataset('lodopab', impl='astra_cuda')
    test = dataset.create_torch_dataset(part='test',
                                        reshape=((1, 1,) + dataset.space[0].shape,
                                                 (1, 1,) + dataset.space[1].shape))
    ray_trafo = dataset.ray_trafo
    if angle_type == 'limited':
        # restrict the sinogram to the angle subset [100, 900)
        lim_dataset = dival.datasets.angle_subset_dataset.AngleSubsetDataset(
            dataset, slice(100, 900), impl='astra_cuda')
        test = lim_dataset.create_torch_dataset(part='test',
                                                reshape=((1, 1,) + lim_dataset.space[0].shape,
                                                         (1, 1,) + lim_dataset.space[1].shape))
        ray_trafo = lim_dataset.ray_trafo

    # test sample (the earlier assignments of sample 64 were immediately
    # overwritten dead code and have been removed)
    gt = test[39][1]
    obs = test[39][0]

    # forward operator and filtered back-projection for the initialization
    operator = OperatorModule(ray_trafo).to(DEVICE)
    fbp = odl.tomo.analytic.filtered_back_projection.fbp_op(
        ray_trafo, filter_type='Hann', frequency_scaling=0.641025641025641)
    fbp = OperatorModule(fbp)

    # reconstruction hyperparameters
    lam = 700
    n_pat = 40000
    iteration = 300
    if angle_type == 'limited':
        iteration = 3000

    rec = patchNR(obs, lam=lam, patch_size=patch_size, n_patches_out=n_pat,
                  patchNR=net, n_iter_max=iteration, operator=operator)
    utils.save_img(rec, 'results/patchNR_' + angle_type + '_angleCT')
    #torch.save(rec,'results/patchNR_'+angle_type+'_angleCT_tens.pt')
| 4,204 | 32.110236 | 94 | py |
patchNR | patchNR-master/utils.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
# The functions are adapted from
#
# J. Hertrich, A. Houdard and C. Redenbach (2022).
# Wasserstein Patch Prior for Image Superresolution.
# IEEE Transactions on Computational Imaging.
# (https://github.com/johertrich/Wasserstein_Patch_Prior)
#
# and
#
# A. Houdard, A. Leclaire, N. Papadakis and J. Rabin.
# Wasserstein Generative Models for Patch-based Texture Synthesis.
# ArXiv Preprint#2007.03408
# (https://github.com/ahoudard/wgenpatex)
import torch
from torch import nn
import skimage.io as io
import numpy as np
import math
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def imread(img_name):
    """Load the image file ``img_name`` as a (1, C, H, W) float tensor on DEVICE.

    Integer-valued images are rescaled from [0, 255] to [0, 1]; a potential
    alpha channel is dropped; grayscale images get an explicit channel axis.
    """
    img = torch.tensor(io.imread(img_name), dtype=torch.float, device=DEVICE)
    # integer images come in [0, 255]; map them to [0, 1]
    if torch.max(img) > 1:
        img /= 255
    # promote grayscale (H, W) images to (H, W, 1)
    if img.dim() < 3:
        img = img.unsqueeze(2)
    # drop the alpha channel of RGBA images
    if img.shape[2] > 3:
        img = img[:, :, :3]
    # (H, W, C) -> (1, C, H, W)
    return img.permute(2, 0, 1).unsqueeze(0)
def save_img(tensor_img, name):
    '''
    Clip ``tensor_img`` to [0, 1] and write it to ``<name>.png``.
    '''
    arr = tensor_img.squeeze().detach().cpu().numpy()
    io.imsave(str(name) + '.png', np.clip(arr, 0, 1))
    return
class gaussian_downsample(nn.Module):
    """
    Downsampling module with Gaussian filtering.

    Convolves a single-channel image with a fixed (non-trainable) normalized
    Gaussian kernel and subsamples via the convolution stride.
    """
    def __init__(self, kernel_size, sigma, stride, pad=False):
        super(gaussian_downsample, self).__init__()
        self.gauss = nn.Conv2d(1, 1, kernel_size, stride=stride, groups=1, bias=False)
        self.gauss.weight.data = self.init_weights(kernel_size, sigma).to(DEVICE)
        self.gauss.weight.requires_grad_(False)
        self.pad = pad
        self.padsize = kernel_size - 1

    def forward(self, x):
        # optional wrap-around (circular) padding of the first rows/columns
        if self.pad:
            x = torch.cat((x, x[:, :, :self.padsize, :]), 2)
            x = torch.cat((x, x[:, :, :, :self.padsize]), 3)
        return self.gauss(x)

    def init_weights(self, kernel_size, sigma):
        """Return a normalized Gaussian kernel of shape (1, 1, k, k)."""
        coords = torch.arange(kernel_size)
        x_grid = coords.repeat(kernel_size).view(kernel_size, kernel_size)
        grid = torch.stack([x_grid, x_grid.t()], dim=-1)
        mean = (kernel_size - 1) / 2.
        variance = sigma ** 2.
        kernel = (1. / (2. * math.pi * variance)) * torch.exp(
            -torch.sum((grid - mean) ** 2., dim=-1) / (2 * variance))
        kernel = kernel / torch.sum(kernel)
        return kernel.view(1, 1, kernel_size, kernel_size)
class patch_extractor(nn.Module):
    """
    Extract (optionally mean-centered) square patches from an image tensor.
    """
    def __init__(self, patch_size, pad=False, center=False):
        super(patch_extractor, self).__init__()
        self.im2pat = nn.Unfold(kernel_size=patch_size)
        self.pad = pad
        self.padsize = patch_size - 1
        self.center = center
        self.patch_size = patch_size

    def forward(self, input, batch_size=0):
        # optional wrap-around padding so every pixel starts a full patch
        if self.pad:
            input = torch.cat((input, input[:, :, :self.padsize, :]), 2)
            input = torch.cat((input, input[:, :, :, :self.padsize]), 3)
        # unfold yields (1, C*k*k, P); reshape to (P, C*k*k)
        patches = self.im2pat(input).squeeze(0).transpose(1, 0)
        # optionally keep only a random subset of the patches
        if batch_size > 0:
            keep = torch.randperm(patches.size(0))[:batch_size]
            patches = patches[keep, :]
        # optionally subtract the mean of every patch
        if self.center:
            patches = patches - torch.mean(patches, -1).unsqueeze(-1)
        return patches
| 3,773 | 33.623853 | 117 | py |
patchNR | patchNR-master/model.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
import torch
from torch import nn
import FrEIA.framework as Ff
import FrEIA.modules as Fm
import numpy as np
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def create_NF(num_layers, sub_net_size, dimension):
    """
    Create the patchNR network: a FrEIA normalizing flow of ``num_layers``
    GLOW coupling blocks, each followed by a fixed random permutation,
    acting on vectors of length ``dimension``.
    """
    def subnet_fc(c_in, c_out):
        # two-hidden-layer fully connected subnetwork used inside each coupling block
        return nn.Sequential(
            nn.Linear(c_in, sub_net_size), nn.ReLU(),
            nn.Linear(sub_net_size, sub_net_size), nn.ReLU(),
            nn.Linear(sub_net_size, c_out))

    nodes = [Ff.InputNode(dimension, name='input')]
    for k in range(num_layers):
        nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                             {'subnet_constructor': subnet_fc, 'clamp': 1.6},
                             name=F'coupling_{k}'))
        # fixed (seeded) random permutation between the coupling blocks
        nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom,
                             {'seed': (k + 1)},
                             name=F'permute_flow_{k}'))
    nodes.append(Ff.OutputNode(nodes[-1], name='output'))
    return Ff.ReversibleGraphNet(nodes, verbose=False).to(DEVICE)
| 1,431 | 33.926829 | 84 | py |
patchNR | patchNR-master/patchNR_zeroshot_material.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
#
# The script reproduces the zero-shot superresolution example with SiC in the paper.
import torch
from torch import nn
import numpy as np
import random
from model import create_NF
from tqdm import tqdm
from utils import *
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# downsample operator
def Downsample(input_img, scale = 0.25):
    """Blur ``input_img`` with a Gaussian kernel and subsample it by 1/scale."""
    gaussian_std = 2.
    kernel_size = 16
    down = gaussian_downsample(kernel_size, gaussian_std, int(1 / scale), pad=False)
    return down(input_img)
def train_patchNR(patchNR, img, patch_size, steps, batch_size, center):
    """
    Train the patchNR on patches of the given (low resolution) image.

    Parameters
    ----------
    patchNR : normalizing flow, trained in place
    img : training image of shape (1, 1, H, W)
    patch_size : side length of the quadratic patches
    steps : number of Adam optimizer steps
    batch_size : number of patches per optimizer step
    center : if True, the patches are mean-centered
    """
    optimizer = torch.optim.Adam(patchNR.parameters(), lr=1e-4)
    im2patch = patch_extractor(patch_size=patch_size, center=center)

    # enlarge the set of training patches by all 8 rotations/mirrorings.
    # NOTE(fix): the original used axes [1] and [0, 1] of the (1, 1, H, W)
    # image, which are singleton batch/channel axes, so the augmentation was
    # a no-op; the spatial axes are [3] (flip) and [2, 3] (rot90).
    patches = torch.empty(0, device=DEVICE)
    for flip in range(2):
        tmp = img if flip == 0 else torch.flip(img, [3])
        for rot in range(4):
            patches = torch.cat([patches, im2patch(torch.rot90(tmp, rot, [2, 3]))])

    for k in tqdm(range(steps)):
        # draw a random batch of training patches
        idx = torch.tensor(random.sample(range(patches.shape[0]), batch_size))
        patch_example = patches[idx, :]
        # negative log-likelihood of the patches under the flow
        invs, jac_inv = patchNR(patch_example, rev=True)
        loss = torch.mean(0.5 * torch.sum(invs**2, dim=1) - jac_inv)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # collect hyperparameters with the trained weights (saving is disabled)
    weights = dict()
    weights['batch_size'] = batch_size
    weights['optimizer_steps'] = steps
    weights['patch_size'] = patch_size
    weights['net_state_dict'] = patchNR.state_dict()
    #torch.save(weights, 'patchNR_zeroshot_SiC_weights.pt')
def patchNR(img, lam, patch_size, n_patches_out, flow, n_iter_max, center, operator):
    """
    Superresolution reconstruction with the patchNR as regularizer:
    minimizes ||operator(pad(x)) - img||^2 + lam * reg(x) over the image x,
    where reg(x) is the negative log-likelihood of the patches of x under
    the trained ``flow``.
    """
    # initialization: bicubic upsampling by the superresolution factor 4
    init = torch.nn.functional.interpolate(img, scale_factor=4, mode='bicubic')
    #save_img(init,'bicubic')
    input_im2pat = patch_extractor(patch_size, pad=False, center=center)
    # optimize over the image; calling .to(DEVICE) *before* requires_grad_
    # guarantees ``fake_img`` is a leaf tensor as required by the optimizer
    # (the original chained .requires_grad_(True).to(DEVICE))
    fake_img = init.clone().detach().to(DEVICE).requires_grad_(True)
    optim_img = torch.optim.Adam([fake_img], lr=0.01)
    # Main loop
    for it in tqdm(range(n_iter_max)):
        optim_img.zero_grad()
        # reflection padding avoids boundary artifacts of the patch extractor
        tmp = torch.nn.functional.pad(fake_img, pad=(6, 6, 6, 6), mode='reflect')
        fake_data = input_im2pat(tmp, n_patches_out)
        # patchNR regularizer
        pred_inv, log_det_inv = flow(fake_data, rev=True)
        reg = torch.mean(torch.sum(pred_inv**2, dim=1) / 2) - torch.mean(log_det_inv)
        # data fidelity (the operator is applied to the padded image)
        data_fid = torch.sum((operator(tmp) - img)**2)
        loss = data_fid + lam * reg
        loss.backward()
        optim_img.step()
    return fake_img
def run_ZeroShot_patchNR(img, load_model = False):
    """Zero-shot superresolution: train a patchNR on ``img`` itself, then reconstruct."""
    patch_size = 6
    center = False
    # training parameters
    train_steps = 10000
    batch_size = 128
    # reconstruction parameters
    n_pat = 50000
    lam = 0.25
    n_iter = 60
    model = create_NF(num_layers=5, sub_net_size=512, dimension=patch_size**2)
    if load_model:
        weights = torch.load('patchNR_zeroshot_SiC_weights.pt')
        patch_size = weights['patch_size']
        model.load_state_dict(weights['net_state_dict'])
    else:
        train_patchNR(model, img, patch_size, train_steps, batch_size, center=center)
    return patchNR(img, lam, patch_size, n_pat, model, n_iter,
                   center=center, operator=Downsample)


if __name__ == '__main__':
    hr = imread('input_imgs/img_test_material.png')
    lr = Downsample(hr)
    lr += 0.01 * torch.randn_like(lr)
    #save_img(lr,'lr_img')
    pred = run_ZeroShot_patchNR(lr, load_model=False)
    save_img(pred, 'results/patchNR_zeroshot_material')
| 4,456 | 32.261194 | 102 | py |
patchNR | patchNR-master/patchNR_deblurring.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
#
# The script reproduces the numerical example Deblurring in the paper.
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
import model
import scipy.io
from tqdm import tqdm
import utils
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(DEVICE)
class Blur(nn.Module):
    """Convolution with the fixed blur kernel stored in 'im05_flit01.mat' (field 'f')."""

    def __init__(self):
        super().__init__()
        kernel = np.array(scipy.io.loadmat('im05_flit01.mat')['f'])
        self.kernel = kernel
        # 'same' padding with reflected borders keeps the image size unchanged
        self.blur = nn.Conv2d(1, 1, kernel.shape[0], bias=False,
                              padding='same', padding_mode='reflect')
        self.blur.weight.data = torch.from_numpy(kernel).float().unsqueeze(0).unsqueeze(0)

    def forward(self, x):
        return self.blur(x)
def patchNR(img, lam, patch_size, n_patches_out, patchNR, n_iter_max):
    """
    Deblur the observation ``img`` by minimizing
    ||Blur(x) - img||^2 + lam * reg(x) over the image x, where reg(x) is
    the negative log-likelihood of the patches of x under the flow.
    """
    obs = img.to(DEVICE)
    operator = Blur().to(DEVICE)
    center = False
    init = obs  # initialize with the blurry observation itself
    pad_size = 4  # pad the image before extracting patches to avoid boundary effects
    pad = [pad_size] * 4
    input_im2pat = utils.patch_extractor(patch_size, pad=False, center=center)
    # clone().detach() keeps ``fake_img`` a leaf tensor for the optimizer
    # (torch.tensor(tensor) copies but raises a UserWarning)
    fake_img = init.clone().detach().to(dtype=torch.float, device=DEVICE).requires_grad_(True)
    optim_img = torch.optim.Adam([fake_img], lr=0.005)
    # Main loop
    for it in tqdm(range(n_iter_max)):
        optim_img.zero_grad()
        tmp = nn.functional.pad(fake_img, pad, mode='reflect')
        fake_data = input_im2pat(tmp, n_patches_out)
        # patchNR regularizer
        pred_inv, log_det_inv = patchNR(fake_data, rev=True)
        reg = torch.mean(torch.sum(pred_inv**2, dim=1) / 2) - torch.mean(log_det_inv)
        # data fidelity
        data_fid = torch.sum((operator(fake_img) - obs)**2)
        loss = data_fid + lam * reg
        loss.backward()
        optim_img.step()
    return fake_img
if __name__ == '__main__':
    # patchNR architecture
    patch_size = 6
    num_layers = 5
    subnet_nodes = 512

    # load the trained patchNR
    net = model.create_NF(num_layers, subnet_nodes, dimension=patch_size**2)
    weights = torch.load('patchNR_weights/weights_lentils.pth')
    net.load_state_dict(weights['net_state_dict'])

    # create the blurred and noisy observation
    operator = Blur().to(DEVICE)
    gt = utils.imread('input_imgs/img_test_lentils.png')
    with torch.no_grad():
        noisy = operator(gt)
    noisy = noisy + 5 / 255 * torch.randn(noisy.shape, device=DEVICE)

    # reconstruction hyperparameters
    lam = 0.87
    n_pat = 40000
    iteration = 600
    rec = patchNR(noisy, lam=lam, patch_size=patch_size, n_patches_out=n_pat,
                  patchNR=net, n_iter_max=iteration)
    utils.save_img(rec, 'results/patchNR_lentils')
| 3,142 | 29.813725 | 108 | py |
patchNR | patchNR-master/patchNR_zeroshot.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
#
# The script reproduces the zero-shot superresolution example in the paper.
import torch
from torch import nn
import numpy as np
import random
from model import create_NF
from tqdm import tqdm
from utils import *
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# downsample operator
def Downsample(input_img, scale = 0.5):
    """Blur ``input_img`` with a Gaussian kernel and subsample it by 1/scale."""
    gaussian_std = 1.
    kernel_size = 16
    down = gaussian_downsample(kernel_size, gaussian_std, int(1 / scale), pad=False)
    return down(input_img)
def train_patchNR(patchNR, img, patch_size, steps, batch_size, center):
    """
    Train the patchNR on patches of the given (low resolution) image.

    Parameters
    ----------
    patchNR : normalizing flow, trained in place
    img : training image of shape (1, 1, H, W)
    patch_size : side length of the quadratic patches
    steps : number of Adam optimizer steps
    batch_size : number of patches per optimizer step
    center : if True, the patches are mean-centered
    """
    optimizer = torch.optim.Adam(patchNR.parameters(), lr=1e-4)
    im2patch = patch_extractor(patch_size=patch_size, center=center)

    # enlarge the set of training patches by all 8 rotations/mirrorings.
    # NOTE(fix): the original used axes [1] and [0, 1] of the (1, 1, H, W)
    # image, which are singleton batch/channel axes, so the augmentation was
    # a no-op; the spatial axes are [3] (flip) and [2, 3] (rot90).
    patches = torch.empty(0, device=DEVICE)
    for flip in range(2):
        tmp = img if flip == 0 else torch.flip(img, [3])
        for rot in range(4):
            patches = torch.cat([patches, im2patch(torch.rot90(tmp, rot, [2, 3]))])

    for k in tqdm(range(steps)):
        # draw a random batch of training patches
        idx = torch.tensor(random.sample(range(patches.shape[0]), batch_size))
        patch_example = patches[idx, :]
        # negative log-likelihood of the patches under the flow
        invs, jac_inv = patchNR(patch_example, rev=True)
        loss = torch.mean(0.5 * torch.sum(invs**2, dim=1) - jac_inv)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # collect hyperparameters with the trained weights (saving is disabled)
    weights = dict()
    weights['batch_size'] = batch_size
    weights['optimizer_steps'] = steps
    weights['patch_size'] = patch_size
    weights['net_state_dict'] = patchNR.state_dict()
    #torch.save(weights, 'patchNR_zeroshot_weights.pt')
def patchNR(img, lam, patch_size, n_patches_out, flow, n_iter_max, center, operator):
    """
    Superresolution reconstruction with the patchNR as regularizer:
    minimizes ||operator(pad(x)) - img||^2 + lam * reg(x) over the image x,
    where reg(x) is the negative log-likelihood of the patches of x under
    the trained ``flow``.
    """
    # initialization: bicubic upsampling by the superresolution factor 2
    init = torch.nn.functional.interpolate(img, scale_factor=2, mode='bicubic')
    #save_img(init,'bicubic')
    input_im2pat = patch_extractor(patch_size, pad=False, center=center)
    # optimize over the image; calling .to(DEVICE) *before* requires_grad_
    # guarantees ``fake_img`` is a leaf tensor as required by the optimizer
    # (the original chained .requires_grad_(True).to(DEVICE))
    fake_img = init.clone().detach().to(DEVICE).requires_grad_(True)
    optim_img = torch.optim.Adam([fake_img], lr=0.01)
    # Main loop
    for it in tqdm(range(n_iter_max)):
        optim_img.zero_grad()
        # reflection padding avoids boundary artifacts of the patch extractor
        tmp = torch.nn.functional.pad(fake_img, pad=(7, 7, 7, 7), mode='reflect')
        fake_data = input_im2pat(tmp, n_patches_out)
        # patchNR regularizer
        pred_inv, log_det_inv = flow(fake_data, rev=True)
        reg = torch.mean(torch.sum(pred_inv**2, dim=1) / 2) - torch.mean(log_det_inv)
        # data fidelity (the operator is applied to the padded image)
        data_fid = torch.sum((operator(tmp) - img)**2)
        loss = data_fid + lam * reg
        loss.backward()
        optim_img.step()
    return fake_img
def run_ZeroShot_patchNR(img, load_model = False):
    """Zero-shot superresolution: train a patchNR on ``img`` itself, then reconstruct."""
    patch_size = 6
    center = False
    # training parameters
    train_steps = 10000
    batch_size = 128
    # reconstruction parameters
    n_pat = 80000
    lam = 0.25
    n_iter = 60
    model = create_NF(num_layers=5, sub_net_size=512, dimension=patch_size**2)
    if load_model:
        weights = torch.load('patchNR_zeroshot_weights.pt')
        patch_size = weights['patch_size']
        model.load_state_dict(weights['net_state_dict'])
    else:
        train_patchNR(model, img, patch_size, train_steps, batch_size, center=center)
    return patchNR(img, lam, patch_size, n_pat, model, n_iter,
                   center=center, operator=Downsample)


if __name__ == '__main__':
    hr = imread('input_imgs/img_test_bsd.png')
    lr = Downsample(hr)
    lr += 0.01 * torch.randn_like(lr)
    #save_img(lr,'lr_img')
    pred = run_ZeroShot_patchNR(lr, load_model=False)
    save_img(pred, 'results/patchNR_zeroshot')
| 4,439 | 31.888889 | 102 | py |
patchNR | patchNR-master/patchNR_superres.py | # This code belongs to the paper
#
# F. Altekrüger, A. Denker, P. Hagemann, J. Hertrich, P. Maass and G. Steidl (2023).
# PatchNR: Learning from Very Few Images by Patch Normalizing Flow Regularization.
# Inverse Problems, vol. 39, no. 6.
#
# Please cite the paper, if you use the code.
#
# The script reproduces the numerical example Superresolution in the paper.
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
import model
from tqdm import tqdm
import utils
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(DEVICE)
def Downsample(input_img, scale = 0.25):
    '''
    Downsample an image by factor 1/scale (default 4) using the Gaussian
    downsampling operator from utils.

    Raises ValueError for scale > 1 (the original printed a message and
    silently returned None, which only deferred the failure to the caller).
    '''
    if scale > 1:
        raise ValueError('Error. Scale factor is larger than 1.')
    gaussian_std = 2
    kernel_size = 16
    # Gaussian downsampling; pad=True wraps the image around (circular padding)
    gaussian_down = utils.gaussian_downsample(kernel_size, gaussian_std, int(1 / scale), pad=True)
    out = gaussian_down(input_img).to(DEVICE)
    return out
def patchNR(img, lam, patch_size, n_patches_out, patchNR, n_iter_max):
    """
    Superresolution reconstruction with the patchNR as regularizer:
    minimizes ||Downsample(x) - img||^2 + lam * reg(x) over the image x,
    where reg(x) is the negative log-likelihood of the patches of x under
    the flow.

    Returns the optimized high-resolution image (requires_grad is still set).
    """
    lr_img = img.to(DEVICE)
    operator = Downsample
    center = False
    # initialization: bicubic upsampling by the superresolution factor 4
    init = F.interpolate(img, scale_factor=4, mode='bicubic')
    input_im2pat = utils.patch_extractor(patch_size, pad=False, center=center)
    # clone().detach() keeps ``fake_img`` a leaf tensor for the optimizer
    # (torch.tensor(tensor) copies but raises a UserWarning)
    fake_img = init.clone().detach().to(dtype=torch.float, device=DEVICE).requires_grad_(True)
    optim_img = torch.optim.Adam([fake_img], lr=0.03)
    # Main loop
    for it in tqdm(range(n_iter_max)):
        optim_img.zero_grad()
        fake_data = input_im2pat(fake_img, n_patches_out)
        # patchNR regularizer
        pred_inv, log_det_inv = patchNR(fake_data, rev=True)
        reg = torch.mean(torch.sum(pred_inv**2, dim=1) / 2) - torch.mean(log_det_inv)
        # data fidelity
        data_fid = torch.sum((operator(fake_img) - lr_img)**2)
        loss = data_fid + lam * reg
        loss.backward()
        optim_img.step()
    return fake_img
if __name__ == '__main__':
    # patchNR architecture
    patch_size = 6
    num_layers = 5
    subnet_nodes = 512

    # load the trained patchNR
    net = model.create_NF(num_layers, subnet_nodes, dimension=patch_size**2)
    weights = torch.load('patchNR_weights/weights_material.pth')
    net.load_state_dict(weights['net_state_dict'])

    # create the low-resolution, noisy observation
    hr = utils.imread('input_imgs/img_test_material.png')
    lr = Downsample(hr)
    lr = lr + 0.01 * torch.randn(lr.shape, device=DEVICE)

    # reconstruction hyperparameters
    lam = 0.15
    n_pat = 130000
    iteration = 300
    rec = patchNR(lr, lam=lam, patch_size=patch_size, n_patches_out=n_pat,
                  patchNR=net, n_iter_max=iteration)
    utils.save_img(rec, 'results/patchNR_material')
| 2,928 | 28.887755 | 132 | py |
AxiCLASS | AxiCLASS-master/external_fz/sphinx-documentation/conf.py | # -*- coding: utf-8 -*-
#
# DarkAges documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 20 22:50:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Make the package one directory up importable so autodoc can find it
# (paths relative to the documentation root must be made absolute).
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

# Mock some heavy modules so autodoc can import the package without its
# runtime dependencies being installed.
from mock import Mock as MagicMock

class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return Mock()

MOCK_MODULES = ['numpy', 'scipy','scipy.interpolate','scipy.integrate','DarkAges']
#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------

# Sphinx extension modules (commented entries kept for easy re-enabling).
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              #'sphinx.ext.todo',
              #'sphinx.ext.coverage',
              #'sphinx.ext.inheritance_diagram',
              'sphinx.ext.napoleon',
              'sphinx.ext.viewcode',
              'sphinx.ext.extlinks',
              'sphinx.ext.mathjax']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Suffix of the source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'DarkAges'
copyright = u'2017, Patrick Stöcker'
author = u'Patrick Stöcker'

# Read the version information from the VERSION file in the parent directory.
with open('../VERSION', 'r') as version_file:
    # the full version, including alpha/beta/rc tags
    release = version_file.readline()
# the short X.Y version
version = '.'.join(release.split('.')[:-1])

# Language for content autogenerated by Sphinx (None defaults to English).
language = None

# Patterns, relative to the source directory, ignored when looking for sources.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# The theme for HTML pages (alternatives kept commented out).
#html_theme = 'classic'
html_theme = 'pyramid'
#html_theme = 'nature'
#html_theme = 'scrolls'

# Custom static files; copied after the builtin static files, so a file
# named "default.css" overwrites the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for the HTML help builder.
htmlhelp_basename = 'DarkAgesdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # the paper size ('letterpaper' or 'a4paper')
    'papersize': 'a4paper',
    # further options kept at their defaults:
    # 'pointsize': '11pt',
    # 'preamble': '',
    # 'figure_align': 'htbp',
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'DarkAges.tex', u'DarkAges Documentation',
     u'Patrick Stöcker', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'darkages', u'DarkAges Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir entry, description, category).
texinfo_documents = [
    (master_doc, 'DarkAges', u'DarkAges Documentation',
     author, 'DarkAges', 'One line description of project.',
     'Miscellaneous'),
]

# -- Autodoc settings ------------------------------------------------------

# include both the class docstring and the __init__ docstring
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_mock_imports = ['scipy','scipy.interpolate','scipy.integrate']

# -- Napoleon settings -----------------------------------------------------
# see http://sphinxcontrib-napoleon.readthedocs.org/en/latest/
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
| 6,269 | 29.585366 | 82 | py |
"""
Usage:
    blue_strip_model <src> <dest>
"""
import logging
import os

import docopt
import torch


def main():
    """Strip the task-specific scoring heads from a saved checkpoint.

    Loads the checkpoint at <src>, selects the EMA weights when EMA was
    enabled (``config['ema_opt'] > 0``), removes all entries whose key
    starts with 'scoring_list.', copies the architecture-related config
    entries, and saves the slimmed checkpoint to <dest>.
    """
    args = docopt.docopt(__doc__)
    print(args)
    if not os.path.exists(args['<src>']):
        # abort early instead of crashing inside torch.load below
        # (the original logged the error but continued regardless)
        logging.error('%s: Cannot find the model', args['<src>'])
        return
    map_location = 'cpu' if not torch.cuda.is_available() else None
    state_dict = torch.load(args['<src>'], map_location=map_location)
    config = state_dict['config']
    # prefer the exponential-moving-average weights when they were tracked
    if config['ema_opt'] > 0:
        state = state_dict['ema']
    else:
        state = state_dict['state']
    # drop the per-task scoring heads; keep everything else
    my_state = {k: v for k, v in state.items() if not k.startswith('scoring_list.')}
    my_config = {k: config[k] for k in ('vocab_size', 'hidden_size', 'num_hidden_layers',
                                        'num_attention_heads', 'hidden_act',
                                        'intermediate_size', 'hidden_dropout_prob',
                                        'attention_probs_dropout_prob',
                                        'max_position_embeddings', 'type_vocab_size',
                                        'initializer_range')}
    torch.save({'state': my_state, 'config': my_config}, args['<dest>'])


if __name__ == '__main__':
    main()
| 1,149 | 29.263158 | 117 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/blue_prepro_std.py | """
Preprocessing BLUE dataset.
Usage:
blue_prepro_std [options] --vocab=<file> --root_dir=<dir> --task_def=<file> --datasets=<str>
Options:
--do_lower_case
--max_seq_len=<int> [default: 128]
--overwrite
"""
# Copyright (c) Microsoft. All rights reserved.
# Modified by Yifan Peng
import json
import logging
import os
import docopt
import numpy as np
from pytorch_pretrained_bert.tokenization import BertTokenizer
from mt_bluebert.data_utils.log_wrapper import create_logger
from mt_bluebert.data_utils.task_def import TaskType, DataFormat, EncoderModelType
from mt_bluebert.data_utils.vocab import Vocabulary
from mt_bluebert.experiments.squad import squad_utils
from mt_bluebert.blue_exp_def import BlueTaskDefs
from mt_bluebert.mt_dnn.batcher import BatchGen
MAX_SEQ_LEN = 512
def load_data(file_path, data_format: DataFormat, task_type: TaskType, label_dict: Vocabulary = None):
    """
    Load a tab-separated BLUE data file into a list of row dicts.

    Args:
        label_dict: map string label to numbers. only valid for Classification task or ranking task.
            For ranking task, better label should have large number

    Note: the file is now opened via a context manager; the original
    iterated ``open(file_path)`` directly and leaked the file handle.
    """
    if task_type == TaskType.Ranking:
        assert data_format == DataFormat.PremiseAndMultiHypothesis

    rows = []
    with open(file_path, encoding="utf-8") as fin:
        for line in fin:
            fields = line.strip().split("\t")
            # parse the tab-separated fields according to the data format
            if data_format == DataFormat.PremiseOnly:
                assert len(fields) == 3
                row = {"uid": fields[0], "label": fields[1], "premise": fields[2]}
            elif data_format == DataFormat.PremiseAndOneHypothesis:
                assert len(fields) == 4
                row = {"uid": fields[0], "label": fields[1],
                       "premise": fields[2], "hypothesis": fields[3]}
            elif data_format == DataFormat.PremiseAndMultiHypothesis:
                assert len(fields) > 5
                row = {"uid": fields[0], "ruid": fields[1].split(","), "label": fields[2],
                       "premise": fields[3], "hypothesis": fields[4:]}
            elif data_format == DataFormat.Sequence:
                assert len(fields) == 4
                row = {"uid": fields[0], "label": json.loads(fields[1]),
                       "premise": json.loads(fields[2]), "offset": json.loads(fields[3])}
            else:
                raise ValueError(data_format)

            # convert the raw label according to the task type
            if task_type == TaskType.Classification:
                if label_dict is not None:
                    row["label"] = label_dict[row["label"]]
                else:
                    row["label"] = int(row["label"])
            elif task_type == TaskType.Regression:
                row["label"] = float(row["label"])
            elif task_type == TaskType.Ranking:
                labels = row["label"].split(",")
                if label_dict is not None:
                    labels = [label_dict[label] for label in labels]
                else:
                    labels = [float(label) for label in labels]
                # the candidate with the best (largest) label is the answer
                row["label"] = int(np.argmax(labels))
                row["olabel"] = labels
            elif task_type == TaskType.Span:
                pass  # don't process row label
            elif task_type == TaskType.SequenceLabeling:
                if label_dict is not None:
                    row["label"] = [label_dict[l] for l in row["label"]]
                else:
                    row["label"] = [int(l) for l in row["label"]]

            assert row["label"] is not None, \
                '%s: %s: label is None. label_dict: %s' % (file_path, row['uid'], label_dict.tok2ind)
            rows.append(row)
    return rows
def truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Trim the pair (tokens_a, tokens_b) in place until their combined
    length is at most ``max_length``.

    On each step a token is dropped from the tail of whichever sequence is
    currently longer; this keeps more information than trimming an equal
    fraction from both (helper copied from
    https://github.com/huggingface/pytorch-pretrained-BERT).
    """
    logger = logging.getLogger(__name__)
    dropped_from_a = False
    dropped_from_b = False
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
            dropped_from_a = True
        else:
            tokens_b.pop()
            dropped_from_b = True
    # Report (at debug level) which side lost tokens.
    if dropped_from_a:
        logger.debug('%s: longer than %s', tokens_a, max_length)
    if dropped_from_b:
        logger.debug('%s: longer than %s', tokens_b, max_length)
def bert_feature_extractor(text_a, text_b=None, max_seq_length=512, tokenize_fn=None):
    """Convert one sentence (or a sentence pair) into BERT model inputs.

    Args:
        text_a: first text (premise).
        text_b: optional second text (hypothesis).
        max_seq_length: hard cap on the encoded length, special tokens included.
        tokenize_fn: tokenizer providing ``tokenize`` and ``convert_tokens_to_ids``.

    Returns:
        ``(input_ids, input_mask, segment_ids)``; ``input_mask`` is always
        ``None`` here (the attention mask is built later at batching time).
    """
    logger = logging.getLogger(__name__)
    tokens_a = tokenize_fn.tokenize(text_a)
    tokens_b = None
    if text_b:
        tokens_b = tokenize_fn.tokenize(text_b)
    if tokens_b:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for one [SEP] & one [CLS] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            logger.debug('%s: longer than %s', text_a, max_seq_length)
            tokens_a = tokens_a[:max_seq_length - 2]
    # "type_ids" (segment ids) mark which segment each wordpiece belongs to;
    # the type-0/type-1 embeddings were learned during pre-training.
    if tokens_b:
        # NOTE(fix): the original code first built the conventional
        # [CLS] a [SEP] b [SEP] encoding and then immediately overwrote it
        # with the b-first encoding below (two identical `if tokens_b:`
        # blocks in a row). The dead first assignment — and its wasted
        # convert_tokens_to_ids call — is removed; the effective
        # b-before-a order is preserved so model inputs are unchanged.
        # (Assumes convert_tokens_to_ids is side-effect free — TODO confirm.)
        input_ids = tokenize_fn.convert_tokens_to_ids(['[CLS]'] + tokens_b + ['[SEP]'] + tokens_a + ['[SEP]'])
        segment_ids = [0] * (len(tokens_b) + 2) + [1] * (len(tokens_a) + 1)
    else:
        input_ids = tokenize_fn.convert_tokens_to_ids(['[CLS]'] + tokens_a + ['[SEP]'])
        segment_ids = [0] * len(input_ids)
    input_mask = None
    return input_ids, input_mask, segment_ids
def split_if_longer(data, label_mapper, max_seq_len=30):
    """Split sequence-labeling samples longer than ``max_seq_len`` words.

    Over-long samples are cut into consecutive chunks. The cut point is
    moved backwards from the ``max_seq_len`` boundary until it lands on a
    token whose label maps to 'O', so the split never falls inside a
    labeled span. Chunk uids are derived from the document id and the
    offset of the chunk's first token.
    """
    rows = []
    for idx, sample in enumerate(data):
        # uid = sample['uid']
        docid = sample['uid'].split('.')[0]
        premise = sample['premise']
        label = sample['label']
        offset = sample['offset']
        while len(premise) > max_seq_len:
            # Pop labels off the tail of the window copy until one maps to
            # 'O'; len(tmplabel) + 1 then gives the chunk length.
            tmplabel = label[:max_seq_len]
            for iidx in range(len(tmplabel)):
                if label_mapper[tmplabel.pop()] == 'O':
                    break
            p = premise[:len(tmplabel) + 1]
            l = label[:len(tmplabel) + 1]
            o = offset[:len(tmplabel) + 1]
            # NOTE(review): offsets look like ';'-separated strings whose
            # first field is a position — TODO confirm against the data files.
            uid = '{}.{}'.format(docid, o[0].split(';')[0])
            rows.append({"uid": uid, "label": l, "premise": p, "offset": o})
            premise = premise[len(tmplabel) + 1:]
            label = label[len(tmplabel) + 1:]
            offset = offset[len(tmplabel) + 1:]
        if len(premise) == 0:
            continue
        # Emit whatever remains after chunking (or the whole short sample).
        uid = '{}.{}'.format(docid, offset[0].split(';')[0])
        rows.append({"uid": uid, "label": label, "premise": premise, "offset": offset})
    return rows
def build_data_sequence(data, dump_path, max_seq_len=MAX_SEQ_LEN, tokenizer=None, label_mapper=None):
    """Tokenize sequence-labeling samples and dump BERT features as JSON lines.

    Each word is split into word pieces; the first piece keeps the word's
    label/offset while continuation pieces get the special 'X' label. The
    sequence is wrapped in [CLS]/[SEP] (mapped through ``label_mapper``) and
    truncated to ``max_seq_len``.
    """
    with open(dump_path, 'w', encoding='utf-8') as writer:
        for idx, sample in enumerate(data):
            ids = sample['uid']
            premise = sample['premise']
            ne_labels = sample['label']
            premise_offset = sample['offset']
            tokens = []
            labels = []
            offsets = []
            for i, word in enumerate(premise):
                subwords = tokenizer.tokenize(word)
                tokens.extend(subwords)
                for j in range(len(subwords)):
                    if j == 0:
                        labels.append(ne_labels[i])
                        offsets.append(premise_offset[i])
                    else:
                        # Continuation word pieces carry the padding label 'X'.
                        labels.append(label_mapper['X'])
                        offsets.append('X')
            # Fix: truncate based on the word-piece count, not the word count;
            # sub-word expansion can push len(tokens) past max_seq_len - 2
            # even when len(premise) is within it.
            if len(tokens) > max_seq_len - 2:
                tokens = tokens[:max_seq_len - 2]
                labels = labels[:max_seq_len - 2]
                offsets = offsets[:max_seq_len - 2]
            label = [label_mapper['CLS']] + labels + [label_mapper['SEP']]
            offsets = ['X'] + offsets + ['X']
            input_ids = tokenizer.convert_tokens_to_ids(['[CLS]'] + tokens + ['[SEP]'])
            assert len(label) == len(input_ids)
            type_ids = [0] * len(input_ids)
            features = {'uid': ids, 'label': label, 'token_id': input_ids, 'type_id': type_ids, 'offset': offsets}
            writer.write('{}\n'.format(json.dumps(features)))
def build_data_premise_only(data, dump_path, max_seq_len=MAX_SEQ_LEN, tokenizer=None,
                            encoderModelType=EncoderModelType.BERT):
    """Build data of single sentence tasks.

    Encodes each sample's premise with BERT conventions and writes one JSON
    object per line to ``dump_path``.
    """
    with open(dump_path, 'w', encoding='utf-8') as writer:
        for sample in data:
            uid = sample['uid']
            premise = sample['premise']
            label = sample['label']
            assert encoderModelType == EncoderModelType.BERT
            token_ids, _, segment_ids = bert_feature_extractor(
                premise, max_seq_length=max_seq_len, tokenize_fn=tokenizer)
            record = {'uid': uid, 'label': label, 'token_id': token_ids, 'type_id': segment_ids}
            writer.write(json.dumps(record) + '\n')
def build_data_premise_and_one_hypo(
        data, dump_path, task_type, max_seq_len=MAX_SEQ_LEN, tokenizer=None, encoderModelType=EncoderModelType.BERT):
    """Build data of sentence pair tasks

    For ``TaskType.Span`` the gold answer span is mapped into word-piece
    coordinates and stored with the token ids; for every other task type
    only the label and the BERT inputs are written. Output is one JSON
    object per line.
    """
    with open(dump_path, 'w', encoding='utf-8') as writer:
        for idx, sample in enumerate(data):
            ids = sample['uid']
            premise = sample['premise']
            hypothesis = sample['hypothesis']
            label = sample['label']
            assert encoderModelType == EncoderModelType.BERT
            input_ids, _, type_ids = bert_feature_extractor(
                premise, hypothesis, max_seq_length=max_seq_len, tokenize_fn=tokenizer)
            if task_type == TaskType.Span:
                # Index where the second (type-1) segment begins, i.e. the
                # number of type-0 positions in type_ids.
                seg_a_start = len(type_ids) - sum(type_ids)
                seg_a_end = len(type_ids)
                answer_start, answer_end, answer, is_impossible = squad_utils.parse_squad_label(label)
                span_start, span_end = squad_utils.calc_tokenized_span_range(premise, hypothesis, answer,
                                                                             answer_start, answer_end,
                                                                             tokenizer, encoderModelType)
                # Shift the span into whole-sequence coordinates and clamp it.
                span_start = seg_a_start + span_start
                span_end = min(seg_a_end, seg_a_start + span_end)
                answer_tokens = tokenizer.convert_ids_to_tokens(input_ids[span_start:span_end])
                # An empty/inverted span marks the answer as unmappable.
                if span_start >= span_end:
                    span_start = -1
                    span_end = -1
                features = {
                    'uid': ids,
                    'label': is_impossible,
                    'answer': answer,
                    "answer_tokens": answer_tokens,
                    "token_start": span_start,
                    "token_end": span_end,
                    'token_id': input_ids,
                    'type_id': type_ids}
            else:
                features = {
                    'uid': ids,
                    'label': label,
                    'token_id': input_ids,
                    'type_id': type_ids}
            writer.write('{}\n'.format(json.dumps(features)))
def build_data_premise_and_multi_hypo(
        data, dump_path, max_seq_len=MAX_SEQ_LEN, tokenizer=None, encoderModelType=EncoderModelType.BERT):
    """Build QNLI as a pair-wise ranking task.

    Each sample holds one premise and two candidate hypotheses; both pairs
    are encoded and written together as one JSON line.
    """
    with open(dump_path, 'w', encoding='utf-8') as writer:
        for sample in data:
            uid = sample['uid']
            premise = sample['premise']
            first_hypo = sample['hypothesis'][0]
            second_hypo = sample['hypothesis'][1]
            label = sample['label']
            assert encoderModelType == EncoderModelType.BERT
            ids_first, _, types_first = bert_feature_extractor(
                premise, first_hypo, max_seq_length=max_seq_len, tokenize_fn=tokenizer)
            ids_second, _, types_second = bert_feature_extractor(
                premise, second_hypo, max_seq_length=max_seq_len, tokenize_fn=tokenizer)
            record = {
                'uid': uid,
                'label': label,
                'token_id': [ids_first, ids_second],
                'type_id': [types_first, types_second],
                'ruid': sample['ruid'],
                'olabel': sample['olabel'],
            }
            writer.write(json.dumps(record) + '\n')
def build_data(data, dump_path, tokenizer, data_format=DataFormat.PremiseOnly,
               max_seq_len=MAX_SEQ_LEN, encoderModelType=EncoderModelType.BERT, task_type=None, lab_dict=None):
    """Dispatch samples to the feature builder matching ``data_format``.

    Raises ``ValueError`` for an unknown data format.
    """
    # We only support BERT based MRC for now
    if task_type == TaskType.Span:
        assert data_format == DataFormat.PremiseAndOneHypothesis
        assert encoderModelType == EncoderModelType.BERT
    # Table-driven dispatch; each entry closes over the builder's arguments.
    builders = {
        DataFormat.PremiseOnly:
            lambda: build_data_premise_only(data, dump_path, max_seq_len, tokenizer),
        DataFormat.PremiseAndOneHypothesis:
            lambda: build_data_premise_and_one_hypo(data, dump_path, task_type, max_seq_len, tokenizer),
        DataFormat.PremiseAndMultiHypothesis:
            lambda: build_data_premise_and_multi_hypo(data, dump_path, max_seq_len, tokenizer),
        DataFormat.Sequence:
            lambda: build_data_sequence(data, dump_path, max_seq_len, tokenizer, lab_dict),
    }
    if data_format not in builders:
        raise ValueError(data_format)
    builders[data_format]()
def main(args):
    """Preprocess the requested BLUE datasets into MT-DNN JSON files.

    ``args`` is the docopt argument dict: reads ``{task}_{split}.tsv`` files
    from ``--root_dir``, tokenizes them with the given BERT vocab, writes one
    JSON-lines file per task/split into a casing-suffixed subdirectory, and
    finally concatenates train and dev per task.
    """
    root = args['--root_dir']
    assert os.path.exists(root)
    max_seq_len = int(args['--max_seq_len'])
    log_file = os.path.join(root, 'blue_prepro_std_{}.log'.format(max_seq_len))
    logger = create_logger(__name__, to_disk=True, log_file=log_file)
    # Derive the output-directory suffix from the vocab/casing settings.
    is_uncased = False
    if 'uncased' in args['--vocab']:
        is_uncased = True
    do_lower_case = args['--do_lower_case']
    mt_dnn_suffix = 'bert'
    encoder_model = EncoderModelType.BERT
    tokenizer = BertTokenizer.from_pretrained(args['--vocab'], do_lower_case=do_lower_case)
    if is_uncased:
        mt_dnn_suffix = '{}_uncased'.format(mt_dnn_suffix)
    else:
        mt_dnn_suffix = '{}_cased'.format(mt_dnn_suffix)
    if do_lower_case:
        mt_dnn_suffix = '{}_lower'.format(mt_dnn_suffix)
    mt_dnn_root = os.path.join(root, mt_dnn_suffix)
    if not os.path.isdir(mt_dnn_root):
        os.mkdir(mt_dnn_root)
    task_defs = BlueTaskDefs(args['--task_def'])
    if args['--datasets'] == 'all':
        tasks = task_defs.tasks
    else:
        tasks = args['--datasets'].split(',')
    # Tokenize and dump every split of every requested task.
    for task in tasks:
        logger.info("Task %s" % task)
        data_format = task_defs.data_format_map[task]
        task_type = task_defs.task_type_map[task]
        label_mapper = task_defs.label_mapper_map[task]
        split_names = task_defs.split_names_map[task]
        for split_name in split_names:
            dump_path = os.path.join(mt_dnn_root, f"{task}_{split_name}.json")
            if os.path.exists(dump_path) and not args['--overwrite']:
                logger.warning('%s: Not overwrite %s: %s', task, split_name, dump_path)
                continue
            rows = load_data(os.path.join(root, f"{task}_{split_name}.tsv"), data_format, task_type, label_mapper)
            logger.info('%s: Loaded %s %s samples', task, len(rows), split_name)
            if task_type == TaskType.SequenceLabeling:
                # Sequence-labeling samples are first split into <=30-word chunks.
                rows = split_if_longer(rows, label_mapper, 30)
            build_data(rows, dump_path, tokenizer, data_format, max_seq_len=max_seq_len,
                       encoderModelType=encoder_model, task_type=task_type, lab_dict=label_mapper)
            logger.info('%s: Done', task)
    # combine train and dev
    for task in tasks:
        logger.info("Task %s: combine train and dev" % task)
        dump_path = os.path.join(mt_dnn_root, f"{task}_train+dev.json")
        if os.path.exists(dump_path) and not args['--overwrite']:
            logger.warning('%s: Not overwrite train+dev: %s', task, dump_path)
            continue
        task_type = task_defs.task_type_map[task]
        # BatchGen.load re-reads the dumped JSON and filters over-long samples.
        train_path = os.path.join(mt_dnn_root, f"{task}_train.json")
        train_rows = BatchGen.load(train_path, task_type=task_type)
        dev_path = os.path.join(mt_dnn_root, f"{task}_dev.json")
        dev_rows = BatchGen.load(dev_path, task_type=task_type)
        with open(dump_path, 'w', encoding='utf-8') as fp:
            for features in train_rows + dev_rows:
                fp.write('{}\n'.format(json.dumps(features)))
        logger.info('%s: Done', task)
# Script entry point: docopt parses the module docstring into the args dict.
if __name__ == '__main__':
    args = docopt.docopt(__doc__)
    main(args)
| 17,889 | 41.901679 | 119 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/blue_train.py | # Copyright (c) Microsoft. All rights reserved.
# Modified by Yifan Peng
import argparse
import copy
import json
import os
import random
from datetime import datetime
import numpy as np
import torch
from pytorch_pretrained_bert.modeling import BertConfig
from tensorboardX import SummaryWriter
# from experiments.glue.glue_utils import submit, eval_model
from mt_bluebert.blue_exp_def import BlueTaskDefs
from mt_bluebert.blue_inference import eval_model
from mt_bluebert.data_utils.log_wrapper import create_logger
from mt_bluebert.data_utils.task_def import EncoderModelType
from mt_bluebert.data_utils.utils import set_environment
# from torch.utils.tensorboard import SummaryWriter
from mt_bluebert.mt_dnn.batcher import BatchGen
from mt_bluebert.mt_dnn.model import MTDNNModel
def model_config(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register model-architecture options on *parser* and return it."""
    add = parser.add_argument
    add('--update_bert_opt', default=0, type=int)
    add('--multi_gpu_on', action='store_true')
    add('--mem_cum_type', type=str, default='simple',
        help='bilinear/simple/defualt')
    add('--answer_num_turn', type=int, default=5)
    add('--answer_mem_drop_p', type=float, default=0.1)
    add('--answer_att_hidden_size', type=int, default=128)
    add('--answer_att_type', type=str, default='bilinear',
        help='bilinear/simple/defualt')
    add('--answer_rnn_type', type=str, default='gru',
        help='rnn/gru/lstm')
    add('--answer_sum_att_type', type=str, default='bilinear',
        help='bilinear/simple/defualt')
    add('--answer_merge_opt', type=int, default=1)
    add('--answer_mem_type', type=int, default=1)
    add('--answer_dropout_p', type=float, default=0.1)
    add('--answer_weight_norm_on', action='store_true')
    add('--dump_state_on', action='store_true')
    add('--answer_opt', type=int, default=0, help='0,1')
    add('--label_size', type=str, default='3')
    add('--mtl_opt', type=int, default=0)
    add('--ratio', type=float, default=0)
    add('--mix_opt', type=int, default=0)
    add('--max_seq_len', type=int, default=512)
    add('--init_ratio', type=float, default=1)
    add('--encoder_type', type=int, default=EncoderModelType.BERT)
    return parser
def data_config(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register data, path and logging options on *parser* and return it."""
    add = parser.add_argument
    add('--log_file', default='mt-dnn-train.log', help='path for log file.')
    add('--tensorboard', action='store_true')
    add('--tensorboard_logdir', default='tensorboard_logdir')
    add("--init_checkpoint", default='mt_dnn_models/bert_model_base.pt', type=str)
    add('--data_dir', default='blue_data/canonical_data/bert_uncased_lower')
    add('--data_sort_on', action='store_true')
    add('--name', default='farmer')
    add('--task_def', type=str, default="experiments/blue/blue_task_def.yml")
    add('--train_datasets', default='mnli')
    add('--test_datasets', default='mnli_mismatched,mnli_matched')
    return parser
def train_config(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register optimization / training-loop options on *parser* and return it."""
    add = parser.add_argument
    add('--cuda', type=bool, default=torch.cuda.is_available(),
        help='whether to use GPU acceleration.')
    add('--log_per_updates', type=int, default=500)
    add('--save_per_updates', type=int, default=10000)
    add('--save_per_updates_on', action='store_true')
    add('--epochs', type=int, default=5)
    add('--batch_size', type=int, default=8)
    add('--batch_size_eval', type=int, default=8)
    add('--optimizer', default='adamax',
        help='supported optimizer: adamax, sgd, adadelta, adam')
    add('--grad_clipping', type=float, default=0)
    add('--global_grad_clipping', type=float, default=1.0)
    add('--weight_decay', type=float, default=0)
    add('--learning_rate', type=float, default=5e-5)
    add('--momentum', type=float, default=0)
    add('--warmup', type=float, default=0.1)
    add('--warmup_schedule', type=str, default='warmup_linear')
    add('--adam_eps', type=float, default=1e-6)
    add('--vb_dropout', action='store_false')
    add('--dropout_p', type=float, default=0.1)
    add('--dropout_w', type=float, default=0.000,
        help='Randomly drop a fraction drooput_w of training instances.')
    add('--bert_dropout_p', type=float, default=0.1)
    # loading
    add("--model_ckpt", default='checkpoints/model_0.pt', type=str)
    add("--resume", action='store_true')
    # EMA
    add('--ema_opt', type=int, default=0)
    add('--ema_gamma', type=float, default=0.995)
    # scheduler
    add('--have_lr_scheduler', dest='have_lr_scheduler', action='store_false')
    add('--multi_step_lr', type=str, default='10,20,30')
    add('--freeze_layers', type=int, default=-1)
    add('--embedding_opt', type=int, default=0)
    add('--lr_gamma', type=float, default=0.5)
    add('--bert_l2norm', type=float, default=0.0)
    add('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
    add('--output_dir', default='checkpoint')
    add('--seed', type=int, default=2018,
        help='random seed for data shuffling, embedding init, etc.')
    add('--grad_accumulation_step', type=int, default=1)
    # fp 16
    add('--fp16', action='store_true',
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    add('--fp16_opt_level', type=str, default='O1',
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
             "See details at https://nvidia.github.io/apex/amp.html")
    # save
    add('--not_save', action='store_true', help="Don't save the model")
    return parser
def dump(path, data):
    """Serialize *data* as a single JSON document written to *path*."""
    with open(path, 'w') as fp:
        fp.write(json.dumps(data))
def dump2(path, uids, scores, predictions):
    """Write one JSON object per sample (uid, score, prediction) to *path*."""
    records = ({'uid': uid, 'score': score, 'prediction': pred}
               for uid, score, pred in zip(uids, scores, predictions))
    with open(path, 'w') as fp:
        for record in records:
            fp.write(json.dumps(record) + '\n')
def generate_decoder_opt(enable_san, max_opt):
    """Return ``max_opt`` when SAN decoding is enabled and ``max_opt`` < 3,
    otherwise 0 (plain decoding)."""
    return max_opt if (enable_san and max_opt < 3) else 0
def get_args() -> argparse.Namespace:
    """Build the full CLI parser (data, model, then training options) and
    parse ``sys.argv``."""
    parser = argparse.ArgumentParser()
    for configure in (data_config, model_config, train_config):
        parser = configure(parser)
    return parser.parse_args()
def main():
    """Train MT-DNN on the configured BLUE tasks.

    Builds one BatchGen per train/dev/test dataset, assembles the multi-task
    model options, then runs the epoch loop: shuffled multi-task updates
    followed by per-dataset dev/test evaluation and per-epoch checkpointing.
    """
    args = get_args()
    args.train_datasets = args.train_datasets.split(',')
    args.test_datasets = args.test_datasets.split(',')
    args.output_dir = os.path.abspath(args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)
    # log_path = args.log_file
    logger = create_logger(__name__, to_disk=True, log_file=args.log_file)
    logger.info('args: %s', json.dumps(vars(args), indent=2))
    set_environment(args.seed, args.cuda)
    task_defs = BlueTaskDefs(args.task_def)
    encoder_type = task_defs.encoder_type
    assert encoder_type == EncoderModelType.BERT, '%s: only support BERT' % encoder_type
    args.encoder_type = encoder_type
    logger.info('Launching the MT-DNN training')
    # update data dir
    # Build the training BatchGens. Each task gets its own decoder head
    # (task_id); with --mtl_opt > 0, tasks sharing a class count share a head.
    train_data_list = []
    tasks = {}
    tasks_class = {}
    nclass_list = []
    decoder_opts = []
    task_types = []
    dropout_list = []
    for dataset in args.train_datasets:
        task = dataset.split('_')[0]
        if task in tasks:
            logger.warning('Skipping: %s in %s', task, tasks)
            continue
        assert task in task_defs.n_class_map, \
            '%s not in n_class_map: %s' % (task, task_defs.n_class_map)
        assert task in task_defs.data_format_map, \
            '%s not in data_format_map: %s' % (task, task_defs.data_format_map)
        data_type = task_defs.data_format_map[task]
        nclass = task_defs.n_class_map[task]
        task_id = len(tasks)
        if args.mtl_opt > 0:
            task_id = tasks_class[nclass] if nclass in tasks_class else len(tasks_class)
        task_type = task_defs.task_type_map[task]
        dopt = generate_decoder_opt(task_defs.enable_san_map[task], args.answer_opt)
        if task_id < len(decoder_opts):
            decoder_opts[task_id] = min(decoder_opts[task_id], dopt)
        else:
            decoder_opts.append(dopt)
        task_types.append(task_type)
        if task not in tasks:
            tasks[task] = len(tasks)
            if args.mtl_opt < 1:
                nclass_list.append(nclass)
        if nclass not in tasks_class:
            tasks_class[nclass] = len(tasks_class)
            if args.mtl_opt > 0:
                nclass_list.append(nclass)
        dropout_p = task_defs.dropout_p_map.get(task, args.dropout_p)
        dropout_list.append(dropout_p)
        # use train and dev
        train_path = os.path.join(args.data_dir, f'{dataset}_train+dev.json')
        logger.info('Loading %s as task %s', task, task_id)
        train_data = BatchGen(
            BatchGen.load(train_path, True, task_type=task_type, maxlen=args.max_seq_len),
            batch_size=args.batch_size,
            dropout_w=args.dropout_w,
            gpu=args.cuda,
            task_id=task_id,
            maxlen=args.max_seq_len,
            data_type=data_type,
            task_type=task_type,
            encoder_type=encoder_type)
        train_data_list.append(train_data)
    # Build evaluation-mode BatchGens for every test dataset's dev/test split.
    dev_data_list = []
    test_data_list = []
    for dataset in args.test_datasets:
        task = dataset.split('_')[0]
        task_id = tasks_class[task_defs.n_class_map[task]] if args.mtl_opt > 0 else tasks[task]
        task_type = task_defs.task_type_map[task]
        data_type = task_defs.data_format_map[task]
        dev_path = os.path.join(args.data_dir, f'{dataset}_dev.json')
        dev_data = BatchGen(
            BatchGen.load(dev_path, False, task_type=task_type, maxlen=args.max_seq_len),
            batch_size=args.batch_size_eval,
            gpu=args.cuda,
            is_train=False,
            task_id=task_id,
            maxlen=args.max_seq_len,
            data_type=data_type,
            task_type=task_type,
            encoder_type=encoder_type)
        dev_data_list.append(dev_data)
        test_path = os.path.join(args.data_dir, f'{dataset}_test.json')
        test_data = BatchGen(
            BatchGen.load(test_path, False, task_type=task_type, maxlen=args.max_seq_len),
            batch_size=args.batch_size_eval,
            gpu=args.cuda,
            is_train=False,
            task_id=task_id,
            maxlen=args.max_seq_len,
            data_type=data_type,
            task_type=task_type,
            encoder_type=encoder_type)
        test_data_list.append(test_data)
    # Merge CLI args with the per-task decoder settings into the model opt dict.
    opt = copy.deepcopy(vars(args))
    opt['answer_opt'] = decoder_opts
    opt['task_types'] = task_types
    opt['tasks_dropout_p'] = dropout_list
    label_size = ','.join([str(l) for l in nclass_list])
    opt['label_size'] = label_size
    logger.info('#' * 20)
    logger.info('opt: %s', json.dumps(opt, indent=2))
    logger.info('#' * 20)
    # Load the initial BERT checkpoint (weights + config) if it exists;
    # otherwise fall back to a randomly initialized base config.
    bert_model_path = args.init_checkpoint
    state_dict = None
    if os.path.exists(bert_model_path):
        state_dict = torch.load(bert_model_path)
        config = state_dict['config']
        config['attention_probs_dropout_prob'] = args.bert_dropout_p
        config['hidden_dropout_prob'] = args.bert_dropout_p
        opt.update(config)
    else:
        logger.error('#' * 20)
        logger.error('Could not find the init model!\n'
                     'The parameters will be initialized randomly!')
        logger.error('#' * 20)
        config = BertConfig(vocab_size_or_config_json_file=30522).to_dict()
        opt.update(config)
    all_iters = [iter(item) for item in train_data_list]
    all_lens = [len(bg) for bg in train_data_list]
    # div number of grad accumulation.
    num_all_batches = args.epochs * sum(all_lens) // args.grad_accumulation_step
    logger.info('############# Gradient Accumulation Info #############')
    logger.info('number of step: %s', args.epochs * sum(all_lens))
    logger.info('number of grad grad_accumulation step: %s', args.grad_accumulation_step)
    logger.info('adjusted number of step: %s', num_all_batches)
    logger.info('############# Gradient Accumulation Info #############')
    if len(train_data_list) > 1 and args.ratio > 0:
        num_all_batches = int(args.epochs * (len(train_data_list[0]) * (1 + args.ratio)))
    model = MTDNNModel(opt, state_dict=state_dict, num_train_step=num_all_batches)
    if args.resume and args.model_ckpt:
        logger.info('loading model from %s', args.model_ckpt)
        model.load(args.model_ckpt)
    # model meta str
    headline = '############# Model Arch of MT-DNN #############'
    # print network
    logger.debug('\n{}\n{}\n'.format(headline, model.network))
    # dump config
    config_file = os.path.join(args.output_dir, 'config.json')
    with open(config_file, 'a', encoding='utf-8') as writer:
        writer.write('{}\n'.format(json.dumps(opt)))
        writer.write('\n{}\n{}\n'.format(headline, model.network))
    logger.info("Total number of params: %s", model.total_param)
    # tensorboard
    if args.tensorboard:
        args.tensorboard_logdir = os.path.join(args.output_dir, args.tensorboard_logdir)
        tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)
    for epoch in range(0, args.epochs):
        logger.warning('At epoch %s', epoch)
        for train_data in train_data_list:
            train_data.reset()
        start = datetime.now()
        # Build the per-epoch task schedule: one entry per batch, value is the
        # index of the task whose iterator supplies that batch. With --ratio,
        # auxiliary tasks are down-sampled relative to the first task.
        all_indices = []
        if len(train_data_list) > 1 and args.ratio > 0:
            main_indices = [0] * len(train_data_list[0])
            extra_indices = []
            for i in range(1, len(train_data_list)):
                extra_indices += [i] * len(train_data_list[i])
            random_picks = int(min(len(train_data_list[0]) * args.ratio, len(extra_indices)))
            extra_indices = np.random.choice(extra_indices, random_picks, replace=False)
            if args.mix_opt > 0:
                extra_indices = extra_indices.tolist()
                random.shuffle(extra_indices)
                all_indices = extra_indices + main_indices
            else:
                all_indices = main_indices + extra_indices.tolist()
        else:
            for i in range(1, len(train_data_list)):
                all_indices += [i] * len(train_data_list[i])
            if args.mix_opt > 0:
                random.shuffle(all_indices)
            all_indices += [0] * len(train_data_list[0])
        if args.mix_opt < 1:
            random.shuffle(all_indices)
        # One optimizer update per scheduled batch (modulo grad accumulation).
        for i in range(len(all_indices)):
            task_id = all_indices[i]
            batch_meta, batch_data = next(all_iters[task_id])
            model.update(batch_meta, batch_data)
            if model.local_updates % (args.log_per_updates * args.grad_accumulation_step) == 0 \
                    or model.local_updates == 1:
                remaining_time = str(
                    (datetime.now() - start) / (i + 1) * (len(all_indices) - i - 1)
                ).split('.')[0]
                logger.info('Task [%2d] updates[%6d] train loss[%.5f] remaining[%s]',
                            task_id, model.updates, model.train_loss.avg, remaining_time)
                if args.tensorboard:
                    tensorboard.add_scalar('train/loss', model.train_loss.avg,
                                           global_step=model.updates)
            if args.save_per_updates_on \
                    and (model.local_updates % (
                    args.save_per_updates * args.grad_accumulation_step) == 0):
                model_file = os.path.join(args.output_dir, f'model_{epoch}_{model.updates}.pt')
                logger.info('Saving mt-dnn model to %s', model_file)
                model.save(model_file)
        # End-of-epoch evaluation on every dev and test split, dumping scores.
        for idx, dataset in enumerate(args.test_datasets):
            task = dataset.split('_')[0]
            label_mapper = task_defs.label_mapper_map[task]
            metric_meta = task_defs.metric_meta_map[task]
            # dev
            data = dev_data_list[idx]
            with torch.no_grad():
                metrics, predictions, scores, golds, ids = eval_model(
                    model, data, metric_meta, args.cuda, True, label_mapper)
            for key, val in metrics.items():
                if args.tensorboard:
                    tensorboard.add_scalar(f'dev/{dataset}/{key}', val, global_step=epoch)
                logger.warning('Task %s - epoch %s - Dev %s: %s', dataset, epoch, key, val)
            path = os.path.join(args.output_dir, f'{dataset}_dev_scores_{epoch}.json')
            result = {'metrics': metrics, 'predictions': predictions, 'uids': ids, 'scores': scores}
            dump(path, result)
            path = os.path.join(args.output_dir, f'{dataset}_dev_scores_{epoch}_2.json')
            dump2(path, ids, scores, predictions)
            # test
            data = test_data_list[idx]
            with torch.no_grad():
                metrics, predictions, scores, golds, ids = eval_model(
                    model, data, metric_meta, args.cuda, True, label_mapper)
            for key, val in metrics.items():
                if args.tensorboard:
                    tensorboard.add_scalar(f'test/{dataset}/{key}', val, global_step=epoch)
                logger.warning('Task %s - epoch %s - Test %s: %s', dataset, epoch, key, val)
            path = os.path.join(args.output_dir, f'{dataset}_test_scores_{epoch}.json')
            result = {'metrics': metrics, 'predictions': predictions, 'uids': ids, 'scores': scores}
            dump(path, result)
            path = os.path.join(args.output_dir, f'{dataset}_test_scores_{epoch}_2.json')
            dump2(path, ids, scores, predictions)
            logger.info('[new test scores saved.]')
        if not args.not_save:
            model_file = os.path.join(args.output_dir, f'model_{epoch}.pt')
            model.save(model_file)
    if args.tensorboard:
        tensorboard.close()
# Script entry point.
if __name__ == '__main__':
    main()
| 19,152 | 42.332579 | 111 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/mt_dnn/batcher.py | # coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import sys
import json
import torch
import random
from shutil import copyfile
from mt_bluebert.data_utils.task_def import TaskType, DataFormat
from mt_bluebert.data_utils.task_def import EncoderModelType
# Special-token vocabulary ids used for word dropout / sequence start.
# 100/101 match the standard BERT WordPiece vocab ([UNK]=100, [CLS]=101)
# — TODO confirm they hold for custom vocabularies.
UNK_ID=100
BOS_ID=101
class BatchGen:
def __init__(self, data, batch_size=32, gpu=True, is_train=True,
maxlen=128, dropout_w=0.005,
do_batch=True, weighted_on=False,
task_id=0,
task=None,
task_type=TaskType.Classification,
data_type=DataFormat.PremiseOnly,
soft_label=False,
encoder_type=EncoderModelType.BERT):
self.batch_size = batch_size
self.maxlen = maxlen
self.is_train = is_train
self.gpu = gpu
self.weighted_on = weighted_on
self.data = data
self.task_id = task_id
self.pairwise_size = 1
self.data_type = data_type
self.task_type=task_type
self.encoder_type = encoder_type
# soft label used for knowledge distillation
self.soft_label_on = soft_label
if do_batch:
if is_train:
indices = list(range(len(self.data)))
random.shuffle(indices)
data = [self.data[i] for i in indices]
self.data = BatchGen.make_baches(data, batch_size)
self.offset = 0
self.dropout_w = dropout_w
@staticmethod
def make_baches(data, batch_size=32):
return [data[i:i + batch_size] for i in range(0, len(data), batch_size)]
@staticmethod
def load(path, is_train=True, maxlen=128, factor=1.0, task_type=None):
assert task_type is not None
with open(path, 'r', encoding='utf-8') as reader:
data = []
cnt = 0
for line in reader:
sample = json.loads(line)
sample['factor'] = factor
cnt += 1
if is_train:
if (task_type == TaskType.Ranking) and (len(sample['token_id'][0]) > maxlen or len(sample['token_id'][1]) > maxlen):
continue
if (task_type != TaskType.Ranking) and (len(sample['token_id']) > maxlen):
continue
data.append(sample)
print('Loaded {} samples out of {}'.format(len(data), cnt))
return data
def reset(self):
if self.is_train:
indices = list(range(len(self.data)))
random.shuffle(indices)
self.data = [self.data[i] for i in indices]
self.offset = 0
def __random_select__(self, arr):
if self.dropout_w > 0:
return [UNK_ID if random.uniform(0, 1) < self.dropout_w else e for e in arr]
else: return arr
def __len__(self):
return len(self.data)
def patch(self, v):
v = v.cuda(non_blocking=True)
return v
@staticmethod
def todevice(v, device):
v = v.to(device)
return v
def rebacth(self, batch):
newbatch = []
for sample in batch:
size = len(sample['token_id'])
self.pairwise_size = size
assert size == len(sample['type_id'])
for idx in range(0, size):
token_id = sample['token_id'][idx]
type_id = sample['type_id'][idx]
uid = sample['ruid'][idx]
olab = sample['olabel'][idx]
newbatch.append({'uid': uid, 'token_id': token_id, 'type_id': type_id, 'label':sample['label'], 'true_label': olab})
return newbatch
def __if_pair__(self, data_type):
return data_type in [DataFormat.PremiseAndOneHypothesis, DataFormat.PremiseAndMultiHypothesis]
def __iter__(self):
while self.offset < len(self):
batch = self.data[self.offset]
if self.task_type == TaskType.Ranking:
batch = self.rebacth(batch)
# prepare model input
batch_data, batch_info = self._prepare_model_input(batch)
batch_info['task_id'] = self.task_id # used for select correct decoding head
batch_info['input_len'] = len(batch_data) # used to select model inputs
# select different loss function and other difference in training and testing
batch_info['task_type'] = self.task_type
batch_info['pairwise_size'] = self.pairwise_size # need for ranking task
if self.gpu:
for i, item in enumerate(batch_data):
batch_data[i] = self.patch(item.pin_memory())
# add label
labels = [sample['label'] for sample in batch]
if self.is_train:
# in training model, label is used by Pytorch, so would be tensor
if self.task_type == TaskType.Regression:
batch_data.append(torch.FloatTensor(labels))
batch_info['label'] = len(batch_data) - 1
elif self.task_type in (TaskType.Classification, TaskType.Ranking):
batch_data.append(torch.LongTensor(labels))
batch_info['label'] = len(batch_data) - 1
elif self.task_type == TaskType.Span:
start = [sample['token_start'] for sample in batch]
end = [sample['token_end'] for sample in batch]
batch_data.extend([torch.LongTensor(start), torch.LongTensor(end)])
batch_info['start'] = len(batch_data) - 2
batch_info['end'] = len(batch_data) - 1
elif self.task_type == TaskType.SequenceLabeling:
batch_size = self._get_batch_size(batch)
tok_len = self._get_max_len(batch, key='token_id')
tlab = torch.LongTensor(batch_size, tok_len).fill_(-1)
for i, label in enumerate(labels):
ll = len(label)
tlab[i, : ll] = torch.LongTensor(label)
batch_data.append(tlab)
batch_info['label'] = len(batch_data) - 1
# soft label generated by ensemble models for knowledge distillation
if self.soft_label_on and (batch[0].get('softlabel', None) is not None):
assert self.task_type != TaskType.Span # Span task doesn't support soft label yet.
sortlabels = [sample['softlabel'] for sample in batch]
sortlabels = torch.FloatTensor(sortlabels)
batch_info['soft_label'] = self.patch(sortlabels.pin_memory()) if self.gpu else sortlabels
else:
# in test model, label would be used for evaluation
batch_info['label'] = labels
if self.task_type == TaskType.Ranking:
batch_info['true_label'] = [sample['true_label'] for sample in batch]
batch_info['uids'] = [sample['uid'] for sample in batch] # used in scoring
self.offset += 1
yield batch_info, batch_data
def _get_max_len(self, batch, key='token_id'):
tok_len = max(len(x[key]) for x in batch)
return tok_len
def _get_batch_size(self, batch):
return len(batch)
    def _prepare_model_input(self, batch):
        """Pad the samples in ``batch`` into fixed-size encoder input tensors.

        Returns ``(batch_data, batch_info)``: ``batch_data`` holds the padded
        tensors and ``batch_info`` maps each tensor's role to its index in
        ``batch_data``.
        """
        batch_size = self._get_batch_size(batch)
        tok_len = self._get_max_len(batch, key='token_id')
        #tok_len = max(len(x['token_id']) for x in batch)
        # Count of first-segment (type-id 0) tokens; for paired inputs this is
        # presumably the hypothesis/query length — confirm against the reader.
        hypothesis_len = max(len(x['type_id']) - sum(x['type_id']) for x in batch)
        if self.encoder_type == EncoderModelType.ROBERTA:
            # Pads token ids with 1 — presumably RoBERTa's <pad> id; verify
            # against the tokenizer configuration.
            token_ids = torch.LongTensor(batch_size, tok_len).fill_(1)
            type_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
            masks = torch.LongTensor(batch_size, tok_len).fill_(0)
        else:
            # BERT-style padding: id 0 everywhere, attention mask 0.
            token_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
            type_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
            masks = torch.LongTensor(batch_size, tok_len).fill_(0)
        if self.__if_pair__(self.data_type):
            # Mask convention here: 1 = masked out, 0 = kept (cleared below).
            premise_masks = torch.ByteTensor(batch_size, tok_len).fill_(1)
            hypothesis_masks = torch.ByteTensor(batch_size, hypothesis_len).fill_(1)
        for i, sample in enumerate(batch):
            select_len = min(len(sample['token_id']), tok_len)
            tok = sample['token_id']
            if self.is_train:
                # Token-level perturbation during training (see __random_select__).
                tok = self.__random_select__(tok)
            token_ids[i, :select_len] = torch.LongTensor(tok[:select_len])
            type_ids[i, :select_len] = torch.LongTensor(sample['type_id'][:select_len])
            masks[i, :select_len] = torch.LongTensor([1] * select_len)
            if self.__if_pair__(self.data_type):
                hlen = len(sample['type_id']) - sum(sample['type_id'])
                # Un-mask (0) the hypothesis prefix and the premise remainder.
                hypothesis_masks[i, :hlen] = torch.LongTensor([0] * hlen)
                for j in range(hlen, select_len):
                    premise_masks[i, j] = 0
        if self.__if_pair__(self.data_type):
            batch_info = {
                'token_id': 0,
                'segment_id': 1,
                'mask': 2,
                'premise_mask': 3,
                'hypothesis_mask': 4
            }
            batch_data = [token_ids, type_ids, masks, premise_masks, hypothesis_masks]
        else:
            batch_info = {
                'token_id': 0,
                'segment_id': 1,
                'mask': 2
            }
            batch_data = [token_ids, type_ids, masks]
        return batch_data, batch_info
| 9,667 | 42.54955 | 136 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/mt_dnn/matcher.py | # coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import torch.nn as nn
from pytorch_pretrained_bert.modeling import BertConfig, BertLayerNorm, BertModel
from mt_bluebert.module.dropout_wrapper import DropoutWrapper
from mt_bluebert.module.san import SANClassifier
from mt_bluebert.data_utils.task_def import EncoderModelType, TaskType
class LinearPooler(nn.Module):
    """Pool a sequence of hidden states by projecting the first token's
    vector through a linear layer followed by tanh (BERT-style pooler)."""

    def __init__(self, hidden_size):
        super(LinearPooler, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # hidden_states: (batch, seq_len, hidden); take the first ([CLS]) token.
        cls_vector = hidden_states[:, 0]
        return self.activation(self.dense(cls_vector))
class SANBertNetwork(nn.Module):
    """Multi-task model: one shared BERT (or RoBERTa) encoder plus a
    task-specific output head per task.

    ``opt`` supplies the encoder configuration and, per task, the decoder
    type (``answer_opt``), task type, label size and dropout probability.
    ``bert_config`` is accepted for interface compatibility but unused.
    """

    def __init__(self, opt, bert_config=None):
        super(SANBertNetwork, self).__init__()
        self.dropout_list = nn.ModuleList()
        self.encoder_type = opt['encoder_type']
        if opt['encoder_type'] == EncoderModelType.ROBERTA:
            from fairseq.models.roberta import RobertaModel
            self.bert = RobertaModel.from_pretrained(opt['init_checkpoint'])
            hidden_size = self.bert.args.encoder_embed_dim
            self.pooler = LinearPooler(hidden_size)
        else:
            self.bert_config = BertConfig.from_dict(opt)
            self.bert = BertModel(self.bert_config)
            hidden_size = self.bert_config.hidden_size
        if opt.get('dump_feature', False):
            # Feature-dump mode only needs the encoder; skip building heads.
            self.opt = opt
            return
        if opt['update_bert_opt'] > 0:
            # Freeze the shared encoder.
            for p in self.bert.parameters():
                p.requires_grad = False
        self.decoder_opt = opt['answer_opt']
        self.task_types = opt["task_types"]
        self.scoring_list = nn.ModuleList()
        labels = [int(ls) for ls in opt['label_size'].split(',')]
        task_dropout_p = opt['tasks_dropout_p']
        for task, lab in enumerate(labels):
            decoder_opt = self.decoder_opt[task]
            task_type = self.task_types[task]
            dropout = DropoutWrapper(task_dropout_p[task], opt['vb_dropout'])
            self.dropout_list.append(dropout)
            if task_type == TaskType.Span:
                assert decoder_opt != 1
                # One start score and one end score per token.
                out_proj = nn.Linear(hidden_size, 2)
            elif task_type == TaskType.SequenceLabeling:
                out_proj = nn.Linear(hidden_size, lab)
            else:
                if decoder_opt == 1:
                    # Stochastic Answer Network classifier head.
                    out_proj = SANClassifier(hidden_size, hidden_size, lab, opt, prefix='answer', dropout=dropout)
                else:
                    out_proj = nn.Linear(hidden_size, lab)
            self.scoring_list.append(out_proj)
        self.opt = opt
        self._my_init()

    def _my_init(self):
        """Re-initialize weights following the BERT recipe."""
        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=0.02 * self.opt['init_ratio'])
            elif isinstance(module, BertLayerNorm):
                # Slightly different from the BERT pytorch version, which should be a bug.
                # Note that it only affects on training from scratch. For detailed discussions, please contact xiaodl@.
                # Layer normalization (https://arxiv.org/abs/1607.06450)
                # support both old (beta/gamma) and latest (bias/weight) versions
                if 'beta' in dir(module) and 'gamma' in dir(module):
                    module.beta.data.zero_()
                    module.gamma.data.fill_(1.0)
                else:
                    module.bias.data.zero_()
                    module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, premise_mask=None, hyp_mask=None, task_id=0):
        """Encode the batch and score it with the head selected by ``task_id``.

        Returns ``(start_scores, end_scores)`` for span tasks, otherwise
        task logits.
        """
        if self.encoder_type == EncoderModelType.ROBERTA:
            sequence_output = self.bert.extract_features(input_ids)
            pooled_output = self.pooler(sequence_output)
        else:
            all_encoder_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
            sequence_output = all_encoder_layers[-1]
        decoder_opt = self.decoder_opt[task_id]
        task_type = self.task_types[task_id]
        if task_type == TaskType.Span:
            assert decoder_opt != 1
            sequence_output = self.dropout_list[task_id](sequence_output)
            logits = self.scoring_list[task_id](sequence_output)
            start_scores, end_scores = logits.split(1, dim=-1)
            start_scores = start_scores.squeeze(-1)
            end_scores = end_scores.squeeze(-1)
            return start_scores, end_scores
        elif task_type == TaskType.SequenceLabeling:
            # BUG FIX: this branch previously read ``all_encoder_layers[-1]``,
            # which is undefined on the RoBERTa path (NameError);
            # ``sequence_output`` is the same tensor on the BERT path and is
            # valid for both encoders.
            pooled_output = sequence_output
            pooled_output = self.dropout_list[task_id](pooled_output)
            pooled_output = pooled_output.contiguous().view(-1, pooled_output.size(2))
            logits = self.scoring_list[task_id](pooled_output)
            return logits
        else:
            if decoder_opt == 1:
                max_query = hyp_mask.size(1)
                assert max_query > 0
                assert premise_mask is not None
                assert hyp_mask is not None
                hyp_mem = sequence_output[:, :max_query, :]
                logits = self.scoring_list[task_id](sequence_output, hyp_mem, premise_mask, hyp_mask)
            else:
                pooled_output = self.dropout_list[task_id](pooled_output)
                logits = self.scoring_list[task_id](pooled_output)
            return logits
| 5,873 | 44.890625 | 119 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/mt_dnn/model.py | # coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import logging
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import *
from mt_bluebert.data_utils.utils import AverageMeter
from pytorch_pretrained_bert import BertAdam as Adam
from mt_bluebert.module.bert_optim import Adamax, RAdam
from mt_bluebert.module.my_optim import EMA
from .matcher import SANBertNetwork
from mt_bluebert.data_utils.task_def import TaskType
logger = logging.getLogger(__name__)
class MTDNNModel(object):
    def __init__(self, opt, state_dict=None, num_train_step=-1):
        """Wrap a SANBertNetwork with optimizer / scheduler / EMA machinery.

        Args:
            opt: configuration dict (optimizer choice, learning rate,
                cuda / fp16 flags, warmup settings, ...).
            state_dict: optional checkpoint dict to restore network and
                optimizer state from.
            num_train_step: total number of optimizer steps, consumed by the
                warmup schedules (-1 means constant learning rate).
        """
        self.config = opt
        self.updates = state_dict['updates'] if state_dict and 'updates' in state_dict else 0
        self.local_updates = 0
        self.train_loss = AverageMeter()
        self.network = SANBertNetwork(opt)
        if state_dict:
            # strict=False so checkpoints with extra/missing heads still load.
            self.network.load_state_dict(state_dict['state'], strict=False)
        self.mnetwork = nn.DataParallel(self.network) if opt['multi_gpu_on'] else self.network
        self.total_param = sum([p.nelement() for p in self.network.parameters() if p.requires_grad])
        if opt['cuda']:
            self.network.cuda()
        # BERT convention: no weight decay on biases and LayerNorm parameters.
        no_decay = ['bias', 'gamma', 'beta', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in self.network.named_parameters() if not any(nd in n for nd in no_decay)],
             'weight_decay': 0.01},
            {'params': [p for n, p in self.network.named_parameters() if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}
        ]
        # note that adamax are modified based on the BERT code
        if opt['optimizer'] == 'sgd':
            self.optimizer = optim.SGD(optimizer_parameters, opt['learning_rate'],
                                       weight_decay=opt['weight_decay'])
        elif opt['optimizer'] == 'adamax':
            self.optimizer = Adamax(optimizer_parameters,
                                    opt['learning_rate'],
                                    warmup=opt['warmup'],
                                    t_total=num_train_step,
                                    max_grad_norm=opt['grad_clipping'],
                                    schedule=opt['warmup_schedule'],
                                    weight_decay=opt['weight_decay'])
            # Warmup-aware optimizers manage their own LR; disable extra scheduler.
            if opt.get('have_lr_scheduler', False): opt['have_lr_scheduler'] = False
        elif opt['optimizer'] == 'radam':
            self.optimizer = RAdam(optimizer_parameters,
                                   opt['learning_rate'],
                                   warmup=opt['warmup'],
                                   t_total=num_train_step,
                                   max_grad_norm=opt['grad_clipping'],
                                   schedule=opt['warmup_schedule'],
                                   eps=opt['adam_eps'],
                                   weight_decay=opt['weight_decay'])
            if opt.get('have_lr_scheduler', False): opt['have_lr_scheduler'] = False
            # The current radam does not support FP16.
            opt['fp16'] = False
        elif opt['optimizer'] == 'adadelta':
            self.optimizer = optim.Adadelta(optimizer_parameters,
                                            opt['learning_rate'],
                                            rho=0.95)
        elif opt['optimizer'] == 'adam':
            self.optimizer = Adam(optimizer_parameters,
                                  lr=opt['learning_rate'],
                                  warmup=opt['warmup'],
                                  t_total=num_train_step,
                                  max_grad_norm=opt['grad_clipping'],
                                  schedule=opt['warmup_schedule'],
                                  weight_decay=opt['weight_decay'])
            if opt.get('have_lr_scheduler', False): opt['have_lr_scheduler'] = False
        else:
            raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
        if state_dict and 'optimizer' in state_dict:
            self.optimizer.load_state_dict(state_dict['optimizer'])
        if opt['fp16']:
            try:
                from apex import amp
                global amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            # amp.initialize may wrap/replace both objects; keep the returned ones.
            model, optimizer = amp.initialize(self.network, self.optimizer, opt_level=opt['fp16_opt_level'])
            self.network = model
            self.optimizer = optimizer
        if opt.get('have_lr_scheduler', False):
            if opt.get('scheduler_type', 'rop') == 'rop':
                self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=opt['lr_gamma'], patience=3)
            elif opt.get('scheduler_type', 'rop') == 'exp':
                self.scheduler = ExponentialLR(self.optimizer, gamma=opt.get('lr_gamma', 0.95))
            else:
                milestones = [int(step) for step in opt.get('multi_step_lr', '10,20,30').split(',')]
                self.scheduler = MultiStepLR(self.optimizer, milestones=milestones, gamma=opt.get('lr_gamma'))
        else:
            self.scheduler = None
        self.ema = None
        if opt['ema_opt'] > 0:
            self.ema = EMA(self.config['ema_gamma'], self.network)
            if opt['cuda']:
                self.ema.cuda()
        self.para_swapped = False
        # zero optimizer grad
        self.optimizer.zero_grad()
def setup_ema(self):
if self.config['ema_opt']:
self.ema.setup()
def update_ema(self):
if self.config['ema_opt']:
self.ema.update()
def eval(self):
if self.config['ema_opt']:
self.ema.swap_parameters()
self.para_swapped = True
def train(self):
if self.para_swapped:
self.ema.swap_parameters()
self.para_swapped = False
    def update(self, batch_meta, batch_data):
        """Run one training step on a batch.

        Computes the task-appropriate loss (span CE, sequence-labeling CE,
        ranking/classification CE, or regression MSE; optionally
        example-weighted and/or distilled against ensemble soft labels),
        backpropagates, and steps the optimizer once every
        ``grad_accumulation_step`` calls.
        """
        self.network.train()
        labels = batch_data[batch_meta['label']]
        soft_labels = None
        if self.config.get('mkd_opt', 0) > 0 and ('soft_label' in batch_meta):
            # Soft targets for knowledge distillation.
            soft_labels = batch_meta['soft_label']
        task_type = batch_meta['task_type']
        if task_type == TaskType.Span:
            start = batch_data[batch_meta['start']]
            end = batch_data[batch_meta['end']]
            if self.config["cuda"]:
                start = start.cuda(non_blocking=True)
                end = end.cuda(non_blocking=True)
            start.requires_grad = False
            end.requires_grad = False
        else:
            y = labels
            if task_type == TaskType.Ranking:
                # One label per pairwise group; take the first of each group.
                y = y.contiguous().view(-1, batch_meta['pairwise_size'])[:, 0]
            if self.config['cuda']:
                y = y.cuda(non_blocking=True)
            y.requires_grad = False
        task_id = batch_meta['task_id']
        inputs = batch_data[:batch_meta['input_len']]
        if len(inputs) == 3:
            # Pad the premise/hypothesis mask slots expected by the network.
            inputs.append(None)
            inputs.append(None)
        inputs.append(task_id)
        if self.config.get('weighted_on', False):
            # Per-example loss weights.
            if self.config['cuda']:
                weight = batch_data[batch_meta['factor']].cuda(non_blocking=True)
            else:
                weight = batch_data[batch_meta['factor']]
        if task_type == TaskType.Span:
            start_logits, end_logits = self.mnetwork(*inputs)
            # Positions outside the sequence are clamped to an ignored index.
            ignored_index = start_logits.size(1)
            start.clamp_(0, ignored_index)
            end.clamp_(0, ignored_index)
            if self.config.get('weighted_on', False):
                loss = torch.mean(F.cross_entropy(start_logits, start, reduce=False) * weight) + \
                    torch.mean(F.cross_entropy(end_logits, end, reduce=False) * weight)
            else:
                loss = F.cross_entropy(start_logits, start, ignore_index=ignored_index) + \
                    F.cross_entropy(end_logits, end, ignore_index=ignored_index)
            loss = loss / 2
        elif task_type == TaskType.SequenceLabeling:
            y = y.view(-1)
            logits = self.mnetwork(*inputs)
            # -1 marks padded token positions (see the batcher's label tensor).
            loss = F.cross_entropy(logits, y, ignore_index=-1)
        else:
            logits = self.mnetwork(*inputs)
            if task_type == TaskType.Ranking:
                logits = logits.view(-1, batch_meta['pairwise_size'])
            if self.config.get('weighted_on', False):
                if task_type == TaskType.Regression:
                    loss = torch.mean(F.mse_loss(logits.squeeze(), y, reduce=False) * weight)
                else:
                    loss = torch.mean(F.cross_entropy(logits, y, reduce=False) * weight)
                if soft_labels is not None:
                    # compute KL
                    label_size = soft_labels.size(1)
                    kd_loss = F.kl_div(F.log_softmax(logits.view(-1, label_size).float(), 1), soft_labels, reduction='batchmean')
                    loss = loss + kd_loss
            else:
                if task_type == TaskType.Regression:
                    loss = F.mse_loss(logits.squeeze(), y)
                else:
                    loss = F.cross_entropy(logits, y)
                if soft_labels is not None:
                    # compute KL
                    label_size = soft_labels.size(1)
                    kd_loss = F.kl_div(F.log_softmax(logits.view(-1, label_size).float(), 1), soft_labels, reduction='batchmean')
                    loss = loss + kd_loss
        # NOTE(review): for Span tasks ``logits`` is never assigned above (only
        # start_logits/end_logits exist), so this line would raise NameError on
        # a span batch — confirm whether span training reaches this path.
        self.train_loss.update(loss.item(), logits.size(0))
        # scale loss
        loss = loss / self.config.get('grad_accumulation_step', 1)
        if self.config['fp16']:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        self.local_updates += 1
        if self.local_updates % self.config.get('grad_accumulation_step', 1) == 0:
            if self.config['global_grad_clipping'] > 0:
                if self.config['fp16']:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer),
                                                   self.config['global_grad_clipping'])
                else:
                    torch.nn.utils.clip_grad_norm_(self.network.parameters(),
                                                   self.config['global_grad_clipping'])
            self.updates += 1
            # reset number of the grad accumulation
            self.optimizer.step()
            self.optimizer.zero_grad()
            self.update_ema()
    def predict(self, batch_meta, batch_data):
        """Score a batch in eval mode.

        Returns ``(scores, predictions, gold)`` where ``scores`` is a flat
        list of floats and the prediction format depends on the task type.
        """
        self.network.eval()
        task_id = batch_meta['task_id']
        task_type = batch_meta['task_type']
        inputs = batch_data[:batch_meta['input_len']]
        if len(inputs) == 3:
            # Pad the premise/hypothesis mask slots expected by the network.
            inputs.append(None)
            inputs.append(None)
        inputs.append(task_id)
        score = self.mnetwork(*inputs)
        if task_type == TaskType.Ranking:
            score = score.contiguous().view(-1, batch_meta['pairwise_size'])
            assert task_type == TaskType.Ranking
            score = F.softmax(score, dim=1)
            score = score.data.cpu()
            score = score.numpy()
            # One-hot prediction: mark the top-scoring candidate per group.
            predict = np.zeros(score.shape, dtype=int)
            positive = np.argmax(score, axis=1)
            for idx, pos in enumerate(positive):
                predict[idx, pos] = 1
            predict = predict.reshape(-1).tolist()
            score = score.reshape(-1).tolist()
            return score, predict, batch_meta['true_label']
        elif task_type == TaskType.SequenceLabeling:
            mask = batch_data[batch_meta['mask']]
            score = score.contiguous()
            score = score.data.cpu()
            score = score.numpy()
            predict = np.argmax(score, axis=1).reshape(mask.size()).tolist()
            # Trim each sequence's predictions to its un-padded length.
            valied_lenght = mask.sum(1).tolist()
            final_predict = []
            for idx, p in enumerate(predict):
                final_predict.append(p[: valied_lenght[idx]])
            score = score.reshape(-1).tolist()
            return score, final_predict, batch_meta['label']
        else:
            if task_type == TaskType.Classification:
                score = F.softmax(score, dim=1)
            score = score.data.cpu()
            score = score.numpy()
            # NOTE(review): argmax is applied even for Regression tasks here —
            # confirm regression consumers only read ``score``.
            predict = np.argmax(score, axis=1).tolist()
            score = score.reshape(-1).tolist()
            return score, predict, batch_meta['label']
def extract(self, batch_meta, batch_data):
self.network.eval()
# 'token_id': 0; 'segment_id': 1; 'mask': 2
inputs = batch_data[:3]
all_encoder_layers, pooled_output = self.mnetwork.bert(*inputs)
return all_encoder_layers, pooled_output
def save(self, filename):
network_state = dict([(k, v.cpu()) for k, v in self.network.state_dict().items()])
ema_state = dict(
[(k, v.cpu()) for k, v in self.ema.model.state_dict().items()]) if self.ema is not None else dict()
params = {
'state': network_state,
'optimizer': self.optimizer.state_dict(),
'ema': ema_state,
'config': self.config,
}
torch.save(params, filename)
logger.info('model saved to {}'.format(filename))
    def load(self, checkpoint):
        """Restore network/optimizer/config (and EMA copy) from ``checkpoint``.

        Exits the process if the checkpoint was fine-tuned from a different
        base BERT model than the current configuration.
        """
        model_state_dict = torch.load(checkpoint)
        # Compare only the checkpoint file names of the base models.
        if model_state_dict['config']['init_checkpoint'].rsplit('/', 1)[1] != \
                self.config['init_checkpoint'].rsplit('/', 1)[1]:
            logger.error(
                '*** SANBert network is pretrained on a different Bert Model. Please use that to fine-tune for other tasks. ***')
            sys.exit()
        # strict=False: tolerate head mismatches between task configurations.
        self.network.load_state_dict(model_state_dict['state'], strict=False)
        self.optimizer.load_state_dict(model_state_dict['optimizer'])
        self.config = model_state_dict['config']
        if self.ema:
            self.ema.model.load_state_dict(model_state_dict['ema'])
def cuda(self):
self.network.cuda()
if self.config['ema_opt']:
self.ema.cuda() | 14,345 | 43.006135 | 133 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/my_optim.py | # Copyright (c) Microsoft. All rights reserved.
from copy import deepcopy
import torch
from torch.nn import Parameter
from functools import wraps
class EMA:
    """Exponential moving average of a model's trainable parameters,
    kept in a shadow dict keyed by parameter name."""

    def __init__(self, gamma, model):
        super(EMA, self).__init__()
        self.gamma = gamma  # decay: weight given to the existing shadow value
        self.shadow = {}
        self.model = model
        self.setup()

    def setup(self):
        """Snapshot every trainable parameter into the shadow dict."""
        self.shadow.update({name: para.clone()
                            for name, para in self.model.named_parameters()
                            if para.requires_grad})

    def cuda(self):
        """Move all shadow tensors onto the GPU."""
        for key in self.shadow:
            self.shadow[key] = self.shadow[key].cuda()

    def update(self):
        """Blend the current parameter values into the shadow averages."""
        for name, para in self.model.named_parameters():
            if not para.requires_grad:
                continue
            self.shadow[name] = self.gamma * self.shadow[name] + (1.0 - self.gamma) * para

    def swap_parameters(self):
        """Exchange live parameter data with the shadow copies, in place."""
        for name, para in self.model.named_parameters():
            if para.requires_grad:
                para.data, self.shadow[name].data = self.shadow[name].data, para.data

    def state_dict(self):
        """Expose the shadow dict as this object's state."""
        return self.shadow
# Adapted from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/utils/weight_norm.py
# and https://github.com/salesforce/awd-lstm-lm/blob/master/weight_drop.py
def _norm(p, dim):
    """Computes the norm over all dimensions except dim"""
    if dim is None:
        return p.norm()
    elif dim == 0:
        output_size = (p.size(0),) + (1,) * (p.dim() - 1)
        return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
    elif dim == p.dim() - 1:
        output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
        return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
    else:
        # Reduce the general case to dim == 0 by transposing.
        return _norm(p.transpose(0, dim), 0).transpose(0, dim)


def _dummy(*args, **kwargs):
    # We need to replace flatten_parameters with a nothing function
    return


class WeightNorm(torch.nn.Module):
    """Weight normalization: reparameterize each named weight ``w`` as
    ``g * v / ||v||`` with separate magnitude (``w_g``) and direction
    (``w_v``) parameters (https://arxiv.org/abs/1602.07868)."""

    def __init__(self, weights, dim):
        super(WeightNorm, self).__init__()
        self.weights = weights  # names of the reparameterized weights
        self.dim = dim          # dimension preserved by the norm

    def compute_weight(self, module, name):
        """Recombine ``<name>_g`` and ``<name>_v`` into the effective weight."""
        g = getattr(module, name + '_g')
        v = getattr(module, name + '_v')
        return v * (g / _norm(v, self.dim))

    @staticmethod
    def apply(module, weights, dim):
        # Terrible temporary solution to an issue regarding compacting weights
        # re: CUDNN RNN
        if issubclass(type(module), torch.nn.RNNBase):
            module.flatten_parameters = _dummy
        if weights is None:  # do for all weight params
            weights = [w for w in module._parameters.keys() if 'weight' in w]
        fn = WeightNorm(weights, dim)
        for name in weights:
            if hasattr(module, name):
                print('Applying weight norm to {} - {}'.format(str(module), name))
                weight = getattr(module, name)
                del module._parameters[name]
                module.register_parameter(
                    name + '_g', Parameter(_norm(weight, dim).data))
                module.register_parameter(name + '_v', Parameter(weight.data))
                setattr(module, name, fn.compute_weight(module, name))
        module.register_forward_pre_hook(fn)
        return fn

    def remove(self, module):
        """Undo the reparameterization, restoring plain weight parameters.

        BUG FIX: ``compute_weight`` was previously called without the required
        ``name`` argument, so ``remove`` always raised TypeError.
        NOTE(review): the forward pre-hook registered in ``apply`` is not
        deregistered here; calling forward after remove will fail.
        """
        for name in self.weights:
            weight = self.compute_weight(module, name)
            delattr(module, name)
            del module._parameters[name + '_g']
            del module._parameters[name + '_v']
            module.register_parameter(name, Parameter(weight.data))

    def __call__(self, module, inputs):
        # Forward pre-hook: refresh every effective weight before the forward.
        for name in self.weights:
            setattr(module, name, self.compute_weight(module, name))


def weight_norm(module, weights=None, dim=0):
    """Apply weight normalization to ``module`` and return the module."""
    WeightNorm.apply(module, weights, dim)
    return module
| 3,836 | 33.258929 | 94 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/bert_optim.py | # Copyright (c) Microsoft. All rights reserved.
import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
from pytorch_pretrained_bert.optimization import warmup_constant, warmup_cosine, warmup_linear
def warmup_linear_xdl(x, warmup=0.002):
    """Piecewise-linear schedule: ramp from 0 to 1 over ``warmup``, then
    decay linearly to 0 at ``x == 1``. ``x`` is training progress in [0, 1]."""
    return x / warmup if x < warmup else (1.0 - x) / (1.0 - warmup)
def schedule_func(sch):
    """Resolve a warmup-schedule name (e.g. ``'warmup_linear'``) to a callable.

    Falls back to ``warmup_linear`` when the name cannot be resolved.
    """
    try:
        # NOTE(review): ``eval`` on a config-supplied string — an explicit
        # name -> function mapping would be safer against untrusted configs.
        f = eval(sch)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt / SystemExit.
        f = warmup_linear
    return f
class Adamax(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix (and no ).
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
        by xiaodl
    """
    def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
                 betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        betas=betas, eps=eps, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(Adamax, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current (schedule-adjusted) learning rate per parameter."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    # No step taken yet.
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = schedule_func(group['schedule'])
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def to(self, device):
        """ Move the optimizer state to a specified device"""
        for state in self.state.values():
            state['exp_avg'].to(device)
            state['exp_inf'].to(device)
    def initialize_step(self, initial_step):
        """Initialize state with a defined step (but we don't have stored averaged).
        Arguments:
            initial_step (int): Initial step number.
        """
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                # State initialization
                state['step'] = initial_step
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['exp_inf'] = torch.zeros_like(p.data)
    def step(self, closure=None):
        """Perform a single optimization step (Adamax update with warmup schedule)."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_inf'] = torch.zeros_like(p.data)
                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
                beta1, beta2 = group['betas']
                eps = group['eps']
                # Add grad clipping
                # NOTE(review): this clips each parameter tensor individually,
                # not the global gradient norm — confirm intended.
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Update biased first moment estimate.
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                # Update the exponentially weighted infinity norm.
                norm_buf = torch.cat([
                    exp_inf.mul_(beta2).unsqueeze(0),
                    grad.abs().add_(eps).unsqueeze_(0)
                ], 0)
                # Elementwise max written back into exp_inf in place.
                torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
                update = exp_avg / (exp_inf + eps)
                if group['weight_decay'] > 0.0:
                    # Decoupled weight decay, as in the BERT Adam variant.
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = schedule_func(group['schedule'])
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
        return loss
class RAdam(Optimizer):
    """Modified from: https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam.py
    """
    def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
                 betas=(0.9, 0.999), eps=1e-6, weight_decay=0.001,
                 max_grad_norm=1.0):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        betas=betas, eps=eps, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        # Ring buffer caching (N_sma, step_size) per step % 10 across params.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current (schedule-adjusted) learning rate per parameter."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    # No step taken yet.
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = schedule_func(group['schedule'])
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def to(self, device):
        """ Move the optimizer state to a specified device"""
        for state in self.state.values():
            state['exp_avg'].to(device)
            state['exp_avg_sq'].to(device)
    def initialize_step(self, initial_step):
        """Initialize state with a defined step (but we don't have stored averaged).
        Arguments:
            initial_step (int): Initial step number.
        """
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                # State initialization
                state['step'] = initial_step
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['exp_avg_sq'] = torch.zeros_like(p.data)
    def step(self, closure=None):
        """Perform a single RAdam optimization step (fp32 internal math)."""
        loss = None
        if closure is not None:
            loss = closure()
        # set_trace()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # All moment math is done in float32 regardless of param dtype.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                eps = group['eps']
                # Add grad clipping
                # NOTE(review): clips each parameter tensor individually, not
                # the global gradient norm — confirm intended.
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Update biased first moment estimate.
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                state['step'] += 1
                if group['t_total'] != -1:
                    schedule_fct = schedule_func(group['schedule'])
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                # Reuse the cached (N_sma, step_size) if this step was seen.
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        # Variance-rectified step size (RAdam, eq. for r_t).
                        step_size = lr_scheduled * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        # Fall back to un-adapted SGD-with-momentum step.
                        step_size = lr_scheduled / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size, exp_avg)
                # NOTE(review): weight decay is applied after the update here;
                # some RAdam reference implementations apply it before.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * lr_scheduled, p_data_fp32)
                p.data.copy_(p_data_fp32)
        return loss
| 11,825 | 41.847826 | 190 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/similarity.py | # Copyright (c) Microsoft. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy
from torch.nn.utils import weight_norm
from torch.nn.parameter import Parameter
from .common import activation, init_wrapper
from .dropout_wrapper import DropoutWrapper
class DotProduct(nn.Module):
    """Batched dot-product similarity between two sequences, optionally
    scaled by 1/sqrt(dim) (opt key ``'<prefix>_scale'``)."""

    def __init__(self, x1_dim, x2_dim, prefix='sim', opt={}, dropout=None):
        super(DotProduct, self).__init__()
        assert x1_dim == x2_dim
        self.opt = opt
        self.prefix = prefix
        self.scale_on = opt.get('{}_scale'.format(self.prefix), False)
        self.scalor = 1.0 / numpy.power(x2_dim, 0.5)

    def forward(self, x1, x2):
        # x1: (batch, len1, d); x2: (batch, len2, d) -> (batch, len1, len2)
        assert x1.size(2) == x2.size(2)
        scores = torch.bmm(x1, x2.transpose(1, 2))
        return scores * self.scalor if self.scale_on else scores
class DotProductProject(nn.Module):
    """Dot-product similarity computed in a learned projection space:
    both inputs are projected to ``hidden_size`` and activated before the
    batched dot product."""

    def __init__(self, x1_dim, x2_dim, prefix='sim', opt={}, dropout=None):
        super(DotProductProject, self).__init__()
        self.prefix = prefix
        self.opt = opt
        self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
        self.residual_on = opt.get('{}_residual_on'.format(self.prefix), False)
        self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
        self.share = opt.get('{}_share'.format(self.prefix), False)
        self.f = activation(opt.get('{}_activation'.format(self.prefix), 'relu'))
        self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
        self.dropout = dropout
        x1_in_dim = x1_dim
        x2_in_dim = x2_dim
        out_dim = self.hidden_size
        self.proj_1 = nn.Linear(x1_in_dim, out_dim, bias=False)
        if self.layer_norm_on:
            self.proj_1 = weight_norm(self.proj_1)
        if self.share and x1_in_dim == x2_in_dim:
            self.proj_2 = self.proj_1
        else:
            self.proj_2 = nn.Linear(x2_in_dim, out_dim)
        # NOTE(review): when ``share`` and ``layer_norm_on`` are both set,
        # weight_norm is applied to the shared projection a second time here —
        # confirm intended.
        if self.layer_norm_on:
            self.proj_2 = weight_norm(self.proj_2)
        if self.scale_on:
            # Fixed 1/sqrt(d) scaling, as in scaled dot-product attention.
            self.scalar = Parameter(torch.ones(1, 1, 1) / (self.hidden_size ** 0.5), requires_grad=False)
        else:
            # BUG FIX: this was assigned to a misspelled attribute
            # ``self.sclalar``, leaving the intended learnable scale dead.
            # forward() only consumes ``self.scalar`` when scale_on is set, so
            # runtime behavior is unchanged; checkpoints saved with the old
            # code carry the key 'sclalar' instead.
            self.scalar = Parameter(torch.ones(1, 1, self.hidden_size), requires_grad=True)

    def forward(self, x1, x2):
        """x1: (batch, len1, d1); x2: (batch, len2, d2) -> (batch, len1, len2)."""
        assert x1.size(2) == x2.size(2)
        if self.dropout:
            x1 = self.dropout(x1)
            x2 = self.dropout(x2)
        x1_flat = x1.contiguous().view(-1, x1.size(2))
        x2_flat = x2.contiguous().view(-1, x2.size(2))
        x1_o = self.f(self.proj_1(x1_flat)).view(x1.size(0), x1.size(1), -1)
        # x2_o = self.f(self.proj_1(x2_flat)).view(x2.size(0), x2.size(1), -1)
        x2_o = self.f(self.proj_2(x2_flat)).view(x2.size(0), x2.size(1), -1)
        if self.scale_on:
            scalar = self.scalar.expand_as(x2_o)
            x2_o = scalar * x2_o
        scores = x1_o.bmm(x2_o.transpose(1, 2))
        return scores
class Bilinear(nn.Module):
    """Bilinear attention between a sequence ``x`` and a vector ``y``:
    scores[b, i] = x[b, i] . proj(y[b]) (projection optional via
    opt key ``'<prefix>_proj_on'``)."""

    def __init__(self, x1_dim, x2_dim, prefix='sim', opt={}, dropout=None):
        super(Bilinear, self).__init__()
        self.opt = opt
        # BUG FIX: ``self.prefix`` was read below without ever being assigned,
        # so constructing a Bilinear raised AttributeError.
        self.prefix = prefix
        self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
        self.transform_on = opt.get('{}_proj_on'.format(self.prefix), False)
        # self.init = init_wrapper(opt.get('{}_init'.format(self.prefix), ''))
        self.dropout = dropout
        if self.transform_on:
            self.proj = nn.Linear(x1_dim, x2_dim)
            # self.init(self.proj.weight)
            if self.layer_norm_on: self.proj = weight_norm(self.proj)

    def forward(self, x, y):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        """
        if self.dropout:
            x = self.dropout(x)
            y = self.dropout(y)
        proj = self.proj(y) if self.transform_on else y
        if self.dropout:
            proj = self.dropout(proj)
        scores = x.bmm(proj.unsqueeze(2)).squeeze(2)
        return scores
class BilinearSum(nn.Module):
    """Additive (sum-decomposed) similarity: score[b,i,j] = w_x.x1_i + w_y.x2_j."""
    def __init__(self, x1_dim, x2_dim, prefix='sim', opt=None, dropout=None):
        super(BilinearSum, self).__init__()
        opt = {} if opt is None else opt  # avoid shared mutable default
        # BUG FIX: self.prefix was never assigned; opt lookups below raised
        # AttributeError on construction.
        self.prefix = prefix
        self.x_linear = nn.Linear(x1_dim, 1, bias=False)
        self.y_linear = nn.Linear(x2_dim, 1, bias=False)
        self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
        # BUG FIX: default was `False`, but init_wrapper expects an
        # initializer *name*; use the same default as Trilinear.
        self.init = init_wrapper(opt.get('{}_init'.format(self.prefix), 'xavier_uniform'))
        if self.layer_norm_on:
            self.x_linear = weight_norm(self.x_linear)
            self.y_linear = weight_norm(self.y_linear)
        self.init(self.x_linear.weight)
        self.init(self.y_linear.weight)
        self.dropout = dropout

    def forward(self, x1, x2):
        """
        x1: batch * len1 * input_size
        x2: batch * len2 * input_size
        score: batch * len1 * len2
        """
        if self.dropout:
            x1 = self.dropout(x1)
            x2 = self.dropout(x2)
        x1_logits = self.x_linear(x1.contiguous().view(-1, x1.size(-1))).view(x1.size(0), -1, 1)
        x2_logits = self.y_linear(x2.contiguous().view(-1, x2.size(-1))).view(x2.size(0), 1, -1)
        # BUG FIX: was `x2.size()` (the full size tuple) and `expand_as(shape)`;
        # expand_as expects a tensor, so the original raised at runtime.
        shape = (x1.size(0), x1.size(1), x2.size(1))
        scores = x1_logits.expand(shape) + x2_logits.expand(shape)
        return scores
class Trilinear(nn.Module):
    """Trilinear similarity function used in BiDAF:
    score[b,i,j] = w1.x1_i + w2.x2_j + (x1_i * w3).x2_j
    """
    def __init__(self, x1_dim, x2_dim, prefix='sim', opt={}, dropout=None):
        super(Trilinear, self).__init__()
        self.prefix = prefix
        self.x_linear = nn.Linear(x1_dim, 1, bias=False)
        self.x_dot_linear = nn.Linear(x1_dim, 1, bias=False)
        self.y_linear = nn.Linear(x2_dim, 1, bias=False)
        self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
        self.init = init_wrapper(opt.get('{}_init'.format(self.prefix), 'xavier_uniform'))
        if self.layer_norm_on:
            self.x_linear = weight_norm(self.x_linear)
            self.x_dot_linear = weight_norm(self.x_dot_linear)
            self.y_linear = weight_norm(self.y_linear)
        # Initialize all three projections with the configured initializer.
        for w in (self.x_linear.weight, self.x_dot_linear.weight, self.y_linear.weight):
            self.init(w)
        self.dropout = dropout

    def forward(self, x1, x2):
        """
        x1: batch * len1 * input_size
        x2: batch * len2 * input_size
        score: batch * len1 * len2
        """
        if self.dropout:
            x1, x2 = self.dropout(x1), self.dropout(x2)
        flat1 = x1.contiguous().view(-1, x1.size(-1))
        flat2 = x2.contiguous().view(-1, x2.size(-1))
        proj_x = self.x_linear(flat1).view(x1.size(0), -1, 1)
        proj_y = self.y_linear(flat2).view(x2.size(0), 1, -1)
        # Element-wise gate on x1 before the cross dot product.
        gate = self.x_dot_linear(flat1).view(x1.size(0), -1, 1).expand_as(x1)
        scores = (x1 * gate).bmm(x2.transpose(1, 2))
        scores = scores + proj_x.expand_as(scores) + proj_y.expand_as(scores)
        return scores
class SimilarityWrapper(nn.Module):
    """Factory wrapper that selects a similarity scoring module from opt."""
    def __init__(self, x1_dim, x2_dim, prefix='attention', opt={}, dropout=None):
        super(SimilarityWrapper, self).__init__()
        self.score_func_str = opt.get('{}_sim_func'.format(prefix), 'dotproductproject').lower()
        # Dispatch table replaces the original if/elif chain.
        registry = {
            'dotproduct': DotProduct,
            'dotproductproject': DotProductProject,
            'bilinear': Bilinear,
            'bilinearsum': BilinearSum,
            'trilinear': Trilinear,
        }
        if self.score_func_str not in registry:
            raise NotImplementedError
        self.score_func = registry[self.score_func_str](
            x1_dim, x2_dim, prefix=prefix, opt=opt, dropout=dropout)

    def forward(self, x1, x2):
        """Return similarity scores of shape batch * len1 * len2."""
        return self.score_func(x1, x2)
class AttentionWrapper(nn.Module):
    """Single-head attention: score query vs key, softmax, pool value."""
    def __init__(self, x1_dim, x2_dim, x3_dim=None, prefix='attention', opt={}, dropout=None):
        super(AttentionWrapper, self).__init__()
        self.prefix = prefix
        self.att_dropout = opt.get('{}_att_dropout'.format(self.prefix), 0)
        self.score_func = SimilarityWrapper(x1_dim, x2_dim, prefix=prefix, opt=opt, dropout=dropout)
        self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix), False)
        self.output_size = x2_dim if x3_dim is None else x3_dim
        # BUG FIX: dropout was never stored, so `self.dropout(prob)` below
        # raised AttributeError whenever att_dropout > 0.
        self.dropout = dropout

    def forward(self, query, key, value, key_padding_mask=None, return_scores=False):
        """
        query: batch * len1 * d, key: batch * len2 * d, value: batch * len2 * dv
        key_padding_mask: optional batch * len2 mask (True/1 = padding)
        :return: pooled batch * len1 * dv (plus prob/logits if return_scores)
        """
        logits = self.score_func(query, key)
        # BUG FIX: the mask defaults to None; only apply it when provided.
        if key_padding_mask is not None:
            key_mask = key_padding_mask.unsqueeze(1).expand_as(logits)
            logits.data.masked_fill_(key_mask.data, -float('inf'))
        if self.drop_diagonal:
            # Prevent positions from attending to themselves (self-alignment).
            assert logits.size(1) == logits.size(2)
            diag_mask = torch.diag(logits.data.new(logits.size(1)).zero_() + 1).byte().unsqueeze(0).expand_as(logits)
            logits.data.masked_fill_(diag_mask, -float('inf'))
        prob = F.softmax(logits.view(-1, key.size(1)), 1)
        prob = prob.view(-1, query.size(1), key.size(1))
        if self.att_dropout > 0 and self.dropout is not None:
            prob = self.dropout(prob)
        if value is None:
            value = key
        attn = prob.bmm(value)
        if return_scores:
            return attn, prob, logits
        return attn
class LinearSelfAttn(nn.Module):
    """Self attention over a sequence:
    * o_i = softmax(Wx_i) for x_i in X.
    """
    def __init__(self, input_size, dropout=None):
        super(LinearSelfAttn, self).__init__()
        self.linear = nn.Linear(input_size, 1)
        self.dropout = dropout

    def forward(self, x, x_mask):
        """
        x: batch * len * input_size
        x_mask: batch * len (True/1 marks padding to be ignored)
        :return: batch * input_size attention-weighted sum
        """
        # BUG FIX: dropout is optional (defaults to None); guard before calling.
        if self.dropout is not None:
            x = self.dropout(x)
        x_flat = x.contiguous().view(-1, x.size(-1))
        scores = self.linear(x_flat).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        alpha = F.softmax(scores, 1)
        return alpha.unsqueeze(1).bmm(x).squeeze(1)
class MLPSelfAttn(nn.Module):
    """MLP-scored self attention: o = sum_i softmax(w.f(FC(x_i))) * x_i."""
    def __init__(self, input_size, opt={}, prefix='attn_sum', dropout=None):
        super(MLPSelfAttn, self).__init__()
        self.prefix = prefix
        self.FC = nn.Linear(input_size, input_size)
        self.linear = nn.Linear(input_size, 1)
        self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
        self.f = activation(opt.get('{}_activation'.format(self.prefix), 'relu'))
        if dropout is None:
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(self.prefix), 0))
        else:
            self.dropout = dropout
        if self.layer_norm_on:
            self.FC = weight_norm(self.FC)

    def forward(self, x, x_mask):
        """
        x: batch * len * input_size
        x_mask: batch * len (True/1 marks padding to be ignored)
        :return: batch * input_size attention-weighted sum
        """
        x = self.dropout(x)
        x_flat = x.contiguous().view(-1, x.size(-1))
        scores = self.linear(self.f(self.FC(x_flat))).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        # BUG FIX: softmax was called without a dim (ambiguous/deprecated);
        # normalize over the sequence dimension, matching LinearSelfAttn.
        alpha = F.softmax(scores, 1)
        return alpha.unsqueeze(1).bmm(x).squeeze(1)
class SelfAttnWrapper(nn.Module):
    """
    Self attention wrapper, supporting linear and MLP scoring.
    """
    def __init__(self, input_size, prefix='attn_sum', opt={}, dropout=None):
        super(SelfAttnWrapper, self).__init__()
        attn_type = opt.get('{}_type'.format(prefix), 'linear')
        if attn_type == 'mlp':
            # BUG FIX: arguments were passed positionally in the wrong order
            # (MLPSelfAttn's signature is (input_size, opt, prefix, dropout)),
            # so `prefix` landed in the `opt` slot and crashed on opt.get().
            self.att = MLPSelfAttn(input_size, opt=opt, prefix=prefix, dropout=dropout)
        else:
            self.att = LinearSelfAttn(input_size, dropout)

    def forward(self, x, x_mask):
        """Delegate to the configured self-attention module."""
        return self.att(x, x_mask)
class DeepAttentionWrapper(nn.Module):
    """Stack of attention modules whose outputs are concatenated feature-wise."""
    def __init__(self, x1_dim, x2_dim, x3_dims, att_cnt, prefix='deep_att', opt=None, dropout=None):
        super(DeepAttentionWrapper, self).__init__()
        self.opt = {} if opt is None else opt
        self.prefix = prefix
        self.x1_dim = x1_dim
        self.x2_dim = x2_dim
        self.x3_dims = x3_dims
        if dropout is None:
            # BUG FIX: use self.opt — the raw `opt` may be None here.
            self.dropout = DropoutWrapper(self.opt.get('{}_dropout_p'.format(self.prefix), 0))
        else:
            self.dropout = dropout
        self.attn_list = nn.ModuleList()
        for i in range(0, att_cnt):
            # BUG FIX: was opt['multihead_on'], raising KeyError when the flag
            # was absent (and TypeError when opt was None).
            if self.opt.get('multihead_on', False):
                attention = MultiheadAttentionWrapper(self.x1_dim, self.x2_dim, self.x3_dims[i], prefix, self.opt, dropout=dropout)
            else:
                attention = AttentionWrapper(self.x1_dim, self.x2_dim, self.x3_dims[i], prefix, self.opt, self.dropout)
            self.attn_list.append(attention)

    def forward(self, x1, x2, x3, x2_mask):
        """Apply each attention over (x1, x2, x3[i]) and concat along features."""
        rvl = []
        for i in range(0, len(x3)):
            hiddens = self.attn_list[i](x1, x2, x3[i], x2_mask)
            rvl.append(hiddens)
        return torch.cat(rvl, 2)
class BilinearFlatSim(nn.Module):
    """A bilinear attention layer over a sequence X w.r.t y:
    * o_i = x_i'Wy for x_i in X.
    """
    def __init__(self, x_size, y_size, opt={}, prefix='seqatt', dropout=None):
        super(BilinearFlatSim, self).__init__()
        self.opt = opt
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.linear = nn.Linear(y_size, x_size)
        if self.weight_norm_on:
            self.linear = weight_norm(self.linear)
        if dropout is None:
            # BUG FIX: read undefined `self.prefix`; the parameter is `prefix`.
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
        else:
            self.dropout = dropout

    def forward(self, x, y, x_mask):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len (True/1 marks padding; those scores become -inf)
        """
        x = self.dropout(x)
        y = self.dropout(y)
        Wy = self.linear(y)
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        return xWy
class SimpleFlatSim(nn.Module):
    """Concatenation-based scorer: o_i = w . [x_i ; y]."""
    def __init__(self, x_size, y_size, opt={}, prefix='seqatt', dropout=None):
        super(SimpleFlatSim, self).__init__()
        self.opt = opt
        self.weight_norm_on = opt.get('{}_norm_on'.format(prefix), False)
        self.linear = nn.Linear(y_size + x_size, 1)
        if self.weight_norm_on:
            self.linear = weight_norm(self.linear)
        if dropout is None:
            # BUG FIX: read undefined `self.prefix`; the parameter is `prefix`.
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
        else:
            self.dropout = dropout

    def forward(self, x, y, x_mask):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len (True/1 marks padding; those scores become -inf)
        """
        x = self.dropout(x)
        y = self.dropout(y)
        y = y.unsqueeze(1).expand_as(x)
        flat_x = torch.cat([x, y], 2).contiguous().view(x.size(0) * x.size(1), -1)
        flat_scores = self.linear(flat_x)
        scores = flat_scores.contiguous().view(x.size(0), -1)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        return scores
class FlatSim(nn.Module):
    """Concatenation scorer with a product feature: o_i = w . [x_i ; y ; x_i*y]."""
    def __init__(self, x_size, y_size, opt={}, prefix='seqatt', dropout=None):
        super(FlatSim, self).__init__()
        assert x_size == y_size  # the element-wise product requires equal dims
        self.opt = opt
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.linear = nn.Linear(x_size * 3, 1)
        if self.weight_norm_on:
            self.linear = weight_norm(self.linear)
        if dropout is None:
            # BUG FIX: read undefined `self.prefix`; the parameter is `prefix`.
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
        else:
            self.dropout = dropout

    def forward(self, x, y, x_mask):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len (True/1 marks padding; those scores become -inf)
        """
        x = self.dropout(x)
        y = self.dropout(y)
        y = y.unsqueeze(1).expand_as(x)
        flat_x = torch.cat([x, y, x * y], 2).contiguous().view(x.size(0) * x.size(1), -1)
        flat_scores = self.linear(flat_x)
        scores = flat_scores.contiguous().view(x.size(0), -1)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        return scores
class FlatSimV2(nn.Module):
    """Concatenation scorer with product and |difference| features:
    o_i = w . [x_i ; y ; x_i*y ; |x_i - y|].
    """
    def __init__(self, x_size, y_size, opt={}, prefix='seqatt', dropout=None):
        super(FlatSimV2, self).__init__()
        assert x_size == y_size  # element-wise ops require equal dims
        self.opt = opt
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.linear = nn.Linear(x_size * 4, 1)
        if self.weight_norm_on:
            self.linear = weight_norm(self.linear)
        if dropout is None:
            # BUG FIX: read undefined `self.prefix`; the parameter is `prefix`.
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
        else:
            self.dropout = dropout

    def forward(self, x, y, x_mask):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len (True/1 marks padding; those scores become -inf)
        """
        x = self.dropout(x)
        y = self.dropout(y)
        y = y.unsqueeze(1).expand_as(x)
        flat_x = torch.cat([x, y, x * y, torch.abs(x - y)], 2).contiguous().view(x.size(0) * x.size(1), -1)
        flat_scores = self.linear(flat_x)
        scores = flat_scores.contiguous().view(x.size(0), -1)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        return scores
class FlatSimilarityWrapper(nn.Module):
    """Selects a sequence-vs-vector scoring function based on opt."""
    def __init__(self, x1_dim, x2_dim, prefix='attention', opt={}, dropout=None):
        super(FlatSimilarityWrapper, self).__init__()
        self.score_func_str = opt.get('{}_att_type'.format(prefix), 'none').lower()
        self.att_dropout = DropoutWrapper(opt.get('{}_att_dropout'.format(prefix), 0))
        # Unknown names fall back to FlatSimV2, mirroring the original chain.
        score_cls = {
            'bilinear': BilinearFlatSim,
            'simple': SimpleFlatSim,
            'flatsim': FlatSim,
        }.get(self.score_func_str, FlatSimV2)
        self.score_func = score_cls(x1_dim, x2_dim, prefix=prefix, opt=opt, dropout=dropout)

    def forward(self, x1, x2, mask):
        """Return batch * len scores of x1 positions against the vector x2."""
        return self.score_func(x1, x2, mask)
class MultiheadAttentionWrapper(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.

    Inputs/outputs of forward are batch-first: batch * time * channel.
    """
    def __init__(self, query_dim, key_dim, value_dim, prefix='attention', opt={}, dropout=None):
        super().__init__()
        self.prefix = prefix
        self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
        self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.prefix), 0)) if dropout is None else dropout
        self.qkv_dim = [query_dim, key_dim, value_dim]
        assert query_dim == key_dim, "query dim must equal with key dim"
        self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
        self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
        self.share = opt.get('{}_share'.format(self.prefix), False)
        self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
        self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
        if self.proj_on:
            # Projections for query and key only (value is used as-is).
            self.proj_modules = nn.ModuleList([nn.Linear(dim, self.hidden_size) for dim in self.qkv_dim[0:2]])
            if self.layer_norm_on:
                # BUG FIX: the original rebound the loop variable
                # (`proj = weight_norm(proj)`), which never replaced the
                # modules stored in the ModuleList; assign back by index.
                for i, proj in enumerate(self.proj_modules):
                    self.proj_modules[i] = weight_norm(proj)
            if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
                self.proj_modules[1] = self.proj_modules[0]
            self.f = activation(opt.get('{}_activation'.format(self.prefix), 'relu'))
            self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
            self.qkv_head_dim[2] = value_dim // self.num_heads
            assert self.qkv_head_dim[0] * self.num_heads == self.hidden_size, "hidden size must be divisible by num_heads"
            assert self.qkv_head_dim[2] * self.num_heads == value_dim, "value size must be divisible by num_heads"
        else:
            self.qkv_head_dim = [emb // self.num_heads for emb in self.qkv_dim]
            assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0], "query size must be divisible by num_heads"
            assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1], "key size must be divisible by num_heads"
            assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2], "value size must be divisible by num_heads"
        if self.scale_on:
            self.scaling = self.qkv_head_dim[0]**-0.5
        self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix), False)
        self.output_size = self.qkv_dim[2]

    def forward(self, query, key, value, key_padding_mask=None):
        """
        query: batch * tgt_len * query_dim
        key/value: batch * src_len * (key_dim/value_dim)
        key_padding_mask: optional batch * src_len (True/1 = padding)
        :return: batch * tgt_len * value_dim attended output
        """
        # Work in time-first layout internally; restored at the end.
        query = query.transpose(0, 1)
        key = key.transpose(0, 1)
        value = value.transpose(0, 1)
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.qkv_dim[0]
        q, k, v = query, key, value
        if self.proj_on:
            if self.dropout:
                q, k = self.dropout(q), self.dropout(k)
            # NOTE(review): the projection below reads the *undropped*
            # query/key, so the dropout call above has no effect on q, k —
            # kept as-is to preserve behavior; confirm intent.
            q, k = [self.f(proj(input)) for input, proj in zip([query, key], self.proj_modules)]
        src_len = k.size(0)
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.scale_on:
            q *= self.scaling
        # Split heads: (len, bsz*heads, head_dim) -> (bsz*heads, len, head_dim).
        q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.qkv_head_dim[0]).transpose(0, 1)
        k = k.contiguous().view(src_len, bsz*self.num_heads, self.qkv_head_dim[1]).transpose(0, 1)
        v = v.contiguous().view(src_len, bsz*self.num_heads, self.qkv_head_dim[2]).transpose(0, 1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.float().masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            ).type_as(attn_weights)  # FP16 support: cast to float and back
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if self.drop_diagonal:
            # Prevent positions from attending to themselves.
            assert attn_weights.size(1) == attn_weights.size(2)
            diag_mask = torch.diag(attn_weights.data.new(attn_weights.size(1)).zero_() + 1).byte().unsqueeze(0).expand_as(attn_weights)
            attn_weights.data.masked_fill_(diag_mask, -float('inf'))
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)
        attn_weights = self.dropout(attn_weights)
        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.qkv_head_dim[2]]
        # Merge heads and restore batch-first layout.
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, -1)
        # output_shape: Batch * Time * Channel
        attn = attn.transpose(0, 1)
        return attn
| 23,222 | 40.030035 | 135 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/dropout_wrapper.py | # Copyright (c) Microsoft. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
class DropoutWrapper(nn.Module):
    """
    Dropout wrapper that can reuse one mask across the time dimension
    (fixed-mask / variational dropout).
    ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
    """
    def __init__(self, dropout_p=0, enable_vbp=True):
        super(DropoutWrapper, self).__init__()
        # enable_vbp means "fix the dropout mask" (variational dropout flag).
        self.enable_variational_dropout = enable_vbp
        self.dropout_p = dropout_p

    def forward(self, x):
        """
        :param x: batch * len * input_size
        """
        # No-op in eval mode or when dropout is disabled.
        if not self.training or self.dropout_p == 0:
            return x
        if x.dim() == 3:
            # One Bernoulli mask per (batch, feature) pair, shared across all
            # sequence positions, scaled by 1/(1-p) to preserve expectations.
            keep = 1 - self.dropout_p
            mask = torch.bernoulli(keep * (x.data.new(x.size(0), x.size(2)).zero_() + 1)) * (1.0 / keep)
            mask.requires_grad = False
            return mask.unsqueeze(1).expand_as(x) * x
        return F.dropout(x, p=self.dropout_p, training=self.training)
| 1,074 | 33.677419 | 130 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/common.py | # Copyright (c) Microsoft. All rights reserved.
import torch
import math
from torch.nn.functional import tanh, relu, prelu, leaky_relu, sigmoid, elu, selu
from torch.nn.init import uniform, normal, eye, xavier_uniform, xavier_normal, kaiming_uniform, kaiming_normal, orthogonal
def linear(x):
    """Identity activation: return the input unchanged."""
    return x
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def bertgelu(x):
    """Exact GELU as used in BERT: x * Phi(x), with Phi via erf."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def gptgelu(x):
    """GELU via the tanh approximation used in GPT."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
# Default GELU: the exact erf-based BERT variant.
gelu = bertgelu
def activation(func_a):
    """Activation function wrapper.

    Resolve an activation by name (e.g. 'relu', 'tanh', 'gelu'); fall back
    to the identity (`linear`) when the name cannot be resolved.
    """
    try:
        # SECURITY NOTE: eval() on a config-supplied name executes arbitrary
        # code; only use with trusted configuration strings.
        f = eval(func_a)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        f = linear
    return f
def init_wrapper(init='xavier_uniform'):
    # Resolve a weight initializer (imported from torch.nn.init above) by name.
    # SECURITY NOTE(review): eval() executes arbitrary code; only use with
    # trusted configuration strings.
    return eval(init)
| 782 | 22.727273 | 122 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/sub_layers.py | # Copyright (c) Microsoft. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class LayerNorm(nn.Module):
    #ref: https://github.com/pytorch/pytorch/issues/1959
    # :https://arxiv.org/pdf/1607.06450.pdf
    def __init__(self, hidden_size, eps=1e-4):
        super(LayerNorm, self).__init__()
        # Learnable gain g (alpha) and bias b (beta), broadcast over batch/len.
        self.alpha = Parameter(torch.ones(1, 1, hidden_size))
        self.beta = Parameter(torch.zeros(1, 1, hidden_size))
        self.eps = eps

    def forward(self, x):
        """
        Args:
            :param x: batch * len * input_size
        Returns:
            x normalized over the last dimension, then scaled and shifted.
        """
        mean = torch.mean(x, 2, keepdim=True).expand_as(x)
        std = torch.std(x, 2, keepdim=True).expand_as(x)
        normalized = (x - mean) / (std + self.eps)
        return normalized * self.alpha.expand_as(x) + self.beta.expand_as(x)
| 905 | 32.555556 | 95 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/module/san.py | # Copyright (c) Microsoft. All rights reserved.
import torch
import random
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from mt_bluebert.module.dropout_wrapper import DropoutWrapper
from mt_bluebert.module.similarity import FlatSimilarityWrapper, SelfAttnWrapper
from mt_bluebert.module.my_optim import weight_norm as WN
SMALL_POS_NUM=1.0e-30
def generate_mask(new_data, dropout_p=0.0, is_training=False):
    """Build an inverted-dropout mask in place over new_data.

    Each entry survives with probability 1-dropout_p (scaled by 1/(1-p)),
    and one randomly chosen column per row is forced to survive.
    Dropout is disabled outside of training.
    """
    if not is_training:
        dropout_p = 0.0
    keep = 1 - dropout_p
    new_data = keep * (new_data.zero_() + 1)
    # Guarantee at least one kept position per row.
    for row in range(new_data.size(0)):
        col = random.randint(0, new_data.size(1) - 1)
        new_data[row][col] = 1
    mask = 1.0 / keep * torch.bernoulli(new_data)
    mask.requires_grad = False
    return mask
class Classifier(nn.Module):
    """Pairwise classifier head: concatenate two vectors (optionally with
    |difference| and product features) and project to label scores."""
    def __init__(self, x_size, y_size, opt, prefix='decoder', dropout=None):
        super(Classifier, self).__init__()
        self.opt = opt
        self.dropout = dropout if dropout is not None else DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
        self.merge_opt = opt.get('{}_merge_opt'.format(prefix), 0)
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        # merge_opt == 1 adds |x1-x2| and x1*x2 features, doubling the input width.
        in_features = x_size * 4 if self.merge_opt == 1 else x_size * 2
        self.proj = nn.Linear(in_features, y_size)
        if self.weight_norm_on:
            self.proj = weight_norm(self.proj)

    def forward(self, x1, x2, mask=None):
        """Score the (x1, x2) pair; mask is unused (kept for interface parity)."""
        if self.merge_opt == 1:
            features = torch.cat([x1, x2, (x1 - x2).abs(), x1 * x2], 1)
        else:
            features = torch.cat([x1, x2], 1)
        return self.proj(self.dropout(features))
class SANClassifier(nn.Module):
    """Implementation of Stochastic Answer Networks for Natural Language Inference, Xiaodong Liu, Kevin Duh and Jianfeng Gao
    https://arxiv.org/abs/1804.07888
    """
    def __init__(self, x_size, h_size, label_size, opt={}, prefix='decoder', dropout=None):
        super(SANClassifier, self).__init__()
        # BUG FIX: self.prefix must be assigned before the dropout lookup
        # below — the original read self.prefix first, raising AttributeError
        # whenever dropout was None.
        self.prefix = prefix
        if dropout is None:
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(self.prefix), 0))
        else:
            self.dropout = dropout
        self.query_wsum = SelfAttnWrapper(x_size, prefix='mem_cum', opt=opt, dropout=self.dropout)
        self.attn = FlatSimilarityWrapper(x_size, h_size, prefix, opt, self.dropout)
        self.rnn_type = '{}{}'.format(opt.get('{}_rnn_type'.format(prefix), 'gru').upper(), 'Cell')
        self.rnn = getattr(nn, self.rnn_type)(x_size, h_size)
        self.num_turn = opt.get('{}_num_turn'.format(prefix), 5)
        self.opt = opt
        self.mem_random_drop = opt.get('{}_mem_drop_p'.format(prefix), 0)
        self.mem_type = opt.get('{}_mem_type'.format(prefix), 0)
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.label_size = label_size
        self.dump_state = opt.get('dump_state_on', False)
        self.alpha = Parameter(torch.zeros(1, 1), requires_grad=False)
        if self.weight_norm_on:
            self.rnn = WN(self.rnn)
        self.classifier = Classifier(x_size, self.label_size, opt, prefix=prefix, dropout=self.dropout)

    def forward(self, x, h0, x_mask=None, h_mask=None):
        """Run num_turn reasoning steps over memory x with state h0.

        :return: label scores; (scores, per-turn scores) when dump_state is on.
        """
        h0 = self.query_wsum(h0, h_mask)
        if type(self.rnn) is nn.LSTMCell:
            c0 = h0.new(h0.size()).zero_()
        scores_list = []
        for turn in range(self.num_turn):
            att_scores = self.attn(x, h0, x_mask)
            x_sum = torch.bmm(F.softmax(att_scores, 1).unsqueeze(1), x).squeeze(1)
            scores = self.classifier(x_sum, h0)
            scores_list.append(scores)
            # next turn: update the recurrent memory state
            if self.rnn is not None:
                h0 = self.dropout(h0)
                if type(self.rnn) is nn.LSTMCell:
                    h0, c0 = self.rnn(x_sum, (h0, c0))
                else:
                    h0 = self.rnn(x_sum, h0)
        if self.mem_type == 1:
            # Stochastic prediction dropout: average softmaxed per-turn
            # outputs under a random turn mask, then log for NLL training.
            mask = generate_mask(self.alpha.data.new(x.size(0), self.num_turn), self.mem_random_drop, self.training)
            mask = [m.contiguous() for m in torch.unbind(mask, 1)]
            tmp_scores_list = [mask[idx].view(x.size(0), 1).expand_as(inp) * F.softmax(inp, 1) for idx, inp in enumerate(scores_list)]
            scores = torch.stack(tmp_scores_list, 2)
            scores = torch.mean(scores, 2)
            scores = torch.log(scores)
        else:
            scores = scores_list[-1]
        if self.dump_state:
            return scores, scores_list
        return scores
| 4,721 | 41.540541 | 134 | py |
bluebert | bluebert-master/mt-bluebert/mt_bluebert/experiments/squad/verify_calc_span.py | from pytorch_pretrained_bert import BertTokenizer
from data_utils.task_def import EncoderModelType
from experiments.squad.squad_utils import calc_tokenized_span_range, parse_squad_label
model = "bert-base-uncased"
do_lower_case = True
tokenizer = BertTokenizer.from_pretrained(model, do_lower_case=do_lower_case)
# Verify that tokenized answer-span ranges can be reconstructed for every
# SQuAD v2 training example.
# BUG FIX: the file handle was never closed; use a context manager so it is
# released even if a line fails to parse.
with open(r"data\canonical_data\squad_v2_train.tsv", encoding="utf-8") as fin:
    for no, line in enumerate(fin):
        if no % 1000 == 0:
            print(no)  # progress indicator
        uid, label, context, question = line.strip().split("\t")
        answer_start, answer_end, answer, is_impossible = parse_squad_label(label)
        calc_tokenized_span_range(context, question, answer, answer_start, answer_end, tokenizer, EncoderModelType.BERT,
                                  verbose=True)
| 751 | 46 | 116 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.