repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
scipy/scipy | scipy/optimize/_minimize.py | 1 | 44930 | """
Unified interfaces to minimization algorithms.
Functions
---------
- minimize : minimization of a function of several variables.
- minimize_scalar : minimization of a function of one variable.
"""
__all__ = ['minimize', 'minimize_scalar']
from warnings import warn
import numpy as np
# unconstrained minimization
from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
_minimize_bfgs, _minimize_newtoncg,
_minimize_scalar_brent, _minimize_scalar_bounded,
_minimize_scalar_golden, MemoizeJac, OptimizeResult)
from ._trustregion_dogleg import _minimize_dogleg
from ._trustregion_ncg import _minimize_trust_ncg
from ._trustregion_krylov import _minimize_trust_krylov
from ._trustregion_exact import _minimize_trustregion_exact
from ._trustregion_constr import _minimize_trustregion_constr
# constrained minimization
from ._lbfgsb_py import _minimize_lbfgsb
from ._tnc import _minimize_tnc
from ._cobyla_py import _minimize_cobyla
from ._slsqp_py import _minimize_slsqp
from ._constraints import (old_bound_to_new, new_bounds_to_old,
old_constraint_to_new, new_constraint_to_old,
NonlinearConstraint, LinearConstraint, Bounds,
PreparedConstraint)
from ._differentiable_functions import FD_METHODS
MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr',
'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']
MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
             hessp=None, bounds=None, constraints=(), tol=None,
             callback=None, options=None):
    """Minimization of scalar function of one or more variables.

    Parameters
    ----------
    fun : callable
        The objective function to be minimized.

            ``fun(x, *args) -> float``

        where ``x`` is a 1-D array with shape (n,) and ``args``
        is a tuple of the fixed parameters needed to completely
        specify the function.
    x0 : ndarray, shape (n,)
        Initial guess. Array of real elements of size (n,),
        where ``n`` is the number of independent variables.
    args : tuple, optional
        Extra arguments passed to the objective function and its
        derivatives (`fun`, `jac` and `hess` functions).
    method : str or callable, optional
        Type of solver. Should be one of

            - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
            - 'Powell'      :ref:`(see here) <optimize.minimize-powell>`
            - 'CG'          :ref:`(see here) <optimize.minimize-cg>`
            - 'BFGS'        :ref:`(see here) <optimize.minimize-bfgs>`
            - 'Newton-CG'   :ref:`(see here) <optimize.minimize-newtoncg>`
            - 'L-BFGS-B'    :ref:`(see here) <optimize.minimize-lbfgsb>`
            - 'TNC'         :ref:`(see here) <optimize.minimize-tnc>`
            - 'COBYLA'      :ref:`(see here) <optimize.minimize-cobyla>`
            - 'SLSQP'       :ref:`(see here) <optimize.minimize-slsqp>`
            - 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
            - 'dogleg'      :ref:`(see here) <optimize.minimize-dogleg>`
            - 'trust-ncg'   :ref:`(see here) <optimize.minimize-trustncg>`
            - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
            - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
            - custom - a callable object, see below for description.

        If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
        depending on whether or not the problem has constraints or bounds.
    jac : {callable, '2-point', '3-point', 'cs', bool}, optional
        Method for computing the gradient vector. Only for CG, BFGS,
        Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
        trust-exact and trust-constr.
        If it is a callable, it should be a function that returns the gradient
        vector:

            ``jac(x, *args) -> array_like, shape (n,)``

        where ``x`` is an array with shape (n,) and ``args`` is a tuple with
        the fixed parameters. If `jac` is a Boolean and is True, `fun` is
        assumed to return a tuple ``(f, g)`` containing the objective
        function and the gradient.
        Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
        'trust-krylov' require that either a callable be supplied, or that
        `fun` return the objective and gradient.
        If None or False, the gradient will be estimated using 2-point finite
        difference estimation with an absolute step size.
        Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
        to select a finite difference scheme for numerical estimation of the
        gradient with a relative step size. These finite difference schemes
        obey any specified `bounds`.
    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
        Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
        trust-ncg, trust-krylov, trust-exact and trust-constr.
        If it is callable, it should return the Hessian matrix:

            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``

        where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
        parameters.
        The keywords {'2-point', '3-point', 'cs'} can also be used to select
        a finite difference scheme for numerical estimation of the hessian.
        Alternatively, objects implementing the `HessianUpdateStrategy`
        interface can be used to approximate the Hessian. Available
        quasi-Newton methods implementing this interface are:

            - `BFGS`;
            - `SR1`.

        Not all of the options are available for each of the methods; for
        availability refer to the notes.
    hessp : callable, optional
        Hessian of objective function times an arbitrary vector p. Only for
        Newton-CG, trust-ncg, trust-krylov, trust-constr.
        Only one of `hessp` or `hess` needs to be given. If `hess` is
        provided, then `hessp` will be ignored. `hessp` must compute the
        Hessian times an arbitrary vector:

            ``hessp(x, p, *args) -> ndarray shape (n,)``

        where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
        dimension (n,) and ``args`` is a tuple with the fixed
        parameters.
    bounds : sequence or `Bounds`, optional
        Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell, and
        trust-constr methods. There are two ways to specify the bounds:

            1. Instance of `Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.

    constraints : {Constraint, dict} or List of {Constraint, dict}, optional
        Constraints definition. Only for COBYLA, SLSQP and trust-constr.

        Constraints for 'trust-constr' are defined as a single object or a
        list of objects specifying constraints to the optimization problem.
        Available constraints are:

            - `LinearConstraint`
            - `NonlinearConstraint`

        Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
        Each dictionary with fields:

            type : str
                Constraint type: 'eq' for equality, 'ineq' for inequality.
            fun : callable
                The function defining the constraint.
            jac : callable, optional
                The Jacobian of `fun` (only for SLSQP).
            args : sequence, optional
                Extra arguments to be passed to the function and Jacobian.

        Equality constraint means that the constraint function result is to
        be zero whereas inequality means that it is to be non-negative.
        Note that COBYLA only supports inequality constraints.
    tol : float, optional
        Tolerance for termination. When `tol` is specified, the selected
        minimization algorithm sets some relevant solver-specific tolerance(s)
        equal to `tol`. For detailed control, use solver-specific
        options.
    options : dict, optional
        A dictionary of solver options. All methods except `TNC` accept the
        following generic options:

            maxiter : int
                Maximum number of iterations to perform. Depending on the
                method each iteration may use several function evaluations.
                For `TNC` use `maxfun` instead of `maxiter`.
            disp : bool
                Set to True to print convergence messages.

        For method-specific options, see :func:`show_options()`.
    callback : callable, optional
        Called after each iteration. For 'trust-constr' it is a callable with
        the signature:

            ``callback(xk, OptimizeResult state) -> bool``

        where ``xk`` is the current parameter vector. and ``state``
        is an `OptimizeResult` object, with the same fields
        as the ones from the return. If callback returns True
        the algorithm execution is terminated.

        For all the other methods, the signature is:

            ``callback(xk)``

        where ``xk`` is the current parameter vector.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    See also
    --------
    minimize_scalar : Interface to minimization algorithms for scalar
        univariate functions
    show_options : Additional options accepted by the solvers

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter. The default method is *BFGS*.

    **Unconstrained minimization**

    Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
    gradient algorithm by Polak and Ribiere, a variant of the
    Fletcher-Reeves method described in [5]_ pp.120-122. Only the
    first derivatives are used.

    Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
    method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
    pp. 136. It uses the first derivatives only. BFGS has proven good
    performance even for non-smooth optimizations. This method also
    returns an approximation of the Hessian inverse, stored as
    `hess_inv` in the OptimizeResult object.

    Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
    Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
    Newton method). It uses a CG method to the compute the search
    direction. See also *TNC* method for a box-constrained
    minimization with a similar algorithm. Suitable for large-scale
    problems.

    Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
    trust-region algorithm [5]_ for unconstrained minimization. This
    algorithm requires the gradient and Hessian; furthermore the
    Hessian is required to be positive definite.

    Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
    Newton conjugate gradient trust-region algorithm [5]_ for
    unconstrained minimization. This algorithm requires the gradient
    and either the Hessian or a function that computes the product of
    the Hessian with a given vector. Suitable for large-scale problems.

    Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
    the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
    minimization. This algorithm requires the gradient
    and either the Hessian or a function that computes the product of
    the Hessian with a given vector. Suitable for large-scale problems.
    On indefinite problems it requires usually less iterations than the
    `trust-ncg` method and is recommended for medium and large-scale problems.

    Method :ref:`trust-exact <optimize.minimize-trustexact>`
    is a trust-region method for unconstrained minimization in which
    quadratic subproblems are solved almost exactly [13]_. This
    algorithm requires the gradient and the Hessian (which is
    *not* required to be positive definite). It is, in many
    situations, the Newton method to converge in fewer iterations
    and the most recommended for small and medium-size problems.

    **Bound-Constrained minimization**

    Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
    Simplex algorithm [1]_, [2]_. This algorithm is robust in many
    applications. However, if numerical computation of derivative can be
    trusted, other algorithms using the first and/or second derivatives
    information might be preferred for their better performance in
    general.

    Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
    algorithm [6]_, [7]_ for bound constrained minimization.

    Method :ref:`Powell <optimize.minimize-powell>` is a modification
    of Powell's method [3]_, [4]_ which is a conjugate direction
    method. It performs sequential one-dimensional minimizations along
    each vector of the directions set (`direc` field in `options` and
    `info`), which is updated at each iteration of the main
    minimization loop. The function need not be differentiable, and no
    derivatives are taken. If bounds are not provided, then an
    unbounded line search will be used. If bounds are provided and
    the initial guess is within the bounds, then every function
    evaluation throughout the minimization procedure will be within
    the bounds. If bounds are provided, the initial guess is outside
    the bounds, and `direc` is full rank (default has full rank), then
    some function evaluations during the first iteration may be
    outside the bounds, but every function evaluation after the first
    iteration will be within the bounds. If `direc` is not full rank,
    then some parameters may not be optimized and the solution is not
    guaranteed to be within the bounds.

    Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
    algorithm [5]_, [8]_ to minimize a function with variables subject
    to bounds. This algorithm uses gradient information; it is also
    called Newton Conjugate-Gradient. It differs from the *Newton-CG*
    method described above as it wraps a C implementation and allows
    each variable to be given upper and lower bounds.

    **Constrained Minimization**

    Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
    Constrained Optimization BY Linear Approximation (COBYLA) method
    [9]_, [10]_, [11]_. The algorithm is based on linear
    approximations to the objective function and each constraint. The
    method wraps a FORTRAN implementation of the algorithm. The
    constraints functions 'fun' may return either a single number
    or an array or list of numbers.

    Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
    Least SQuares Programming to minimize a function of several
    variables with any combination of bounds, equality and inequality
    constraints. The method wraps the SLSQP Optimization subroutine
    originally implemented by Dieter Kraft [12]_. Note that the
    wrapper handles infinite values in bounds by converting them into
    large floating values.

    Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
    trust-region algorithm for constrained optimization. It switches
    between two implementations depending on the problem definition.
    It is the most versatile constrained minimization algorithm
    implemented in SciPy and the most appropriate for large-scale problems.
    For equality constrained problems it is an implementation of Byrd-Omojokun
    Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
    inequality constraints are imposed as well, it switches to the trust-region
    interior point method described in [16]_. This interior point algorithm,
    in turn, solves inequality constraints by introducing slack variables
    and solving a sequence of equality-constrained barrier problems
    for progressively smaller values of the barrier parameter.
    The previously described equality constrained SQP method is
    used to solve the subproblems with increasing levels of accuracy
    as the iterate gets closer to a solution.

    **Finite-Difference Options**

    For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
    the gradient and the Hessian may be approximated using
    three finite-difference schemes: {'2-point', '3-point', 'cs'}.
    The scheme 'cs' is, potentially, the most accurate but it
    requires the function to correctly handle complex inputs and to
    be differentiable in the complex plane. The scheme '3-point' is more
    accurate than '2-point' but requires twice as many operations. If the
    gradient is estimated via finite-differences the Hessian must be
    estimated using one of the quasi-Newton strategies.

    **Method specific options for the** `hess` **keyword**

    +--------------+------+----------+-------------------------+-----+
    | method/Hess  | None | callable | '2-point/'3-point'/'cs' | HUS |
    +==============+======+==========+=========================+=====+
    | Newton-CG    | x    | (n, n)   | x                       | x   |
    |              |      | LO       |                         |     |
    +--------------+------+----------+-------------------------+-----+
    | dogleg       |      | (n, n)   |                         |     |
    +--------------+------+----------+-------------------------+-----+
    | trust-ncg    |      | (n, n)   | x                       | x   |
    +--------------+------+----------+-------------------------+-----+
    | trust-krylov |      | (n, n)   | x                       | x   |
    +--------------+------+----------+-------------------------+-----+
    | trust-exact  |      | (n, n)   |                         |     |
    +--------------+------+----------+-------------------------+-----+
    | trust-constr | x    | (n, n)   | x                       | x   |
    |              |      | LO       |                         |     |
    |              |      | sp       |                         |     |
    +--------------+------+----------+-------------------------+-----+

    where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy

    **Custom minimizers**

    It may be useful to pass a custom minimization method, for example
    when using a frontend to this method such as `scipy.optimize.basinhopping`
    or a different library. You can simply pass a callable as the ``method``
    parameter.

    The callable is called as ``method(fun, x0, args, **kwargs, **options)``
    where ``kwargs`` corresponds to any other parameters passed to `minimize`
    (such as `callback`, `hess`, etc.), except the `options` dict, which has
    its contents also passed as `method` parameters pair by pair. Also, if
    `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
    `fun` returns just the function values and `jac` is converted to a function
    returning the Jacobian. The method shall return an `OptimizeResult`
    object.

    The provided `method` callable must be able to accept (and possibly ignore)
    arbitrary parameters; the set of parameters accepted by `minimize` may
    expand in future versions and then these parameters will be passed to
    the method. You can find an example in the scipy.optimize tutorial.

    References
    ----------
    .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
        Minimization. The Computer Journal 7: 308-13.
    .. [2] Wright M H. 1996. Direct search methods: Once scorned, now
        respectable, in Numerical Analysis 1995: Proceedings of the 1995
        Dundee Biennial Conference in Numerical Analysis (Eds. D F
        Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
        191-208.
    .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
        a function of several variables without calculating derivatives. The
        Computer Journal 7: 155-162.
    .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
        Numerical Recipes (any edition), Cambridge University Press.
    .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
        Springer New York.
    .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
        Algorithm for Bound Constrained Optimization. SIAM Journal on
        Scientific and Statistical Computing 16 (5): 1190-1208.
    .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
        778: L-BFGS-B, FORTRAN routines for large scale bound constrained
        optimization. ACM Transactions on Mathematical Software 23 (4):
        550-560.
    .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
        1984. SIAM Journal of Numerical Analysis 21: 770-778.
    .. [9] Powell, M J D. A direct search optimization method that models
        the objective and constraint functions by linear interpolation.
        1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
        and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
    .. [10] Powell M J D. Direct search algorithms for optimization
        calculations. 1998. Acta Numerica 7: 287-336.
    .. [11] Powell M J D. A view of algorithms for optimization without
        derivatives. 2007.Cambridge University Technical Report DAMTP
        2007/NA03
    .. [12] Kraft, D. A software package for sequential quadratic
        programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
        Center -- Institute for Flight Mechanics, Koln, Germany.
    .. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
        Trust region methods. 2000. Siam. pp. 169-200.
    .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
        implementation of the GLTR method for iterative solution of
        the trust region problem", :arxiv:`1611.04718`
    .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
        Trust-Region Subproblem using the Lanczos Method",
        SIAM J. Optim., 9(2), 504--525, (1999).
    .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
        An interior point algorithm for large-scale nonlinear programming.
        SIAM Journal on Optimization 9.4: 877-900.
    .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
        implementation of an algorithm for large-scale equality constrained
        optimization. SIAM Journal on Optimization 8.3: 682-706.

    Examples
    --------
    Let us consider the problem of minimizing the Rosenbrock function. This
    function (and its respective derivatives) is implemented in `rosen`
    (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.

    >>> from scipy.optimize import minimize, rosen, rosen_der

    A simple application of the *Nelder-Mead* method is:

    >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
    >>> res.x
    array([ 1.,  1.,  1.,  1.,  1.])

    Now using the *BFGS* algorithm, using the first derivative and a few
    options:

    >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
    ...                options={'gtol': 1e-6, 'disp': True})
    Optimization terminated successfully.
             Current function value: 0.000000
             Iterations: 26
             Function evaluations: 31
             Gradient evaluations: 31
    >>> res.x
    array([ 1.,  1.,  1.,  1.,  1.])
    >>> print(res.message)
    Optimization terminated successfully.
    >>> res.hess_inv
    array([[ 0.00749589,  0.01255155,  0.02396251,  0.04750988,  0.09495377],  # may vary
           [ 0.01255155,  0.02510441,  0.04794055,  0.09502834,  0.18996269],
           [ 0.02396251,  0.04794055,  0.09631614,  0.19092151,  0.38165151],
           [ 0.04750988,  0.09502834,  0.19092151,  0.38341252,  0.7664427 ],
           [ 0.09495377,  0.18996269,  0.38165151,  0.7664427 ,  1.53713523]])

    Next, consider a minimization problem with several constraints (namely
    Example 16.4 from [5]_). The objective function is:

    >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2

    There are three constraints defined as:

    >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
    ...         {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
    ...         {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})

    And variables must be positive, hence the following bounds:

    >>> bnds = ((0, None), (0, None))

    The optimization problem is solved using the SLSQP method as:

    >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
    ...                constraints=cons)

    It should converge to the theoretical solution (1.4 ,1.7).

    """
    # Normalize the initial guess to a 1-D float ndarray.  Higher-dimensional
    # inputs are squeezed for now, but this is deprecated behavior.
    x0 = np.atleast_1d(np.asarray(x0))
    if x0.ndim != 1:
        message = ('Use of `minimize` with `x0.ndim != 1` is deprecated. '
                   'Currently, singleton dimensions will be removed from '
                   '`x0`, but an error will be raised in SciPy 1.11.0.')
        warn(message, DeprecationWarning, stacklevel=2)
        x0 = np.atleast_1d(np.squeeze(x0))
    # Integer x0 would break gradient/step arithmetic downstream, so promote
    # any integer dtype to float.
    if x0.dtype.kind in np.typecodes["AllInteger"]:
        x0 = np.asarray(x0, dtype=float)
    # Allow a single non-tuple extra argument for convenience.
    if not isinstance(args, tuple):
        args = (args,)
    if method is None:
        # Select automatically: constrained -> SLSQP, bounded -> L-BFGS-B,
        # otherwise unconstrained BFGS.
        if constraints:
            method = 'SLSQP'
        elif bounds is not None:
            method = 'L-BFGS-B'
        else:
            method = 'BFGS'
    # A callable `method` is a user-supplied minimizer; dispatch it via the
    # internal sentinel name '_custom'.
    if callable(method):
        meth = "_custom"
    else:
        meth = method.lower()
    if options is None:
        options = {}
    # check if optional parameters are supported by the selected method
    # - jac
    if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
        warn('Method %s does not use gradient information (jac).' % method,
             RuntimeWarning)
    # - hess
    if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
                    'trust-krylov', 'trust-exact', '_custom') and hess is not None:
        warn('Method %s does not use Hessian information (hess).' % method,
             RuntimeWarning)
    # - hessp
    if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
                    'trust-krylov', '_custom') \
       and hessp is not None:
        warn('Method %s does not use Hessian-vector product '
             'information (hessp).' % method, RuntimeWarning)
    # - constraints or bounds
    if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and
            np.any(constraints)):
        warn('Method %s cannot handle constraints.' % method,
             RuntimeWarning)
    if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'tnc', 'slsqp',
                    'trust-constr', '_custom') and bounds is not None:
        warn('Method %s cannot handle bounds.' % method,
             RuntimeWarning)
    # - return_all
    if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
            options.get('return_all', False)):
        warn('Method %s does not support the return_all option.' % method,
             RuntimeWarning)

    # check gradient vector
    # Normalize `jac` to one of: a callable, a finite-difference keyword
    # (methods that support relative-step FD), or None (absolute-step FD).
    if callable(jac):
        pass
    elif jac is True:
        # fun returns func and grad
        fun = MemoizeJac(fun)
        jac = fun.derivative
    elif (jac in FD_METHODS and
          meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
        # finite differences with relative step
        pass
    elif meth in ['trust-constr']:
        # default jac calculation for this method
        jac = '2-point'
    elif jac is None or bool(jac) is False:
        # this will cause e.g. LBFGS to use forward difference, absolute step
        jac = None
    else:
        # default if jac option is not understood
        jac = None

    # set default tolerances
    # Map the generic `tol` onto each solver's own tolerance option(s);
    # `options` is copied first so the caller's dict is never mutated.
    if tol is not None:
        options = dict(options)
        if meth == 'nelder-mead':
            options.setdefault('xatol', tol)
            options.setdefault('fatol', tol)
        if meth in ('newton-cg', 'powell', 'tnc'):
            options.setdefault('xtol', tol)
        if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
            options.setdefault('ftol', tol)
        if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
                    'trust-ncg', 'trust-exact', 'trust-krylov'):
            options.setdefault('gtol', tol)
        if meth in ('cobyla', '_custom'):
            options.setdefault('tol', tol)
        if meth == 'trust-constr':
            options.setdefault('xtol', tol)
            options.setdefault('gtol', tol)
            options.setdefault('barrier_tol', tol)

    if meth == '_custom':
        # custom method called before bounds and constraints are 'standardised'
        # custom method should be able to accept whatever bounds/constraints
        # are provided to it.
        return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
                      bounds=bounds, constraints=constraints,
                      callback=callback, **options)

    # Convert constraints to the representation expected by `meth`
    # (dict-style for COBYLA/SLSQP, object-style for trust-constr).
    constraints = standardize_constraints(constraints, x0, meth)

    remove_vars = False
    if bounds is not None:
        if meth in {"tnc", "slsqp", "l-bfgs-b"}:
            # These methods can't take the finite-difference derivatives they
            # need when a variable is fixed by the bounds. To avoid this issue,
            # remove fixed variables from the problem.
            # NOTE: if this list is expanded, then be sure to update the
            # accompanying tests and test_optimize.eb_data. Consider also if
            # default OptimizeResult will need updating.

            # convert to new-style bounds so we only have to consider one case
            bounds = standardize_bounds(bounds, x0, 'new')

            # determine whether any variables are fixed
            i_fixed = (bounds.lb == bounds.ub)

            if np.all(i_fixed):
                # all the parameters are fixed, a minimizer is not able to do
                # anything
                return _optimize_result_for_equal_bounds(
                    fun, bounds, meth, args=args, constraints=constraints
                )

            # determine whether finite differences are needed for any grad/jac
            fd_needed = (not callable(jac))
            for con in constraints:
                if not callable(con.get('jac', None)):
                    fd_needed = True

            # If finite differences are ever used, remove all fixed variables
            # Always remove fixed variables for TNC; see gh-14565
            remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
            if remove_vars:
                # Shrink the problem: drop fixed variables from x0/bounds and
                # wrap fun/jac/callback/constraints so they see the full x.
                x_fixed = (bounds.lb)[i_fixed]
                x0 = x0[~i_fixed]
                bounds = _remove_from_bounds(bounds, i_fixed)
                fun = _remove_from_func(fun, i_fixed, x_fixed)
                if callable(callback):
                    callback = _remove_from_func(callback, i_fixed, x_fixed)
                if callable(jac):
                    jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)

                # make a copy of the constraints so the user's version doesn't
                # get changed. (Shallow copy is ok)
                constraints = [con.copy() for con in constraints]
                for con in constraints:  # yes, guaranteed to be a list
                    con['fun'] = _remove_from_func(con['fun'], i_fixed,
                                                   x_fixed, min_dim=1,
                                                   remove=0)
                    if callable(con.get('jac', None)):
                        con['jac'] = _remove_from_func(con['jac'], i_fixed,
                                                       x_fixed, min_dim=2,
                                                       remove=1)
        # Convert bounds into whatever style `meth` expects (old/new).
        bounds = standardize_bounds(bounds, x0, meth)

    # Dispatch to the selected solver implementation.
    if meth == 'nelder-mead':
        res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
                                   **options)
    elif meth == 'powell':
        res = _minimize_powell(fun, x0, args, callback, bounds, **options)
    elif meth == 'cg':
        res = _minimize_cg(fun, x0, args, jac, callback, **options)
    elif meth == 'bfgs':
        res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
    elif meth == 'newton-cg':
        res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
                                 **options)
    elif meth == 'l-bfgs-b':
        res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
                               callback=callback, **options)
    elif meth == 'tnc':
        res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
                            **options)
    elif meth == 'cobyla':
        res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
                               **options)
    elif meth == 'slsqp':
        res = _minimize_slsqp(fun, x0, args, jac, bounds,
                              constraints, callback=callback, **options)
    elif meth == 'trust-constr':
        res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
                                           bounds, constraints,
                                           callback=callback, **options)
    elif meth == 'dogleg':
        res = _minimize_dogleg(fun, x0, args, jac, hess,
                               callback=callback, **options)
    elif meth == 'trust-ncg':
        res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
                                  callback=callback, **options)
    elif meth == 'trust-krylov':
        res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
                                     callback=callback, **options)
    elif meth == 'trust-exact':
        res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
                                          callback=callback, **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    if remove_vars:
        # Re-insert the fixed variables into the reduced solution.  Their
        # gradient entries are unknown, so they are reported as NaN; a
        # reduced-space hess_inv cannot be expanded, so it is dropped.
        res.x = _add_to_array(res.x, i_fixed, x_fixed)
        res.jac = _add_to_array(res.jac, i_fixed, np.nan)
        if "hess_inv" in res:
            res.hess_inv = None  # unknown

    return res
def minimize_scalar(fun, bracket=None, bounds=None, args=(),
                    method='brent', tol=None, options=None):
    """Minimization of scalar function of one variable.

    Parameters
    ----------
    fun : callable
        Objective function.
        Scalar function, must return a scalar.
    bracket : sequence, optional
        For methods 'brent' and 'golden', `bracket` defines the bracketing
        interval and can either have three items ``(a, b, c)`` so that
        ``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and
        ``c`` which are assumed to be a starting interval for a downhill
        bracket search (see `bracket`); it doesn't always mean that the
        obtained solution will satisfy ``a <= x <= c``.
    bounds : sequence, optional
        For method 'bounded', `bounds` is mandatory and must have two items
        corresponding to the optimization bounds.
    args : tuple, optional
        Extra arguments passed to the objective function.
    method : str or callable, optional
        Type of solver.  Should be one of:

            - :ref:`Brent <optimize.minimize_scalar-brent>`
            - :ref:`Bounded <optimize.minimize_scalar-bounded>`
            - :ref:`Golden <optimize.minimize_scalar-golden>`
            - custom - a callable object (added in version 0.14.0), see below

        See the 'Notes' section for details of each solver.
    tol : float, optional
        Tolerance for termination. For detailed control, use solver-specific
        options.
    options : dict, optional
        A dictionary of solver options.

            maxiter : int
                Maximum number of iterations to perform.
            disp : bool
                Set to True to print convergence messages.

        See :func:`show_options()` for solver-specific options.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    See also
    --------
    minimize : Interface to minimization algorithms for scalar multivariate
        functions
    show_options : Additional options accepted by the solvers

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter. The default method is *Brent*.

    Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
    algorithm to find a local minimum.  The algorithm uses inverse
    parabolic interpolation when possible to speed up convergence of
    the golden section method.

    Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
    golden section search technique. It uses an analog of the bisection
    method to decrease the bracketed interval. It is usually
    preferable to use the *Brent* method.

    Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
    perform bounded minimization. It uses the Brent method to find a
    local minimum in the interval x1 < xopt < x2.

    **Custom minimizers**

    It may be useful to pass a custom minimization method, for example
    when using some library frontend to minimize_scalar. You can simply
    pass a callable as the ``method`` parameter.

    The callable is called as ``method(fun, args, **kwargs, **options)``
    where ``kwargs`` corresponds to any other parameters passed to `minimize`
    (such as `bracket`, `tol`, etc.), except the `options` dict, which has
    its contents also passed as `method` parameters pair by pair. The method
    shall return an `OptimizeResult` object.

    The provided `method` callable must be able to accept (and possibly ignore)
    arbitrary parameters; the set of parameters accepted by `minimize` may
    expand in future versions and then these parameters will be passed to
    the method. You can find an example in the scipy.optimize tutorial.

    .. versionadded:: 0.11.0

    Examples
    --------
    Consider the problem of minimizing the following function.

    >>> def f(x):
    ...     return (x - 2) * x * (x + 2)**2

    Using the *Brent* method, we find the local minimum as:

    >>> from scipy.optimize import minimize_scalar
    >>> res = minimize_scalar(f)
    >>> res.x
    1.28077640403

    Using the *Bounded* method, we find a local minimum with specified
    bounds as:

    >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
    >>> res.x
    -2.0000002026
    """
    if not isinstance(args, tuple):
        args = (args,)

    if callable(method):
        meth = "_custom"
    else:
        meth = method.lower()

    # Always work on a copy so the caller's `options` dict is never mutated:
    # both the tolerance injection below and the `disp` bool->int conversion
    # write into `options`.  (Previously the copy was only made when `tol`
    # was given, so passing `options={'disp': True}` alone mutated the
    # caller's dictionary.)
    options = dict(options) if options is not None else {}

    if tol is not None:
        if meth == 'bounded' and 'xatol' not in options:
            warn("Method 'bounded' does not support relative tolerance in x; "
                 "defaulting to absolute tolerance.", RuntimeWarning)
            options['xatol'] = tol
        elif meth == '_custom':
            options.setdefault('tol', tol)
        else:
            options.setdefault('xtol', tol)

    # replace boolean "disp" option, if specified, by an integer value.
    disp = options.get('disp')
    if isinstance(disp, bool):
        options['disp'] = 2 * int(disp)

    if meth == '_custom':
        return method(fun, args=args, bracket=bracket, bounds=bounds,
                      **options)
    elif meth == 'brent':
        return _minimize_scalar_brent(fun, bracket, args, **options)
    elif meth == 'bounded':
        if bounds is None:
            raise ValueError('The `bounds` parameter is mandatory for '
                             'method `bounded`.')
        return _minimize_scalar_bounded(fun, bounds, args, **options)
    elif meth == 'golden':
        return _minimize_scalar_golden(fun, bracket, args, **options)
    else:
        raise ValueError('Unknown solver %s' % method)
def _remove_from_bounds(bounds, i_fixed):
"""Removes fixed variables from a `Bounds` instance"""
lb = bounds.lb[~i_fixed]
ub = bounds.ub[~i_fixed]
return Bounds(lb, ub) # don't mutate original Bounds object
def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
"""Wraps a function such that fixed variables need not be passed in"""
def fun_out(x_in, *args, **kwargs):
x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
x_out[i_fixed] = x_fixed
x_out[~i_fixed] = x_in
y_out = fun_in(x_out, *args, **kwargs)
y_out = np.array(y_out)
if min_dim == 1:
y_out = np.atleast_1d(y_out)
elif min_dim == 2:
y_out = np.atleast_2d(y_out)
if remove == 1:
y_out = y_out[..., ~i_fixed]
elif remove == 2:
y_out = y_out[~i_fixed, ~i_fixed]
return y_out
return fun_out
def _add_to_array(x_in, i_fixed, x_fixed):
"""Adds fixed variables back to an array"""
i_free = ~i_fixed
if x_in.ndim == 2:
i_free = i_free[:, None] @ i_free[None, :]
x_out = np.zeros_like(i_free, dtype=x_in.dtype)
x_out[~i_free] = x_fixed
x_out[i_free] = x_in.ravel()
return x_out
def standardize_bounds(bounds, x0, meth):
    """Converts bounds to the form required by the solver."""
    # Solvers in the first group consume new-style `Bounds` objects; those
    # in the second expect the old sequence-of-(lb, ub)-pairs format.
    if meth in {'trust-constr', 'powell', 'nelder-mead', 'new'}:
        if not isinstance(bounds, Bounds):
            lb, ub = old_bound_to_new(bounds)
            return Bounds(lb, ub)
    elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
        if isinstance(bounds, Bounds):
            return new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
    return bounds
def standardize_constraints(constraints, x0, meth):
    """Converts constraints to the form required by the solver."""
    all_types = (NonlinearConstraint, LinearConstraint, dict)
    new_types = all_types[:-1]

    # Normalize the input to a mutable list of individual constraints.
    if constraints is None:
        constraints = []
    elif isinstance(constraints, all_types):
        constraints = [constraints]
    else:
        constraints = list(constraints)

    if meth in ('trust-constr', 'new'):
        # These solvers take new-style constraint objects; convert any
        # old-style dicts in place.
        constraints = [con if isinstance(con, new_types)
                       else old_constraint_to_new(i, con)
                       for i, con in enumerate(constraints)]
    else:
        # Convert new-style objects to old-style dicts.  A single new-style
        # constraint may expand into several dicts, so iterate over a copy
        # while extending the original list.
        for i, con in enumerate(list(constraints)):
            if isinstance(con, new_types):
                old_constraints = new_constraint_to_old(con, x0)
                constraints[i] = old_constraints[0]
                constraints.extend(old_constraints[1:])
    return constraints
def _optimize_result_for_equal_bounds(
fun, bounds, method, args=(), constraints=()
):
"""
Provides a default OptimizeResult for when a bounded minimization method
has (lb == ub).all().
Parameters
----------
fun: callable
bounds: Bounds
method: str
constraints: Constraint
"""
success = True
message = 'All independent variables were fixed by bounds.'
# bounds is new-style
x0 = bounds.lb
if constraints:
message = ("All independent variables were fixed by bounds at values"
" that satisfy the constraints.")
constraints = standardize_constraints(constraints, x0, 'new')
maxcv = 0
for c in constraints:
pc = PreparedConstraint(c, x0)
violation = pc.violation(x0)
if np.sum(violation):
maxcv = max(maxcv, np.max(violation))
success = False
message = (f"All independent variables were fixed by bounds, but "
f"the independent variables do not satisfy the "
f"constraints exactly. (Maximum violation: {maxcv}).")
return OptimizeResult(
x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
njev=0, nhev=0,
)
| bsd-3-clause | caf8da6473aa7f781ea3e6d83d53ba59 | 43.353406 | 89 | 0.614445 | 3.985276 | false | false | false | false |
scipy/scipy | scipy/special/__init__.py | 8 | 29005 | """
========================================
Special functions (:mod:`scipy.special`)
========================================
.. currentmodule:: scipy.special
Almost all of the functions below accept NumPy arrays as input
arguments as well as single numbers. This means they follow
broadcasting and automatic array-looping rules. Technically,
they are `NumPy universal functions
<https://numpy.org/doc/stable/user/basics.ufuncs.html#ufuncs-basics>`_.
Functions which do not accept NumPy arrays are marked by a warning
in the section description.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings or raise
exceptions when an error occurs. By default this is disabled; to
query and control the current error handling state the following
functions are provided.
.. autosummary::
:toctree: generated/
geterr -- Get the current way of handling special-function errors.
seterr -- Set how special-function errors are handled.
errstate -- Context manager for special-function error handling.
SpecialFunctionWarning -- Warning that can be emitted by special functions.
SpecialFunctionError -- Exception that can be raised by special functions.
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic functions and integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions.
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1.
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
elliprc -- Degenerate symmetric integral RC.
elliprd -- Symmetric elliptic integral of the second kind.
elliprf -- Completely-symmetric elliptic integral of the first kind.
elliprg -- Completely-symmetric elliptic integral of the second kind.
elliprj -- Symmetric elliptic integral of the third kind.
Bessel functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and \
complex argument.
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and \
real argument.
yv -- Bessel function of the second kind of real order and \
complex argument.
yve -- Exponentially scaled Bessel function of the second kind \
of real order.
kn -- Modified Bessel function of the second kind of integer \
order `n`
kv -- Modified Bessel function of the second kind of real order \
`v`
kve -- Exponentially scaled modified Bessel function of the \
second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the \
first kind.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
wright_bessel -- Wright's generalized Bessel function.
The following function does not accept NumPy arrays (it is not a
universal function):
.. autosummary::
:toctree: generated/
lmbda -- Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1.
Integrals of Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0.
it2j0y0 -- Integrals related to Bessel functions of order 0.
iti0k0 -- Integrals of modified Bessel functions of order 0.
it2i0k0 -- Integrals related to modified Bessel functions of order 0.
besselpoly -- Weighted integral of a Bessel function.
Derivatives of Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
riccati_jn -- Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- Compute Ricatti-Bessel function of the second kind and its derivative.
Struve functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw statistical functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
btdtr -- Cumulative distribution function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
btdtrib -- btdtria(a, p, x).
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd.
gdtr -- Gamma distribution cumulative distribution function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function.
pdtrc -- Poisson survival function.
pdtri -- Inverse to `pdtr` vs m.
pdtrik -- Inverse to `pdtr` vs k.
stdtr -- Student t distribution cumulative distribution function.
stdtridf -- Inverse of `stdtr` vs df.
stdtrit -- Inverse of `stdtr` vs `t`.
chdtr -- Chi square cumulative distribution function.
chdtrc -- Chi square survival function.
chdtri -- Inverse to `chdtrc`.
chdtriv -- Inverse to `chdtr` vs `v`.
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x.
ndtri_exp -- Inverse of `log_ndtr` vs x.
chndtr -- Non-central chi square cumulative distribution function.
chndtridf -- Inverse to `chndtr` vs `df`.
chndtrinc -- Inverse to `chndtr` vs `nc`.
chndtrix -- Inverse to `chndtr` vs `x`.
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function.
smirnovi -- Inverse to `smirnov`.
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution.
kolmogi -- Inverse function to `kolmogorov`.
tklmbda -- Tukey-Lambda cumulative distribution function.
logit -- Logit ufunc for ndarrays.
expit -- Logistic sigmoid function.
log_expit -- Logarithm of the logistic sigmoid function.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
owens_t -- Owen's T Function.
Information Theory functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and related functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`.
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`.
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Gamma function inverted.
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- psi(x[, out]).
poch -- Rising factorial (z)_m.
Error function and Fresnel integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals.
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals.
modfresnelm -- Modified Fresnel negative integrals.
voigt_profile -- Voigt profile.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
erf_zeros -- Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- Compute nt complex zeros of sine Fresnel integral S(z).
Legendre functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
clpmn -- Associated Legendre function of the first kind for complex arguments.
lpn -- Legendre function of the first kind.
lqn -- Legendre function of the second kind.
lpmn -- Sequence of associated Legendre functions of the first kind.
lqmn -- Sequence of associated Legendre functions of the second kind.
Ellipsoidal harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l).
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l).
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n.
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
``orthopoly1d`` objects, which function similarly as `numpy.poly1d`.
The ``orthopoly1d`` class also has an attribute ``weights``, which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- Legendre polynomial.
chebyt -- Chebyshev polynomial of the first kind.
chebyu -- Chebyshev polynomial of the second kind.
chebyc -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- Jacobi polynomial.
laguerre -- Laguerre polynomial.
genlaguerre -- Generalized (associated) Laguerre polynomial.
hermite -- Physicist's Hermite polynomial.
hermitenorm -- Normalized (probabilist's) Hermite polynomial.
gegenbauer -- Gegenbauer (ultraspherical) polynomial.
sh_legendre -- Shifted Legendre polynomial.
sh_chebyt -- Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x).
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind.
hyp0f1 -- Confluent hypergeometric limit function 0F1.
Parabolic cylinder functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D.
pbvv -- Parabolic cylinder function V.
pbwa -- Parabolic cylinder function W.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and related functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions.
mathieu_b -- Characteristic value of odd Mathieu functions.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions.
   mathieu_odd_coef -- Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative.
mathieu_sem -- Odd Mathieu function and its derivative.
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative.
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative.
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative.
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative.
Spheroidal wave functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative.
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative.
   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative.
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative.
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative.
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function.
obl_cv -- Characteristic value of oblate spheroidal function.
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value.
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value.
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value.
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value.
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value.
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value.
Kelvin functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers.
kelvin_zeros -- Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
   bei -- Kelvin function bei.
berp -- Derivative of the Kelvin function `ber`.
beip -- Derivative of the Kelvin function `bei`.
ker -- Kelvin function ker.
   kei -- Kelvin function kei.
kerp -- Derivative of the Kelvin function ker.
keip -- Derivative of the Kelvin function kei.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
ber_zeros -- Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- The number of combinations of N things taken k at a time.
perm -- Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and related functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other special functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n.
exp1 -- Exponential integral E_1 of complex argument z.
expi -- Exponential integral Ei.
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
softmax -- Softmax function.
log_softmax -- Logarithm of softmax function.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`.
exp10 -- 10**x.
exp2 -- 2**x.
radian -- Convert from degrees to radians.
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees.
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero.
expm1 -- ``exp(x) - 1`` for use when `x` is near zero.
cosm1 -- ``cos(x) - 1`` for use when `x` is near zero.
powm1 -- ``x**y - 1`` for use when `y` is near zero or `x` is near 1.
round -- Round to nearest integer.
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
"""
from ._sf_error import SpecialFunctionWarning, SpecialFunctionError

# Re-export the compiled ufuncs and the pure-Python helper modules that make
# up the public ``scipy.special`` namespace.
from . import _ufuncs
from ._ufuncs import *

from . import _basic
from ._basic import *

from ._logsumexp import logsumexp, softmax, log_softmax

from . import _orthogonal
from ._orthogonal import *

from ._spfun_stats import multigammaln
from ._ellip_harm import (
    ellip_harm,
    ellip_harm_2,
    ellip_normal
)
from ._lambertw import lambertw
from ._spherical_bessel import (
    spherical_jn,
    spherical_yn,
    spherical_in,
    spherical_kn
)

# Deprecated namespaces, to be removed in v2.0.0
from . import add_newdocs, basic, orthogonal, specfun, sf_error, spfun_stats

# Public API: everything exported by the ufunc/basic/orthogonal submodules
# plus the names imported explicitly above.
__all__ = _ufuncs.__all__ + _basic.__all__ + _orthogonal.__all__ + [
    'SpecialFunctionWarning',
    'SpecialFunctionError',
    'logsumexp',
    'softmax',
    'log_softmax',
    'multigammaln',
    'ellip_harm',
    'ellip_harm_2',
    'ellip_normal',
    'lambertw',
    'spherical_jn',
    'spherical_yn',
    'spherical_in',
    'spherical_kn',
]

# Expose ``scipy.special.test()`` for running the package test suite.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| bsd-3-clause | b4fc7f8f8028f510f0c4ea7fde366988 | 39.852113 | 104 | 0.678124 | 3.753721 | false | false | false | false |
ellmetha/django-machina | machina/apps/forum/forms.py | 1 | 4255 | """
Forum forms
===========
This module defines forms provided by the ``forum`` application.
"""
from django import forms
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.forms.forms import NON_FIELD_ERRORS
from django.utils.translation import gettext_lazy as _
from mptt.forms import TreeNodeChoiceField
from machina.core.db.models import get_model
from machina.core.loading import get_class
Forum = get_model('forum', 'Forum')
GroupForumPermission = get_model('forum_permission', 'GroupForumPermission')
UserForumPermission = get_model('forum_permission', 'UserForumPermission')
PermissionConfig = get_class('forum_permission.defaults', 'PermissionConfig')
class PickUserForm(forms.Form):
    """ Form allowing to pick a user to edit their permissions. """

    user = UserForumPermission._meta.get_field('user').formfield()
    anonymous_user = forms.BooleanField(
        label=_('Anonymous'),
        initial=False,
        help_text=_(
            'Please select this option if you want to edit the permissions of the anonymous user'
        ),
    )
    authenticated_user = forms.BooleanField(
        label=_('Authenticated'),
        initial=False,
        help_text=_(
            'Please select this option if you want to edit the permissions of every '
            '(non-specific) logged in user'
        ),
    )

    def __init__(self, *args, **kwargs):
        admin_site = kwargs.pop('admin_site')
        super().__init__(*args, **kwargs)
        # Use the admin raw ID widget so the picker stays usable with many users.
        user_field = self.fields['user']
        user_field.required = False
        user_field.widget = ForeignKeyRawIdWidget(
            UserForumPermission._meta.get_field('user').remote_field, admin_site,
        )
        # Every target is optional; ``clean`` enforces that at most one is chosen.
        for field_name in ('anonymous_user', 'authenticated_user'):
            self.fields[field_name].required = False

    def clean(self):
        cleaned_data = super().clean()
        targets = (
            cleaned_data.get('user', None),
            cleaned_data.get('anonymous_user', None),
            cleaned_data.get('authenticated_user', None),
        )
        # Selecting more than one of the three mutually exclusive targets is
        # ambiguous, so reject the form in that case.
        if len([target for target in targets if target]) > 1:
            self._errors[NON_FIELD_ERRORS] = self.error_class([
                _(
                    'Choose either a user ID or check either the anonymous or '
                    'authenticated user checkbox'
                ),
            ],)
        return cleaned_data
class PickGroupForm(forms.Form):
    """ Form allowing to pick a group to edit its permissions. """

    group = GroupForumPermission._meta.get_field('group').formfield()

    def __init__(self, *args, **kwargs):
        admin_site = kwargs.pop('admin_site')
        super().__init__(*args, **kwargs)
        # The group is optional and is selected through the admin raw ID widget.
        group_field = self.fields['group']
        group_field.required = False
        group_field.widget = ForeignKeyRawIdWidget(
            GroupForumPermission._meta.get_field('group').remote_field, admin_site,
        )
class PickForumForm(forms.Form):
    """ Form allowing to pick a specific forum. """

    # TreeNodeChoiceField renders the forums with their tree hierarchy; the
    # field is optional so "no forum selected" is a valid submission.
    forum = TreeNodeChoiceField(queryset=Forum.objects.all(), required=False)
class PermissionsForm(forms.Form):
    """ Form allowing to edit permissions. """

    # Tri-state values used for each permission radio group.
    PERM_GRANTED = 'granted'
    PERM_NOT_GRANTED = 'not-granted'
    PERM_NOT_SET = 'not-set'

    def __init__(self, *args, **kwargs):
        self.permissions_dict = kwargs.pop('permissions_dict', {})
        super().__init__(*args, **kwargs)

        # Every permission is edited through the same tri-state radio choices.
        tri_state_choices = (
            (self.PERM_NOT_SET, _('Not set')),
            (self.PERM_GRANTED, _('Granted')),
            (self.PERM_NOT_GRANTED, _('Not granted')),
        )

        # Build one ChoiceField per known permission, grouped by scope.
        for scope in PermissionConfig.scopes:
            scope_codenames = [
                perm['codename'] for perm in PermissionConfig.permissions
                if perm['scope'] == scope
            ]
            for codename, perm_state in self.permissions_dict.items():
                if codename not in scope_codenames:
                    continue
                field = forms.ChoiceField(
                    label=perm_state[0].name, choices=tri_state_choices,
                    required=False, widget=forms.RadioSelect,
                )
                field.initial = perm_state[1]
                field.scope = scope
                self.fields[codename] = field
| bsd-3-clause | 31d83b685042532b2e72e600bc0f6983 | 34.165289 | 99 | 0.617861 | 4.107143 | false | false | false | false |
ellmetha/django-machina | machina/apps/forum_conversation/migrations/0013_auto_20201220_1745.py | 1 | 1043 | # Generated by Django 3.1.2 on 2020-12-20 22:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds db_index=True to the Topic date fields
    # and a composite (type, last_post_on) index used by topic list ordering.

    dependencies = [
        ('forum_conversation', '0012_auto_20200423_1049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='topic',
            name='created',
            field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Creation date'),
        ),
        migrations.AlterField(
            model_name='topic',
            name='last_post_on',
            field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name='Last post added on'),
        ),
        migrations.AlterField(
            model_name='topic',
            name='updated',
            field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
        ),
        migrations.AddIndex(
            model_name='topic',
            index=models.Index(fields=['type', 'last_post_on'], name='forum_conve_type_cc96d0_idx'),
        ),
    ]
| bsd-3-clause | 6897f7e0135b211ee996637f5d128f50 | 31.59375 | 112 | 0.584851 | 3.965779 | false | false | false | false |
ellmetha/django-machina | machina/templatetags/forum_tags.py | 1 | 2609 | from django import template
from django.utils.safestring import mark_safe
from machina.core.db.models import get_model
from machina.core.loading import get_class
Forum = get_model('forum', 'Forum')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler')
register = template.Library()
class RecurseTreeForumVisibilityContentNode(template.Node):
    """ Template node rendering a forum tree by recursive application of the
    captured template nodes to each visibility-content node. """

    def __init__(self, template_nodes, forums_contents_var):
        self.template_nodes = template_nodes
        self.forums_contents_var = forums_contents_var

    def _render_node(self, context, node):
        # Children are rendered first so that their combined output can be
        # exposed to the parent's template through ``children``.
        context.push()
        rendered_children = [
            self._render_node(context, child) for child in node.children
        ]
        context['node'] = node
        context['children'] = mark_safe(''.join(rendered_children))
        output = self.template_nodes.render(context)
        context.pop()
        return output

    def render(self, context):
        contents = self.forums_contents_var.resolve(context)
        return ''.join(
            self._render_node(context, top_node)
            for top_node in contents.top_nodes
        )
@register.tag
def recurseforumcontents(parser, token):
    """ Iterates over the content nodes and renders the contained forum block for each node.

    Usage::

        {% recurseforumcontents my_forum_contents %}
            ...
        {% endrecurseforumcontents %}
    """
    bits = token.contents.split()
    # Validate the tag signature explicitly; without this a missing argument
    # would surface as a bare IndexError instead of a template error.
    if len(bits) != 2:
        raise template.TemplateSyntaxError(
            '%r tag requires exactly one argument: the forum contents to render'
            % bits[0]
        )
    forums_contents_var = template.Variable(bits[1])
    template_nodes = parser.parse(('endrecurseforumcontents',))
    parser.delete_first_token()
    return RecurseTreeForumVisibilityContentNode(template_nodes, forums_contents_var)
@register.inclusion_tag('forum/forum_list.html', takes_context=True)
def forum_list(context, forum_visibility_contents):
    """ Renders the considered forum list.

    This will render the given list of forums by respecting the order and the depth of each
    forum in the forums tree.

    Usage::

        {% forum_list my_forums %}

    """
    request = context.get('request')
    unread_forums = TrackingHandler(request=request).get_unread_forums_from_list(
        request.user, forum_visibility_contents.forums)
    data_dict = {
        'forum_contents': forum_visibility_contents,
        'unread_forums': unread_forums,
        'user': request.user,
        'request': request,
    }

    # When a root level is known, also expose the two sub-levels used by the
    # template to indent child forums.
    root_level = forum_visibility_contents.root_level
    if root_level is not None:
        data_dict.update(
            root_level=root_level,
            root_level_middle=root_level + 1,
            root_level_sub=root_level + 2,
        )

    return data_dict
| bsd-3-clause | abb13174a37ccca560d045c15a8e70b4 | 31.6125 | 96 | 0.68187 | 3.859467 | false | false | false | false |
ellmetha/django-machina | machina/apps/forum_tracking/urls.py | 1 | 1597 | """
Forum tracking URLs
===================
This module defines URL patterns associated with the django-machina's ``forum_tracking``
application.
"""
from django.urls import path
from machina.core.loading import get_class
from machina.core.urls import URLPatternsFactory
class ForumTrackingURLPatternsFactory(URLPatternsFactory):
    """ Allows to generate the URL patterns of the ``forum_tracking`` application. """
    # NOTE: the docstring previously referenced ``forum_search`` — a
    # copy-paste leftover; this factory serves the forum_tracking app.

    app_namespace = 'forum_tracking'

    mark_forums_read_view = get_class('forum_tracking.views', 'MarkForumsReadView')
    mark_topics_read_view = get_class('forum_tracking.views', 'MarkTopicsReadView')
    unread_topics_view = get_class('forum_tracking.views', 'UnreadTopicsView')

    def get_urlpatterns(self):
        """ Returns the URL patterns managed by the considered factory / application. """
        return [
            path(
                'mark/forums/',
                self.mark_forums_read_view.as_view(),
                name='mark_all_forums_read',
            ),
            path(
                'mark/forums/<int:pk>/',
                self.mark_forums_read_view.as_view(),
                name='mark_subforums_read',
            ),
            path(
                'mark/forum/<int:pk>/topics/',
                self.mark_topics_read_view.as_view(),
                name='mark_topics_read',
            ),
            path(
                'unread-topics/',
                self.unread_topics_view.as_view(),
                name='unread_topics',
            ),
        ]


urlpatterns_factory = ForumTrackingURLPatternsFactory()
| bsd-3-clause | 93cad08ef959ef1be6384d73cc48623b | 30.313725 | 92 | 0.574202 | 4.148052 | false | false | false | false |
statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/_analytic_rotation.py | 6 | 4008 | # -*- coding: utf-8 -*-
"""
This file contains analytic implementations of rotation methods.
"""
import numpy as np
import scipy as sp
def target_rotation(A, H, full_rank=False):
    r"""
    Analytically performs orthogonal rotations towards a target matrix,
    i.e., we minimize:

    .. math::
        \phi(L) =\frac{1}{2}\|AT-H\|^2.

    where :math:`T` is an orthogonal matrix. This problem is also known as
    an orthogonal Procrustes problem.

    Under the assumption that :math:`A^*H` has full rank, the analytical
    solution :math:`T` is given by:

    .. math::
        T = (A^*HH^*A)^{-\frac{1}{2}}A^*H,

    see Green (1952). In other cases the solution is given by :math:`T = UV`,
    where :math:`U` and :math:`V` result from the singular value decomposition
    of :math:`A^*H`:

    .. math::
        A^*H = U\Sigma V,

    see Schonemann (1966).

    Parameters
    ----------
    A : numpy matrix (default None)
        non rotated factors
    H : numpy matrix
        target matrix
    full_rank : bool (default False)
        if set to true full rank is assumed

    Returns
    -------
    The matrix :math:`T`.

    References
    ----------
    [1] Green (1952, Psychometrika) - The orthogonal approximation of an
    oblique structure in factor analysis

    [2] Schonemann (1966) - A generalized solution of the orthogonal
    procrustes problem

    [3] Gower, Dijksterhuis (2004) - Procrustes problems
    """
    cross = A.T.dot(H)
    if full_rank or np.linalg.matrix_rank(cross) == A.shape[1]:
        # Full-rank case (Green, 1952): T = (A*H H*A)^(-1/2) A*H.
        return sp.linalg.fractional_matrix_power(cross.dot(cross.T), -1/2).dot(cross)
    # Rank-deficient case (Schonemann, 1966): T = U V from the SVD of A*H.
    U, _, V = np.linalg.svd(cross, full_matrices=False)
    return U.dot(V)
def procrustes(A, H):
    r"""
    Analytically solves the following Procrustes problem:

    .. math::
        \phi(L) =\frac{1}{2}\|AT-H\|^2.

    (With no further conditions on :math:`H`)

    Under the assumption that :math:`A^*A` is invertible, the analytical
    solution :math:`T` is given by:

    .. math::
        T = (A^*A)^{-1}A^*H,

    see Navarra, Simoncini (2010).

    Parameters
    ----------
    A : numpy matrix
        non rotated factors
    H : numpy matrix
        target matrix

    Returns
    -------
    The matrix :math:`T`.

    References
    ----------
    [1] Navarra, Simoncini (2010) - A guide to empirical orthogonal functions
    for climate data analysis
    """
    # Fixed: the docstring used to document a nonexistent ``full_rank``
    # parameter. Solve the normal equations (A'A) T = A'H directly instead of
    # forming the explicit inverse — cheaper and numerically more stable,
    # with the same mathematical solution.
    return np.linalg.solve(A.T.dot(A), A.T.dot(H))
def promax(A, k=2):
    r"""
    Performs promax rotation of the matrix :math:`A`.

    This method was not very clear to me from the literature, this
    implementation is as I understand it should work.

    Promax rotation is performed in the following steps:

    * Determine varimax rotated patterns :math:`V`.

    * Construct a rotation target matrix :math:`|V_{ij}|^k/V_{ij}`

    * Perform procrustes rotation towards the target to obtain T

    * Determine the patterns

    First, varimax rotation a target matrix :math:`H` is determined with
    orthogonal varimax rotation. Then, oblique target rotation is performed
    towards the target.

    Parameters
    ----------
    A : numpy matrix
        non rotated factors
    k : float
        parameter, should be positive

    References
    ----------
    [1] Browne (2001) - An overview of analytic rotation in exploratory
    factor analysis

    [2] Navarra, Simoncini (2010) - A guide to empirical orthogonal functions
    for climate data analysis
    """
    assert k > 0
    # Step 1: orthogonal varimax rotation provides the rotation target.
    from ._wrappers import rotate_factors
    V, T = rotate_factors(A, 'varimax')
    target = np.abs(V)**k/V
    # Step 2: oblique rotation towards the target (Procrustes problem).
    S = procrustes(A, target)
    # Step 3: normalize the rotation matrix.
    d = np.sqrt(np.diag(np.linalg.inv(S.T.dot(S))))
    T = np.linalg.inv(S.dot(np.diag(d))).T
    return A.dot(T), T
| bsd-3-clause | 6896d2ff22df150682db05e762d13281 | 25.196078 | 78 | 0.612525 | 3.458154 | false | false | false | false |
statsmodels/statsmodels | statsmodels/discrete/tests/results/results_predict.py | 3 | 11609 | """This file has been manually edited based on the generated results
edits
- rearrange Bunch
- use DataFrame
note: the second `_cons` in params_table rownames is lnalpha
"""
# flake8: noqa
import numpy as np
import pandas as pd
from statsmodels.tools.testing import ParamsTableTestBunch
# NB-2 (Stata ``nbreg``) estimation results for docvis: scalar metadata,
# coefficient table and post-estimation ``margins`` tables.
est = dict(
    rank = 9,
    N = 3629,
    ic = 4,
    k = 9,
    k_eq = 2,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 0,
    ll = -10404.95565541838,
    k_eq_model = 1,
    ll_0 = -10786.68925314471,
    rank0 = 2,
    df_m = 7,
    chi2 = 763.467195452653,
    p = 1.4153888670e-160,
    ll_c = -14287.94887436967,
    chi2_c = 7765.986437902575,
    r2_p = .0353893200005773,
    k_aux = 1,
    alpha = .6166738507905131,
    cmdline = "nbreg docvis private medicaid aget aget2 educyr actlim totchr",
    cmd = "nbreg",
    predict = "nbreg_p",
    dispers = "mean",
    diparm_opt2 = "noprob",
    chi2_ct = "LR",
    chi2type = "LR",
    opt = "moptimize",
    vce = "oim",
    title = "Negative binomial regression",
    diparm1 = "lnalpha, exp label(",
    user = "nbreg_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "docvis",
    properties = "b V",
)

# Coefficient table; the second `_cons` row holds lnalpha.
params_table = np.array([
    .18528179233626,  .03348067897193,  5.5339914848088,  3.130241768e-08,
    .11966086737334,  .25090271729919, np.nan,  1.9599639845401,
    0,  .08475784499449,  .04718372808048,  1.7963363312438,
    .07244104305261, -.00772056269958,  .17723625268856, np.nan,
    1.9599639845401,                0,  .22409326577213,  .04170620298531,
    5.3731399583668,  7.737722210e-08,  .14235060998901,  .30583592155526,
    np.nan,  1.9599639845401,                0, -.04817183015548,
    .00992361535076, -4.8542621265318,  1.208358166e-06, -.06762175883941,
    -.02872190147156, np.nan,  1.9599639845401,                0,
    .02692548760568,  .00419162167105,  6.4236445267994,  1.330497007e-10,
    .01871006009359,  .03514091511776, np.nan,  1.9599639845401,
    0,  .17048038202011,  .03448967943245,  4.9429390132204,
    7.695356233e-07,  .10288185249418,  .23807891154605, np.nan,
    1.9599639845401,                0,  .27516170294682,  .01205852749453,
    22.818847746673,  2.98049648e-115,  .25152742335095,  .29879598254269,
    np.nan,  1.9599639845401,                0,  .67840343342789,
    .0664120899438,  10.21505924602,  1.697754022e-24,  .54823812900001,
    .80856873785576, np.nan,  1.9599639845401,                0,
    -.48341499971517,  .03134835693943, -15.420744399751,  1.187278967e-53,
    -.54485665029097, -.42197334913938, np.nan,  1.9599639845401,
    0]).reshape(9,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'private medicaid aget aget2 educyr actlim totchr _cons _cons'.split()

# results for
# margins , predict(n) predict(pr(0)) predict(pr(1)) predict(pr(0, 1)) predict(pr(2, .)) atmeans
table = np.array([
    6.1604164491362,  .09102737953925,  67.676521946673,                0,
    5.9820060636322,  6.3388268346402, np.nan,  1.9599639845401,
    0,  .07860475517176,  .00344783069748,  22.798322211469,
    4.76427218e-115,  .07184713117991,  .08536237916362, np.nan,
    1.9599639845401,                0,  .10090462231979,  .00218578691875,
    46.16397941361,                0,  .09662055868115,  .10518868595842,
    np.nan,  1.9599639845401,                0,  .17950937749155,
    .00553924697666,  32.406819599838,  2.20005624e-230,  .16865265291582,
    .19036610206727, np.nan,  1.9599639845401,                0,
    .82049062250845,  .00553924699078,  148.12313368113,                0,
    .80963389790505,  .83134734711186, np.nan,  1.9599639845401,
    0]).reshape(5,9)

table_colnames = 'b se z pvalue ll ul df crit eform'.split()

table_rownames = '1bn._predict 2._predict 3._predict 4._predict 5._predict'.split()

dframe_atmeans = pd.DataFrame(table, index=table_rownames, columns=table_colnames)

# result for
# margins, predict(n) predict(pr(0)) predict(pr(1)) predict(pr(0, 1)) predict(pr(2, .))
table = np.array([
    6.8071952338104,  .10838829819462,  62.803783685096,                0,
    6.5947580730033,  7.0196323946174, np.nan,  1.9599639845401,
    0,  .08826646029161,  .00350687276409,  25.169564517851,
    8.63155623e-140,  .08139311597563,  .09513980460758, np.nan,
    1.9599639845401,                0,  .10719978561286,  .00205026104517,
    52.285920305334,                0,  .10318134780543,  .1112182234203,
    np.nan,  1.9599639845401,                0,  .19546624590447,
    .0054522133947,  35.850806223874,  1.78661674e-281,  .18478010401484,
    .2061523877941, np.nan,  1.9599639845401,                0,
    .80453375409553,  .00545221340471,  147.56094348787,                0,
    .79384761218628,  .81521989600478, np.nan,  1.9599639845401,
    0]).reshape(5,9)

table_colnames = 'b se z pvalue ll ul df crit eform'.split()

table_rownames = '1bn._predict 2._predict 3._predict 4._predict 5._predict'.split()

dframe_mean = pd.DataFrame(table, index=table_rownames, columns=table_colnames)

# Bundle all NB results for consumption by the prediction unit tests.
results_nb_docvis = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    results_margins_atmeans=dframe_atmeans,
    results_margins_mean=dframe_mean,
    **est,
)
# ############################# ZINBP
# Zero-inflated negative binomial (Stata ``zinb``) estimation results for
# docvis: scalar metadata, coefficient table and ``margins`` tables.
est = dict(
    rank = 11,
    N = 3629,
    ic = 8,
    k = 11,
    k_eq = 3,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 0,
    ll = -10404.95308201019,
    k_eq_model = 1,
    ll_0 = -10775.51516555833,
    chi2 = 741.1241670962918,
    p = 9.2654212845e-153,
    N_zero = 392,
    df_m = 7,
    df_c = 2,
    k_aux = 1,
    cmdline = "zinb docvis private medicaid aget aget2 educyr actlim totchr, inflate(aget)",
    cmd = "zinb",
    predict = "zip_p",
    inflate = "logit",
    chi2type = "LR",
    opt = "moptimize",
    vce = "oim",
    title = "Zero-inflated negative binomial regression",
    diparm1 = "lnalpha, exp label(",
    user = "zinb_llf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "docvis",
    properties = "b V",
)

# Coefficient table: main equation, inflation equation (aget, _cons) and
# lnalpha (the final `_cons` row).
params_table = np.array([
    .18517571292817,  .03350948180038,  5.5260691296648,  3.274851365e-08,
    .11949833545881,  .25085309039752, np.nan,  1.9599639845401,
    0,  .08473133853831,  .04717665613525,  1.7960437529823,
    .07248755882811, -.00773320839781,  .17719588547443, np.nan,
    1.9599639845401,                0,  .22335574980273,  .04293022169984,
    5.2027625518539,  1.963476761e-07,  .13921406142272,  .30749743818274,
    np.nan,  1.9599639845401,                0, -.04804896097964,
    .01006690700638, -4.7729616404713,  1.815363776e-06, -.06777973614785,
    -.02831818581142, np.nan,  1.9599639845401,                0,
    .0269244937276,  .00419096037609,  6.4244209707123,  1.323724094e-10,
    .01871036232982,  .03513862512538, np.nan,  1.9599639845401,
    0,  .17042579343453,  .03449014549225,  4.9412894901473,
    7.760757819e-07,  .10282635044816,  .23802523642089, np.nan,
    1.9599639845401,                0,  .27500074932161,  .01226558007071,
    22.420525383741,  2.48238139e-111,  .25096065413353,  .29904084450969,
    np.nan,  1.9599639845401,                0,  .67986743798706,
    .06944204778986,  9.7904289925953,  1.237696321e-22,  .54376352530622,
    .81597135066789, np.nan,  1.9599639845401,                0,
    -1.2833474076485,   3.692336844421, -.34757051204241,  .72816275506989,
    -8.520194641504,  5.9534998262071, np.nan,  1.9599639845401,
    0, -6.5587800419911,  13.305282477745, -.49294556902205,
    .62205104781253, -32.636654502503,  19.519094418521, np.nan,
    1.9599639845401,                0,  -.4845756474516,  .03531398529193,
    -13.721919048382,  7.505227546e-43, -.55378978677435, -.41536150812884,
    np.nan,  1.9599639845401,                0]).reshape(11,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'private medicaid aget aget2 educyr actlim totchr _cons aget _cons _cons'.split()

# results for
# margins , predict(n) predict(pr(0)) predict(pr(1)) predict(pr(0, 1)) predict(pr(2, .)) atmeans
table = np.array([
    6.1616899436815,  .09285785618544,  66.356151184199,                0,
    5.9796918898764,  6.3436879974865, np.nan,  1.9599639845401,
    0,  .07857785668717,  .00351221423708,  22.372740209725,
    7.25412664e-111,   .0716940432765,  .08546167009785, np.nan,
    1.9599639845401,                0,  .10079961393875,  .00263347068017,
    38.276338027191,                0,  .09563810625128,  .10596112162622,
    np.nan,  1.9599639845401,                0,  .17937747062593,
    .00586287199331,  30.595494977635,  1.40505722e-205,  .16788645267307,
    .19086848857879, np.nan,  1.9599639845401,                0,
    .82062252937407,  .00586287199668,  139.96937505016,                0,
    .80913151141461,  .83211354733353, np.nan,  1.9599639845401,
    0]).reshape(5,9)

table_colnames = 'b se z pvalue ll ul df crit eform'.split()

table_rownames = '1bn._predict 2._predict 3._predict 4._predict 5._predict'.split()

dframe_atmeans = pd.DataFrame(table, index=table_rownames, columns=table_colnames)

# result for
# margins, predict(n) predict(pr(0)) predict(pr(1)) predict(pr(0, 1)) predict(pr(2, .))
table = np.array([
    6.8063733751586,  .10879833124057,  62.559538345387,                0,
    6.593132564349,  7.0196141859682, np.nan,  1.9599639845401,
    0,  .08842743693234,  .00405939469823,    21.7834045482,
    3.33356305e-105,  .08047116952478,   .0963837043399, np.nan,
    1.9599639845401,                0,  .10706809868425,  .00273617889716,
    39.130518401155,                0,  .10170528659055,  .11243091077794,
    np.nan,  1.9599639845401,                0,  .19549553561658,
    .00545764150876,  35.820516115406,  5.29431574e-281,  .18479875481889,
    .20619231641428, np.nan,  1.9599639845401,                0,
    .80450446438342,   .0054576415014,  147.40881462742,                0,
    .79380768360013,   .8152012451667, np.nan,  1.9599639845401,
    0]).reshape(5,9)

table_colnames = 'b se z pvalue ll ul df crit eform'.split()

table_rownames = '1bn._predict 2._predict 3._predict 4._predict 5._predict'.split()

dframe_mean = pd.DataFrame(table, index=table_rownames, columns=table_colnames)

# Bundle all ZINB results for consumption by the prediction unit tests.
results_zinb_docvis = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    results_margins_atmeans=dframe_atmeans,
    results_margins_mean=dframe_mean,
    **est,
)
| bsd-3-clause | 4e43b83b287066ce2cd555c7bc3862e8 | 44.885375 | 105 | 0.596692 | 2.599418 | false | false | true | false |
statsmodels/statsmodels | statsmodels/examples/es_misc_poisson2.py | 3 | 1895 |
import numpy as np
import statsmodels.api as sm
from statsmodels.miscmodels.count import (PoissonGMLE, PoissonOffsetGMLE,
PoissonZiGMLE)
from statsmodels.discrete.discrete_model import Poisson
DEC = 3


class Dummy:
    """Empty namespace object mimicking a test case's ``self``."""
    pass


# Script-level stand-in so the body reads like a test method.
self = Dummy()

# generate artificial data
np.random.seed(98765678)
nobs = 200
rvs = np.random.randn(nobs,6)
data_exog = rvs
data_exog = sm.add_constant(data_exog, prepend=False)
xbeta = 1 + 0.1*rvs.sum(1)
data_endog = np.random.poisson(np.exp(xbeta))

#estimate discretemod.Poisson as benchmark
res_discrete = Poisson(data_endog, data_exog).fit()

mod_glm = sm.GLM(data_endog, data_exog, family=sm.families.Poisson())
res_glm = mod_glm.fit()

#estimate generic MLE
self.mod = PoissonGMLE(data_endog, data_exog)
res = self.mod.fit()

# Use the estimated constant times the constant column as a fixed offset.
offset = res.params[0] * data_exog[:,0] #1d ???

mod1 = PoissonOffsetGMLE(data_endog, data_exog[:,1:], offset=offset)
# NOTE(review): the first start_params assignment below is immediately
# overwritten and appears to be a leftover; same pattern for mod2-mod4.
start_params = np.ones(6)/2.
start_params = res.params[1:]
res1 = mod1.fit(start_params=start_params, method='nm', maxiter=1000, maxfun=1000)

print('mod2')
mod2 = PoissonZiGMLE(data_endog, data_exog[:,1:], offset=offset)
start_params = np.r_[np.ones(6)/2.,10]
start_params = np.r_[res.params[1:], 20.] #-100]
res2 = mod2.fit(start_params=start_params, method='bfgs', maxiter=1000, maxfun=2000)

print('mod3')
mod3 = PoissonZiGMLE(data_endog, data_exog, offset=None)
start_params = np.r_[np.ones(7)/2.,10]
start_params = np.r_[res.params, 20.]
res3 = mod3.fit(start_params=start_params, method='nm', maxiter=1000, maxfun=2000)

print('mod4')
# Duplicate the design and append zero outcomes to inflate the zero count.
data_endog2 = np.r_[data_endog, np.zeros(nobs)]
data_exog2 = np.r_[data_exog, data_exog]

mod4 = PoissonZiGMLE(data_endog2, data_exog2, offset=None)
start_params = np.r_[np.ones(7)/2.,10]
start_params = np.r_[res.params, 0.]
res4 = mod4.fit(start_params=start_params, method='nm', maxiter=1000, maxfun=1000)

print(res4.summary())
| bsd-3-clause | 8f404d71db6c769c1456eb324d84c72d | 29.564516 | 84 | 0.701319 | 2.654062 | false | false | false | false |
statsmodels/statsmodels | statsmodels/datasets/sunspots/data.py | 3 | 1535 | """Yearly sunspots data 1700-2008"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is public domain."""
TITLE = __doc__
SOURCE = """
http://www.ngdc.noaa.gov/stp/solar/solarda3.html
The original dataset contains monthly data on sunspot activity in the file
./src/sunspots_yearly.dat. There is also sunspots_monthly.dat.
"""
DESCRSHORT = """Yearly (1700-2008) data on sunspots from the National
Geophysical Data Center."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 309 (Annual 1700 - 2008)
Number of Variables - 1
Variable name definitions::
SUNACTIVITY - Number of sunspots for each year
The data file contains a 'YEAR' variable that is not returned by load.
"""
def load_pandas():
    """
    Load the yearly sunspot data as pandas objects.

    Returns
    -------
    Dataset
        ``endog`` is the SUNACTIVITY series indexed by YEAR.
    """
    data = _get_data()
    # TODO: time series
    endog = data.set_index(data.YEAR).SUNACTIVITY
    # Fixed: endog_name was 'volume', a copy-paste leftover from another
    # dataset; the endogenous variable here is SUNACTIVITY.
    dataset = du.Dataset(data=data, names=list(data.columns),
                         endog=endog, endog_name='SUNACTIVITY')
    return dataset
def load():
    """
    Load the yearly sunspot data and returns a data class.

    Returns
    -------
    Dataset
        See DATASET_PROPOSAL.txt for more information.

    Notes
    -----
    This dataset only contains data for one variable, so the attributes
    data, raw_data, and endog are all the same variable. There is no exog
    attribute defined.
    """
    # Pandas is the only supported backend, so simply delegate.
    return load_pandas()
def _get_data():
    # Read the bundled CSV next to this module and coerce everything to float.
    raw = du.load_csv(__file__, 'sunspots.csv')
    return raw.astype(float)
| bsd-3-clause | 7da306b3b37e490f838a60526e72dad2 | 24.583333 | 74 | 0.657329 | 3.586449 | false | false | false | false |
statsmodels/statsmodels | statsmodels/datasets/fair/data.py | 3 | 2491 | """Fair's Extramarital Affairs Data"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
# Dataset metadata strings consumed by the statsmodels datasets machinery.
COPYRIGHT = """Included with permission of the author."""
TITLE = """Affairs dataset"""
SOURCE = """
Fair, Ray. 1978. "A Theory of Extramarital Affairs," `Journal of Political
Economy`, February, 45-61.

The data is available at http://fairmodel.econ.yale.edu/rayfair/pdf/2011b.htm
"""

DESCRSHORT = """Extramarital affair data."""

DESCRLONG = """Extramarital affair data used to explain the allocation
of an individual's time among work, time spent with a spouse, and time
spent with a paramour. The data is used as an example of regression
with censored data."""

# suggested notes
# Fixed typos in the variable descriptions: "relgious" -> "religious" and
# "white-colloar" -> "white-collar".
NOTE = """::

    Number of observations: 6366
    Number of variables: 9
    Variable name definitions:

        rate_marriage   : How rate marriage, 1 = very poor, 2 = poor, 3 = fair,
                        4 = good, 5 = very good
        age             : Age
        yrs_married     : No. years married. Interval approximations. See
                        original paper for detailed explanation.
        children        : No. children
        religious       : How religious, 1 = not, 2 = mildly, 3 = fairly,
                        4 = strongly
        educ            : Level of education, 9 = grade school, 12 = high
                        school, 14 = some college, 16 = college graduate,
                        17 = some graduate school, 20 = advanced degree
        occupation      : 1 = student, 2 = farming, agriculture; semi-skilled,
                        or unskilled worker; 3 = white-collar; 4 = teacher
                        counselor social worker, nurse; artist, writers;
                        technician, skilled worker, 5 = managerial,
                        administrative, business, 6 = professional with
                        advanced degree
        occupation_husb : Husband's occupation. Same as occupation.
        affairs         : measure of time spent in extramarital affairs

    See the original paper for more details.
"""
def load():
    """
    Load the data and return a Dataset class instance.

    Returns
    -------
    Dataset
        See DATASET_PROPOSAL.txt for more information.
    """
    # Pandas is the only supported backend, so simply delegate.
    return load_pandas()
def load_pandas():
    """Load the affairs data as pandas objects; ``affairs`` (column 8) is endog."""
    return du.process_pandas(_get_data(), endog_idx=8, exog_idx=None)
def _get_data():
    # Read the bundled CSV next to this module, converting numbers to float.
    frame = du.load_csv(__file__, 'fair.csv', convert_float=True)
    return frame
| bsd-3-clause | c27e0b5961642d1dcbc761fc3608ff5e | 34.084507 | 79 | 0.600161 | 3.838213 | false | false | false | false |
statsmodels/statsmodels | statsmodels/stats/meta_analysis.py | 3 | 26580 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 14:34:25 2020
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.stats.base import HolderTuple
class CombineResults:
"""Results from combined estimate of means or effect sizes
This currently includes intermediate results that might be removed
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
self._ini_keys = list(kwds.keys())
self.df_resid = self.k - 1
# TODO: move to property ?
self.sd_eff_w_fe_hksj = np.sqrt(self.var_hksj_fe)
self.sd_eff_w_re_hksj = np.sqrt(self.var_hksj_re)
# explained variance measures
self.h2 = self.q / (self.k - 1)
self.i2 = 1 - 1 / self.h2
# memoize ci_samples
self.cache_ci = {}
    def conf_int_samples(self, alpha=0.05, use_t=None, nobs=None,
                         ci_func=None):
        """confidence intervals for the effect size estimate of samples

        Additional information needs to be provided for confidence intervals
        that are not based on normal distribution using available variance.
        This is likely to change in future.

        Parameters
        ----------
        alpha : float in (0, 1)
            Significance level for confidence interval. Nominal coverage is
            ``1 - alpha``.
        use_t : None or bool
            If use_t is None, then the attribute `use_t` determines whether
            normal or t-distribution is used for confidence intervals.
            Specifying use_t overrides the attribute.
            If use_t is false, then confidence intervals are based on the
            normal distribution. If it is true, then the t-distribution is
            used.
        nobs : None or float
            Number of observations used for degrees of freedom computation.
            Only used if use_t is true.
        ci_func : None or callable
            User provided function to compute confidence intervals.
            This is not used yet and will allow using non-standard confidence
            intervals.

        Returns
        -------
        ci_eff : tuple of ndarrays
            Tuple (ci_low, ci_upp) with confidence interval computed for each
            sample.

        Notes
        -----
        CombineResults currently only has information from the combine_effects
        function, which does not provide details about individual samples.
        """
        # this is a bit messy, we don't have enough information about
        # computing conf_int already in results for other than normal
        # TODO: maybe there is a better

        # Return the memoized result for this (alpha, use_t) combination.
        if (alpha, use_t) in self.cache_ci:
            return self.cache_ci[(alpha, use_t)]

        if use_t is None:
            use_t = self.use_t

        if ci_func is not None:
            # User-supplied interval function takes precedence.
            kwds = {"use_t": use_t} if use_t is not None else {}
            ci_eff = ci_func(alpha=alpha, **kwds)
            self.ci_sample_distr = "ci_func"
        else:
            if use_t is False:
                crit = stats.norm.isf(alpha / 2)
                self.ci_sample_distr = "normal"
            else:
                if nobs is not None:
                    # t-intervals need per-sample degrees of freedom.
                    df_resid = nobs - 1
                    crit = stats.t.isf(alpha / 2, df_resid)
                    self.ci_sample_distr = "t"
                else:
                    # Fall back to normal intervals when nobs is missing.
                    msg = ("`use_t=True` requires `nobs` for each sample "
                           "or `ci_func`. Using normal distribution for "
                           "confidence interval of individual samples.")
                    import warnings
                    warnings.warn(msg)
                    crit = stats.norm.isf(alpha / 2)
                    self.ci_sample_distr = "normal"

            # sgn = np.asarray([-1, 1])
            # ci_eff = self.eff + sgn * crit * self.sd_eff
            ci_low = self.eff - crit * self.sd_eff
            ci_upp = self.eff + crit * self.sd_eff
            ci_eff = (ci_low, ci_upp)

        # if (alpha, use_t) not in self.cache_ci:  # not needed
        self.cache_ci[(alpha, use_t)] = ci_eff
        return ci_eff
def conf_int(self, alpha=0.05, use_t=None):
"""confidence interval for the overall mean estimate
Parameters
----------
alpha : float in (0, 1)
Significance level for confidence interval. Nominal coverage is
``1 - alpha``.
use_t : None or bool
If use_t is None, then the attribute `use_t` determines whether
normal or t-distribution is used for confidence intervals.
Specifying use_t overrides the attribute.
If use_t is false, then confidence intervals are based on the
normal distribution. If it is true, then the t-distribution is
used.
Returns
-------
ci_eff_fe : tuple of floats
Confidence interval for mean effects size based on fixed effects
model with scale=1.
ci_eff_re : tuple of floats
Confidence interval for mean effects size based on random effects
model with scale=1
ci_eff_fe_wls : tuple of floats
Confidence interval for mean effects size based on fixed effects
model with estimated scale corresponding to WLS, ie. HKSJ.
ci_eff_re_wls : tuple of floats
Confidence interval for mean effects size based on random effects
model with estimated scale corresponding to WLS, ie. HKSJ.
If random effects method is fully iterated, i.e. Paule-Mandel, then
the estimated scale is 1.
"""
if use_t is None:
use_t = self.use_t
if use_t is False:
crit = stats.norm.isf(alpha / 2)
else:
crit = stats.t.isf(alpha / 2, self.df_resid)
sgn = np.asarray([-1, 1])
m_fe = self.mean_effect_fe
m_re = self.mean_effect_re
ci_eff_fe = m_fe + sgn * crit * self.sd_eff_w_fe
ci_eff_re = m_re + sgn * crit * self.sd_eff_w_re
ci_eff_fe_wls = m_fe + sgn * crit * np.sqrt(self.var_hksj_fe)
ci_eff_re_wls = m_re + sgn * crit * np.sqrt(self.var_hksj_re)
return ci_eff_fe, ci_eff_re, ci_eff_fe_wls, ci_eff_re_wls
def test_homogeneity(self):
"""Test whether the means of all samples are the same
currently no options, test uses chisquare distribution
default might change depending on `use_t`
Returns
-------
res : HolderTuple instance
The results include the following attributes:
- statistic : float
Test statistic, ``q`` in meta-analysis, this is the
pearson_chi2 statistic for the fixed effects model.
- pvalue : float
P-value based on chisquare distribution.
- df : float
Degrees of freedom, equal to number of studies or samples
minus 1.
"""
pvalue = stats.chi2.sf(self.q, self.k - 1)
res = HolderTuple(statistic=self.q,
pvalue=pvalue,
df=self.k - 1,
distr="chi2")
return res
def summary_array(self, alpha=0.05, use_t=None):
"""Create array with sample statistics and mean estimates
Parameters
----------
alpha : float in (0, 1)
Significance level for confidence interval. Nominal coverage is
``1 - alpha``.
use_t : None or bool
If use_t is None, then the attribute `use_t` determines whether
normal or t-distribution is used for confidence intervals.
Specifying use_t overrides the attribute.
If use_t is false, then confidence intervals are based on the
normal distribution. If it is true, then the t-distribution is
used.
Returns
-------
res : ndarray
Array with columns
['eff', "sd_eff", "ci_low", "ci_upp", "w_fe","w_re"].
Rows include statistics for samples and estimates of overall mean.
column_names : list of str
The names for the columns, used when creating summary DataFrame.
"""
ci_low, ci_upp = self.conf_int_samples(alpha=alpha, use_t=use_t)
res = np.column_stack([self.eff, self.sd_eff,
ci_low, ci_upp,
self.weights_rel_fe, self.weights_rel_re])
ci = self.conf_int(alpha=alpha, use_t=use_t)
res_fe = [[self.mean_effect_fe, self.sd_eff_w_fe,
ci[0][0], ci[0][1], 1, np.nan]]
res_re = [[self.mean_effect_re, self.sd_eff_w_re,
ci[1][0], ci[1][1], np.nan, 1]]
res_fe_wls = [[self.mean_effect_fe, self.sd_eff_w_fe_hksj,
ci[2][0], ci[2][1], 1, np.nan]]
res_re_wls = [[self.mean_effect_re, self.sd_eff_w_re_hksj,
ci[3][0], ci[3][1], np.nan, 1]]
res = np.concatenate([res, res_fe, res_re, res_fe_wls, res_re_wls],
axis=0)
column_names = ['eff', "sd_eff", "ci_low", "ci_upp", "w_fe", "w_re"]
return res, column_names
def summary_frame(self, alpha=0.05, use_t=None):
"""Create DataFrame with sample statistics and mean estimates
Parameters
----------
alpha : float in (0, 1)
Significance level for confidence interval. Nominal coverage is
``1 - alpha``.
use_t : None or bool
If use_t is None, then the attribute `use_t` determines whether
normal or t-distribution is used for confidence intervals.
Specifying use_t overrides the attribute.
If use_t is false, then confidence intervals are based on the
normal distribution. If it is true, then the t-distribution is
used.
Returns
-------
res : DataFrame
pandas DataFrame instance with columns
['eff', "sd_eff", "ci_low", "ci_upp", "w_fe","w_re"].
Rows include statistics for samples and estimates of overall mean.
"""
if use_t is None:
use_t = self.use_t
labels = (list(self.row_names) +
["fixed effect", "random effect",
"fixed effect wls", "random effect wls"])
res, col_names = self.summary_array(alpha=alpha, use_t=use_t)
results = pd.DataFrame(res, index=labels, columns=col_names)
return results
def plot_forest(self, alpha=0.05, use_t=None, use_exp=False,
ax=None, **kwds):
"""Forest plot with means and confidence intervals
Parameters
----------
ax : None or matplotlib axis instance
If ax is provided, then the plot will be added to it.
alpha : float in (0, 1)
Significance level for confidence interval. Nominal coverage is
``1 - alpha``.
use_t : None or bool
If use_t is None, then the attribute `use_t` determines whether
normal or t-distribution is used for confidence intervals.
Specifying use_t overrides the attribute.
If use_t is false, then confidence intervals are based on the
normal distribution. If it is true, then the t-distribution is
used.
use_exp : bool
If `use_exp` is True, then the effect size and confidence limits
will be exponentiated. This transform log-odds-ration into
odds-ratio, and similarly for risk-ratio.
ax : AxesSubplot, optional
If given, this axes is used to plot in instead of a new figure
being created.
kwds : optional keyword arguments
Keywords are forwarded to the dot_plot function that creates the
plot.
Returns
-------
fig : Matplotlib figure instance
See Also
--------
dot_plot
"""
from statsmodels.graphics.dotplots import dot_plot
res_df = self.summary_frame(alpha=alpha, use_t=use_t)
if use_exp:
res_df = np.exp(res_df[["eff", "ci_low", "ci_upp"]])
hw = np.abs(res_df[["ci_low", "ci_upp"]] - res_df[["eff"]].values)
fig = dot_plot(points=res_df["eff"], intervals=hw,
lines=res_df.index, line_order=res_df.index, **kwds)
return fig
def effectsize_smd(mean1, sd1, nobs1, mean2, sd2, nobs2):
    """Effect sizes for mean difference for use in meta-analysis.

    ``mean1, sd1, nobs1`` are for treatment; ``mean2, sd2, nobs2`` are for
    control. The effect size is the mean difference ``mean1 - mean2``
    standardized by an estimate of the within variance, with small-sample
    bias correction (Hedges). There are no options yet.

    This does not call np.asarray; all computations are possible in pandas.

    Parameters
    ----------
    mean1 : array
        mean of first sample, treatment groups
    sd1 : array
        standard deviation of residuals in treatment groups, within
    nobs1 : array
        number of observations in treatment groups
    mean2, sd2, nobs2 : arrays
        mean, standard deviation and number of observations of control groups

    Returns
    -------
    smd_bc : array
        bias corrected estimate of standardized mean difference
    var_smdbc : array
        estimate of variance of smd_bc

    Notes
    -----
    Status: API will still change. This is currently intended for support of
    meta-analysis.

    References
    ----------
    Borenstein, Michael. 2009. Introduction to Meta-Analysis.
    Chichester: Wiley.
    Chen, Ding-Geng, and Karl E. Peace. 2013. Applied Meta-Analysis with R.
    Chapman & Hall/CRC Biostatistics Series.
    Boca Raton: CRC Press/Taylor & Francis Group.
    """
    # pooled within-group variance under the equal-variance assumption
    pooled_var = ((nobs1 - 1) * sd1**2 +
                  (nobs2 - 1) * sd2**2) / (nobs1 + nobs2 - 2)
    pooled_sd = np.sqrt(pooled_var)
    nobs = nobs1 + nobs2
    # Hedges' small-sample bias correction factor
    correction = 1 - 3 / (4 * nobs - 9)
    smd_bc = correction * ((mean1 - mean2) / pooled_sd)
    # approximate variance of the bias-corrected SMD
    var_smdbc = nobs / nobs1 / nobs2 + smd_bc**2 / 2 / (nobs - 3.94)
    return smd_bc, var_smdbc
def effectsize_2proportions(count1, nobs1, count2, nobs2, statistic="diff",
                            zero_correction=None, zero_kwds=None):
    """Effects sizes for two sample binomial proportions.

    Parameters
    ----------
    count1, nobs1, count2, nobs2 : array_like
        data for two samples
    statistic : {"diff", "odds-ratio", "risk-ratio", "arcsine"}
        statistic for the comparison of two proportions.
        Effect sizes for "odds-ratio" and "risk-ratio" are in logarithm.
    zero_correction : {None, float, "tac", "clip"}
        Some statistics are not finite when zero counts are in the data.
        The options to remove zeros are:

        * float : if zero_correction is a single float, then it will be
          added to all count (cells) if the sample has any zeros.
        * "tac" : treatment arm continuity correction see Ruecker et al
          2009, section 3.2
        * "clip" : clip proportions without adding a value to all cells.
          The clip bounds can be set with zero_kwds["clip_bounds"].
    zero_kwds : dict
        additional options to handle zero counts.
        "clip_bounds" tuple, default (1e-6, 1 - 1e-6) if
        zero_correction="clip"; other options not yet implemented.

    Returns
    -------
    effect size : array
        Effect size for each sample.
    var_es : array
        Estimate of variance of the effect size.

    Notes
    -----
    Status: API is experimental, options for zero handling are incomplete.
    The names for the ``statistic`` keyword can be shortened to "rd", "rr",
    "or" and "as". The statistics are defined as:

    - risk difference = p1 - p2
    - log risk ratio = log(p1 / p2)
    - log odds_ratio = log(p1 / (1 - p1) * (1 - p2) / p2)
    - arcsine-sqrt = arcsin(sqrt(p1)) - arcsin(sqrt(p2))

    where p1 and p2 are the estimated proportions in sample 1 (treatment)
    and sample 2 (control). log-odds-ratio and log-risk-ratio can be
    transformed back to ``or`` and ``rr`` using the `exp` function.

    See Also
    --------
    statsmodels.stats.contingency_tables
    """
    # BUG FIX: zero_kwds defaults to None, but the "clip" branch called
    # zero_kwds.get(...) which raised AttributeError; normalize to a dict.
    zero_kwds = {} if zero_kwds is None else zero_kwds
    if zero_correction is None:
        cc1 = cc2 = 0
    elif zero_correction == "tac":
        # treatment arm continuity correction Ruecker et al 2009, section 3.2
        nobs_t = nobs1 + nobs2
        cc1 = nobs2 / nobs_t
        cc2 = nobs1 / nobs_t
    elif zero_correction == "clip":
        clip_bounds = zero_kwds.get("clip_bounds", (1e-6, 1 - 1e-6))
        cc1 = cc2 = 0
    elif zero_correction:
        # TODO: check is float_like
        cc1 = cc2 = zero_correction
    else:
        msg = "zero_correction not recognized or supported"
        raise NotImplementedError(msg)
    # apply additive correction only to samples that contain a 0 or full count
    zero_mask1 = (count1 == 0) | (count1 == nobs1)
    zero_mask2 = (count2 == 0) | (count2 == nobs2)
    zmask = np.logical_or(zero_mask1, zero_mask2)
    n1 = nobs1 + (cc1 + cc2) * zmask
    n2 = nobs2 + (cc1 + cc2) * zmask
    p1 = (count1 + cc1) / (n1)
    p2 = (count2 + cc2) / (n2)
    if zero_correction == "clip":
        p1 = np.clip(p1, *clip_bounds)
        p2 = np.clip(p2, *clip_bounds)
    if statistic in ["diff", "rd"]:
        rd = p1 - p2
        rd_var = p1 * (1 - p1) / n1 + p2 * (1 - p2) / n2
        eff = rd
        var_eff = rd_var
    elif statistic in ["risk-ratio", "rr"]:
        # rr = p1 / p2
        log_rr = np.log(p1) - np.log(p2)
        log_rr_var = (1 - p1) / p1 / n1 + (1 - p2) / p2 / n2
        eff = log_rr
        var_eff = log_rr_var
    elif statistic in ["odds-ratio", "or"]:
        # or_ = p1 / (1 - p1) * (1 - p2) / p2
        log_or = np.log(p1) - np.log(1 - p1) - np.log(p2) + np.log(1 - p2)
        log_or_var = 1 / (p1 * (1 - p1) * n1) + 1 / (p2 * (1 - p2) * n2)
        eff = log_or
        var_eff = log_or_var
    elif statistic in ["arcsine", "arcsin", "as"]:
        as_ = np.arcsin(np.sqrt(p1)) - np.arcsin(np.sqrt(p2))
        as_var = (1 / n1 + 1 / n2) / 4
        eff = as_
        var_eff = as_var
    else:
        msg = 'statistic not recognized, use one of "rd", "rr", "or", "as"'
        raise NotImplementedError(msg)
    return eff, var_eff
def combine_effects(effect, variance, method_re="iterated", row_names=None,
                    use_t=False, alpha=0.05, **kwds):
    """combining effect sizes for effect sizes using meta-analysis

    This currently does not use np.asarray, all computations are possible in
    pandas.

    Parameters
    ----------
    effect : array
        mean of effect size measure for all samples
    variance : array
        variance of mean or effect size measure for all samples
    method_re : {"iterated", "chi2"}
        method that is use to compute the between random effects variance
        "iterated" or "pm" uses Paule and Mandel method to iteratively
        estimate the random effects variance. Options for the iteration can
        be provided in the ``kwds``
        "chi2" or "dl" uses DerSimonian and Laird one-step estimator.
    row_names : list of strings (optional)
        names for samples or studies, will be included in results summary and
        table.
    use_t : bool
        Stored on the results instance; decides whether confidence
        intervals use the normal or the t-distribution.
    alpha : float in (0, 1)
        significance level, default is 0.05, for the confidence intervals
    **kwds
        Forwarded to `_fit_tau_iterative` when ``method_re`` is
        "iterated"/"pm" (e.g. ``atol``, ``maxiter``).

    Returns
    -------
    results : CombineResults
        Contains estimation results and intermediate statistics, and includes
        a method to return a summary table.
        Statistics from intermediate calculations might be removed at a later
        time.

    Notes
    -----
    Status: Basic functionality is verified, mainly compared to R metafor
    package. However, API might still change.

    This computes both fixed effects and random effects estimates. The
    random effects results depend on the method to estimate the RE variance.

    Scale estimate
    In fixed effects models and in random effects models without fully
    iterated random effects variance, the model will in general not account
    for all residual variance. Traditional meta-analysis uses a fixed
    scale equal to 1, that might not produce test statistics and
    confidence intervals with the correct size. Estimating the scale to account
    for residual variance often improves the small sample properties of
    inference and confidence intervals.
    This adjustment to the standard errors is often referred to as HKSJ
    method based attributed to Hartung and Knapp and Sidik and Jonkman.
    However, this is equivalent to estimating the scale in WLS.
    The results instance includes both, fixed scale and estimated scale
    versions of standard errors and confidence intervals.

    References
    ----------
    Borenstein, Michael. 2009. Introduction to Meta-Analysis.
    Chichester: Wiley.
    Chen, Ding-Geng, and Karl E. Peace. 2013. Applied Meta-Analysis with R.
    Chapman & Hall/CRC Biostatistics Series.
    Boca Raton: CRC Press/Taylor & Francis Group.
    """
    # NOTE: this function ends with ``CombineResults(**locals())``, so every
    # local variable name below becomes an attribute of the results object.
    # Do not rename locals without checking CombineResults' expectations.
    k = len(effect)
    if row_names is None:
        row_names = list(range(k))
    # critical value is passed through to the results via locals()
    crit = stats.norm.isf(alpha / 2)
    # alias for initial version
    eff = effect
    var_eff = variance
    sd_eff = np.sqrt(var_eff)
    # fixed effects computation: inverse-variance weights
    weights_fe = 1 / var_eff  # no bias correction ?
    w_total_fe = weights_fe.sum(0)
    weights_rel_fe = weights_fe / w_total_fe
    eff_w_fe = weights_rel_fe * eff
    mean_effect_fe = eff_w_fe.sum()
    var_eff_w_fe = 1 / w_total_fe
    sd_eff_w_fe = np.sqrt(var_eff_w_fe)
    # random effects computation: Cochran's Q heterogeneity statistic
    q = (weights_fe * eff**2).sum(0)
    q -= (weights_fe * eff).sum()**2 / w_total_fe
    df = k - 1
    if method_re.lower() in ["iterated", "pm"]:
        # Paule-Mandel iterative estimate of tau^2
        tau2, _ = _fit_tau_iterative(eff, var_eff, **kwds)
    elif method_re.lower() in ["chi2", "dl"]:
        # DerSimonian-Laird one-step moment estimate of tau^2
        c = w_total_fe - (weights_fe**2).sum() / w_total_fe
        tau2 = (q - df) / c
    else:
        raise ValueError('method_re should be "iterated" or "chi2"')
    # random-effects weights add the between-study variance tau^2
    weights_re = 1 / (var_eff + tau2)  # no bias_correction ?
    w_total_re = weights_re.sum(0)
    weights_rel_re = weights_re / weights_re.sum(0)
    eff_w_re = weights_rel_re * eff
    mean_effect_re = eff_w_re.sum()
    var_eff_w_re = 1 / w_total_re
    sd_eff_w_re = np.sqrt(var_eff_w_re)
    # ci_low_eff_re = mean_effect_re - crit * sd_eff_w_re
    # ci_upp_eff_re = mean_effect_re + crit * sd_eff_w_re
    # HKSJ scale estimates (WLS residual scale) for both weightings
    scale_hksj_re = (weights_re * (eff - mean_effect_re)**2).sum() / df
    scale_hksj_fe = (weights_fe * (eff - mean_effect_fe)**2).sum() / df
    var_hksj_re = (weights_rel_re * (eff - mean_effect_re)**2).sum() / df
    var_hksj_fe = (weights_rel_fe * (eff - mean_effect_fe)**2).sum() / df
    # all locals (data, weights, estimates, scales) become result attributes
    res = CombineResults(**locals())
    return res
def _fit_tau_iterative(eff, var_eff, tau2_start=0, atol=1e-5, maxiter=50):
"""Paule-Mandel iterative estimate of between random effect variance
implementation follows DerSimonian and Kacker 2007 Appendix 8
see also Kacker 2004
Parameters
----------
eff : ndarray
effect sizes
var_eff : ndarray
variance of effect sizes
tau2_start : float
starting value for iteration
atol : float, default: 1e-5
convergence tolerance for absolute value of estimating equation
maxiter : int
maximum number of iterations
Returns
-------
tau2 : float
estimate of random effects variance tau squared
converged : bool
True if iteration has converged.
"""
tau2 = tau2_start
k = eff.shape[0]
converged = False
for i in range(maxiter):
w = 1 / (var_eff + tau2)
m = w.dot(eff) / w.sum(0)
resid_sq = (eff - m)**2
q_w = w.dot(resid_sq)
# estimating equation
ee = q_w - (k - 1)
if ee < 0:
tau2 = 0
converged = 0
break
if np.allclose(ee, 0, atol=atol):
converged = True
break
# update tau2
delta = ee / (w**2).dot(resid_sq)
tau2 += delta
return tau2, converged
def _fit_tau_mm(eff, var_eff, weights):
"""one-step method of moment estimate of between random effect variance
implementation follows Kacker 2004 and DerSimonian and Kacker 2007 eq. 6
Parameters
----------
eff : ndarray
effect sizes
var_eff : ndarray
variance of effect sizes
weights : ndarray
weights for estimating overall weighted mean
Returns
-------
tau2 : float
estimate of random effects variance tau squared
"""
w = weights
m = w.dot(eff) / w.sum(0)
resid_sq = (eff - m)**2
q_w = w.dot(resid_sq)
w_t = w.sum()
expect = w.dot(var_eff) - (w**2).dot(var_eff) / w_t
denom = w_t - (w**2).sum() / w_t
# moment estimate from estimating equation
tau2 = (q_w - expect) / denom
return tau2
def _fit_tau_iter_mm(eff, var_eff, tau2_start=0, atol=1e-5, maxiter=50):
    """Iterated method of moments estimate of between random effect variance.

    Repeatedly estimates tau2, updating the weights in each iteration;
    see two-step estimators in DerSimonian and Kacker 2007.

    Parameters
    ----------
    eff : ndarray
        effect sizes
    var_eff : ndarray
        variance of effect sizes
    tau2_start : float
        starting value for iteration
    atol : float, default: 1e-5
        convergence tolerance for change in tau2 estimate between iterations
    maxiter : int
        maximum number of iterations

    Returns
    -------
    tau2 : float
        estimate of random effects variance tau squared
    converged : bool
        True if iteration has converged.
    """
    tau2_current = tau2_start
    converged = False
    for _ in range(maxiter):
        # recompute weights with the current between-study variance
        w = 1 / (var_eff + tau2_current)
        # one-step MM estimate, truncated at zero
        candidate = max(0, _fit_tau_mm(eff, var_eff, w))
        if np.allclose(candidate - tau2_current, 0, atol=atol):
            converged = True
            break
        tau2_current = candidate
    return tau2_current, converged
| bsd-3-clause | fd2f24b5b25d9a24fd28a2bd525c3c23 | 34.15873 | 79 | 0.583371 | 3.712291 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/tests/results/arima112_results.py | 6 | 27176 | import numpy as np
from statsmodels.tools.tools import Bunch
llf = np.array([-245.40783909604])
nobs = np.array([202])
k = np.array([5])
k_exog = np.array([1])
sigma = np.array([.8100467417583])
chi2 = np.array([2153.20304012])
df_model = np.array([3])
k_ar = np.array([1])
k_ma = np.array([2])
params = np.array([
.92817025087557,
-.89593490671979,
1.3025011610587,
.30250063082791,
.8100467417583])
cov_params = np.array([
.00638581549851,
.0001858475428,
2.8222806545671,
.8538806860364,
-1.1429127085819,
.0001858475428,
.00132037832566,
-.14420925344502,
-.04447007102804,
.0576156187095,
2.8222806545671,
-.14420925344502,
40397.568324803,
12222.977216556,
-16359.547340433,
.8538806860364,
-.04447007102804,
12222.977216556,
3698.2722243412,
-4949.8609964351,
-1.1429127085819,
.0576156187095,
-16359.547340433,
-4949.8609964351,
6625.0231409853]).reshape(5, 5)
xb = np.array([
.92817026376724,
.92817026376724,
.69511789083481,
.77192437648773,
.66135895252228,
.77525061368942,
.64687132835388,
.79659670591354,
.65842008590698,
.71215486526489,
.69971066713333,
.72092038393021,
.68201982975006,
.76510280370712,
.64253836870193,
.78239262104034,
.64609551429749,
.74087703227997,
.71774411201477,
.7119727730751,
.73067259788513,
.67785596847534,
.70898467302322,
.71334755420685,
.72984194755554,
.7017787694931,
.75292426347733,
.67507487535477,
.78219056129456,
.78040039539337,
.71250075101852,
.82028061151505,
.63505899906158,
.79452306032181,
.72773635387421,
.79555094242096,
.76685506105423,
.77427339553833,
.82101213932037,
.77917188405991,
.78917801380157,
.86641925573349,
.78457218408585,
.83697980642319,
.83281791210175,
.85224026441574,
.75030690431595,
.8551008105278,
.78025943040848,
.72790426015854,
.84552866220474,
.72061747312546,
.78669738769531,
.73868823051453,
.78071022033691,
.78002023696899,
.83737623691559,
.98988044261932,
.72882527112961,
1.2245427370071,
.85331875085831,
1.1637357473373,
.86477434635162,
1.3248475790024,
.81245219707489,
.98008638620377,
.85591268539429,
1.0162551403046,
.8165408372879,
.78947591781616,
.94166398048401,
.93266606330872,
.85924750566483,
1.1245046854019,
.75576168298721,
1.0030617713928,
.91267073154449,
1.0848042964935,
1.0778224468231,
1.1551086902618,
.97817331552505,
1.4012540578842,
1.2360861301422,
1.3335381746292,
1.4352362155914,
1.4941285848618,
.9415163397789,
1.437669634819,
1.2404690980911,
1.2285294532776,
1.3219480514526,
1.1560415029526,
.83524394035339,
.87116771936417,
1.5561962127686,
.47358739376068,
.78093349933624,
.90549737215042,
1.0217791795731,
.86397403478622,
1.1526786088943,
.87662625312805,
.95803648233414,
.89513635635376,
.85281348228455,
1.0852742195129,
.76808404922485,
.96872144937515,
1.0732915401459,
.02145584858954,
1.3687089681625,
.50049883127213,
1.3895837068558,
.6889950633049,
1.2795144319534,
.7050421833992,
1.2218985557556,
.74481928348541,
1.3074514865875,
.7919961810112,
1.2807723283768,
1.0120536088943,
1.1938916444778,
.68923074007034,
1.6174983978271,
.64740318059921,
1.4949930906296,
1.2678960561752,
1.0586776733398,
.55762887001038,
1.2790743112564,
.66515874862671,
1.2538269758224,
.70554333925247,
1.2391568422318,
.75241559743881,
1.2129040956497,
.69235223531723,
1.0785228013992,
.8043577671051,
1.0037930011749,
.78750842809677,
1.1880930662155,
.74399447441101,
1.1791603565216,
.85870295763016,
1.0032330751419,
.8019300699234,
1.1696527004242,
.92376220226288,
.99186056852341,
.94733852148056,
1.0748032331467,
.64247089624405,
.95419937372208,
.92043441534042,
.8104555606842,
.66252142190933,
1.1178470849991,
.69223344326019,
1.0570795536041,
.90239083766937,
.95320242643356,
1.0541093349457,
1.0082466602325,
1.1376332044601,
1.1841852664948,
.90440809726715,
1.2733660936356,
.66835701465607,
1.1515763998032,
.44600257277489,
.93500959873199,
1.0847823619843,
.83353632688522,
1.0442448854446,
1.077241897583,
.71010553836823,
.89557945728302,
1.0163468122482,
1.094814658165,
.89641278982162,
1.2808450460434,
1.0223702192307,
.96094745397568,
1.309353351593,
.73499941825867,
2.4902238845825,
-.2579345703125,
1.9272556304932,
.53125941753387,
.7708500623703,
1.0312130451202,
1.6360099315643,
.6022145152092,
1.6338716745377,
1.3494771718979,
1.1322995424271,
2.1901025772095,
-.72639065980911,
-.37026473879814,
1.2391144037247,
1.1353877782822])
y = np.array([
np.nan,
29.908170700073,
29.84511756897,
30.121925354004,
30.031360626221,
30.315252304077,
30.196870803833,
30.5465965271,
30.498420715332,
30.52215385437,
30.619710922241,
30.70092010498,
30.722021102905,
30.975101470947,
30.862537384033,
31.162391662598,
31.086095809937,
31.220876693726,
31.407745361328,
31.461973190308,
31.670673370361,
31.627857208252,
31.728984832764,
31.833349227905,
32.009841918945,
32.08177947998,
32.33292388916,
32.325073242188,
32.662189483643,
33.060398101807,
33.162502288818,
33.670280456543,
33.535060882568,
33.894519805908,
34.127738952637,
34.495552062988,
34.866851806641,
35.17427444458,
35.721012115479,
36.079170227051,
36.489177703857,
37.16641998291,
37.584571838379,
38.136978149414,
38.732818603516,
39.352241516113,
39.65030670166,
40.255104064941,
40.68025970459,
40.827903747559,
41.445526123047,
41.620620727539,
41.986698150635,
42.238689422607,
42.580707550049,
42.98002243042,
43.537376403809,
44.689880371094,
44.928825378418,
46.824542999268,
47.653316497803,
49.263732910156,
50.164772033691,
52.324848175049,
53.112449645996,
53.980087280273,
54.855911254883,
55.916255950928,
56.616539001465,
56.889472961426,
57.941665649414,
58.832668304443,
59.55924987793,
61.124504089355,
61.555759429932,
62.603061676025,
63.612670898438,
64.984802246094,
66.577819824219,
68.255104064941,
69.478172302246,
72.001251220703,
74.236083984375,
76.533538818359,
79.435234069824,
82.39412689209,
83.541511535645,
86.137664794922,
88.440467834473,
90.32852935791,
92.82194519043,
94.556045532227,
95.235244750977,
95.871170043945,
99.056198120117,
98.573585510254,
98.680938720703,
99.705497741699,
100.82178497314,
101.66397857666,
103.25267791748,
104.17662811279,
105.0580368042,
105.99513244629,
106.55281066895,
108.08527374268,
108.46807861328,
109.46871948242,
110.97328948975,
108.72145080566,
110.86870574951,
110.70049285889,
112.78958892822,
113.38899230957,
115.0795211792,
115.70503997803,
117.22190093994,
117.94481658936,
119.80744934082,
120.69200134277,
122.48076629639,
124.11205291748,
125.69389343262,
126.08923339844,
129.11749267578,
129.54739379883,
131.99499511719,
134.66789245605,
135.75866699219,
135.6576385498,
137.47906494141,
137.86515808105,
139.55383300781,
140.10552978516,
141.73915100098,
142.45240783691,
144.01290893555,
144.49235534668,
145.57852172852,
146.40435791016,
147.30380249023,
147.98750305176,
149.58808898926,
150.14398193359,
151.67915344238,
152.65870666504,
153.6032409668,
154.30192565918,
155.86964416504,
157.02377319336,
157.99186706543,
159.14733886719,
160.47479248047,
160.54246520996,
161.35418701172,
162.42044067383,
162.81045532227,
162.86251831055,
164.31784057617,
164.59222412109,
165.75708007813,
166.80238342285,
167.65319824219,
169.15411376953,
170.30824279785,
172.03762817383,
173.88418579102,
174.80439758301,
176.87336730957,
177.06834411621,
178.55157470703,
178.04600524902,
178.63500976563,
180.38478088379,
180.83354187012,
182.24424743652,
183.67724609375,
183.91009521484,
184.59558105469,
185.91633605957,
187.39482116699,
188.29640197754,
190.38084411621,
191.82237243652,
192.76095581055,
195.10935974121,
195.43499755859,
201.69021606445,
199.14205932617,
202.62725830078,
203.23126220703,
202.67083740234,
204.60522460938,
207.55601501465,
207.94021606445,
210.76686096191,
213.84446716309,
215.12928771973,
220.80010986328,
216.16261291504,
211.80372619629,
213.91012573242,
215.60438537598])
resid = np.array([
np.nan,
-.7581701874733,
-.49511715769768,
-.75192391872406,
-.49135887622833,
-.76525229215622,
-.44687059521675,
-.70659655332565,
-.68842077255249,
-.60215425491333,
-.63971120119095,
-.66091901063919,
-.51202166080475,
-.75510257482529,
-.48253855109215,
-.72239124774933,
-.60609650611877,
-.53087604045868,
-.65774464607239,
-.52197223901749,
-.7206723690033,
-.60785627365112,
-.6089842915535,
-.55334770679474,
-.62984347343445,
-.50177800655365,
-.68292456865311,
-.44507533311844,
-.38219094276428,
-.61039841175079,
-.31250306963921,
-.77027755975723,
-.4350620508194,
-.494520008564,
-.42773708701134,
-.39555323123932,
-.46685197949409,
-.27427339553833,
-.42101442813873,
-.37917038798332,
-.18917952477932,
-.36641922593117,
-.28457221388817,
-.23697751760483,
-.23281940817833,
-.45223876833916,
-.25030693411827,
-.35510078072548,
-.58026248216629,
-.22790426015854,
-.54552561044693,
-.42061823606491,
-.48669815063477,
-.43868899345398,
-.38070866465569,
-.28002023696899,
.16262374818325,
-.48988044261932,
.67117244005203,
-.02454199641943,
.44668045639992,
.0362650193274,
.83522641658783,
-.02484837733209,
-.11245145648718,
.01991361007094,
.0440888479352,
-.1162573993206,
-.51654160022736,
.11052562296391,
-.04166246205568,
-.13266679644585,
.4407517015934,
-.32450538873672,
.04423752427101,
.0969405695796,
.28733000159264,
.51519411802292,
.52217602729797,
.24489280581474,
1.1218250989914,
.99874752759933,
.96391087770462,
1.4664648771286,
1.4647653102875,
.20586840808392,
1.1584821939468,
1.062330365181,
.65953236818314,
1.1714720726013,
.57805341482162,
-.15604154765606,
-.23524549603462,
1.6288322210312,
-.95619779825211,
-.67358434200287,
.1190680116415,
.09450265020132,
-.02177914790809,
.43602138757706,
.04732597246766,
-.07663082331419,
.0419635027647,
-.29513788223267,
.44718953967094,
-.38527730107307,
.0319189876318,
.43128004670143,
-2.2732961177826,
.77854722738266,
-.66871201992035,
.69950574636459,
-.08958829939365,
.41101104021072,
-.07951752096415,
.2949578166008,
-.02190163731575,
.5551837682724,
.0925500690937,
.50799924135208,
.61922925710678,
.38794788718224,
-.29389011859894,
1.4107677936554,
-.21750450134277,
.95260292291641,
1.4050008058548,
.03210696578026,
-.65866851806641,
.54236197471619,
-.27907428145409,
.43484738469124,
-.15383619070053,
.39446276426315,
-.03915995359421,
.34759050607681,
-.21290412545204,
.00764474179596,
.02148328535259,
-.10436081886292,
-.10379911959171,
.41248852014542,
-.18809306621552,
.35601159930229,
.12084264308214,
-.05869990959764,
-.10323911905289,
.39806687831879,
.2303563952446,
-.02376830019057,
.20813637971878,
.25265842676163,
-.57480323314667,
-.14247089624405,
.14580672979355,
-.4204343855381,
-.61045861244202,
.33747857809067,
-.41785016655922,
.10776958614588,
.14291742444038,
-.1023878082633,
.44680669903755,
.14588765799999,
.59174418449402,
.66236984729767,
.01581169478595,
.7956041097641,
-.47337827086449,
.33164295554161,
-.95156413316727,
-.34601172804832,
.66499650478363,
-.38478538393974,
.36646059155464,
.35576421022415,
-.47725108265877,
-.21010553836823,
.30441749095917,
.38366231322289,
.00517613813281,
.80359941720963,
.41915187239647,
-.02237024717033,
1.039052605629,
-.409359395504,
3.7650005817413,
-2.2902269363403,
1.5579376220703,
.072744384408,
-1.3312624692917,
.90316116809845,
1.3147799968719,
-.21801064908504,
1.1927837133408,
1.7281278371811,
.15252174437046,
3.4807071685791,
-3.9110956192017,
-3.9886209964752,
.86727404594421,
.55887448787689,
.78061258792877])
yr = np.array([
np.nan,
-.7581701874733,
-.49511715769768,
-.75192391872406,
-.49135887622833,
-.76525229215622,
-.44687059521675,
-.70659655332565,
-.68842077255249,
-.60215425491333,
-.63971120119095,
-.66091901063919,
-.51202166080475,
-.75510257482529,
-.48253855109215,
-.72239124774933,
-.60609650611877,
-.53087604045868,
-.65774464607239,
-.52197223901749,
-.7206723690033,
-.60785627365112,
-.6089842915535,
-.55334770679474,
-.62984347343445,
-.50177800655365,
-.68292456865311,
-.44507533311844,
-.38219094276428,
-.61039841175079,
-.31250306963921,
-.77027755975723,
-.4350620508194,
-.494520008564,
-.42773708701134,
-.39555323123932,
-.46685197949409,
-.27427339553833,
-.42101442813873,
-.37917038798332,
-.18917952477932,
-.36641922593117,
-.28457221388817,
-.23697751760483,
-.23281940817833,
-.45223876833916,
-.25030693411827,
-.35510078072548,
-.58026248216629,
-.22790426015854,
-.54552561044693,
-.42061823606491,
-.48669815063477,
-.43868899345398,
-.38070866465569,
-.28002023696899,
.16262374818325,
-.48988044261932,
.67117244005203,
-.02454199641943,
.44668045639992,
.0362650193274,
.83522641658783,
-.02484837733209,
-.11245145648718,
.01991361007094,
.0440888479352,
-.1162573993206,
-.51654160022736,
.11052562296391,
-.04166246205568,
-.13266679644585,
.4407517015934,
-.32450538873672,
.04423752427101,
.0969405695796,
.28733000159264,
.51519411802292,
.52217602729797,
.24489280581474,
1.1218250989914,
.99874752759933,
.96391087770462,
1.4664648771286,
1.4647653102875,
.20586840808392,
1.1584821939468,
1.062330365181,
.65953236818314,
1.1714720726013,
.57805341482162,
-.15604154765606,
-.23524549603462,
1.6288322210312,
-.95619779825211,
-.67358434200287,
.1190680116415,
.09450265020132,
-.02177914790809,
.43602138757706,
.04732597246766,
-.07663082331419,
.0419635027647,
-.29513788223267,
.44718953967094,
-.38527730107307,
.0319189876318,
.43128004670143,
-2.2732961177826,
.77854722738266,
-.66871201992035,
.69950574636459,
-.08958829939365,
.41101104021072,
-.07951752096415,
.2949578166008,
-.02190163731575,
.5551837682724,
.0925500690937,
.50799924135208,
.61922925710678,
.38794788718224,
-.29389011859894,
1.4107677936554,
-.21750450134277,
.95260292291641,
1.4050008058548,
.03210696578026,
-.65866851806641,
.54236197471619,
-.27907428145409,
.43484738469124,
-.15383619070053,
.39446276426315,
-.03915995359421,
.34759050607681,
-.21290412545204,
.00764474179596,
.02148328535259,
-.10436081886292,
-.10379911959171,
.41248852014542,
-.18809306621552,
.35601159930229,
.12084264308214,
-.05869990959764,
-.10323911905289,
.39806687831879,
.2303563952446,
-.02376830019057,
.20813637971878,
.25265842676163,
-.57480323314667,
-.14247089624405,
.14580672979355,
-.4204343855381,
-.61045861244202,
.33747857809067,
-.41785016655922,
.10776958614588,
.14291742444038,
-.1023878082633,
.44680669903755,
.14588765799999,
.59174418449402,
.66236984729767,
.01581169478595,
.7956041097641,
-.47337827086449,
.33164295554161,
-.95156413316727,
-.34601172804832,
.66499650478363,
-.38478538393974,
.36646059155464,
.35576421022415,
-.47725108265877,
-.21010553836823,
.30441749095917,
.38366231322289,
.00517613813281,
.80359941720963,
.41915187239647,
-.02237024717033,
1.039052605629,
-.409359395504,
3.7650005817413,
-2.2902269363403,
1.5579376220703,
.072744384408,
-1.3312624692917,
.90316116809845,
1.3147799968719,
-.21801064908504,
1.1927837133408,
1.7281278371811,
.15252174437046,
3.4807071685791,
-3.9110956192017,
-3.9886209964752,
.86727404594421,
.55887448787689,
.78061258792877])
mse = np.array([
.77732294797897,
.77732294797897,
.70387578010559,
.69261533021927,
.68906670808792,
.68708789348602,
.68558460474014,
.68429106473923,
.6831266283989,
.68206071853638,
.68107759952545,
.68016695976257,
.67932069301605,
.67853212356567,
.67779558897018,
.67710596323013,
.67645901441574,
.6758508682251,
.67527812719345,
.67473775148392,
.67422717809677,
.67374390363693,
.67328584194183,
.6728510260582,
.67243778705597,
.67204451560974,
.67166984081268,
.67131245136261,
.67097115516663,
.67064493894577,
.67033278942108,
.67003381252289,
.66974723339081,
.66947221755981,
.66920816898346,
.66895437240601,
.66871029138565,
.66847538948059,
.66824907064438,
.66803097724915,
.66782057285309,
.66761755943298,
.66742146015167,
.66723203659058,
.66704881191254,
.66687160730362,
.66670006513596,
.66653394699097,
.66637301445007,
.66621696949005,
.66606563329697,
.66591882705688,
.66577625274658,
.66563785076141,
.66550332307816,
.66537261009216,
.6652455329895,
.66512185335159,
.66500157117844,
.66488444805145,
.66477036476135,
.66465926170349,
.66455101966858,
.66444545984268,
.6643425822258,
.66424214839935,
.66414421796799,
.66404861211777,
.66395533084869,
.66386413574219,
.66377514600754,
.66368812322617,
.66360312700272,
.66351997852325,
.66343873739243,
.6633592247963,
.66328144073486,
.66320532560349,
.66313081979752,
.66305786371231,
.66298645734787,
.6629164814949,
.66284799575806,
.66278082132339,
.66271501779556,
.66265046596527,
.66258722543716,
.6625252366066,
.66246438026428,
.66240465641022,
.66234612464905,
.66228866577148,
.66223222017288,
.66217684745789,
.66212248802185,
.66206908226013,
.66201663017273,
.661965072155,
.66191446781158,
.6618646979332,
.66181582212448,
.66176778078079,
.66172051429749,
.66167408227921,
.66162836551666,
.66158348321915,
.66153925657272,
.66149580478668,
.66145300865173,
.66141092777252,
.6613695025444,
.66132873296738,
.6612885594368,
.66124904155731,
.66121011972427,
.66117179393768,
.66113406419754,
.6610968708992,
.66106027364731,
.66102415323257,
.66098862886429,
.66095358133316,
.66091907024384,
.66088503599167,
.66085147857666,
.66081839799881,
.66078579425812,
.66075360774994,
.66072189807892,
.66069066524506,
.66065979003906,
.66062939167023,
.66059935092926,
.66056972742081,
.66054052114487,
.6605116724968,
.66048324108124,
.6604551076889,
.66042739152908,
.66040003299713,
.66037303209305,
.66034632921219,
.66032004356384,
.66029399633408,
.66026836633682,
.66024297475815,
.66021794080734,
.66019320487976,
.6601687669754,
.66014462709427,
.66012072563171,
.66009718179703,
.66007387638092,
.66005086898804,
.66002810001373,
.66000562906265,
.65998339653015,
.65996146202087,
.65993976593018,
.65991830825806,
.65989708900452,
.65987610816956,
.65985536575317,
.65983480215073,
.6598145365715,
.65979450941086,
.65977466106415,
.65975499153137,
.65973562002182,
.6597164273262,
.65969741344452,
.65967857837677,
.6596599817276,
.65964162349701,
.65962338447571,
.65960538387299,
.6595875620842,
.65956991910934,
.65955245494843,
.65953516960144,
.65951806306839,
.65950113534927,
.65948438644409,
.6594677567482,
.65945136547089,
.65943509340286,
.65941900014877,
.65940302610397,
.65938723087311,
.65937161445618,
.65935611724854,
.65934079885483,
.65932559967041,
.65931057929993,
.65929567813873,
.65928089618683,
.65926629304886,
.65925180912018,
.65923744440079,
.65922319889069,
.65920913219452,
.65919518470764,
.65918135643005])
stdp = np.array([
.92817026376724,
.92817026376724,
.69511789083481,
.77192437648773,
.66135895252228,
.77525061368942,
.64687132835388,
.79659670591354,
.65842008590698,
.71215486526489,
.69971066713333,
.72092038393021,
.68201982975006,
.76510280370712,
.64253836870193,
.78239262104034,
.64609551429749,
.74087703227997,
.71774411201477,
.7119727730751,
.73067259788513,
.67785596847534,
.70898467302322,
.71334755420685,
.72984194755554,
.7017787694931,
.75292426347733,
.67507487535477,
.78219056129456,
.78040039539337,
.71250075101852,
.82028061151505,
.63505899906158,
.79452306032181,
.72773635387421,
.79555094242096,
.76685506105423,
.77427339553833,
.82101213932037,
.77917188405991,
.78917801380157,
.86641925573349,
.78457218408585,
.83697980642319,
.83281791210175,
.85224026441574,
.75030690431595,
.8551008105278,
.78025943040848,
.72790426015854,
.84552866220474,
.72061747312546,
.78669738769531,
.73868823051453,
.78071022033691,
.78002023696899,
.83737623691559,
.98988044261932,
.72882527112961,
1.2245427370071,
.85331875085831,
1.1637357473373,
.86477434635162,
1.3248475790024,
.81245219707489,
.98008638620377,
.85591268539429,
1.0162551403046,
.8165408372879,
.78947591781616,
.94166398048401,
.93266606330872,
.85924750566483,
1.1245046854019,
.75576168298721,
1.0030617713928,
.91267073154449,
1.0848042964935,
1.0778224468231,
1.1551086902618,
.97817331552505,
1.4012540578842,
1.2360861301422,
1.3335381746292,
1.4352362155914,
1.4941285848618,
.9415163397789,
1.437669634819,
1.2404690980911,
1.2285294532776,
1.3219480514526,
1.1560415029526,
.83524394035339,
.87116771936417,
1.5561962127686,
.47358739376068,
.78093349933624,
.90549737215042,
1.0217791795731,
.86397403478622,
1.1526786088943,
.87662625312805,
.95803648233414,
.89513635635376,
.85281348228455,
1.0852742195129,
.76808404922485,
.96872144937515,
1.0732915401459,
.02145584858954,
1.3687089681625,
.50049883127213,
1.3895837068558,
.6889950633049,
1.2795144319534,
.7050421833992,
1.2218985557556,
.74481928348541,
1.3074514865875,
.7919961810112,
1.2807723283768,
1.0120536088943,
1.1938916444778,
.68923074007034,
1.6174983978271,
.64740318059921,
1.4949930906296,
1.2678960561752,
1.0586776733398,
.55762887001038,
1.2790743112564,
.66515874862671,
1.2538269758224,
.70554333925247,
1.2391568422318,
.75241559743881,
1.2129040956497,
.69235223531723,
1.0785228013992,
.8043577671051,
1.0037930011749,
.78750842809677,
1.1880930662155,
.74399447441101,
1.1791603565216,
.85870295763016,
1.0032330751419,
.8019300699234,
1.1696527004242,
.92376220226288,
.99186056852341,
.94733852148056,
1.0748032331467,
.64247089624405,
.95419937372208,
.92043441534042,
.8104555606842,
.66252142190933,
1.1178470849991,
.69223344326019,
1.0570795536041,
.90239083766937,
.95320242643356,
1.0541093349457,
1.0082466602325,
1.1376332044601,
1.1841852664948,
.90440809726715,
1.2733660936356,
.66835701465607,
1.1515763998032,
.44600257277489,
.93500959873199,
1.0847823619843,
.83353632688522,
1.0442448854446,
1.077241897583,
.71010553836823,
.89557945728302,
1.0163468122482,
1.094814658165,
.89641278982162,
1.2808450460434,
1.0223702192307,
.96094745397568,
1.309353351593,
.73499941825867,
2.4902238845825,
-.2579345703125,
1.9272556304932,
.53125941753387,
.7708500623703,
1.0312130451202,
1.6360099315643,
.6022145152092,
1.6338716745377,
1.3494771718979,
1.1322995424271,
2.1901025772095,
-.72639065980911,
-.37026473879814,
1.2391144037247,
1.1353877782822])
# Information-criteria statistics. Ordering appears to be
# [nobs, ?, llf, k_params, aic, bic]: 500.8157 == 2*5 - 2*(-245.4078) (AIC)
# and 517.3570 == 5*log(202) - 2*(-245.4078) (BIC) -- TODO confirm against
# the consuming test.
icstats = np.array([
    202,
    np.nan,
    -245.40783909604,
    5,
    500.81567819208,
    517.35701667909])
# Collect the precomputed reference arrays defined above into a single Bunch
# so consumers can access them by attribute name (presumably an ARMA/ARIMA
# regression test fixture -- TODO confirm the producing package).
results = Bunch(
    llf=llf,
    nobs=nobs,
    k=k,
    k_exog=k_exog,
    sigma=sigma,
    chi2=chi2,
    df_model=df_model,
    k_ar=k_ar,
    k_ma=k_ma,
    params=params,
    cov_params=cov_params,
    xb=xb,
    y=y,
    resid=resid,
    yr=yr,
    mse=mse,
    stdp=stdp,
    icstats=icstats
)
| bsd-3-clause | beae8f5a71009724cd882704eeebc3fb | 19.66616 | 41 | 0.655873 | 2.338525 | false | false | true | false |
statsmodels/statsmodels | statsmodels/discrete/discrete_margins.py | 3 | 26628 | #Splitting out maringal effects to see if they can be generalized
from statsmodels.compat.python import lzip
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
Returns a boolean array of non-constant column indices in exog and
an scalar array of where the constant is or None
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array_like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
    """Return (indices of dummy columns, flag whether any were found)."""
    dummy_ind = _isdummy(X)
    if dummy_ind.size == 0:
        # nothing to do downstream; None is what the std.-err. code expects
        return None, False
    return dummy_ind, True
def _iscount(X):
    """
    Given an array X, returns the column indices for count variables.

    A count column is non-negative, integer-valued and not constant;
    dummy columns are excluded because they are handled separately.

    Parameters
    ----------
    X : array_like
        A 1d or 2d array of numbers

    Examples
    --------
    >>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
    >>> X[:,1:3] = np.random.randn(15,2)
    >>> ind = _iscount(X)
    >>> ind
    array([0, 3, 4])
    """
    X = np.asarray(X)
    is_integral = np.all(X % 1. == 0, axis=0)
    not_const = X.var(0) != 0
    is_nonneg = np.all(X >= 0, axis=0)
    candidates = np.where(is_integral & not_const & is_nonneg)[0].tolist()
    # drop the dummy columns from the candidate list
    for idx in _isdummy(X):
        candidates.remove(idx)
    return np.array(candidates)
def _get_count_index(X, const_idx):
    """Return (indices of count columns, flag whether any were found)."""
    count_ind = _iscount(X)
    if count_ind.size == 0:
        # nothing to do downstream; None is what the std.-err. code expects
        return None, False
    return count_ind, True
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
try:
assert k_vars == exog.shape[1]
except:
raise ValueError("atexog does not have the same number "
"of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata does not handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
method, J):
r"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d = 1 - F(XB) | d = 0
The row of the Jacobian for this variable is given by
f(XB)*X | d = 1 - f(XB)*X | d = 0
Where F is the default prediction of the model.
"""
for i in dummy_ind:
exog0 = exog.copy()
exog1 = exog.copy()
exog0[:,i] = 0
exog1[:,i] = 1
dfdb0 = model._derivative_predict(params, exog0, method)
dfdb1 = model._derivative_predict(params, exog1, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0)
if J > 1:
K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
r"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
K = dfdb.shape[1] / (J-1)
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
                       dummy_ind, count_ind, method, J):
    """
    Computes the variance-covariance of marginal effects by the delta method.

    Parameters
    ----------
    model : model instance
        The model that returned the fitted results. Its pdf method is used
        for computing the Jacobian of discrete variables in dummy_ind and
        count_ind
    params : array_like
        estimated model parameters
    exog : array_like
        exogenous variables at which to calculate the derivative
    cov_params : array_like
        The variance-covariance of the parameters
    at : str
        Options are:

        - 'overall', The average of the marginal effects at each
          observation.
        - 'mean', The marginal effects at the mean of each regressor.
        - 'median', The marginal effects at the median of each regressor.
        - 'zero', The marginal effects at zero for each regressor.
        - 'all', The marginal effects at each observation.

        Only 'overall' has any effect here.
    derivative : function or array_like
        If a function, it returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog. Expected to be
        called derivative(params, exog). This will be numerically
        differentiated. Otherwise, it can be the Jacobian of the marginal
        effects with respect to the parameters.
    dummy_ind : array_like
        Indices of the columns of exog that contain dummy variables
    count_ind : array_like
        Indices of the columns of exog that contain count variables

    Notes
    -----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.

    The outer Jacobians are computed via numerical differentiation if
    derivative is a function.
    """
    if not callable(derivative):
        # caller supplied the Jacobian directly
        jac = derivative
    else:
        from statsmodels.tools.numdiff import approx_fprime_cs
        params = params.ravel('F')  # for Multinomial
        try:
            jac = approx_fprime_cs(params, derivative, args=(exog, method))
        except TypeError:  # norm.cdf does not take complex values
            from statsmodels.tools.numdiff import approx_fprime
            jac = approx_fprime(params, derivative, args=(exog, method))

        if at == 'overall':
            jac = np.mean(jac, axis=1)
        else:
            jac = jac.squeeze()  # exog was 2d row vector
        # discrete regressors need a finite-difference Jacobian instead
        if dummy_ind is not None:
            jac = _margeff_cov_params_dummy(model, jac, params, exog,
                                            dummy_ind, method, J)
        if count_ind is not None:
            jac = _margeff_cov_params_count(model, jac, params, exog,
                                            count_ind, method, J)

    # NOTE: this will not go through for at == 'all'
    return jac @ cov_params @ jac.T
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
                        dummy_ind, count_ind, method, J):
    """
    See margeff_cov_params.

    Same function but returns both the covariance of the marginal effects
    and their standard errors.
    """
    cov_me = margeff_cov_params(model, params, exog, cov_params, at,
                                derivative, dummy_ind, count_ind, method, J)
    se = np.sqrt(np.diag(cov_me))
    return cov_me, se
def margeff():
    # Placeholder for a generic marginal-effects entry point; intentionally
    # unimplemented (see the Margins sketch class below).
    raise NotImplementedError
def _check_at_is_all(method):
if method['at'] == 'all':
raise ValueError("Only margeff are available when `at` is "
"'all'. Please input specific points if you would "
"like to do inference.")
# Maps the margeff `method` codes to the human-readable labels used in the
# summary tables and DataFrame column headers.
_transform_names = dict(dydx='dy/dx',
                        eyex='d(lny)/d(lnx)',
                        dyex='dy/d(lnx)',
                        eydx='d(lny)/dx')
class Margins:
    """
    Mostly a do nothing class. Lays out the methods expected of a sub-class.

    This is just a sketch of what we may want out of a general margins class.
    I (SS) need to look at details of other models.
    """
    def __init__(self, results, get_margeff, derivative, dist=None,
                 margeff_args=()):
        # NOTE(review): the `get_margeff` and `derivative` parameters are
        # never stored or used here; presumably a sub-class is expected to
        # consume them -- confirm before relying on this base class.
        self._cache = {}
        self.results = results
        self.dist = dist
        self.get_margeff(margeff_args)
    def _reset(self):
        # drop cached lazily-computed values
        self._cache = {}
    def get_margeff(self, *args, **kwargs):
        # NOTE(review): `self.get_margeff(*args)` calls this method itself,
        # so it recurses forever unless overridden. It looks like it was
        # meant to call the `get_margeff` callable passed to __init__;
        # confirm before instantiating Margins directly.
        self._reset()
        self.margeff = self.get_margeff(*args)
    @cache_readonly
    def tvalues(self):
        # abstract: z-statistics of the marginal effects
        raise NotImplementedError
    @cache_readonly
    def cov_margins(self):
        # abstract: variance-covariance of the marginal effects
        raise NotImplementedError
    @cache_readonly
    def margins_se(self):
        # abstract: standard errors of the marginal effects
        raise NotImplementedError
    def summary_frame(self):
        # abstract: DataFrame summary of the marginal effects
        raise NotImplementedError
    @cache_readonly
    def pvalues(self):
        # abstract: two-sided p-values of the marginal effects
        raise NotImplementedError
    def conf_int(self, alpha=.05):
        # abstract: confidence intervals for the marginal effects
        raise NotImplementedError
    def summary(self, alpha=.05):
        # abstract: text summary table of the marginal effects
        raise NotImplementedError
#class DiscreteMargins(Margins):
class DiscreteMargins:
    """Get marginal effects of a Discrete Choice model.

    Parameters
    ----------
    results : DiscreteResults instance
        The results instance of a fitted discrete choice model
    args : tuple
        Args are passed to `get_margeff`. This is the same as
        results.get_margeff. See there for more information.
    kwargs : dict
        Keyword args are passed to `get_margeff`. This is the same as
        results.get_margeff. See there for more information.
    """
    def __init__(self, results, args, kwargs={}):
        # NOTE(review): `kwargs={}` is a mutable default argument; harmless
        # here because it is only unpacked, never mutated, but `None` would
        # be the safer convention.
        self._cache = {}
        self.results = results
        self.get_margeff(*args, **kwargs)
    def _reset(self):
        # drop any values cached by @cache_readonly (tvalues, pvalues, ...)
        self._cache = {}
    @cache_readonly
    def tvalues(self):
        # z-statistics of the marginal effects; undefined for at='all'
        _check_at_is_all(self.margeff_options)
        return self.margeff / self.margeff_se
    def summary_frame(self, alpha=.05):
        """
        Returns a DataFrame summarizing the marginal effects.

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        frame : DataFrames
            A DataFrame summarizing the marginal effects.

        Notes
        -----
        The dataframe is created on each call and not cached, as are the
        tables build in `summary()`
        """
        _check_at_is_all(self.margeff_options)
        results = self.results
        model = self.results.model
        from pandas import DataFrame, MultiIndex
        # NOTE(review): 'Cont. Int. Hi.' looks like a typo for
        # 'Conf. Int. Hi.', but it is a public column label, so leave as-is.
        names = [_transform_names[self.margeff_options['method']],
                 'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Cont. Int. Hi.']
        ind = self.results.model.exog.var(0) != 0 # True if not a constant
        exog_names = self.results.model.exog_names
        k_extra = getattr(model, 'k_extra', 0)
        if k_extra > 0:
            # drop trailing auxiliary parameters (e.g. dispersion) from names
            exog_names = exog_names[:-k_extra]
        var_names = [name for i,name in enumerate(exog_names) if ind[i]]
        if self.margeff.ndim == 2:
            # MNLogit case
            ci = self.conf_int(alpha)
            table = np.column_stack([i.ravel("F") for i in
                        [self.margeff, self.margeff_se, self.tvalues,
                         self.pvalues, ci[:, 0, :], ci[:, 1, :]]])
            _, yname_list = results._get_endog_name(model.endog_names,
                                                    None, all=True)
            ynames = np.repeat(yname_list, len(var_names))
            xnames = np.tile(var_names, len(yname_list))
            index = MultiIndex.from_tuples(list(zip(ynames, xnames)),
                                           names=['endog', 'exog'])
        else:
            table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
                                     self.pvalues, self.conf_int(alpha)))
            index=var_names
        return DataFrame(table, columns=names, index=index)
    @cache_readonly
    def pvalues(self):
        # two-sided normal p-values for the marginal effects
        _check_at_is_all(self.margeff_options)
        return norm.sf(np.abs(self.tvalues)) * 2
    def conf_int(self, alpha=.05):
        """
        Returns the confidence intervals of the marginal effects

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        conf_int : ndarray
            An array with lower, upper confidence intervals for the marginal
            effects.
        """
        _check_at_is_all(self.margeff_options)
        me_se = self.margeff_se
        q = norm.ppf(1 - alpha / 2)
        lower = self.margeff - q * me_se
        upper = self.margeff + q * me_se
        return np.asarray(lzip(lower, upper))
    def summary(self, alpha=.05):
        """
        Returns a summary table for marginal effects

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        Summary : SummaryTable
            A SummaryTable instance
        """
        _check_at_is_all(self.margeff_options)
        results = self.results
        model = results.model
        title = model.__class__.__name__ + " Marginal Effects"
        method = self.margeff_options['method']
        top_left = [('Dep. Variable:', [model.endog_names]),
                ('Method:', [method]),
                ('At:', [self.margeff_options['at']]),]
        from statsmodels.iolib.summary import (Summary, summary_params,
                                                table_extend)
        exog_names = model.exog_names[:] # copy
        smry = Summary()
        # TODO: sigh, we really need to hold on to this in _data...
        _, const_idx = _get_const_index(model.exog)
        if const_idx is not None:
            exog_names.pop(const_idx[0])
        if getattr(model, 'k_extra', 0) > 0:
            exog_names = exog_names[:-model.k_extra]
        J = int(getattr(model, "J", 1))
        if J > 1:
            yname, yname_list = results._get_endog_name(model.endog_names,
                                                None, all=True)
        else:
            yname = model.endog_names
            yname_list = [yname]
        smry.add_table_2cols(self, gleft=top_left, gright=[],
                yname=yname, xname=exog_names, title=title)
        # NOTE: add_table_params is not general enough yet for margeff
        # could use a refactor with getattr instead of hard-coded params
        # tvalues etc.
        table = []
        conf_int = self.conf_int(alpha)
        margeff = self.margeff
        margeff_se = self.margeff_se
        tvalues = self.tvalues
        pvalues = self.pvalues
        if J > 1:
            # one parameter table per equation, then stacked together
            for eq in range(J):
                restup = (results, margeff[:,eq], margeff_se[:,eq],
                          tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
                tble = summary_params(restup, yname=yname_list[eq],
                              xname=exog_names, alpha=alpha, use_t=False,
                              skip_header=True)
                tble.title = yname_list[eq]
                # overwrite coef with method name
                header = ['', _transform_names[method], 'std err', 'z',
                          'P>|z|', '[' + str(alpha/2), str(1-alpha/2) + ']']
                tble.insert_header_row(0, header)
                table.append(tble)
            table = table_extend(table, keep_headers=True)
        else:
            restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
            table = summary_params(restup, yname=yname, xname=exog_names,
                    alpha=alpha, use_t=False, skip_header=True)
            header = ['', _transform_names[method], 'std err', 'z',
                      'P>|z|', '[' + str(alpha/2), str(1-alpha/2) + ']']
            table.insert_header_row(0, header)
        smry.tables.append(table)
        return smry
    def get_margeff(self, at='overall', method='dydx', atexog=None,
                          dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Parameters
        ----------
        at : str, optional
            Options are:

            - 'overall', The average of the marginal effects at each
              observation.
            - 'mean', The marginal effects at the mean of each regressor.
            - 'median', The marginal effects at the median of each regressor.
            - 'zero', The marginal effects at zero for each regressor.
            - 'all', The marginal effects at each observation. If `at` is all
              only margeff will be available.

            Note that if `exog` is specified, then marginal effects for all
            variables not specified by `exog` are calculated using the `at`
            option.
        method : str, optional
            Options are:

            - 'dydx' - dy/dx - No transformation is made and marginal effects
              are returned. This is the default.
            - 'eyex' - estimate elasticities of variables in `exog` --
              d(lny)/d(lnx)
            - 'dyex' - estimate semi-elasticity -- dy/d(lnx)
            - 'eydx' - estimate semi-elasticity -- d(lny)/dx

            Note that tranformations are done after each observation is
            calculated. Semi-elasticities for binary variables are computed
            using the midpoint method. 'dyex' and 'eyex' do not make sense
            for discrete variables.
        atexog : array_like, optional
            Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the key
            as the zero-indexed column number and the value of the dictionary.
            Default is None for all independent variables less the constant.
        dummy : bool, optional
            If False, treats binary variables (if present) as continuous. This
            is the default. Else if True, treats binary variables as
            changing from 0 to 1. Note that any variable that is either 0 or 1
            is treated as binary. Each binary variable is treated separately
            for now.
        count : bool, optional
            If False, treats count variables (if present) as continuous. This
            is the default. Else if True, the marginal effect is the
            change in probabilities when each observation is increased by one.

        Returns
        -------
        effects : ndarray
            the marginal effect corresponding to the input options

        Notes
        -----
        When using after Poisson, returns the expected number of events
        per period, assuming that the model is loglinear.
        """
        self._reset() # always reset the cache when this is called
        #TODO: if at is not all or overall, we can also put atexog values
        # in summary table head
        method = method.lower()
        at = at.lower()
        _check_margeff_args(at, method)
        self.margeff_options = dict(method=method, at=at)
        results = self.results
        model = results.model
        params = results.params
        exog = model.exog.copy() # copy because values are changed
        effects_idx, const_idx = _get_const_index(exog)
        if dummy:
            _check_discrete_args(at, method)
            dummy_idx, dummy = _get_dummy_index(exog, const_idx)
        else:
            dummy_idx = None
        if count:
            _check_discrete_args(at, method)
            count_idx, count = _get_count_index(exog, const_idx)
        else:
            count_idx = None
        # attach dummy_idx and cout_idx
        self.dummy_idx = dummy_idx
        self.count_idx = count_idx
        # get the exogenous variables
        exog = _get_margeff_exog(exog, at, atexog, effects_idx)
        # get base marginal effects, handled by sub-classes
        effects = model._derivative_exog(params, exog, method,
                                                    dummy_idx, count_idx)
        J = getattr(model, 'J', 1)
        effects_idx = np.tile(effects_idx, J) # adjust for multi-equation.
        effects = _effects_at(effects, at)
        if at == 'all':
            if J > 1:
                K = model.K - np.any(~effects_idx) # subtract constant
                self.margeff = effects[:, effects_idx].reshape(-1, K, J,
                                                                order='F')
            else:
                self.margeff = effects[:, effects_idx]
        else:
            # Set standard error of the marginal effects by Delta method.
            margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
                                                results.cov_params(), at,
                                                model._derivative_exog,
                                                dummy_idx, count_idx,
                                                method, J)
            # reshape for multi-equation
            if J > 1:
                K = model.K - np.any(~effects_idx) # subtract constant
                self.margeff = effects[effects_idx].reshape(K, J, order='F')
                self.margeff_se = margeff_se[effects_idx].reshape(K, J,
                                                                  order='F')
                self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
            else:
                # do not care about at constant
                # hack truncate effects_idx again if necessary
                # if eyex, then effects is truncated to be without extra params
                effects_idx = effects_idx[:len(effects)]
                self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
                self.margeff_se = margeff_se[effects_idx]
                self.margeff = effects[effects_idx]
| bsd-3-clause | 7176ecfc0d7db041cd978277e79bd348 | 35.377049 | 83 | 0.560162 | 3.887299 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/arima/estimators/innovations.py | 3 | 9666 | """
Innovations algorithm for MA(q) and SARIMA(p,d,q)x(P,D,Q,s) model parameters.
Author: Chad Fulton
License: BSD-3
"""
import warnings
import numpy as np
from scipy.optimize import minimize
from statsmodels.tools.tools import Bunch
from statsmodels.tsa.innovations import arma_innovations
from statsmodels.tsa.stattools import acovf, innovations_algo
from statsmodels.tsa.statespace.tools import diff
from statsmodels.tsa.arima.specification import SARIMAXSpecification
from statsmodels.tsa.arima.params import SARIMAXParams
from statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen
def innovations(endog, ma_order=0, demean=True):
    """
    Estimate MA parameters using innovations algorithm.

    Parameters
    ----------
    endog : array_like or SARIMAXSpecification
        Input time series array, assumed to be stationary.
    ma_order : int, optional
        Maximum moving average order. Default is 0.
    demean : bool, optional
        Whether to estimate and remove the mean from the process prior to
        fitting the moving average coefficients. Default is True.

    Returns
    -------
    parameters : list of SARIMAXParams objects
        List elements correspond to estimates at different `ma_order`. For
        example, parameters[0] is an `SARIMAXParams` instance corresponding to
        `ma_order=0`.
    other_results : Bunch
        Includes one component, `spec`, containing the `SARIMAXSpecification`
        instance corresponding to the input arguments.

    Notes
    -----
    The primary reference is [1]_, section 5.1.3.

    This procedure assumes that the series is stationary.

    References
    ----------
    .. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
       Introduction to Time Series and Forecasting. Springer.
    """
    max_spec = SARIMAXSpecification(endog, ma_order=ma_order)
    spec = max_spec
    series = max_spec.endog
    if demean:
        series = series - series.mean()
    if not max_spec.is_ma_consecutive:
        raise ValueError('Innovations estimation unavailable for models with'
                         ' seasonal or otherwise non-consecutive MA orders.')

    # Innovations recursions on the sample autocovariances give the MA
    # coefficients and innovation variances for every order up to ma_order.
    sample_acovf = acovf(series, fft=True)
    theta, sigma2 = innovations_algo(sample_acovf, nobs=max_spec.ma_order + 1)
    ma_coeffs = [theta[q, :q] for q in range(1, max_spec.ma_order + 1)]

    # One SARIMAXParams per candidate order; order 0 has only the variance.
    out = []
    for q in range(max_spec.ma_order + 1):
        spec = SARIMAXSpecification(ma_order=q)
        p = SARIMAXParams(spec=spec)
        if q == 0:
            p.params = sigma2[q]
        else:
            p.params = np.r_[ma_coeffs[q - 1], sigma2[q]]
        out.append(p)

    # Construct other results (spec is the last, i.e. largest-order, spec)
    other_results = Bunch({'spec': spec})
    return out, other_results
def innovations_mle(endog, order=(0, 0, 0), seasonal_order=(0, 0, 0, 0),
                    demean=True, enforce_invertibility=True,
                    start_params=None, minimize_kwargs=None):
    """
    Estimate SARIMA parameters by MLE using innovations algorithm.

    Parameters
    ----------
    endog : array_like
        Input time series array.
    order : tuple, optional
        The (p,d,q) order of the model for the number of AR parameters,
        differences, and MA parameters. Default is (0, 0, 0).
    seasonal_order : tuple, optional
        The (P,D,Q,s) order of the seasonal component of the model for the
        AR parameters, differences, MA parameters, and periodicity. Default
        is (0, 0, 0, 0).
    demean : bool, optional
        Whether to estimate and remove the mean from the process prior to
        fitting the SARIMA coefficients. Default is True.
    enforce_invertibility : bool, optional
        Whether or not to transform the MA parameters to enforce invertibility
        in the moving average component of the model. Default is True.
    start_params : array_like, optional
        Initial guess of the solution for the loglikelihood maximization. The
        AR polynomial must be stationary. If `enforce_invertibility=True` the
        MA poylnomial must be invertible. If not provided, default starting
        parameters are computed using the Hannan-Rissanen method.
    minimize_kwargs : dict, optional
        Arguments to pass to scipy.optimize.minimize.

    Returns
    -------
    parameters : SARIMAXParams object
    other_results : Bunch
        Includes four components: `spec`, containing the `SARIMAXSpecification`
        instance corresponding to the input arguments; `minimize_kwargs`,
        containing any keyword arguments passed to `minimize`; `start_params`,
        containing the untransformed starting parameters passed to `minimize`;
        and `minimize_results`, containing the output from `minimize`.

    Notes
    -----
    The primary reference is [1]_, section 5.2.

    Note: we do not include `enforce_stationarity` as an argument, because this
    function requires stationarity.

    TODO: support concentrating out the scale (should be easy: use sigma2=1
          and then compute sigma2=np.sum(u**2 / v) / len(u); would then need to
          redo llf computation in the Cython function).

    TODO: add support for fixed parameters

    TODO: add support for secondary optimization that does not enforce
          stationarity / invertibility, starting from first step's parameters

    References
    ----------
    .. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
       Introduction to Time Series and Forecasting. Springer.
    """
    # Stationarity is always enforced: the innovations algorithm requires it.
    spec = SARIMAXSpecification(
        endog, order=order, seasonal_order=seasonal_order,
        enforce_stationarity=True, enforce_invertibility=enforce_invertibility)
    endog = spec.endog
    if spec.is_integrated:
        # Differencing is applied here rather than rejected, but the caller
        # is warned so they know parameters refer to the differenced series.
        warnings.warn('Provided `endog` series has been differenced to'
                      ' eliminate integration prior to ARMA parameter'
                      ' estimation.')
        endog = diff(endog, k_diff=spec.diff,
                     k_seasonal_diff=spec.seasonal_diff,
                     seasonal_periods=spec.seasonal_periods)
    if demean:
        endog = endog - endog.mean()
    # `p` collects the final (constrained) parameter estimates.
    p = SARIMAXParams(spec=spec)
    if start_params is None:
        sp = SARIMAXParams(spec=spec)
        # Estimate starting parameters via Hannan-Rissanen
        hr, hr_results = hannan_rissanen(endog, ar_order=spec.ar_order,
                                         ma_order=spec.ma_order, demean=False)
        if spec.seasonal_periods == 0:
            # If no seasonal component, then `hr` gives starting parameters
            sp.params = hr.params
        else:
            # If we do have a seasonal component, estimate starting parameters
            # for the seasonal lags using the residuals from the previous step
            _ = SARIMAXSpecification(
                endog, seasonal_order=seasonal_order,
                enforce_stationarity=True,
                enforce_invertibility=enforce_invertibility)
            # Seasonal lags expressed at the non-seasonal lag scale so a
            # second Hannan-Rissanen pass can be run on the residuals.
            ar_order = np.array(spec.seasonal_ar_lags) * spec.seasonal_periods
            ma_order = np.array(spec.seasonal_ma_lags) * spec.seasonal_periods
            seasonal_hr, seasonal_hr_results = hannan_rissanen(
                hr_results.resid, ar_order=ar_order, ma_order=ma_order,
                demean=False)
            # Set the starting parameters
            sp.ar_params = hr.ar_params
            sp.ma_params = hr.ma_params
            sp.seasonal_ar_params = seasonal_hr.ar_params
            sp.seasonal_ma_params = seasonal_hr.ma_params
            sp.sigma2 = seasonal_hr.sigma2
        # Then, require starting parameters to be stationary and invertible
        # (falling back to zeros rather than failing, since these are only
        # starting values).
        if not sp.is_stationary:
            sp.ar_params = [0] * sp.k_ar_params
            sp.seasonal_ar_params = [0] * sp.k_seasonal_ar_params
        if not sp.is_invertible and spec.enforce_invertibility:
            sp.ma_params = [0] * sp.k_ma_params
            sp.seasonal_ma_params = [0] * sp.k_seasonal_ma_params
        start_params = sp.params
    else:
        # User-supplied starting values must already satisfy the constraints.
        sp = SARIMAXParams(spec=spec)
        sp.params = start_params
        if not sp.is_stationary:
            raise ValueError('Given starting parameters imply a non-stationary'
                             ' AR process. Innovations algorithm requires a'
                             ' stationary process.')
        if spec.enforce_invertibility and not sp.is_invertible:
            raise ValueError('Given starting parameters imply a non-invertible'
                             ' MA process with `enforce_invertibility=True`.')

    def obj(params):
        # Negative loglikelihood in the unconstrained parameter space;
        # the AR coefficients are negated to match the reduced-polynomial
        # sign convention expected by `arma_loglike`.
        p.params = spec.constrain_params(params)
        return -arma_innovations.arma_loglike(
            endog, ar_params=-p.reduced_ar_poly.coef[1:],
            ma_params=p.reduced_ma_poly.coef[1:], sigma2=p.sigma2)

    # Untransform the starting parameters
    unconstrained_start_params = spec.unconstrain_params(start_params)
    # Perform the minimization
    if minimize_kwargs is None:
        minimize_kwargs = {}
    if 'options' not in minimize_kwargs:
        minimize_kwargs['options'] = {}
    minimize_kwargs['options'].setdefault('maxiter', 100)
    minimize_results = minimize(obj, unconstrained_start_params,
                                **minimize_kwargs)
    # TODO: show warning if convergence failed.
    # Reverse the transformation to get the optimal parameters
    p.params = spec.constrain_params(minimize_results.x)
    # Construct other results
    other_results = Bunch({
        'spec': spec,
        'minimize_results': minimize_results,
        'minimize_kwargs': minimize_kwargs,
        'start_params': start_params
    })
    return p, other_results
| bsd-3-clause | 78b88de84be26c0d749be75847710081 | 37.50996 | 79 | 0.645251 | 3.897581 | false | false | false | false |
statsmodels/statsmodels | statsmodels/miscmodels/tests/results/results_ordinal_model.py | 4 | 5453 | """
Test Results for ordinal models from R MASS lib
"""
import numpy as np
import os
import pandas as pd
from statsmodels.tools.testing import Holder
# R (v3.4.4) code inspired from
# https://stats.idre.ucla.edu/r/dae/ordinal-logistic-regression/
# library(readr) # to open the file
# library(MASS) # to perform ordinal regression
#
# ## load the data, 400 rows with 3 exogs(2 binaries, 1 float)
# ##and target 3-ordinal variable
# ologit_ucla <- read_csv("ologit_ucla.csv")
# ologit_ucla$apply <- as.factor(ologit_ucla$apply)
# ologit_ucla$apply <- factor(ologit_ucla$apply,
# levels=c("unlikely", "somewhat likely", "very likely"))
#
# ## fit ordered logit model
# r_logit <- polr(apply ~ pared + public + gpa,
# data = ologit_ucla,
# method = 'logit', # or 'probit'
# Hess=TRUE)
#
# ## fit ordered probit model
# r_probit <- polr(apply ~ pared + public + gpa,
# data = ologit_ucla,
# method = 'probit',
# Hess=TRUE)
#
# ## fit ordered cloglog model
# r_cloglog <- polr(apply ~ pared + public + gpa,
# data = ologit_ucla,
# method = 'cloglog',
# Hess=TRUE)
#
# ## with r = r_logit or r_probit or r_cloglog
# ## we add p-values
# (ctable <- coef(summary(r)))
# p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
# (ctable <- cbind(ctable, "p value" = p))
# ## show 7 first predictions
# head(predict(r, subset(ologit_ucla,
# select=c("pared", "public","gpa")), type='prob'),7)
data_store = Holder()
cur_dir = os.path.dirname(os.path.abspath(__file__))
df = pd.read_csv(os.path.join(cur_dir, "ologit_ucla.csv"))
# df_unordered['apply'] is pd.Categorical with ordered = False
df_unordered = df.copy()
df_unordered['apply'] = pd.Categorical(df['apply'], ordered=False)
# but categories are set in order
df_unordered['apply'] = df_unordered['apply'].cat.set_categories(
['unlikely', 'somewhat likely', 'very likely'])
# df['apply'] is pd.Categorical with ordered = True
df['apply'] = pd.Categorical(df['apply'], ordered=True)
df['apply'] = df['apply'].cat.set_categories(
['unlikely', 'somewhat likely', 'very likely'])
data_store.df_unordered = df_unordered
data_store.df = df
data_store.nobs = 400
data_store.n_ordinal_cat = 3
res_ord_logit = Holder()
res_ord_logit.coefficients_val = \
np.array([1.04769011, -0.05878572, 0.61594057])
res_ord_logit.coefficients_stdE = np.array([0.2658, 0.2979, 0.2606])
res_ord_logit.coefficients_tval = np.array([3.9418, -0.1974, 2.3632])
res_ord_logit.coefficients_pval = \
np.array([8.087070e-05, 8.435464e-01, 1.811594e-02])
res_ord_logit.thresholds = np.array([2.203915, 4.299363])
res_ord_logit.prob_pred = np.array([[0.5488310, 0.3593310, 0.09183798],
[0.3055632, 0.4759496, 0.21848725],
[0.2293835, 0.4781951, 0.29242138],
[0.6161224, 0.3126888, 0.07118879],
[0.6560149, 0.2833901, 0.06059505],
[0.6609240, 0.2797117, 0.05936431],
[0.6518332, 0.2865114, 0.06165547]])
res_ord_logit.resid_prob = np.array(
[+0.90816202, 0.08707593, -0.77061649, 0.54493358, 0.59541984,
-0.33907603, 0.59017771, 0.55970937, -0.41190566, 0.01851403,
-0.73753054, 0.53932241, 0.87609730, 0.56880356, -0.73936739,
-0.42539653, -0.47329831, -0.29581150, 0.90792753, 0.42811409])
res_ord_logit.resid_prob_stats = [
3.5137670974297e-06, -0.7706164931682951, 0.9434781714439548,
0.2550630116905416]
res_ord_probit = Holder()
res_ord_probit.coefficients_val = np.array([0.59811, 0.01016, 0.35815])
res_ord_probit.coefficients_stdE = np.array([0.1579, 0.1728, 0.1568])
res_ord_probit.coefficients_tval = np.array([3.78881, 0.05878, 2.28479])
res_ord_probit.coefficients_pval = \
np.array([1.513681e-04, 9.531256e-01, 2.232519e-02])
res_ord_probit.thresholds = np.array([1.2968, 2.5028])
res_ord_probit.prob_pred = np.array([[0.5514181, 0.3576848, 0.09089707],
[0.3260107, 0.4488799, 0.22510933],
[0.2349733, 0.4506351, 0.31439162],
[0.6142501, 0.3184778, 0.06727214],
[0.6519891, 0.2928449, 0.05516602],
[0.6402204, 0.3009945, 0.05878509],
[0.6480094, 0.2956162, 0.05637442]])
res_ord_cloglog = Holder()
res_ord_cloglog.coefficients_val = np.array([0.5166455, 0.1081131, 0.3343895])
res_ord_cloglog.coefficients_stdE = np.array([0.1613525, 0.1680675, 0.1542065])
res_ord_cloglog.coefficients_tval = np.array([3.2019668, 0.6432721, 2.1684534])
res_ord_cloglog.coefficients_pval = \
np.array([1.364927e-03, 5.200475e-01, 3.012421e-02])
res_ord_cloglog.thresholds = np.array([0.8705304, 1.9744660])
res_ord_cloglog.prob_pred = np.array([[0.5519526, 0.3592524, 0.08879500],
[0.3855287, 0.3842645, 0.23020682],
[0.2899487, 0.3540202, 0.35603111],
[0.6067184, 0.3333548, 0.05992678],
[0.6411418, 0.3133969, 0.04546127],
[0.5940557, 0.3400072, 0.06593710],
[0.6374521, 0.3156622, 0.04688570]])
| bsd-3-clause | 267fc1198190f810fdc96ca380314aa6 | 43.696721 | 79 | 0.591234 | 2.67173 | false | false | false | false |
statsmodels/statsmodels | statsmodels/genmod/families/tests/test_link.py | 3 | 6190 | """
Test functions for genmod.families.links
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_array_less
from scipy import stats
import pytest
import statsmodels.genmod.families as families
from statsmodels.tools import numdiff as nd
# Family instances: one instance of every link class under test, reused by
# all of the test functions below.
links = families.links
logit = links.Logit()
inverse_power = links.inverse_power()
sqrt = links.sqrt()
inverse_squared = links.inverse_squared()
identity = links.identity()
log = links.log()
probit = links.probit()
cauchy = links.cauchy()
cloglog = links.CLogLog()
loglog = links.LogLog()
negbinom = links.NegativeBinomial()
# TODO: parametrize all these tests
Links = [logit, inverse_power, sqrt, inverse_squared, identity,
         log, probit, cauchy, cloglog, loglog, negbinom]
# links with defined second derivative of inverse link.
LinksISD = [inverse_power, sqrt, inverse_squared, identity,
            cauchy, probit, loglog]
def get_domainvalue(link):
    """Draw a random value inside the domain of `link`.

    A standard-exponential draw is positive, which suits most links; the
    draw is clipped or negated for links with restricted domains.
    """
    value = -np.log(np.random.uniform(0, 1))
    if isinstance(link, links.CLogLog):
        # large arguments overflow exp(exp(.)), so cap the draw
        value = min(value, 3)
    elif isinstance(link, links.LogLog):
        value = max(value, -3)
    elif isinstance(link, links.NegativeBinomial):
        # this link is defined on the negative half-line
        value = -value
    return value
def test_inverse():
    # link.inverse must be a two-sided inverse of link: check both
    # inverse(link(p)) == p and link(inverse(z)) == z.
    np.random.seed(3285)
    for link in Links:
        for _ in range(10):
            prob = np.random.uniform(0, 1)  # in domain for all families
            assert_allclose(link.inverse(link(prob)), prob,
                            atol=1e-8, err_msg=str(link))
            z = get_domainvalue(link)
            assert_allclose(link(link.inverse(z)), z,
                            atol=1e-8, err_msg=str(link))
def test_deriv():
    # Compare analytic first derivatives against numeric differentiation.
    np.random.seed(24235)
    for link in Links:
        for _ in range(10):
            prob = np.random.uniform(0, 1)
            analytic = link.deriv(prob)
            numeric = nd.approx_fprime(np.r_[prob], link)
            assert_allclose(analytic, numeric, rtol=1e-6, atol=1e-6,
                            err_msg=str(link))
            if not isinstance(link, (type(inverse_power),
                                     type(inverse_squared))):
                # every link except the inverse-power family should be
                # monotonically increasing
                assert_array_less(-analytic, 0)
def test_deriv2():
    # Compare analytic second derivatives against numeric differentiation
    # of the analytic first derivative.
    np.random.seed(24235)
    for link in Links:
        for _ in range(10):
            prob = np.clip(np.random.uniform(0, 1), 0.01, 0.99)
            if isinstance(link, links.cauchy):
                # the Cauchy link is badly conditioned near the endpoints
                prob = np.clip(prob, 0.03, 0.97)
            analytic = link.deriv2(prob)
            numeric = nd.approx_fprime(np.r_[prob], link.deriv)
            assert_allclose(analytic, numeric, rtol=5e-6, atol=1e-6,
                            err_msg=str(link))
def test_inverse_deriv():
    # By the inverse function theorem, inverse_deriv(z) must equal
    # 1 / deriv(inverse(z)).
    np.random.seed(24235)
    for link in Links:
        for _ in range(10):
            z = -np.log(np.random.uniform())  # in domain for all families
            expected = 1 / link.deriv(link.inverse(z))
            assert_allclose(link.inverse_deriv(z), expected,
                            rtol=1e-8, atol=1e-10, err_msg=str(link))
def test_inverse_deriv2():
    # Compare the analytic second derivative of the inverse link against
    # numeric differentiation of inverse_deriv (only for links that
    # implement inverse_deriv2).
    np.random.seed(24235)
    for link in LinksISD:
        for _ in range(10):
            z = get_domainvalue(link)
            numeric = nd.approx_fprime(np.r_[z], link.inverse_deriv)
            assert_allclose(link.inverse_deriv2(z), numeric,
                            rtol=5e-6, atol=1e-6, err_msg=str(link))
def test_invlogit_stability():
    # Numerical-stability check: for arguments this large the logistic CDF
    # should saturate to exactly 1.0 rather than overflow or return NaN.
    z = [1123.4910007309222, 1483.952316802719, 1344.86033748641,
         706.339159002542, 1167.9986375146532, 663.8345826933115,
         1496.3691686913917, 1563.0763842182257, 1587.4309332296314,
         697.1173174974248, 1333.7256198289665, 1388.7667560586933,
         819.7605431778434, 1479.9204150555015, 1078.5642245164856,
         480.10338454985896, 1112.691659145772, 534.1061908007274,
         918.2011296406588, 1280.8808515887802, 758.3890788775948,
         673.503699841035, 1556.7043357878208, 819.5269028006679,
         1262.5711060356423, 1098.7271535253608, 1482.811928490097,
         796.198809756532, 893.7946963941745, 470.3304989319786,
         1427.77079226037, 1365.2050226373822, 1492.4193201661922,
         871.9922191949931, 768.4735925445908, 732.9222777654679,
         812.2382651982667, 495.06449978924525]
    zinv = logit.inverse(z)
    assert_equal(zinv, np.ones_like(z))
class MyCLogLog(links.Link):
    """Hand-written complementary log-log link, used as a comparison case."""

    def __call__(self, p):
        # g(p) = log(-log(1 - p))
        return np.log(-np.log(1 - p))

    def inverse(self, z):
        # g^{-1}(z) = 1 - exp(-exp(z))
        return 1 - np.exp(-np.exp(z))

    def deriv(self, p):
        # g'(p) = 1 / ((p - 1) * log(1 - p))
        return 1. / ((p - 1) * (np.log(1 - p)))
class CasesCDFLink():
    # just as namespace to hold cases for test_cdflink
    # Each pair: a CDFLink built from a scipy distribution and the
    # specialized link class it should agree with.
    link_pairs = [
        (links.CDFLink(dbn=stats.gumbel_l), links.cloglog()),
        (links.CDFLink(dbn=stats.gumbel_r), links.loglog()),
        (links.CDFLink(dbn=stats.norm), links.probit()),
        (links.CDFLink(dbn=stats.logistic), links.logit()),
        (links.CDFLink(dbn=stats.t(1)), links.cauchy()),
        # approximation of t by normal is not good enough for rtol, atol
        # (links.CDFLink(dbn=stats.t(1000000)), links.probit()),
        (MyCLogLog(), links.cloglog()),  # not a cdflink, but compares
        ]
    # Every method on which the two links of a pair must agree.
    methods = ['__call__', 'deriv', 'inverse', 'inverse_deriv', 'deriv2',
               'inverse_deriv2']
    # Evaluation grid, clipped away from 0 and 1 where links diverge.
    p = np.linspace(0, 1, 6)
    eps = 1e-3
    p = np.clip(p, eps, 1 - eps)
@pytest.mark.parametrize("m", CasesCDFLink.methods)
@pytest.mark.parametrize("link1, link2", CasesCDFLink.link_pairs)
def test_cdflink(m, link1, link2):
p = CasesCDFLink.p
res1 = getattr(link1, m)(p)
res2 = getattr(link2, m)(p)
assert_allclose(res1, res2, atol=1e-8, rtol=1e-8)
| bsd-3-clause | 6d43977543c8bcb2d23c9b24742f2ed2 | 31.408377 | 76 | 0.611955 | 3.035802 | false | true | false | false |
statsmodels/statsmodels | statsmodels/sandbox/examples/example_sysreg.py | 3 | 8058 | """Example: statsmodels.sandbox.sysreg
"""
#TODO: this is going to change significantly once we have a panel data structure
from statsmodels.compat.python import asbytes, lmap
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.regression.gmm import IV2SLS
from statsmodels.sandbox.sysreg import SUR, Sem2SLS
#for Python 3 compatibility
# Seemingly Unrelated Regressions (SUR) Model
# This example uses the subset of the Grunfeld data in Greene's Econometric
# Analysis Chapter 14 (5th Edition)
grun_data = sm.datasets.grunfeld.load()
firms = ['General Motors', 'Chrysler', 'General Electric', 'Westinghouse',
'US Steel']
#for Python 3 compatibility
firms = lmap(asbytes, firms)
grun_exog = grun_data.exog
grun_endog = grun_data.endog
# Right now takes SUR takes a list of arrays
# The array alternates between the LHS of an equation and RHS side of an
# equation
# This is very likely to change
grun_sys = []
for i in firms:
index = grun_exog['firm'] == i
grun_sys.append(grun_endog[index])
exog = grun_exog[index][['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=True)
grun_sys.append(exog)
# Note that the results in Greene (5th edition) uses a slightly different
# version of the Grunfeld data. To reproduce Table 14.1 the following changes
# are necessary.
grun_sys[-2][5] = 261.6
grun_sys[-2][-3] = 645.2
grun_sys[-1][11,2] = 232.6
grun_mod = SUR(grun_sys)
grun_res = grun_mod.fit()
print("Results for the 2-step GLS")
print("Compare to Greene Table 14.1, 5th edition")
print(grun_res.params)
# or you can do an iterative fit
# you have to define a new model though this will be fixed
# TODO: note the above
print("Results for iterative GLS (equivalent to MLE)")
print("Compare to Greene Table 14.3")
#TODO: these are slightly off, could be a convergence issue
# or might use a different default DOF correction?
grun_imod = SUR(grun_sys)
grun_ires = grun_imod.fit(igls=True)
print(grun_ires.params)
# Two-Stage Least Squares for Simultaneous Equations
#TODO: we are going to need *some kind* of formula framework
# This follows the simple macroeconomic model given in
# Greene Example 15.1 (5th Edition)
# The data however is from statsmodels and is not the same as
# Greene's
# The model is
# consumption: c_{t} = \alpha_{0} + \alpha_{1}y_{t} + \alpha_{2}c_{t-1} + \epsilon_{t1}
# investment: i_{t} = \beta_{0} + \beta_{1}r_{t} + \beta_{2}\left(y_{t}-y_{t-1}\right) + \epsilon_{t2}
# demand: y_{t} = c_{t} + I_{t} + g_{t}
# See Greene's Econometric Analysis for more information
# Load the data
macrodata = sm.datasets.macrodata.load().data
# Not needed, but make sure the data is sorted
macrodata = np.sort(macrodata, order=['year','quarter'])
# Impose the demand restriction
y = macrodata['realcons'] + macrodata['realinv'] + macrodata['realgovt']
# Build the system
macro_sys = []
# First equation LHS
macro_sys.append(macrodata['realcons'][1:]) # leave off first date
# First equation RHS
exog1 = np.column_stack((y[1:],macrodata['realcons'][:-1]))
#TODO: it might be nice to have "lag" and "lead" functions
exog1 = sm.add_constant(exog1, prepend=True)
macro_sys.append(exog1)
# Second equation LHS
macro_sys.append(macrodata['realinv'][1:])
# Second equation RHS
exog2 = np.column_stack((macrodata['tbilrate'][1:], np.diff(y)))
exog2 = sm.add_constant(exog2, prepend=True)
macro_sys.append(exog2)
# We need to say that y_{t} in the RHS of equation 1 is an endogenous regressor
# We will call these independent endogenous variables
# Right now, we use a dictionary to declare these
indep_endog = {0 : [1]}
# We also need to create a design of our instruments
# This will be done automatically in the future
instruments = np.column_stack((macrodata[['realgovt',
'tbilrate']][1:].view(float).reshape(-1,2),macrodata['realcons'][:-1],
y[:-1]))
instruments = sm.add_constant(instruments, prepend=True)
macro_mod = Sem2SLS(macro_sys, indep_endog=indep_endog, instruments=instruments)
# Right now this only returns parameters
macro_params = macro_mod.fit()
print("The parameters for the first equation are correct.")
print("The parameters for the second equation are not.")
print(macro_params)
#TODO: Note that the above is incorrect, because we have no way of telling the
# model that *part* of the y_{t} - y_{t-1} is an independent endogenous variable
# To correct for this we would have to do the following
y_instrumented = macro_mod.wexog[0][:,1]
whitened_ydiff = y_instrumented - y[:-1]
wexog = np.column_stack((macrodata['tbilrate'][1:],whitened_ydiff))
wexog = sm.add_constant(wexog, prepend=True)
correct_params = sm.GLS(macrodata['realinv'][1:], wexog).fit().params
print("If we correctly instrument everything, then these are the parameters")
print("for the second equation")
print(correct_params)
print("Compare to output of R script statsmodels/sandbox/tests/macrodata.s")
print('\nUsing IV2SLS')
miv = IV2SLS(macro_sys[0], macro_sys[1], instruments)
resiv = miv.fit()
print("equation 1")
print(resiv.params)
miv2 = IV2SLS(macro_sys[2], macro_sys[3], instruments)
resiv2 = miv2.fit()
print("equation 2")
print(resiv2.params)
### Below is the same example using Greene's data ###
run_greene = 0
if run_greene:
try:
data3 = np.genfromtxt('/home/skipper/school/MetricsII/Greene \
TableF5-1.txt', names=True)
except:
raise ValueError("Based on Greene TableF5-1. You should download it "
"from his web site and edit this script accordingly.")
# Example 15.1 in Greene 5th Edition
# c_t = constant + y_t + c_t-1
# i_t = constant + r_t + (y_t - y_t-1)
# y_t = c_t + i_t + g_t
sys3 = []
sys3.append(data3['realcons'][1:]) # have to leave off a beg. date
# impose 3rd equation on y
y = data3['realcons'] + data3['realinvs'] + data3['realgovt']
exog1 = np.column_stack((y[1:],data3['realcons'][:-1]))
exog1 = sm.add_constant(exog1, prepend=False)
sys3.append(exog1)
sys3.append(data3['realinvs'][1:])
exog2 = np.column_stack((data3['tbilrate'][1:],
np.diff(y)))
# realint is missing 1st observation
exog2 = sm.add_constant(exog2, prepend=False)
sys3.append(exog2)
indep_endog = {0 : [0]} # need to be able to say that y_1 is an instrument..
instruments = np.column_stack((data3[['realgovt',
'tbilrate']][1:].view(float).reshape(-1,2),data3['realcons'][:-1],
y[:-1]))
instruments = sm.add_constant(instruments, prepend=False)
sem_mod = Sem2SLS(sys3, indep_endog = indep_endog, instruments=instruments)
sem_params = sem_mod.fit() # first equation is right, but not second?
# should y_t in the diff be instrumented?
# how would R know this in the script?
# well, let's check...
y_instr = sem_mod.wexog[0][:,0]
wyd = y_instr - y[:-1]
wexog = np.column_stack((data3['tbilrate'][1:],wyd))
wexog = sm.add_constant(wexog, prepend=False)
params = sm.GLS(data3['realinvs'][1:], wexog).fit().params
print("These are the simultaneous equation estimates for Greene's \
example 13-1 (Also application 13-1 in 6th edition.")
print(sem_params)
print("The first set of parameters is correct. The second set is not.")
print("Compare to the solution manual at \
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm")
print("The reason is the restriction on (y_t - y_1)")
print("Compare to R script GreeneEx15_1.s")
print("Somehow R carries y.1 in yd to know that it needs to be \
instrumented")
print("If we replace our estimate with the instrumented one")
print(params)
print("We get the right estimate")
print("Without a formula framework we have to be able to do restrictions.")
# yep!, but how in the world does R know this when we just fed it yd??
# must be implicit in the formula framework...
# we are going to need to keep the two equations separate and use
# a restrictions matrix. Ugh, is a formula framework really, necessary to get
# around this?
| bsd-3-clause | 5d993fd49a53f57eb5e9a746fd612c32 | 37.555024 | 102 | 0.69521 | 3.06155 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/arima/estimators/statespace.py | 6 | 4950 | """
State space approach to estimating SARIMAX models.
Author: Chad Fulton
License: BSD-3
"""
import numpy as np
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.arima.specification import SARIMAXSpecification
from statsmodels.tsa.arima.params import SARIMAXParams
def statespace(endog, exog=None, order=(0, 0, 0),
               seasonal_order=(0, 0, 0, 0), include_constant=True,
               enforce_stationarity=True, enforce_invertibility=True,
               concentrate_scale=False, start_params=None, fit_kwargs=None):
    """
    Estimate SARIMAX parameters using state space methods.

    Parameters
    ----------
    endog : array_like
        Input time series array.
    order : tuple, optional
        The (p,d,q) order of the model for the number of AR parameters,
        differences, and MA parameters. Default is (0, 0, 0).
    seasonal_order : tuple, optional
        The (P,D,Q,s) order of the seasonal component of the model for the
        AR parameters, differences, MA parameters, and periodicity. Default
        is (0, 0, 0, 0).
    include_constant : bool, optional
        Whether to add a constant term in `exog` if it's not already there.
        The estimate of the constant will then appear as one of the `exog`
        parameters. If `exog` is None, then the constant will represent the
        mean of the process.
    enforce_stationarity : bool, optional
        Whether or not to transform the AR parameters to enforce stationarity
        in the autoregressive component of the model. Default is True.
    enforce_invertibility : bool, optional
        Whether or not to transform the MA parameters to enforce invertibility
        in the moving average component of the model. Default is True.
    concentrate_scale : bool, optional
        Whether or not to concentrate the scale (variance of the error term)
        out of the likelihood. This reduces the number of parameters estimated
        by maximum likelihood by one.
    start_params : array_like, optional
        Initial guess of the solution for the loglikelihood maximization. The
        AR polynomial must be stationary. If `enforce_invertibility=True` the
        MA poylnomial must be invertible. If not provided, default starting
        parameters are computed using the Hannan-Rissanen method.
    fit_kwargs : dict, optional
        Arguments to pass to the state space model's `fit` method.

    Returns
    -------
    parameters : SARIMAXParams object
    other_results : Bunch
        Includes two components, `spec`, containing the `SARIMAXSpecification`
        instance corresponding to the input arguments; and
        `statespace_results`, corresponding to the results from the underlying
        state space model and Kalman filter / smoother.

    Notes
    -----
    The primary reference is [1]_.

    References
    ----------
    .. [1] Durbin, James, and Siem Jan Koopman. 2012.
       Time Series Analysis by State Space Methods: Second Edition.
       Oxford University Press.
    """
    # Handle including the constant (need to do it now so that the constant
    # parameter can be included in the specification as part of `exog`.)
    if include_constant:
        exog = np.ones_like(endog) if exog is None else add_constant(exog)
    # Create the specification
    spec = SARIMAXSpecification(
        endog, exog=exog, order=order, seasonal_order=seasonal_order,
        enforce_stationarity=enforce_stationarity,
        enforce_invertibility=enforce_invertibility,
        concentrate_scale=concentrate_scale)
    endog = spec.endog
    exog = spec.exog
    # `p` will hold the final parameter estimates.
    p = SARIMAXParams(spec=spec)
    # Check start parameters: validate them up front so that failures
    # surface before the (expensive) state space fit.
    if start_params is not None:
        sp = SARIMAXParams(spec=spec)
        sp.params = start_params
        if spec.enforce_stationarity and not sp.is_stationary:
            raise ValueError('Given starting parameters imply a non-stationary'
                             ' AR process with `enforce_stationarity=True`.')
        if spec.enforce_invertibility and not sp.is_invertible:
            raise ValueError('Given starting parameters imply a non-invertible'
                             ' MA process with `enforce_invertibility=True`.')
    # Create and fit the state space model
    mod = SARIMAX(endog, exog=exog, order=spec.order,
                  seasonal_order=spec.seasonal_order,
                  enforce_stationarity=spec.enforce_stationarity,
                  enforce_invertibility=spec.enforce_invertibility,
                  concentrate_scale=spec.concentrate_scale)
    if fit_kwargs is None:
        fit_kwargs = {}
    # Suppress optimizer console output by default.
    fit_kwargs.setdefault('disp', 0)
    res_ss = mod.fit(start_params=start_params, **fit_kwargs)
    # Construct results
    p.params = res_ss.params
    res = Bunch({
        'spec': spec,
        'statespace_results': res_ss,
    })
    return p, res
| bsd-3-clause | 67d1b21ddabc84af931744a91b55cb78 | 39.57377 | 79 | 0.671919 | 4.044118 | false | false | false | false |
statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | 3 | 19776 | # -*- coding: utf-8 -*-
"""
This models contains the Kernels for Kernel smoothing.
Hopefully in the future they may be reused/extended for other kernel based
method
References:
----------
Pointwise Kernel Confidence Bounds
(smoothconf)
http://fedc.wiwi.hu-berlin.de/xplore/ebooks/html/anr/anrhtmlframe62.html
"""
# pylint: disable-msg=C0103
# pylint: disable-msg=W0142
# pylint: disable-msg=E1101
# pylint: disable-msg=E0611
from statsmodels.compat.python import lzip, lfilter
import numpy as np
import scipy.integrate
from scipy.special import factorial
from numpy import exp, multiply, square, divide, subtract, inf
class NdKernel:
    """Generic N-dimensional kernel

    Parameters
    ----------
    n : int
        The number of series for kernel estimates
    kernels : list
        kernels

    Can be constructed from either
    a) a list of n kernels which will be treated as
    independent marginals on a gaussian copula (specified by H)
    or b) a single univariate kernel which will be applied radially to the
    mahalanobis distance defined by H.

    In the case of the Gaussian these are both equivalent, and the second
    construction is preferred.
    """
    def __init__(self, n, kernels = None, H = None):
        # `Gaussian` is defined later in this module.
        if kernels is None:
            kernels = Gaussian()
        self._kernels = kernels
        self.weights = None
        # Default bandwidth is the identity; H is expected to behave like
        # np.matrix (the `.I` inverse attribute is used below).
        if H is None:
            H = np.matrix( np.identity(n))
        self._H = H
        # Cholesky factor of H^{-1}, used to whiten (x - x0) in `density`.
        self._Hrootinv = np.linalg.cholesky( H.I )
    def getH(self):
        """Getter for kernel bandwidth, H"""
        return self._H
    def setH(self, value):
        """Setter for kernel bandwidth, H"""
        # NOTE(review): setting H does not refresh `_Hrootinv`, so `density`
        # keeps using the factor of the original H — confirm intended.
        self._H = value
    H = property(getH, setH, doc="Kernel bandwidth matrix")
    def density(self, xs, x):
        """Kernel density estimate at point `x` given sample `xs`.

        Returns NaN when `xs` is empty.
        """
        n = len(xs)
        #xs = self.in_domain( xs, xs, x )[0]
        if len(xs)>0: ## Need to do product of marginal distributions
            #w = np.sum([self(self._Hrootinv * (xx-x).T ) for xx in xs])/n
            #vectorized does not work:
            if self.weights is not None:
                # weighted mean of kernel values, normalized by total weight
                w = np.mean(self((xs-x) * self._Hrootinv).T * self.weights)/sum(self.weights)
            else:
                w = np.mean(self((xs-x) * self._Hrootinv )) #transposed
            #w = np.mean([self(xd) for xd in ((xs-x) * self._Hrootinv)] ) #transposed
            return w
        else:
            return np.nan
    def _kernweight(self, x ):
        """returns the kernel weight for the independent multivariate kernel"""
        # NOTE(review): only the radial (single CustomKernel) case is
        # implemented; when `self._kernels` is a list this method falls
        # through and implicitly returns None — confirm intended.
        if isinstance( self._kernels, CustomKernel ):
            ## Radial case
            #d = x.T * x
            #x is matrix, 2d, element wise sqrt looks wrong
            #d = np.sqrt( x.T * x )
            x = np.asarray(x)
            #d = np.sqrt( (x * x).sum(-1) )
            # squared Mahalanobis distance (x has already been whitened
            # by _Hrootinv in `density`)
            d = (x * x).sum(-1)
            return self._kernels( np.asarray(d) )
    def __call__(self, x):
        """
        This simply returns the value of the kernel function at x
        Does the same as weight if the function is normalised
        """
        return self._kernweight(x)
class CustomKernel:
    """
    Generic 1D Kernel object.
    Can be constructed by selecting a standard named Kernel,
    or providing a lambda expression and domain.
    The domain allows some algorithms to run faster for finite domain kernels.
    """
    # MC: Not sure how this will look in the end - or even still exist.
    # Main purpose of this is to allow custom kernels and to allow speed up
    # from finite support.

    def __init__(self, shape, h = 1.0, domain = None, norm = None):
        """
        shape should be a function taking and returning numeric type.

        For sanity it should always return positive or zero but this is not
        enforced in case you want to do weird things. Bear in mind that the
        statistical tests etc. may not be valid for non-positive kernels.

        The bandwidth of the kernel is supplied as h.

        You may specify a domain as a list of 2 values [min, max], in which case
        kernel will be treated as zero outside these values. This will speed up
        calculation.

        You may also specify the normalisation constant for the supplied Kernel.
        If you do this number will be stored and used as the normalisation
        without calculation. It is recommended you do this if you know the
        constant, to speed up calculation. In particular if the shape function
        provided is already normalised you should provide norm = 1.0.

        Warning: I think several calculations assume that the kernel is
        normalized. No tests for non-normalized kernel.
        """
        self._normconst = norm   # a value or None, if None, then calculate
        self.domain = domain
        self.weights = None
        if callable(shape):
            self._shape = shape
        else:
            raise TypeError("shape must be a callable object/function")
        self._h = h
        # lazily computed caches, filled on first property access
        self._L2Norm = None
        self._kernel_var = None
        self._normal_reference_constant = None
        self._order = None

    def geth(self):
        """Getter for kernel bandwidth, h"""
        return self._h

    def seth(self, value):
        """Setter for kernel bandwidth, h"""
        self._h = value

    h = property(geth, seth, doc="Kernel Bandwidth")

    def in_domain(self, xs, ys, x):
        """
        Returns the filtered (xs, ys) based on the Kernel domain centred on x
        """
        # Disable black-list functions: filter used for speed instead of
        # list-comprehension
        # pylint: disable-msg=W0141
        def isInDomain(xy):
            """Used for filter to check if point is in the domain"""
            u = (xy[0]-x)/self.h
            return np.all((u >= self.domain[0]) & (u <= self.domain[1]))

        if self.domain is None:
            return (xs, ys)
        else:
            filtered = lfilter(isInDomain, lzip(xs, ys))
            if len(filtered) > 0:
                xs, ys = lzip(*filtered)
                return (xs, ys)
            else:
                return ([], [])

    def density(self, xs, x):
        """Returns the kernel density estimate for point x based on x-values
        xs
        """
        xs = np.asarray(xs)
        n = len(xs) # before in_domain?
        if self.weights is not None:
            xs, weights = self.in_domain( xs, self.weights, x )
        else:
            xs = self.in_domain( xs, xs, x )[0]
        xs = np.asarray(xs)
        #print 'len(xs)', len(xs), x
        if xs.ndim == 1:
            xs = xs[:,None]
        if len(xs)>0:
            h = self.h
            if self.weights is not None:
                w = 1 / h * np.sum(self((xs-x)/h).T * weights, axis=1)
            else:
                w = 1. / (h * n) * np.sum(self((xs-x)/h), axis=0)
            return w
        else:
            return np.nan

    def density_var(self, density, nobs):
        """approximate pointwise variance for kernel density

        not verified

        Parameters
        ----------
        density : array_like
            pdf of the kernel density
        nobs : int
            number of observations used in the KDE estimation

        Returns
        -------
        kde_var : ndarray
            estimated variance of the density estimate

        Notes
        -----
        This uses the asymptotic normal approximation to the distribution of
        the density estimate.
        """
        return np.asarray(density) * self.L2Norm / self.h / nobs

    def density_confint(self, density, nobs, alpha=0.05):
        """approximate pointwise confidence interval for kernel density

        The confidence interval is centered at the estimated density and
        ignores the bias of the density estimate.

        not verified

        Parameters
        ----------
        density : array_like
            pdf of the kernel density
        nobs : int
            number of observations used in the KDE estimation

        Returns
        -------
        conf_int : ndarray
            estimated confidence interval of the density estimate, lower bound
            in first column and upper bound in second column

        Notes
        -----
        This uses the asymptotic normal approximation to the distribution of
        the density estimate. The lower bound can be negative for density
        values close to zero.
        """
        from scipy import stats
        crit = stats.norm.isf(alpha / 2.)
        density = np.asarray(density)
        half_width = crit * np.sqrt(self.density_var(density, nobs))
        conf_int = np.column_stack((density - half_width, density + half_width))
        return conf_int

    def smooth(self, xs, ys, x):
        """Returns the kernel smoothing estimate for point x based on x-values
        xs and y-values ys.
        Not expected to be called by the user.
        """
        xs, ys = self.in_domain(xs, ys, x)

        if len(xs)>0:
            w = np.sum(self((xs-x)/self.h))
            #TODO: change the below to broadcasting when shape is sorted
            v = np.sum([yy*self((xx-x)/self.h) for xx, yy in zip(xs, ys)])
            return v / w
        else:
            return np.nan

    def smoothvar(self, xs, ys, x):
        """Returns the kernel smoothing estimate of the variance at point x.
        """
        xs, ys = self.in_domain(xs, ys, x)

        if len(xs) > 0:
            fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
            sqresid = square( subtract(ys, fittedvals) )
            w = np.sum(self((xs-x)/self.h))
            v = np.sum([rr*self((xx-x)/self.h) for xx, rr in zip(xs, sqresid)])
            return v / w
        else:
            return np.nan

    def smoothconf(self, xs, ys, x, alpha=0.05):
        """Returns the kernel smoothing estimate with confidence 1sigma bounds
        """
        xs, ys = self.in_domain(xs, ys, x)

        if len(xs) > 0:
            fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
            #fittedvals = self.smooth(xs, ys, x) # x or xs in Haerdle
            sqresid = square(
                subtract(ys, fittedvals)
                )
            w = np.sum(self((xs-x)/self.h))
            #var = sqresid.sum() / (len(sqresid) - 0)  # nonlocal var ? JP just trying
            v = np.sum([rr*self((xx-x)/self.h) for xx, rr in zip(xs, sqresid)])
            var = v / w
            sd = np.sqrt(var)
            K = self.L2Norm
            yhat = self.smooth(xs, ys, x)
            from scipy import stats
            crit = stats.norm.isf(alpha / 2)
            err = crit * sd * np.sqrt(K) / np.sqrt(w * self.h * self.norm_const)
            return (yhat - err, yhat, yhat + err)
        else:
            return (np.nan, np.nan, np.nan)

    @property
    def L2Norm(self):
        """Returns the integral of the square of the kernel from -inf to inf"""
        # computed lazily by numerical integration and cached
        if self._L2Norm is None:
            L2Func = lambda x: (self.norm_const*self._shape(x))**2
            if self.domain is None:
                self._L2Norm = scipy.integrate.quad(L2Func, -inf, inf)[0]
            else:
                self._L2Norm = scipy.integrate.quad(L2Func, self.domain[0],
                                                    self.domain[1])[0]
        return self._L2Norm

    @property
    def norm_const(self):
        """
        Normalising constant for kernel (integral from -inf to inf)
        """
        # computed lazily by numerical integration and cached
        if self._normconst is None:
            if self.domain is None:
                quadres = scipy.integrate.quad(self._shape, -inf, inf)
            else:
                quadres = scipy.integrate.quad(self._shape, self.domain[0],
                                               self.domain[1])
            self._normconst = 1.0/(quadres[0])
        return self._normconst

    @property
    def kernel_var(self):
        """Returns the second moment of the kernel"""
        # computed lazily by numerical integration and cached
        if self._kernel_var is None:
            func = lambda x: x**2 * self.norm_const * self._shape(x)
            if self.domain is None:
                self._kernel_var = scipy.integrate.quad(func, -inf, inf)[0]
            else:
                self._kernel_var = scipy.integrate.quad(func, self.domain[0],
                                                        self.domain[1])[0]
        return self._kernel_var

    def moments(self, n):
        """Return the n-th moment of the kernel (only n = 1, 2 implemented)."""
        if n > 2:
            msg = "Only first and second moment currently implemented"
            raise NotImplementedError(msg)

        if n == 1:
            # all implemented kernels are symmetric about zero
            return 0

        if n == 2:
            return self.kernel_var

    @property
    def normal_reference_constant(self):
        """
        Constant used for silverman normal reference asymtotic bandwidth
        calculation.

        C  = 2((pi^(1/2)*(nu!)^3 R(k))/(2nu(2nu)!kap_nu(k)^2))^(1/(2nu+1))
        nu = kernel order
        kap_nu = nu'th moment of kernel
        R = kernel roughness (square of L^2 norm)

        Note: L2Norm property returns square of norm.
        """
        nu = self._order

        if not nu == 2:
            msg = "Only implemented for second order kernels"
            raise NotImplementedError(msg)

        if self._normal_reference_constant is None:
            C = np.pi**(.5) * factorial(nu)**3 * self.L2Norm
            C /= (2 * nu * factorial(2 * nu) * self.moments(nu)**2)
            C = 2*C**(1.0/(2*nu+1))
            self._normal_reference_constant = C

        return self._normal_reference_constant

    def weight(self, x):
        """This returns the normalised weight at distance x"""
        return self.norm_const*self._shape(x)

    def __call__(self, x):
        """
        This simply returns the value of the kernel function at x

        Does the same as weight if the function is normalised
        """
        return self._shape(x)
class Uniform(CustomKernel):
    """Uniform (boxcar) kernel, K(u) = 0.5 on [-1, 1]."""

    def __init__(self, h=1.0):
        def shape(x):
            # constant 0.5 with the same shape as the input
            return 0.5 * np.ones(x.shape)

        super().__init__(shape=shape, h=h, domain=[-1.0, 1.0], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 0.5
        self._kernel_var = 1. / 3
        self._order = 2
class Triangular(CustomKernel):
    """Triangular kernel, K(u) = 1 - |u| on [-1, 1]."""

    def __init__(self, h=1.0):
        super().__init__(shape=lambda u: 1 - abs(u), h=h,
                         domain=[-1.0, 1.0], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 2.0 / 3.0
        self._kernel_var = 1. / 6
        self._order = 2
class Epanechnikov(CustomKernel):
    """Epanechnikov kernel, K(u) = 0.75*(1 - u**2) on [-1, 1]."""

    def __init__(self, h=1.0):
        super().__init__(shape=lambda u: 0.75*(1 - u*u), h=h,
                         domain=[-1.0, 1.0], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 0.6
        self._kernel_var = 0.2
        self._order = 2
class Biweight(CustomKernel):
    """Biweight (quartic) kernel, K(u) = 0.9375*(1 - u**2)**2 on [-1, 1]."""

    def __init__(self, h=1.0):
        CustomKernel.__init__(self, shape=lambda x: 0.9375*(1 - x*x)**2, h=h,
                              domain=[-1.0, 1.0], norm = 1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 5.0/7.0
        self._kernel_var = 1. / 7
        self._order = 2

    def smooth(self, xs, ys, x):
        """Returns the kernel smoothing estimate for point x based on x-values
        xs and y-values ys.
        Not expected to be called by the user.

        Special implementation optimized for Biweight.
        """
        # vectorized form of the generic smoother; the 0.9375 normalisation
        # cancels in the ratio v / w
        xs, ys = self.in_domain(xs, ys, x)

        if len(xs) > 0:
            w = np.sum(square(subtract(1, square(divide(subtract(xs, x),
                                                        self.h)))))
            v = np.sum(multiply(ys, square(subtract(1, square(divide(
                                                subtract(xs, x), self.h))))))
            return v / w
        else:
            return np.nan

    def smoothvar(self, xs, ys, x):
        """
        Returns the kernel smoothing estimate of the variance at point x.
        """
        xs, ys = self.in_domain(xs, ys, x)

        if len(xs) > 0:
            # squared residuals from the leave-nothing-out fitted values
            fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
            rs = square(subtract(ys, fittedvals))
            w = np.sum(square(subtract(1.0, square(divide(subtract(xs, x),
                                                          self.h)))))
            v = np.sum(multiply(rs, square(subtract(1, square(divide(
                                                subtract(xs, x), self.h))))))
            return v / w
        else:
            return np.nan

    def smoothconf_(self, xs, ys, x):
        """Returns the kernel smoothing estimate with confidence 1sigma bounds
        """
        # NOTE(review): trailing underscore suggests an alternative/unused
        # implementation; the generic CustomKernel.smoothconf takes alpha.
        xs, ys = self.in_domain(xs, ys, x)

        if len(xs) > 0:
            fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
            rs = square(subtract(ys, fittedvals))
            w = np.sum(square(subtract(1.0, square(divide(subtract(xs, x),
                                                          self.h)))))
            v = np.sum(multiply(rs, square(subtract(1, square(divide(
                                                subtract(xs, x), self.h))))))
            var = v / w
            sd = np.sqrt(var)
            K = self.L2Norm
            yhat = self.smooth(xs, ys, x)
            err = sd * K / np.sqrt(0.9375 * w * self.h)
            return (yhat - err, yhat, yhat + err)
        else:
            return (np.nan, np.nan, np.nan)
class Triweight(CustomKernel):
    """Triweight kernel, K(u) = 1.09375*(1 - u**2)**3 on [-1, 1]."""

    def __init__(self, h=1.0):
        super().__init__(shape=lambda u: 1.09375*(1 - u*u)**3, h=h,
                         domain=[-1.0, 1.0], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 350.0/429.0
        self._kernel_var = 1. / 9
        self._order = 2
class Gaussian(CustomKernel):
    """
    Gaussian (Normal) Kernel

    K(u) = 1 / (sqrt(2*pi)) exp(-0.5 u**2)
    """
    def __init__(self, h=1.0):
        # 0.3989422804014327 == 1/sqrt(2*pi); infinite support (domain=None)
        CustomKernel.__init__(self, shape = lambda x: 0.3989422804014327 *
                        np.exp(-x**2/2.0), h = h, domain = None, norm = 1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 1.0/(2.0*np.sqrt(np.pi))
        self._kernel_var = 1.0
        self._order = 2

    def smooth(self, xs, ys, x):
        """Returns the kernel smoothing estimate for point x based on x-values
        xs and y-values ys.
        Not expected to be called by the user.

        Special implementation optimized for Gaussian.
        """
        # the 1/sqrt(2*pi) normalisation cancels in the ratio v / w; note
        # there is no in_domain filtering since the support is infinite
        w = np.sum(exp(multiply(square(divide(subtract(xs, x),
                                              self.h)),-0.5)))
        v = np.sum(multiply(ys, exp(multiply(square(divide(subtract(xs, x),
                                                           self.h)), -0.5))))
        return v/w
class Cosine(CustomKernel):
    """
    Cosine Kernel

    K(u) = pi/4 cos(0.5 * pi * u) between -1.0 and 1.0
    """

    def __init__(self, h=1.0):
        # 0.78539816339744828 == pi/4
        super().__init__(shape=lambda u: 0.78539816339744828 *
                         np.cos(np.pi/2.0 * u),
                         h=h, domain=[-1.0, 1.0], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = np.pi**2/16.0
        self._kernel_var = 0.1894305308612978  # = 1 - 8 / np.pi**2
        self._order = 2
class Cosine2(CustomKernel):
    """
    Cosine2 Kernel

    K(u) = 1 + cos(2 * pi * u) between -0.5 and 0.5

    Note: this is the same Cosine kernel that Stata uses
    """

    def __init__(self, h=1.0):
        def shape(u):
            return 1 + np.cos(2.0 * np.pi * u)

        super().__init__(shape=shape, h=h, domain=[-0.5, 0.5], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 1.5
        self._kernel_var = 0.03267274151216444  # = 1/12. - 0.5 / np.pi**2
        self._order = 2
class Tricube(CustomKernel):
    """
    Tricube Kernel

    K(u) = 0.864197530864 * (1 - abs(x)**3)**3 between -1.0 and 1.0
    """

    def __init__(self, h=1.0):
        # 0.864197530864 == 70/81, the tricube normalising constant
        super().__init__(shape=lambda u: 0.864197530864 * (1 - abs(u)**3)**3,
                         h=h, domain=[-1.0, 1.0], norm=1.0)
        # closed-form values, avoid numerical integration
        self._L2Norm = 175.0/247.0
        self._kernel_var = 35.0/243.0
        self._order = 2
| bsd-3-clause | 496a1e070feb5ec6ae12eae841d9e478 | 33.214533 | 93 | 0.535042 | 3.729913 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/statespace/tests/test_pickle.py | 6 | 5110 | """
Tests for python wrapper of state space representation and filtering
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
import pickle
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_allclose
import pytest
from statsmodels.tsa.statespace import sarimax
from statsmodels.tsa.statespace.kalman_filter import KalmanFilter
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.structural import UnobservedComponents
from .results import results_kalman_filter
@pytest.fixture()
def data():
    """Quarterly US GDP series (Kim & Nelson) with an added log-GDP column."""
    true = results_kalman_filter.uc_uni
    gdp = pd.DataFrame(
        true['data'],
        index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
        columns=['GDP'],
    )
    gdp['lgdp'] = np.log(gdp['GDP'])
    return gdp
def test_pickle_fit_sarimax(data):
    """A pickled SARIMAX model must fit to the same results as the original."""
    # Fit an ARIMA(1,1,0) to log GDP
    mod = sarimax.SARIMAX(data['lgdp'], order=(1, 1, 0))
    pkl_mod = pickle.loads(pickle.dumps(mod))

    res = mod.fit(disp=-1, full_output=True, method='newton')
    pkl_res = pkl_mod.fit(disp=-1, full_output=True, method='newton')

    assert_allclose(res.llf_obs, pkl_res.llf_obs)
    assert_allclose(res.tvalues, pkl_res.tvalues)
    assert_allclose(res.smoothed_state, pkl_res.smoothed_state)
    assert_allclose(res.resid.values, pkl_res.resid.values)
    # BUG fix: previously compared res.impulse_responses(10) against itself,
    # which is vacuously true; compare against the pickled results instead.
    assert_allclose(res.impulse_responses(10), pkl_res.impulse_responses(10))
def test_unobserved_components_pickle():
    """A pickled UnobservedComponents model (with missing data and exog)
    must fit to the same results as the original."""
    # Tests for missing data
    nobs = 20
    k_endog = 1
    np.random.seed(1208)
    endog = np.random.normal(size=(nobs, k_endog))
    endog[:4, 0] = np.nan  # leading missing observations
    exog2 = np.random.normal(size=(nobs, 2))
    index = pd.date_range('1970-01-01', freq='QS', periods=nobs)
    endog_pd = pd.DataFrame(endog, index=index)
    exog2_pd = pd.DataFrame(exog2, index=index)

    # same model with ndarray and pandas inputs
    models = [
        UnobservedComponents(endog, 'llevel', exog=exog2),
        UnobservedComponents(endog_pd, 'llevel', exog=exog2_pd),
    ]

    for mod in models:
        # Smoke tests
        pkl_mod = pickle.loads(pickle.dumps(mod))
        assert_equal(mod.start_params, pkl_mod.start_params)
        res = mod.fit(disp=False)
        pkl_res = pkl_mod.fit(disp=False)

        assert_allclose(res.llf_obs, pkl_res.llf_obs)
        assert_allclose(res.tvalues, pkl_res.tvalues)
        assert_allclose(res.smoothed_state, pkl_res.smoothed_state)
        assert_allclose(res.resid, pkl_res.resid)
        # BUG fix: previously compared res.impulse_responses(10) against
        # itself; compare against the pickled results instead.
        assert_allclose(res.impulse_responses(10),
                        pkl_res.impulse_responses(10))
def test_kalman_filter_pickle(data):
    """A pickled KalmanFilter must produce the same filtering output as the
    original (Kim & Nelson unobserved-components model for log GDP)."""
    # Construct the statespace representation
    true = results_kalman_filter.uc_uni
    k_states = 4
    model = KalmanFilter(k_endog=1, k_states=k_states)
    model.bind(data['lgdp'].values)

    model.design[:, :, 0] = [1, 1, 0, 0]
    # set the nonzero transition entries via fancy indexing
    model.transition[([0, 0, 1, 1, 2, 3],
                      [0, 3, 1, 2, 1, 3],
                      [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
    model.selection = np.eye(model.k_states)

    # Update matrices with given parameters
    (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
        true['parameters']
    )
    model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
    # diagonal state covariance in the first (only) time period
    model.state_cov[
        np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
        sigma_v ** 2, sigma_e ** 2, 0, sigma_w ** 2
    ]

    # Initialization
    initial_state = np.zeros((k_states,))
    initial_state_cov = np.eye(k_states) * 100

    # Initialization: modification
    # propagate the initial covariance one step through the transition
    initial_state_cov = np.dot(
        np.dot(model.transition[:, :, 0], initial_state_cov),
        model.transition[:, :, 0].T
    )
    model.initialize_known(initial_state, initial_state_cov)
    pkl_mod = pickle.loads(pickle.dumps(model))

    results = model.filter()
    pkl_results = pkl_mod.filter()

    # compare loglikelihood and filtered states after the burn-in period
    assert_allclose(results.llf_obs[true['start']:].sum(),
                    pkl_results.llf_obs[true['start']:].sum())
    assert_allclose(results.filtered_state[0][true['start']:],
                    pkl_results.filtered_state[0][true['start']:])
    assert_allclose(results.filtered_state[1][true['start']:],
                    pkl_results.filtered_state[1][true['start']:])
    assert_allclose(results.filtered_state[3][true['start']:],
                    pkl_results.filtered_state[3][true['start']:])
def test_representation_pickle():
    """Pickle round-trip of a bare Representation preserves the dimensions
    and the initialized system matrices."""
    nobs, k_endog = 10, 2
    endog = np.asfortranarray(
        np.arange(nobs * k_endog, dtype=float).reshape(k_endog, nobs))
    mod = Representation(endog, k_states=2)
    pkl_mod = pickle.loads(pickle.dumps(mod))

    assert_equal(mod.nobs, pkl_mod.nobs)
    assert_equal(mod.k_endog, pkl_mod.k_endog)

    mod._initialize_representation()
    pkl_mod._initialize_representation()
    assert_equal(mod.design, pkl_mod.design)
    assert_equal(mod.obs_intercept, pkl_mod.obs_intercept)
    assert_equal(mod.initial_variance, pkl_mod.initial_variance)
| bsd-3-clause | 3d011abb4c6091f74e2ec4d944c02fa7 | 32.84106 | 77 | 0.647162 | 3.07646 | false | true | false | false |
statsmodels/statsmodels | statsmodels/examples/tsa/ar1cholesky.py | 6 | 1041 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 15:42:18 2010
Author: josef-pktd
"""
import numpy as np
from scipy import linalg
def tiny2zero(x, eps = 1e-15):
    '''replace abs values smaller than eps by zero, makes copy

    Parameters
    ----------
    x : array_like
        input values
    eps : float
        absolute values below this threshold are set to zero

    Returns
    -------
    ndarray
        copy of x with tiny entries zeroed; the input is left unmodified
    '''
    # BUG fix: the original computed the mask on x.copy() but then assigned
    # into x itself, mutating the caller's array despite the docstring's
    # "makes copy" promise.  Work on an actual copy instead.
    x = np.asarray(x).copy()
    mask = np.abs(x) < eps
    x[mask] = 0
    return x
# Example: Cholesky factorization of an AR(1)/ARMA autocovariance matrix and
# the implied GLS whitening transformation.
nobs = 5
autocov = 0.8**np.arange(nobs)   # AR(1) autocovariances (overridden below)
#from statsmodels.tsa import arima_process as ap
#autocov = ap.arma_acf([1, -0.8, 0.2], [1])[:10]
# hard-coded ARMA(2,0) autocovariances, normalised by the variance 3
autocov = np.array([ 3.,     2.,     1.,     0.4,    0.12,   0.016, -0.0112,
                0.016 , -0.0112 , -0.01216 , -0.007488 , -0.0035584])/3.
autocov = autocov[:nobs]
sigma = linalg.toeplitz(autocov)   # Toeplitz autocovariance matrix
sigmainv = linalg.inv(sigma)
c = linalg.cholesky(sigma, lower=True)
ci = linalg.cholesky(sigmainv, lower=True)
print(sigma)
print(tiny2zero(ci/ci.max()))

"this is the text book transformation"
print('coefficient for first observation', np.sqrt(1-autocov[1]**2))
# reversing rows and columns of the Cholesky factor of sigma^{-1} gives the
# textbook (lower triangular) whitening transform
ci2 = ci[::-1,::-1].T
print(tiny2zero(ci2/ci2.max()))

print(np.dot(ci/ci.max(), np.ones(nobs)))
print(np.dot(ci2/ci2.max(), np.ones(nobs)))
| bsd-3-clause | ce14f761da84d543c4c73b4bd1b362f7 | 23.785714 | 70 | 0.636888 | 2.502404 | false | false | false | false |
statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | 3 | 38846 | '''Various extensions to distributions
* skew normal and skew t distribution by Azzalini, A. & Capitanio, A.
* Gram-Charlier expansion distribution (using 4 moments),
* distributions based on non-linear transformation
- Transf_gen
- ExpTransf_gen, LogTransf_gen
- TransfTwo_gen
(defines as examples: square, negative square and abs transformations)
- this versions are without __new__
* mnvormcdf, mvstdnormcdf : cdf, rectangular integral for multivariate normal
distribution
TODO:
* Where is Transf_gen for general monotonic transformation ? found and added it
* write some docstrings, some parts I do not remember
* add Box-Cox transformation, parametrized ?
this is only partially cleaned, still includes test examples as functions
main changes
* add transf_gen (2010-05-09)
* added separate example and tests (2010-05-09)
* collect transformation function into classes
Example
-------
>>> logtg = Transf_gen(stats.t, np.exp, np.log,
numargs = 1, a=0, name = 'lnnorm',
longname = 'Exp transformed normal',
extradoc = '\ndistribution of y = exp(x), with x standard normal'
'precision for moment and stats is not very high, 2-3 decimals')
>>> logtg.cdf(5, 6)
0.92067704211191848
>>> stats.t.cdf(np.log(5), 6)
0.92067704211191848
>>> logtg.pdf(5, 6)
0.021798547904239293
>>> stats.t.pdf(np.log(5), 6)
0.10899273954837908
>>> stats.t.pdf(np.log(5), 6)/5. #derivative
0.021798547909675815
Author: josef-pktd
License: BSD
'''
import numpy as np
from numpy import poly1d,sqrt, exp
import scipy
from scipy import stats, special
from scipy.stats import distributions
from statsmodels.stats.moment_helpers import mvsk2mc, mc2mvsk
try:
from scipy.stats._mvn import mvndst
except ImportError:
# Must be using SciPy <1.8.0 where this function was moved (it's not a
# public SciPy function, but we need it here)
from scipy.stats.mvn import mvndst
#note copied from distr_skewnorm_0.py
class SkewNorm_gen(distributions.rv_continuous):
    '''univariate Skew-Normal distribution of Azzalini

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self):
        #super(SkewNorm_gen,self).__init__(
        # NOTE(review): shapes/extradoc keyword arguments target older scipy
        # versions of rv_continuous -- confirm against the pinned scipy.
        distributions.rv_continuous.__init__(self,
            name = 'Skew Normal distribution', shapes = 'alpha',
            extradoc = ''' ''' )

    def _argcheck(self, alpha):
        # any real alpha is allowed
        return 1 #(alpha >= 0)

    def _rvs(self, alpha):
        # see http://azzalini.stat.unipd.it/SN/faq.html
        # sampling via the conditioning representation of the skew normal
        delta = alpha/np.sqrt(1+alpha**2)
        u0 = stats.norm.rvs(size=self._size)
        u1 = delta*u0 + np.sqrt(1-delta**2)*stats.norm.rvs(size=self._size)
        return np.where(u0>0, u1, -u1)

    def _munp(self, n, alpha):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf, which is much slower
        return self._mom0_sc(n, alpha)

    def _pdf(self,x,alpha):
        # 2*normpdf(x)*normcdf(alpha*x)
        return 2.0/np.sqrt(2*np.pi)*np.exp(-x**2/2.0) * special.ndtr(alpha*x)

    def _stats_skip(self,x,alpha,moments='mvsk'):
        #skip for now to force moment integration as check
        pass

# module-level instance, analogous to scipy.stats conventions
skewnorm = SkewNorm_gen()
# generated the same way as distributions in stats.distributions
class SkewNorm2_gen(distributions.rv_continuous):
    '''univariate Skew-Normal distribution of Azzalini

    class follows scipy.stats.distributions pattern
    '''
    def _argcheck(self, alpha):
        # any real alpha is allowed
        return 1 #where(alpha>=0, 1, 0)

    def _pdf(self,x,alpha):
        # 2*normpdf(x)*normcdf(alpha*x
        return 2.0/np.sqrt(2*np.pi)*np.exp(-x**2/2.0) * special.ndtr(alpha*x)

# NOTE(review): shapes/extradoc keyword arguments target older scipy versions
skewnorm2 = SkewNorm2_gen(name = 'Skew Normal distribution', shapes = 'alpha',
                          extradoc = ''' -inf < alpha < inf''')
class ACSkewT_gen(distributions.rv_continuous):
    '''univariate Skew-T distribution of Azzalini

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self):
        #super(SkewT_gen,self).__init__(
        # NOTE(review): shapes/extradoc keyword arguments target older scipy
        # versions of rv_continuous
        distributions.rv_continuous.__init__(self,
            name = 'Skew T distribution', shapes = 'df, alpha',
            extradoc = '''
Skewed T distribution by Azzalini, A. & Capitanio, A. (2003)_
the pdf is given by:
pdf(x) = 2.0 * t.pdf(x, df) * t.cdf(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))
with alpha >=0
Note: different from skewed t distribution by Hansen 1999
.._
Azzalini, A. & Capitanio, A. (2003), Distributions generated by perturbation of
symmetry with emphasis on a multivariate skew-t distribution,
appears in J.Roy.Statist.Soc, series B, vol.65, pp.367-389
''' )

    def _argcheck(self, df, alpha):
        # (alpha == alpha) rejects NaN alpha; df must be positive
        return (alpha == alpha)*(df>0)

##    def _arg_check(self, alpha):
##        return np.where(alpha>=0, 0, 1)
##    def _argcheck(self, alpha):
##        return np.where(alpha>=0, 1, 0)

    def _rvs(self, df, alpha):
        # see http://azzalini.stat.unipd.it/SN/faq.html
        # scale-mixture representation: skew-normal draw divided by
        # sqrt(chi2/df)
        #delta = alpha/np.sqrt(1+alpha**2)
        V = stats.chi2.rvs(df, size=self._size)
        z = skewnorm.rvs(alpha, size=self._size)
        return z/np.sqrt(V/df)

    def _munp(self, n, df, alpha):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf
        return self._mom0_sc(n, df, alpha)

    def _pdf(self, x, df, alpha):
        # 2*normpdf(x)*normcdf(alpha*x)
        return 2.0*distributions.t._pdf(x, df) * special.stdtr(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))
##
##def mvsk2cm(*args):
## mu,sig,sk,kur = args
## # Get central moments
## cnt = [None]*4
## cnt[0] = mu
## cnt[1] = sig #*sig
## cnt[2] = sk * sig**1.5
## cnt[3] = (kur+3.0) * sig**2.0
## return cnt
##
##
##def mvsk2m(args):
## mc, mc2, skew, kurt = args#= self._stats(*args,**mdict)
## mnc = mc
## mnc2 = mc2 + mc*mc
## mc3 = skew*(mc2**1.5) # 3rd central moment
## mnc3 = mc3+3*mc*mc2+mc**3 # 3rd non-central moment
## mc4 = (kurt+3.0)*(mc2**2.0) # 4th central moment
## mnc4 = mc4+4*mc*mc3+6*mc*mc*mc2+mc**4
## return (mc, mc2, mc3, mc4), (mnc, mnc2, mnc3, mnc4)
##
##def mc2mvsk(args):
## mc, mc2, mc3, mc4 = args
## skew = mc3 / mc2**1.5
## kurt = mc4 / mc2**2.0 - 3.0
## return (mc, mc2, skew, kurt)
##
##def m2mc(args):
## mnc, mnc2, mnc3, mnc4 = args
## mc = mnc
## mc2 = mnc2 - mnc*mnc
## #mc3 = skew*(mc2**1.5) # 3rd central moment
## mc3 = mnc3 - (3*mc*mc2+mc**3) # 3rd central moment
## #mc4 = (kurt+3.0)*(mc2**2.0) # 4th central moment
## mc4 = mnc4 - (4*mc*mc3+6*mc*mc*mc2+mc**4)
## return (mc, mc2, mc3, mc4)
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
def pdf_moments_st(cnt):
    """Return the Gaussian expanded pdf function given the list of central
    moments (first one is mean).

    version of scipy.stats, any changes ?
    the scipy.stats version has a bug and returns normal distribution

    Returns
    -------
    thisfunc : function
        evaluates the expanded pdf at x
    totp : poly1d
        the correction polynomial

    Notes
    -----
    Fixes applied to the original:
    - removed a leftover debugging ``raise SystemError`` and print statements
      that made the N > 2 branch unreachable
    - integer division in the inner ``range`` (would raise TypeError on
      Python 3)
    - ``special.factorial2`` instead of the removed ``scipy.factorial2``
    The known algorithmic issue remains: for four moments the inner loop
    range is empty, so the result reduces to the normal density.
    """
    N = len(cnt)
    if N < 2:
        raise ValueError("At least two moments must be given to "
                         "approximate the pdf.")
    totp = poly1d(1)
    sig = sqrt(cnt[1])
    mu = cnt[0]
    if N > 2:
        Dvals = _hermnorm(N+1)
        for k in range(3, N+1):
            # Find the coefficient Ck of the k-th Hermite polynomial
            Ck = 0.0
            for n in range((k-3)//2):   # integer division (Py3 fix)
                m = k - 2*n
                if m % 2:  # m is odd
                    momdiff = cnt[m-1]
                else:
                    momdiff = cnt[m-1] - sig*sig*special.factorial2(m-1)
                Ck += Dvals[k][m] / sig**m * momdiff
            totp = totp + Ck*Dvals[k]

    def thisfunc(x):
        xn = (x-mu)/sig
        return totp(xn)*exp(-xn*xn/2.0)/sqrt(2*np.pi)/sig
    return thisfunc, totp
def pdf_mvsk(mvsk):
    """Return the Gaussian expanded pdf function given the list of 1st, 2nd
    moment and skew and Fisher (excess) kurtosis.

    Parameters
    ----------
    mvsk : list of mu, mc2, skew, kurt
        distribution is matched to these four moments

    Returns
    -------
    pdffunc : function
        function that evaluates the pdf(x), where x is the non-standardized
        random variable.

    Notes
    -----
    This implements a Gram-Charlier expansion of the normal distribution
    where the first 2 moments coincide with those of the normal distribution
    but skew and kurtosis can deviate from it.

    In the Gram-Charlier distribution it is possible that the density
    becomes negative. This is the case when the deviation from the
    normal distribution is too large.

    References
    ----------
    https://en.wikipedia.org/wiki/Edgeworth_series
    Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
    Distributions, Volume 1, 2nd ed., p.30
    """
    if len(mvsk) < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")

    mu, mc2, skew, kurt = mvsk
    sig = sqrt(mc2)

    # order-3 Hermite polynomial from _hermnorm carries a negative sign,
    # hence the minus on the skew term
    hermite = _hermnorm(len(mvsk) + 1)
    correction = poly1d(1) - (skew / 6.0) * hermite[3] \
                 + (kurt / 24.0) * hermite[4]

    def pdffunc(x):
        xn = (x - mu) / sig
        return correction(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig

    return pdffunc
def pdf_moments(cnt):
    """Return the Gaussian expanded pdf function given the list of central
    moments (first one is mean).

    Parameters
    ----------
    cnt : sequence of four floats
        central moments (mc, mc2, mc3, mc4)

    Returns
    -------
    thisfunc : function
        evaluates the Gram-Charlier expanded pdf at x

    Notes
    -----
    This implements a Gram-Charlier expansion of the normal distribution
    where the first 2 moments coincide with those of the normal distribution
    but skew and kurtosis can deviate from it.

    In the Gram-Charlier distribution it is possible that the density
    becomes negative. This is the case when the deviation from the
    normal distribution is too large.

    References
    ----------
    https://en.wikipedia.org/wiki/Edgeworth_series
    Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
    Distributions, Volume 1, 2nd ed., p.30
    """
    N = len(cnt)
    # BUG fix: the original guard accepted N >= 2 but the next line
    # unconditionally unpacked four moments; all four are required.
    if N < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")
    mc, mc2, mc3, mc4 = cnt
    skew = mc3 / mc2**1.5
    kurt = mc4 / mc2**2.0 - 3.0  # Fisher kurtosis, excess kurtosis
    totp = poly1d(1)
    sig = sqrt(mc2)
    mu = mc

    Dvals = _hermnorm(N+1)
    C3 = skew/6.0
    C4 = kurt/24.0
    # order-3 Hermite polynomial from _hermnorm carries a negative sign,
    # hence the minus on the skew term
    totp = totp - C3*Dvals[3] + C4*Dvals[4]

    def thisfunc(x):
        xn = (x-mu)/sig
        return totp(xn)*np.exp(-xn*xn/2.0)/np.sqrt(2*np.pi)/sig
    return thisfunc
class NormExpan_gen(distributions.rv_continuous):
    '''Gram-Charlier Expansion of Normal distribution

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self,args, **kwds):
        #todo: replace with super call
        # NOTE(review): shapes/extradoc keyword arguments target older scipy
        # versions of rv_continuous
        distributions.rv_continuous.__init__(self,
            name = 'Normal Expansion distribution', shapes = ' ',
            extradoc = '''
The distribution is defined as the Gram-Charlier expansion of
the normal distribution using the first four moments. The pdf
is given by
pdf(x) = (1+ skew/6.0 * H(xc,3) + kurt/24.0 * H(xc,4))*normpdf(xc)
where xc = (x-mu)/sig is the standardized value of the random variable
and H(xc,3) and H(xc,4) are Hermite polynomials
Note: This distribution has to be parametrized during
initialization and instantiation, and does not have a shape
parameter after instantiation (similar to frozen distribution
except for location and scale.) Location and scale can be used
as with other distributions, however note, that they are relative
to the initialized distribution.
''' )
        #print args, kwds
        # args are interpreted according to `mode`:
        #   'sample'  : args is a data sample; moments are estimated
        #   'mvsk'    : args = (mean, variance, skew, kurtosis)
        #   'centmom' : args are the first four central moments
        mode = kwds.get('mode', 'sample')
        if mode == 'sample':
            mu,sig,sk,kur = stats.describe(args)[2:]
            self.mvsk = (mu,sig,sk,kur)
            cnt = mvsk2mc((mu,sig,sk,kur))
        elif mode == 'mvsk':
            cnt = mvsk2mc(args)
            self.mvsk = args
        elif mode == 'centmom':
            cnt = args
            self.mvsk = mc2mvsk(cnt)
        else:
            raise ValueError("mode must be 'mvsk' or centmom")

        self.cnt = cnt
        #self.mvsk = (mu,sig,sk,kur)
        #self._pdf = pdf_moments(cnt)
        self._pdf = pdf_mvsk(self.mvsk)

    def _munp(self,n):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf
        return self._mom0_sc(n)

    def _stats_skip(self):
        # skip for now to force numerical integration of pdf for testing
        return self.mvsk
## copied from nonlinear_transform_gen.py
''' A class for the distribution of a non-linear monotonic transformation of a continuous random variable
simplest usage:
example: create log-gamma distribution, i.e. y = log(x),
where x is gamma distributed (also available in scipy.stats)
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp)
example: what is the distribution of the discount factor y=1/(1+x)
where interest rate x is normally distributed with N(mux,stdx**2)')?
(just to come up with a story that implies a nice transformation)
invnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, a=-np.inf)
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
Note: I'm working from my version of scipy.stats.distribution.
But this script runs under scipy 0.6.0 (checked with numpy: 1.2.0rc2 and python 2.4)
This is not yet thoroughly tested, polished or optimized
TODO:
* numargs handling is not yet working properly, numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution is untested and incomplete
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
Created on Tuesday, October 28, 2008, 12:40:37 PM
Author: josef-pktd
License: BSD
'''
def get_u_argskwargs(**kwargs):
    """Split out keyword arguments destined for the underlying distribution.

    Keywords prefixed with ``u_`` are collected with the prefix stripped;
    from that stripped dict the key ``u_args`` (i.e. an original keyword
    ``u_u_args``) is popped and returned separately.

    Returns
    -------
    (u_args, u_kwargs) : tuple of (object or None, dict)
    """
    prefix = 'u_'
    u_kwargs = {}
    for key, value in kwargs.items():
        if key.startswith(prefix):
            u_kwargs[key.replace(prefix, '', 1)] = value
    u_args = u_kwargs.pop('u_args', None)
    return u_args, u_kwargs
class Transf_gen(distributions.rv_continuous):
    '''a class for non-linear monotonic transformation of a continuous random variable

    Represents the distribution of y = func(x), where x follows the underlying
    distribution ``kls`` and ``funcinv`` is the inverse mapping of ``func``.

    NOTE(review): relies on older scipy internals (the ``extradoc`` keyword of
    ``rv_continuous.__init__`` and the ``_size`` attribute in ``_rvs``) --
    confirm against the installed scipy version.
    '''
    def __init__(self, kls, func, funcinv, *args, **kwargs):
        #print args
        #print kwargs
        self.func = func
        self.funcinv = funcinv
        #explicit for self.__dict__.update(kwargs)
        #need to set numargs because inspection does not work
        self.numargs = kwargs.pop('numargs', 0)
        #print self.numargs
        name = kwargs.pop('name','transfdist')
        longname = kwargs.pop('longname','Non-linear transformed distribution')
        extradoc = kwargs.pop('extradoc',None)
        a = kwargs.pop('a', -np.inf)
        b = kwargs.pop('b', np.inf)
        self.decr = kwargs.pop('decr', False)
        #defines whether it is a decreasing (True)
        # or increasing (False) monotonic transformation
        # remaining u_-prefixed keywords are meant for the underlying distribution
        self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
        self.kls = kls #(self.u_args, self.u_kwargs)
        # possible to freeze the underlying distribution
        super(Transf_gen,self).__init__(a=a, b=b, name = name,
                                        longname = longname, extradoc = extradoc)
    def _rvs(self, *args, **kwargs):
        # sample by transforming draws of the underlying distribution
        # NOTE(review): _ppf applies self.func to underlying quantiles while
        # _rvs applies self.funcinv to underlying draws -- these look
        # inconsistent; _rvs should probably use self.func.  Confirm.
        self.kls._size = self._size
        return self.funcinv(self.kls._rvs(*args))
    def _cdf(self,x,*args, **kwargs):
        #print args
        if not self.decr:
            # increasing transform: P(Y <= y) = P(X <= funcinv(y))
            return self.kls._cdf(self.funcinv(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            # decreasing transform flips the orientation of the cdf
            return 1.0 - self.kls._cdf(self.funcinv(x),*args, **kwargs)
    def _ppf(self, q, *args, **kwargs):
        if not self.decr:
            return self.func(self.kls._ppf(q,*args, **kwargs))
        else:
            return self.func(self.kls._ppf(1-q,*args, **kwargs))
def inverse(x):
    """Elementwise reciprocal 1/x (true division)."""
    return np.true_divide(1.0, x)
# Location and scale of the normal interest-rate distribution used by the
# discount-factor example functions below: x ~ N(mux, stdx**2).
# (A previous example used mux, stdx = 0.05, 0.1; that assignment was dead
# code, immediately overwritten by the values below, and has been removed.)
mux, stdx = 9.0, 1.0
def inversew(x):
    """Map a normal draw x to the discount factor 1/(1 + mux + stdx*x).

    Uses the module-level location ``mux`` and scale ``stdx``.
    """
    denom = 1 + mux + x * stdx
    return 1.0 / denom
def inversew_inv(x):
    """Inverse of ``inversew``: recover the normal draw from a discount factor."""
    shifted = 1.0 / x - 1.0 - mux
    return shifted / stdx
def identit(x):
    """Identity transformation; returns its argument unchanged."""
    return x
# Example instances of Transf_gen.  The extradoc strings are runtime data and
# are kept verbatim (including the stray ')' and the 'andstats' typo).
# NOTE(review): the extradoc mentions N(0.05, 0.1**2) but the module-level
# mux, stdx used by inversew are 9.0, 1.0 -- confirm which is intended.
invdnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, #a=-np.inf,
                numargs = 0, name = 'discf', longname = 'normal-based discount factor',
                extradoc = '\ndistribution of discount factor y=1/(1+x)) with x N(0.05,0.1**2)')
# NOTE(review): numargs=2 looks wrong for a normal-based transform (the normal
# distribution has no shape parameters) -- confirm.
lognormalg = Transf_gen(stats.norm, np.exp, np.log,
                numargs = 2, a=0, name = 'lnnorm',
                longname = 'Exp transformed normal',
                extradoc = '\ndistribution of y = exp(x), with x standard normal'
                'precision for moment andstats is not very high, 2-3 decimals')
# log of a gamma random variable (log-gamma distribution)
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp, numargs=1)
## copied from nonlinear_transform_short.py
'''univariate distribution of a non-linear monotonic transformation of a
random variable
'''
class ExpTransf_gen(distributions.rv_continuous):
    '''Distribution based on log/exp transformation

    The constructor is called with an underlying distribution class ``kls``
    and generates the distribution of ``y = exp(x)`` where ``x ~ kls``
    (equivalently, ``log(y) ~ kls``).

    Keyword Parameters
    ------------------
    numargs : int, default 1
        number of shape parameters of the underlying distribution
    name : str
        name of the new distribution
    a : float, default 0
        lower support bound of the transformed distribution
    '''
    def __init__(self, kls, *args, **kwargs):
        # explicit handling instead of self.__dict__.update(kwargs)
        self.numargs = kwargs.get('numargs', 1)
        name = kwargs.get('name', 'Log transformed distribution')
        a = kwargs.get('a', 0)
        # BUG FIX: the lower bound extracted above was previously ignored --
        # the superclass was always called with a=0.  Pass it through.
        super(ExpTransf_gen, self).__init__(a=a, name=name)
        self.kls = kls

    def _cdf(self, x, *args):
        # P(exp(X) <= x) = P(X <= log(x))
        return self.kls.cdf(np.log(x), *args)

    def _ppf(self, q, *args):
        # quantile of y = exp(x) is exp of the underlying quantile
        return np.exp(self.kls.ppf(q, *args))
class LogTransf_gen(distributions.rv_continuous):
    '''Distribution based on log/exp transformation

    Constructed from an underlying distribution class ``kls``; represents the
    distribution of ``y = log(x)`` where ``x ~ kls`` (so ``exp(y) ~ kls``).
    '''
    def __init__(self, kls, *args, **kwargs):
        # explicit handling instead of self.__dict__.update(kwargs)
        self.numargs = kwargs.get('numargs', 1)
        name = kwargs.get('name', 'Log transformed distribution')
        a = kwargs.get('a', 0)
        super(LogTransf_gen, self).__init__(a=a, name=name)
        self.kls = kls

    def _cdf(self, x, *args):
        # P(log(X) <= x) = P(X <= exp(x))
        return self.kls._cdf(np.exp(x), *args)

    def _ppf(self, q, *args):
        # quantile of y = log(x) is log of the underlying quantile
        return np.log(self.kls._ppf(q, *args))
## copied from transformtwo.py
'''
Created on Apr 28, 2009
@author: Josef Perktold
'''
''' A class for the distribution of a non-linear u-shaped or hump shaped transformation of a
continuous random variable
This is a companion to the distributions of non-linear monotonic transformation to the case
when the inverse mapping is a 2-valued correspondence, for example for absolute value or square
simplest usage:
example: create squared distribution, i.e. y = x**2,
where x is normal or t distributed
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
This verifies for normal - chi2, normal - halfnorm, foldnorm, and t - F
TODO:
* numargs handling is not yet working properly,
numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution works in t distribution example
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
'''
class TransfTwo_gen(distributions.rv_continuous):
    '''Distribution based on a non-monotonic (u- or hump-shaped) transformation

    The constructor is called with a distribution class and functions that
    define the non-linear transformation, and generates the distribution of
    the transformed random variable.

    Note: the transformation, its inverse and derivatives need to be fully
    specified: func, funcinvplus, funcinvminus, derivplus, derivminus.
    Currently no numerical derivatives or inverse are calculated.

    This can be used to generate distribution instances similar to the
    distributions in scipy.stats.
    '''
    #a class for non-linear non-monotonic transformation of a continuous random variable
    def __init__(self, kls, func, funcinvplus, funcinvminus, derivplus,
                 derivminus, *args, **kwargs):
        #print args
        #print kwargs
        # forward transformation and its two inverse branches with derivatives
        self.func = func
        self.funcinvplus = funcinvplus
        self.funcinvminus = funcinvminus
        self.derivplus = derivplus
        self.derivminus = derivminus
        #explicit for self.__dict__.update(kwargs)
        #need to set numargs because inspection does not work
        self.numargs = kwargs.pop('numargs', 0)
        #print self.numargs
        name = kwargs.pop('name','transfdist')
        longname = kwargs.pop('longname','Non-linear transformed distribution')
        extradoc = kwargs.pop('extradoc',None)
        a = kwargs.pop('a', -np.inf) # attached to self in super
        b = kwargs.pop('b', np.inf)  # self.a, self.b would be overwritten
        self.shape = kwargs.pop('shape', False)
        #defines whether it is a `u` shaped or `hump' shaped
        # transformation
        self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
        self.kls = kls #(self.u_args, self.u_kwargs)
        # possible to freeze the underlying distribution
        # NOTE(review): `extradoc` was removed from rv_continuous in newer
        # scipy versions -- confirm against the installed scipy.
        super(TransfTwo_gen,self).__init__(a=a, b=b, name = name,
                                           shapes = kls.shapes,
                                           longname = longname, extradoc = extradoc)
        # add enough info for self.freeze() to be able to reconstruct the instance
        self._ctor_param.update(
            dict(kls=kls, func=func, funcinvplus=funcinvplus,
                 funcinvminus=funcinvminus, derivplus=derivplus,
                 derivminus=derivminus, shape=self.shape)
        )
    def _rvs(self, *args):
        # NOTE(review): `_size` is an old scipy attribute -- confirm availability
        self.kls._size = self._size #size attached to self, not function argument
        return self.func(self.kls._rvs(*args))
    def _pdf(self,x,*args, **kwargs):
        #print args
        # density by change of variables, summed over the two inverse branches;
        # the overall sign flips between the u- and hump-shaped cases because
        # the branch derivatives swap signs between the two shapes
        if self.shape == 'u':
            signpdf = 1
        elif self.shape == 'hump':
            signpdf = -1
        else:
            raise ValueError('shape can only be `u` or `hump`')
        return signpdf * (self.derivplus(x)*self.kls._pdf(self.funcinvplus(x),*args, **kwargs) -
                          self.derivminus(x)*self.kls._pdf(self.funcinvminus(x),*args, **kwargs))
        #note scipy _cdf only take *args not *kwargs
    def _cdf(self,x,*args, **kwargs):
        #print args
        if self.shape == 'u':
            return self.kls._cdf(self.funcinvplus(x),*args, **kwargs) - \
                   self.kls._cdf(self.funcinvminus(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            # hump-shaped: complement of the survival function
            return 1.0 - self._sf(x,*args, **kwargs)
    def _sf(self,x,*args, **kwargs):
        #print args
        if self.shape == 'hump':
            return self.kls._cdf(self.funcinvplus(x),*args, **kwargs) - \
                   self.kls._cdf(self.funcinvminus(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            # u-shaped: complement of the cdf
            return 1.0 - self._cdf(x, *args, **kwargs)
    def _munp(self, n,*args, **kwargs):
        # non-central moments by numerical integration of the pdf
        return self._mom0_sc(n,*args)
    # ppf might not be possible in general case?
    # should be possible in symmetric case
    # def _ppf(self, q, *args, **kwargs):
    #     if self.shape == 'u':
    #         return self.func(self.kls._ppf(q,*args, **kwargs))
    #     elif self.shape == 'hump':
    #         return self.func(self.kls._ppf(1-q,*args, **kwargs))
#TODO: rename these functions to have unique names
class SquareFunc:
    '''class to hold quadratic function with inverse function and derivative

    using instance methods instead of class methods, if we want extension
    to parametrized function
    '''
    def inverseplus(self, x):
        # positive branch of the inverse of y = x**2
        return np.sqrt(x)

    def inverseminus(self, x):
        # negative branch of the inverse of y = x**2
        return 0.0 - np.sqrt(x)

    def derivplus(self, x):
        # derivative of the positive inverse branch
        return 0.5/np.sqrt(x)

    def derivminus(self, x):
        # derivative of the negative inverse branch
        return 0.0 - 0.5/np.sqrt(x)

    def squarefunc(self, x):
        # the forward transformation y = x**2
        return np.power(x,2)
# shared helper instance with the square transform and its inverse branches
sqfunc = SquareFunc()

# distribution of x**2 for standard normal x (a chi-square_1 up to checks)
squarenormalg = TransfTwo_gen(stats.norm, sqfunc.squarefunc, sqfunc.inverseplus,
                sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
                shape='u', a=0.0, b=np.inf,
                numargs = 0, name = 'squarenorm', longname = 'squared normal distribution',
                extradoc = '\ndistribution of the square of a normal random variable' +\
                ' y=x**2 with x N(0.0,1)')
                #u_loc=l, u_scale=s)
# distribution of x**2 for t-distributed x (an F_{1,dof} up to checks)
# NOTE(review): name='squarenorm' duplicates the instance above and looks like
# a copy-paste slip -- 'squaret' (or similar) was probably intended; confirm
# before changing, since the name is runtime data.
squaretg = TransfTwo_gen(stats.t, sqfunc.squarefunc, sqfunc.inverseplus,
                sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
                shape='u', a=0.0, b=np.inf,
                numargs = 1, name = 'squarenorm', longname = 'squared t distribution',
                extradoc = '\ndistribution of the square of a t random variable' +\
                ' y=x**2 with x t(dof,0.0,1)')
# Helper functions for the negative-square transform y = -x**2 (hump-shaped).
# NOTE: these module-level names are redefined further below for the
# absolute-value transform; only negsquarenormalg (defined immediately after)
# captures these versions.
def inverseplus(x):
    # positive branch of the inverse of y = -x**2
    return np.sqrt(-x)

def inverseminus(x):
    # negative branch of the inverse of y = -x**2
    return 0.0 - np.sqrt(-x)

def derivplus(x):
    # derivative of the positive inverse branch
    return 0.0 - 0.5/np.sqrt(-x)

def derivminus(x):
    # derivative of the negative inverse branch
    return 0.5/np.sqrt(-x)

def negsquarefunc(x):
    # the forward transformation y = -x**2
    return -np.power(x,2)
# distribution of -x**2 for standard normal x; support is (-inf, 0],
# hump-shaped transformation
negsquarenormalg = TransfTwo_gen(stats.norm, negsquarefunc, inverseplus, inverseminus,
                derivplus, derivminus, shape='hump', a=-np.inf, b=0.0,
                numargs = 0, name = 'negsquarenorm', longname = 'negative squared normal distribution',
                extradoc = '\ndistribution of the negative square of a normal random variable' +\
                ' y=-x**2 with x N(0.0,1)')
                #u_loc=l, u_scale=s)
# Helper functions for the absolute-value transform y = |x| (u-shaped).
# These redefine the module-level names used for the negative-square
# transform above.
def inverseplus(x):
    # positive branch of the inverse of y = |x|
    return x

def inverseminus(x):
    # negative branch of the inverse of y = |x|
    return 0.0 - x

def derivplus(x):
    # derivative of the positive inverse branch (constant)
    return 1.0

def derivminus(x):
    # derivative of the negative inverse branch (constant)
    return 0.0 - 1.0

def absfunc(x):
    # forward transformation; NOTE: absnormalg below passes np.abs directly,
    # so this definition is unused by it
    return np.abs(x)

# distribution of |x| for standard normal x (half-normal up to checks)
absnormalg = TransfTwo_gen(stats.norm, np.abs, inverseplus, inverseminus,
                derivplus, derivminus, shape='u', a=0.0, b=np.inf,
                numargs = 0, name = 'absnorm', longname = 'absolute of normal distribution',
                extradoc = '\ndistribution of the absolute value of a normal random variable' +\
                ' y=abs(x) with x N(0,1)')
#copied from mvncdf.py
'''multivariate normal probabilities and cumulative distribution function
a wrapper for scipy.stats._mvn.mvndst
SUBROUTINE MVNDST( N, LOWER, UPPER, INFIN, CORREL, MAXPTS,
& ABSEPS, RELEPS, ERROR, VALUE, INFORM )
*
* A subroutine for computing multivariate normal probabilities.
* This subroutine uses an algorithm given in the paper
* "Numerical Computation of Multivariate Normal Probabilities", in
* J. of Computational and Graphical Stat., 1(1992), pp. 141-149, by
* Alan Genz
* Department of Mathematics
* Washington State University
* Pullman, WA 99164-3113
* Email : AlanGenz@wsu.edu
*
* Parameters
*
* N INTEGER, the number of variables.
* LOWER REAL, array of lower integration limits.
* UPPER REAL, array of upper integration limits.
* INFIN INTEGER, array of integration limits flags:
* if INFIN(I) < 0, Ith limits are (-infinity, infinity);
* if INFIN(I) = 0, Ith limits are (-infinity, UPPER(I)];
* if INFIN(I) = 1, Ith limits are [LOWER(I), infinity);
* if INFIN(I) = 2, Ith limits are [LOWER(I), UPPER(I)].
* CORREL REAL, array of correlation coefficients; the correlation
* coefficient in row I column J of the correlation matrix
* should be stored in CORREL( J + ((I-2)*(I-1))/2 ), for J < I.
* The correlation matrix must be positive semidefinite.
* MAXPTS INTEGER, maximum number of function values allowed. This
* parameter can be used to limit the time. A sensible
* strategy is to start with MAXPTS = 1000*N, and then
* increase MAXPTS if ERROR is too large.
* ABSEPS REAL absolute error tolerance.
* RELEPS REAL relative error tolerance.
* ERROR REAL estimated absolute error, with 99% confidence level.
* VALUE REAL estimated value for the integral
* INFORM INTEGER, termination status parameter:
* if INFORM = 0, normal completion with ERROR < EPS;
* if INFORM = 1, completion with ERROR > EPS and MAXPTS
* function values used; increase MAXPTS to
* decrease ERROR;
* if INFORM = 2, N > 500 or N < 1.
*
>>> mvndst([0.0,0.0],[10.0,10.0],[0,0],[0.5])
(2e-016, 1.0, 0)
>>> mvndst([0.0,0.0],[100.0,100.0],[0,0],[0.0])
(2e-016, 1.0, 0)
>>> mvndst([0.0,0.0],[1.0,1.0],[0,0],[0.0])
(2e-016, 0.70786098173714096, 0)
>>> mvndst([0.0,0.0],[0.001,1.0],[0,0],[0.0])
(2e-016, 0.42100802096993045, 0)
>>> mvndst([0.0,0.0],[0.001,10.0],[0,0],[0.0])
(2e-016, 0.50039894221391101, 0)
>>> mvndst([0.0,0.0],[0.001,100.0],[0,0],[0.0])
(2e-016, 0.50039894221391101, 0)
>>> mvndst([0.0,0.0],[0.01,100.0],[0,0],[0.0])
(2e-016, 0.5039893563146316, 0)
>>> mvndst([0.0,0.0],[0.1,100.0],[0,0],[0.0])
(2e-016, 0.53982783727702899, 0)
>>> mvndst([0.0,0.0],[0.1,100.0],[2,2],[0.0])
(2e-016, 0.019913918638514494, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.0])
(2e-016, 0.25, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[-1,0],[0.0])
(2e-016, 0.5, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[-1,0],[0.5])
(2e-016, 0.5, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.5])
(2e-016, 0.33333333333333337, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.99])
(2e-016, 0.47747329317779391, 0)
'''
# human-readable meaning of the INFORM termination code returned by mvndst
informcode = {0: 'normal completion with ERROR < EPS',
              1: '''completion with ERROR > EPS and MAXPTS function values used;
increase MAXPTS to decrease ERROR;''',
              2: 'N > 500 or N < 1'}
def mvstdnormcdf(lower, upper, corrcoef, **kwds):
    '''standardized multivariate normal cumulative distribution function

    This is a wrapper for scipy.stats._mvn.mvndst which calculates
    a rectangular integral over a standardized multivariate normal
    distribution.

    This function assumes standardized scale, that is the variance in each
    dimension is one, but correlation can be arbitrary,
    covariance = correlation matrix

    Parameters
    ----------
    lower, upper : array_like, 1d
        lower and upper integration limits with length equal to the number
        of dimensions of the multivariate normal distribution. It can contain
        -np.inf or np.inf for open integration intervals
    corrcoef : float or array_like
        specifies correlation matrix in one of three ways, see notes
    optional keyword parameters to influence integration
        * maxpts : int, maximum number of function values allowed. This
          parameter can be used to limit the time. A sensible
          strategy is to start with `maxpts` = 1000*N, and then
          increase `maxpts` if ERROR is too large.
        * abseps : float absolute error tolerance.
        * releps : float relative error tolerance.

    Returns
    -------
    cdfvalue : float
        value of the integral

    Notes
    -----
    The correlation matrix corrcoef can be given in 3 different ways.
    If the multivariate normal is two-dimensional then only the
    correlation coefficient needs to be provided.
    For general dimension the correlation matrix can be provided either
    as a one-dimensional array of the lower triangular correlation
    coefficients stacked by rows, or as a full square correlation matrix.

    See Also
    --------
    mvnormcdf : cdf of multivariate normal distribution without
        standardization

    Examples
    --------
    >>> print(mvstdnormcdf([-np.inf,-np.inf], [0.0,np.inf], 0.5))
    0.5
    >>> corr = [[1.0, 0, 0.5],[0,1,0],[0.5,0,1]]
    >>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr, abseps=1e-6))
    0.166666399198
    >>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0],corr, abseps=1e-8))
    something wrong completion with ERROR > EPS and MAXPTS function values used;
                increase MAXPTS to decrease ERROR; 1.048330348e-006
    0.166666546218
    >>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0], corr, \
                            maxpts=100000, abseps=1e-8))
    0.166666588293
    '''
    n = len(lower)
    #do not know if converting to array is necessary,
    #but it makes ndim check possible
    lower = np.array(lower)
    upper = np.array(upper)
    corrcoef = np.array(corrcoef)

    correl = np.zeros(int(n*(n-1)/2.0)) #dtype necessary?

    if (lower.ndim != 1) or (upper.ndim != 1):
        raise ValueError('can handle only 1D bounds')
    if len(upper) != n:
        raise ValueError('bounds have different lengths')
    if n==2 and corrcoef.size==1:
        # bivariate case: a single correlation coefficient is enough
        correl = corrcoef
        #print 'case scalar rho', n
    elif corrcoef.ndim == 1 and len(corrcoef) == n*(n-1)/2.0:
        # already in the flat packed form expected by mvndst
        #print 'case flat corr', corrcoeff.shape
        correl = corrcoef
    elif corrcoef.shape == (n,n):
        # full square matrix: extract the strict lower triangle row by row,
        # which matches the CORREL( J + ((I-2)*(I-1))/2 ) packing of mvndst
        #print 'case square corr', correl.shape
        correl = corrcoef[np.tril_indices(n, -1)]
#        for ii in range(n):
#            for jj in range(ii):
#                correl[ jj + ((ii-2)*(ii-1))/2] = corrcoef[ii,jj]
    else:
        raise ValueError('corrcoef has incorrect dimension')

    if 'maxpts' not in kwds:
        if n >2:
            # NOTE(review): default here is 10000*n although the docstring
            # suggests starting from 1000*N -- presumably a deliberately
            # larger default; confirm.
            kwds['maxpts'] = 10000*n

    # encode open/closed integration limits as mvndst INFIN codes:
    # -1 both infinite, 0 (-inf, upper], 1 [lower, inf), 2 [lower, upper]
    lowinf = np.isneginf(lower)
    uppinf = np.isposinf(upper)
    infin = 2.0*np.ones(n)

    np.putmask(infin,lowinf,0)# infin.putmask(0,lowinf)
    np.putmask(infin,uppinf,1) #infin.putmask(1,uppinf)
    #this has to be last
    np.putmask(infin,lowinf*uppinf,-1)

##    #remove infs
##    np.putmask(lower,lowinf,-100)# infin.putmask(0,lowinf)
##    np.putmask(upper,uppinf,100) #infin.putmask(1,uppinf)

    #print lower,',',upper,',',infin,',',correl
    #print correl.shape
    #print kwds.items()
    error, cdfvalue, inform = mvndst(lower,upper,infin,correl,**kwds)
    if inform:
        # non-zero INFORM means the requested precision was not reached
        print('something wrong', informcode[inform], error)
    return cdfvalue
def mvnormcdf(upper, mu, cov, lower=None, **kwds):
    '''multivariate normal cumulative distribution function

    This is a wrapper for scipy.stats._mvn.mvndst which calculates
    a rectangular integral over a multivariate normal distribution.

    Parameters
    ----------
    lower, upper : array_like, 1d
        lower and upper integration limits with length equal to the number
        of dimensions of the multivariate normal distribution. It can contain
        -np.inf or np.inf for open integration intervals
    mu : array_like, 1d
        list or array of means
    cov : array_like, 2d
        specifies covariance matrix
    optional keyword parameters to influence integration
        * maxpts : int, maximum number of function values allowed. This
          parameter can be used to limit the time. A sensible
          strategy is to start with `maxpts` = 1000*N, and then
          increase `maxpts` if ERROR is too large.
        * abseps : float absolute error tolerance.
        * releps : float relative error tolerance.

    Returns
    -------
    cdfvalue : float
        value of the integral

    Notes
    -----
    This function normalizes the location and scale of the multivariate
    normal distribution and then uses `mvstdnormcdf` to call the integration.

    See Also
    --------
    mvstdnormcdf : location and scale standardized multivariate normal cdf
    '''
    upper = np.array(upper)
    lower = -np.inf * np.ones(upper.shape) if lower is None else np.array(lower)
    cov = np.array(cov)

    # per-dimension standard deviations from the covariance diagonal
    stdev = np.sqrt(np.diag(cov))
    # standardize the integration limits to zero mean, unit variance
    lower_std = (lower - mu) / stdev
    upper_std = (upper - mu) / stdev
    # normalize the covariance matrix to a correlation matrix
    sd_row = np.atleast_2d(stdev)
    corr = cov / sd_row / sd_row.T

    return mvstdnormcdf(lower_std, upper_std, corr, **kwds)
| bsd-3-clause | b865b0a31cfc9b9a2879bb65924a6cae | 33.165347 | 105 | 0.614066 | 3.343029 | false | false | false | false |
statsmodels/statsmodels | statsmodels/miscmodels/tmodel.py | 2 | 7295 | """Linear Model with Student-t distributed errors
Because the t distribution has fatter tails than the normal distribution, it
can be used to model observations with heavier tails and observations that have
some outliers. For the latter case, the t-distribution provides more robust
estimators for mean or mean parameters (what about var?).
References
----------
Kenneth L. Lange, Roderick J. A. Little, Jeremy M. G. Taylor (1989)
Robust Statistical Modeling Using the t Distribution
Journal of the American Statistical Association
Vol. 84, No. 408 (Dec., 1989), pp. 881-896
Published by: American Statistical Association
Stable URL: http://www.jstor.org/stable/2290063
not read yet
Created on 2010-09-24
Author: josef-pktd
License: BSD
TODO
----
* add starting values based on OLS
* bugs: store_params does not seem to be defined, I think this was a module
global for debugging - commented out
* parameter restriction: check whether version with some fixed parameters works
"""
#mostly copied from the examples directory written for trying out generic mle.
import numpy as np
from scipy import special, stats
from statsmodels.base.model import GenericLikelihoodModel
from statsmodels.tsa.arma_mle import Arma
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
class TLinearModel(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation of Linear Model with t-distributed errors

    This is an example for generic MLE.

    Except for defining the negative log-likelihood method, all
    methods and results are generic. Gradients and Hessian
    and all resulting statistics are based on numerical
    differentiation.
    '''

    def initialize(self):
        """Set up parameter counts and the optional df restriction.

        If ``self.fix_df`` is set (to a number), the degrees of freedom are
        held fixed via ``fixed_params``/``fixed_paramsmask`` and only beta
        and scale are estimated; otherwise df is a free parameter.
        """
        print("running Tmodel initialize")
        # TODO: here or in __init__
        self.k_vars = self.exog.shape[1]
        if not hasattr(self, 'fix_df'):
            self.fix_df = False

        if self.fix_df is False:
            # df will be estimated, no parameter restrictions
            self.fixed_params = None
            self.fixed_paramsmask = None
            self.k_params = self.exog.shape[1] + 2
            extra_params_names = ['df', 'scale']
        else:
            # df fixed
            self.k_params = self.exog.shape[1] + 1
            # NaN marks a free parameter; only the df slot is pinned
            fixdf = np.nan * np.zeros(self.exog.shape[1] + 2)
            fixdf[-2] = self.fix_df
            self.fixed_params = fixdf
            self.fixed_paramsmask = np.isnan(fixdf)
            extra_params_names = ['scale']

        super(TLinearModel, self).initialize()
        # Note: this needs to be after super initialize
        # super initialize sets default df_resid,
        #_set_extra_params_names adjusts it
        self._set_extra_params_names(extra_params_names)
        self._set_start_params()

    def _set_start_params(self, start_params=None, use_kurtosis=False):
        """Choose starting values: OLS for beta, a heuristic df and scale.

        With ``use_kurtosis`` the df start value is a method-of-moments
        estimate from the excess kurtosis of the OLS residuals.
        """
        if start_params is not None:
            self.start_params = start_params
        else:
            from statsmodels.regression.linear_model import OLS
            res_ols = OLS(self.endog, self.exog).fit()
            start_params = 0.1*np.ones(self.k_params)
            start_params[:self.k_vars] = res_ols.params

            if self.fix_df is False:

                if use_kurtosis:
                    kurt = stats.kurtosis(res_ols.resid)
                    # inverts excess kurtosis of the t distribution, 6/(df-4)
                    df = 6./kurt + 4
                else:
                    df = 5

                start_params[-2] = df
                #TODO adjust scale for df

            start_params[-1] = np.sqrt(res_ols.scale)
            self.start_params = start_params

    def loglike(self, params):
        # total log-likelihood as negative sum of per-observation values
        return -self.nloglikeobs(params).sum(0)

    def nloglikeobs(self, params):
        """
        Loglikelihood of linear model with t distributed errors.

        Parameters
        ----------
        params : ndarray
            The parameters of the model. The last 2 parameters are degrees of
            freedom and scale.

        Returns
        -------
        loglike : ndarray
            The negative log likelihood of the model evaluated at `params`
            for each observation defined by self.endog and self.exog.

        Notes
        -----
        The log-likelihood per observation is the log density of the standard
        t distribution with `df` degrees of freedom evaluated at
        ``(y - x'beta) / scale``, minus ``log(scale)`` (Jacobian of the scale
        change of variables).

        The t distribution is the standard t distribution and not a standardized
        t distribution, which means that the scale parameter is not equal to the
        standard deviation.

        self.fixed_params and self.expandparams can be used to fix some
        parameters. (I doubt this has been tested in this model.)
        """
        #print len(params),
        #store_params.append(params)
        if self.fixed_params is not None:
            #print 'using fixed'
            params = self.expandparams(params)

        beta = params[:-2]
        df = params[-2]
        scale = np.abs(params[-1])  #TODO check behavior around zero
        loc = np.dot(self.exog, beta)
        endog = self.endog
        x = (endog - loc)/scale
        #next part is stats.t._logpdf
        lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
        lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
        lPx -= np_log(scale)  # correction for scale
        return -lPx

    def predict(self, params, exog=None):
        # linear prediction x'beta; ignores the (df, scale) tail of params
        if exog is None:
            exog = self.exog
        return np.dot(exog, params[:self.exog.shape[1]])
class TArma(Arma):
    '''Univariate Arma Model with t-distributed errors

    Inherits all methods except the likelihood from tsa.arma_mle.Arma.

    This uses the standard (non-standardized) t distribution, so the implied
    variance of the error is not scale**2 but::

        error_variance = df/(df-2)*scale**2

    Notes
    -----
    This might be replaced by a standardized t-distribution with scale**2
    equal to variance.
    '''

    def loglike(self, params):
        """Total log-likelihood: negative sum of the per-observation values."""
        return -self.nloglikeobs(params).sum(0)

    #add for Jacobian calculation bsejac in GenericMLE, copied from loglike
    def nloglikeobs(self, params):
        """Negative log-likelihood per observation for t-distributed errors.

        The last two entries of ``params`` are the degrees of freedom and the
        scale; the leading entries are the ARMA coefficients.
        """
        resid = self.geterrors(params[:-2])
        df = params[-2]
        scale = np.abs(params[-1])
        # negative log density of the scaled residuals, plus the log(scale)
        # Jacobian correction for the change of variables
        return -stats.t._logpdf(resid / scale, df) + np.log(scale)

    #TODO rename fit_mle -> fit, fit -> fit_ls
    def fit_mle(self, order, start_params=None, method='nm', maxiter=5000,
                tol=1e-08, **kwds):
        """Fit by maximum likelihood; start values get (df, scale) appended."""
        nar, nma = order
        if start_params is None:
            # default: small ARMA coefficients, df=5, scale=1
            start_params = np.concatenate((0.05*np.ones(nar + nma), [5, 1]))
        elif len(start_params) != nar + nma + 2:
            raise ValueError('start_param need sum(order) + 2 elements')

        return super(TArma, self).fit_mle(order=order,
                                          start_params=start_params,
                                          method=method, maxiter=maxiter,
                                          tol=tol, **kwds)
| bsd-3-clause | 93c78ddd4594dc2b691c5072c8c44380 | 30.580087 | 107 | 0.602879 | 3.817373 | false | false | false | false |
statsmodels/statsmodels | docs/source/plots/graphics_mosaicplot_mosaic.py | 17 | 1943 | # -*- coding: utf-8 -*-
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.graphics.mosaicplot import mosaic
# The most simple use case is to take a dictionary and plot the result
data = {'a': 10, 'b': 15, 'c': 16}
mosaic(data, title='basic dictionary')
plt.show()

# A more useful example is given by a dictionary with multiple indices. In
# this case we use a wider gap to a better visual separation of the resulting
# plot
data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
mosaic(data, gap=0.05, title='complete dictionary')
plt.show()

# The same data can be given as a simple or hierarchical indexed Series
rand = np.random.random  # short alias for the random generator
tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
data = pd.Series(rand(8), index=index)
mosaic(data, title='hierarchical index series')
plt.show()

# The third accepted data structure is the np array, for which a very simple
# index will be created.
rand = np.random.random  # (re-assigns the same alias as above)
data = 1+rand((2, 2))
mosaic(data, title='random non-labeled array')
plt.show()

# If you need to modify the labeling and the coloring you can give a function
# to create the labels and one with the graphical properties starting from the
# key tuple
def props(key):
    """Color mosaic tiles red when the key contains 'a', gray otherwise."""
    if 'a' in key:
        return {'color': 'r'}
    return {'color': 'gray'}
def labelizer(key):
    """Map a single-category key tuple to its display label."""
    labels = {('a',): 'first', ('b',): 'second', ('c',): 'third'}
    return labels[key]
# custom properties/labels example using the props and labelizer callables
data = {'a': 10, 'b': 15, 'c': 16}
mosaic(data, title='colored dictionary', properties=props, labelizer=labelizer)
plt.show()

# Using a DataFrame as source, specifying the name of the columns of interest
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pd.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['pet', 'gender'], title='DataFrame as Source')
plt.show()
| bsd-3-clause | 9d0b18f002c44adb0c033946afe3ec6a | 31.932203 | 79 | 0.67473 | 3.138934 | false | false | false | false |
statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | 3 | 6854 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 14 08:21:41 2010
Author: josef-pktd
License: BSD (3-clause)
"""
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.tools import pca
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
#converting example Principal Component Regression to a class
#from sandbox/example_pca_regression.py
class FactorModelUnivariate:
    '''Principal component regression of a univariate endog on PCA factors.

    Todo:
    check treatment of const, make it optional ?
    add hasconst (0 or 1), needed when selecting nfact+hasconst
    options are arguments in calc_factors, should be more public instead
    cross-validation is slow for large number of observations
    '''
    def __init__(self, endog, exog):
        #do this in a superclass?
        self.endog = np.asarray(endog)
        self.exog = np.asarray(exog)

    def calc_factors(self, x=None, keepdim=0, addconst=True):
        '''get factor decomposition of exogenous variables

        This uses principal component analysis to obtain the factors. The number
        of factors kept is the maximum that will be considered in the regression.
        '''
        if x is None:
            x = self.exog
        else:
            x = np.asarray(x)
        xred, fact, evals, evecs = pca(x, keepdim=keepdim, normalize=1)
        self.exog_reduced = xred
        #self.factors = fact
        if addconst:
            # constant in the first column, so factors[:, :k] always keeps it
            self.factors = sm.add_constant(fact, prepend=True)
            self.hasconst = 1  #needs to be int
        else:
            self.factors = fact
            self.hasconst = 0  #needs to be int

        self.evals = evals
        self.evecs = evecs

    def fit_fixed_nfact(self, nfact):
        # OLS of endog on the constant plus the first nfact factors
        # NOTE(review): calc_factors sets 'factors', not 'factors_wconst', so
        # this hasattr check is always False and the factors are recomputed on
        # every call -- confirm the intended attribute name.
        if not hasattr(self, 'factors_wconst'):
            self.calc_factors()
        return sm.OLS(self.endog, self.factors[:,:nfact+1]).fit()

    def fit_find_nfact(self, maxfact=None, skip_crossval=True, cv_iter=None):
        '''estimate the model and selection criteria for up to maxfact factors

        The selection criteria that are calculated are AIC, BIC, and R2_adj. and
        additionally cross-validation prediction error sum of squares if `skip_crossval`
        is false. Cross-validation is not used by default because it can be
        time consuming to calculate.

        By default the cross-validation method is Leave-one-out on the full dataset.
        A different cross-validation sample can be specified as an argument to
        cv_iter.

        Results are attached in `results_find_nfact`
        '''
        #print 'OLS on Factors'
        if not hasattr(self, 'factors'):
            self.calc_factors()

        hasconst = self.hasconst
        if maxfact is None:
            maxfact = self.factors.shape[1] - hasconst

        if (maxfact+hasconst) < 1:
            raise ValueError('nothing to do, number of factors (incl. constant) should ' +
                             'be at least 1')

        #temporary safety
        maxfact = min(maxfact, 10)
        y0 = self.endog
        results = []
        #xred, fact, eva, eve = pca(x0, keepdim=0, normalize=1)
        for k in range(1, maxfact+hasconst):  #k includes now the constant
            #xred, fact, eva, eve = pca(x0, keepdim=k, normalize=1)
            # this is faster and same result
            fact = self.factors[:,:k]
            res = sm.OLS(y0, fact).fit()
##            print 'k =', k
##            print res.params
##            print 'aic:  ', res.aic
##            print 'bic:  ', res.bic
##            print 'llf:  ', res.llf
##            print 'R2    ', res.rsquared
##            print 'R2 adj', res.rsquared_adj

            if not skip_crossval:
                if cv_iter is None:
                    cv_iter = LeaveOneOut(len(y0))
                # out-of-sample squared prediction error accumulated over folds
                prederr2 = 0.
                for inidx, outidx in cv_iter:
                    res_l1o = sm.OLS(y0[inidx], fact[inidx,:]).fit()
                    #print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
                    prederr2 += (y0[outidx] -
                                 res_l1o.model.predict(res_l1o.params, fact[outidx,:]))**2.
            else:
                prederr2 = np.nan

            results.append([k, res.aic, res.bic, res.rsquared_adj, prederr2])

        self.results_find_nfact = results = np.array(results)
        # best k per criterion: argmin for AIC/BIC/L1O, argmax for R2_adj
        self.best_nfact = np.r_[(np.argmin(results[:,1:3],0), np.argmax(results[:,3],0),
                                 np.argmin(results[:,-1],0))]

    def summary_find_nfact(self):
        '''provides a summary for the selection of the number of factors

        Returns
        -------
        sumstr : str
            summary of the results for selecting the number of factors
        '''
        if not hasattr(self, 'results_find_nfact'):
            self.fit_find_nfact()

        results = self.results_find_nfact
        sumstr = ''
        sumstr += '\n' + 'Best result for k, by AIC, BIC, R2_adj, L1O'
#        best = np.r_[(np.argmin(results[:,1:3],0), np.argmax(results[:,3],0),
#                      np.argmin(results[:,-1],0))]

        sumstr += '\n' + ' '*19 + '%5d %4d %6d %5d' % tuple(self.best_nfact)

        from statsmodels.iolib.table import SimpleTable
        headers = 'k, AIC, BIC, R2_adj, L1O'.split(', ')
        numformat = ['%6d'] + ['%10.3f']*4 #'%10.4f'
        txt_fmt1 = dict(data_fmts = numformat)
        tabl = SimpleTable(results, headers, None, txt_fmt=txt_fmt1)

        sumstr += '\n' + "PCA regression on simulated data,"
        sumstr += '\n' + "DGP: 2 factors and 4 explanatory variables"
        sumstr += '\n' + tabl.__str__()
        sumstr += '\n' + "Notes: k is number of components of PCA,"
        sumstr += '\n' + "       constant is added additionally"
        sumstr += '\n' + "       k=0 means regression on constant only"
        sumstr += '\n' + "       L1O: sum of squared prediction errors for leave-one-out"
        return sumstr
if __name__ == '__main__':
    examples = [1]
    if 1 in examples:
        # Simulate data: two latent factors plus a constant (f0) drive
        # four observed regressors through the loading matrix f2xcoef.
        nobs = 500
        f0 = np.c_[np.random.normal(size=(nobs,2)), np.ones((nobs,1))]
        f2xcoef = np.c_[np.repeat(np.eye(2),2,0),np.arange(4)[::-1]].T
        # the two assignments below overwrite the loading matrix; only the
        # last one is actually used
        f2xcoef = np.array([[ 1., 1., 0., 0.],
                            [ 0., 0., 1., 1.],
                            [ 3., 2., 1., 0.]])
        f2xcoef = np.array([[ 0.1, 3., 1., 0.],
                            [ 0., 0., 1.5, 0.1],
                            [ 3., 2., 1., 0.]])
        x0 = np.dot(f0, f2xcoef)
        x0 += 0.1*np.random.normal(size=x0.shape)
        # endog depends on the factors directly, plus observation noise
        ytrue = np.dot(f0,[1., 1., 1.])
        y0 = ytrue + 0.1*np.random.normal(size=ytrue.shape)
        mod = FactorModelUnivariate(y0, x0)
        # first summary: selection criteria without cross validation
        print(mod.summary_find_nfact())
        print("with cross validation - slower")
        # rerun including the leave-one-out prediction error criterion
        mod.fit_find_nfact(maxfact=None, skip_crossval=False, cv_iter=None)
        print(mod.summary_find_nfact())
| bsd-3-clause | 58091dd7da530eea41f121161498322e | 35.652406 | 91 | 0.55194 | 3.47038 | false | false | false | false |
statsmodels/statsmodels | statsmodels/graphics/tukeyplot.py | 3 | 2392 | import matplotlib.lines as lines
import matplotlib.pyplot as plt
import numpy as np
def tukeyplot(results, dim=None, yticklabels=None):
    """Plot Tukey-style simultaneous confidence intervals.

    Parameters
    ----------
    results : array_like
        An (npairs, 2) array; each row holds the lower and upper
        confidence limit of one pairwise comparison of means.
    dim : None
        Unused; kept for backward compatibility.
    yticklabels : list of str, optional
        Labels for the pairwise comparisons on the y axis, bottom to
        top.  If None, the historical hard-coded labels are used.
    """
    # NOTE(review): ``dim`` is accepted but never used -- confirm its
    # intended meaning before removing it.
    npairs = len(results)
    fig = plt.figure()
    fsp = fig.add_subplot(111)
    fsp.axis([-50,50,0.5,10.5])
    fsp.set_title('95 % family-wise confidence level')
    fsp.title.set_y(1.025)
    fsp.set_yticks(np.arange(1,11))
    if yticklabels is None:
        # default labels matching the example data below
        yticklabels = ['V-T','V-S','T-S','V-P','T-P','S-P','V-M',
                       'T-M','S-M','P-M']
    # FIX: honor the ``yticklabels`` argument, which was previously
    # accepted but ignored in favor of the hard-coded label list.
    fsp.set_yticklabels(yticklabels)
    #fsp.yaxis.set_major_locator(mticker.MaxNLocator(npairs))
    fsp.yaxis.grid(True, linestyle='-', color='gray')
    fsp.set_xlabel('Differences in mean levels of Var', labelpad=8)
    fsp.xaxis.tick_bottom()
    fsp.yaxis.tick_left()
    # draw tick marks outside the axes on the bottom and left spines
    xticklines = fsp.get_xticklines()
    for xtickline in xticklines:
        xtickline.set_marker(lines.TICKDOWN)
        xtickline.set_markersize(10)
    xlabels = fsp.get_xticklabels()
    for xlabel in xlabels:
        xlabel.set_y(-.04)
    yticklines = fsp.get_yticklines()
    for ytickline in yticklines:
        ytickline.set_marker(lines.TICKLEFT)
        ytickline.set_markersize(10)
    ylabels = fsp.get_yticklabels()
    for ylabel in ylabels:
        ylabel.set_x(-.04)
    for pair in range(npairs):
        # rescale each interval into axes-fraction coordinates (axhline
        # interprets xmin/xmax as fractions of the axes width); the
        # interval is drawn as two segments meeting at the midpoint so a
        # marker appears at both endpoints and at the center
        data = .5+results[pair]/100.
        fsp.axhline(y=npairs-pair, xmin=data.mean(), xmax=data[1],
                    linewidth=1.25, color='blue', marker="|", markevery=1)
        fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data.mean(),
                    linewidth=1.25, color='blue', marker="|", markevery=1)
    # reference line at zero difference
    fsp.axvline(x=0, linestyle="--", color='black')
    fig.subplots_adjust(bottom=.125)
# Example data: simultaneous (lower, upper) confidence limits for ten
# pairwise mean comparisons, e.g. as produced by a Tukey HSD procedure.
results = np.array([
    [-10.04391794, 26.34391794],
    [-21.45225794, 14.93557794],
    [5.61441206, 42.00224794],
    [-13.40225794, 22.98557794],
    [-29.60225794, 6.78557794],
    [-2.53558794, 33.85224794],
    [-21.55225794, 14.83557794],
    [8.87275206, 45.26058794],
    [-10.14391794, 26.24391794],
    [-37.21058794, -0.82275206],
])
# plt.show()
| bsd-3-clause | e2fd9e9274ea51de322ac211019be9aa | 30.893333 | 82 | 0.599916 | 2.724374 | false | false | false | false |
statsmodels/statsmodels | statsmodels/examples/ex_feasible_gls_het.py | 6 | 4220 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during development.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimated the model for a given weight matrix
here we want to estimate also the weight matrix by two step or iterative WLS
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
There might be something fishy with the example, but I do not see it.
Or maybe it's supposed to be this way because in the first case I do not
include a constant and in the second case I include some of the same
regressors as in the main equation.
"""
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
examples = ['ex1']
if 'ex1' in examples:
    #from tut_ols_wls
    # Simulate a linear model with two-group heteroscedasticity: the
    # noise variance is 1 for the first 60% of observations and 4
    # (squared scale) for the remaining 40%.
    nsample = 1000
    sig = 0.5
    x1 = np.linspace(0, 20, nsample)
    X = np.c_[x1, (x1-5)**2, np.ones(nsample)]
    np.random.seed(0)#9876789) #9876543)
    beta = [0.5, -0.015, 1.]
    y_true2 = np.dot(X, beta)
    w = np.ones(nsample)
    w[nsample*6//10:] = 4 #Note this is the squared value
    #y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
    #y2[nsample*6/10:] = y_true2[nsample*6/10:] + sig*4. * np.random.normal(size=nsample*4/10)
    y2 = y_true2 + sig*np.sqrt(w)* np.random.normal(size=nsample)
    # X2 is first reduced, then overwritten: the full design is used
    X2 = X[:,[0,2]]
    X2 = X
    # baseline: plain OLS ignoring heteroscedasticity
    res_ols = OLS(y2, X2).fit()
    print('OLS beta estimates')
    print(res_ols.params)
    print('OLS stddev of beta')
    print(res_ols.bse)
    print('\nWLS')
    # feasible GLS with the (squared) scale variable given directly
    mod0 = GLSHet2(y2, X2, exog_var=w)
    res0 = mod0.fit()
    print('new version')
    mod1 = GLSHet(y2, X2, exog_var=w)
    res1 = mod1.iterative_fit(2)
    print('WLS beta estimates')
    print(res1.params)
    print(res0.params)
    print('WLS stddev of beta')
    print(res1.bse)
    #compare with previous version GLSHet2, refactoring check
    #assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
    #this fails ??? more iterations? different starting weights?
    # normalized estimated weights should recover 1/w
    print(res1.model.weights/res1.model.weights.max())
    #why is the error so small in the estimated weights ?
    assert_almost_equal(res1.model.weights/res1.model.weights.max(), 1./w, 14)
    print('residual regression params')
    print(res1.results_residual_regression.params)
    print('scale of model ?')
    print(res1.scale)
    print('unweighted residual variance, note unweighted mean is not zero')
    print(res1.resid.var())
    #Note weighted mean is zero:
    #(res1.model.weights * res1.resid).mean()
    doplots = False
    if doplots:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(x1, y2, 'o')
        plt.plot(x1, y_true2, 'b-', label='true')
        plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
        plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
        plt.legend()
    # second variant: model the variance with group dummy variables
    #z = (w[:,None] == [1,4]).astype(float) #dummy variable
    z = (w[:,None] == np.unique(w)).astype(float) #dummy variable
    mod2 = GLSHet(y2, X2, exog_var=z)
    res2 = mod2.iterative_fit(2)
    print(res2.params)
    import statsmodels.api as sm
    # third variant: variance modeled as linear in w with a constant
    z = sm.add_constant(w)
    mod3 = GLSHet(y2, X2, exog_var=z)
    res3 = mod3.iterative_fit(8)
    print(res3.params)
    print("np.array(res3.model.history['ols_params'])")
    print(np.array(res3.model.history['ols_params']))
    print("np.array(res3.model.history['self_params'])")
    print(np.array(res3.model.history['self_params']))
    print(np.unique(res2.model.weights)) #for discrete z only, only a few uniques
    print(np.unique(res3.model.weights))
    if doplots:
        plt.figure()
        plt.plot(x1, y2, 'o')
        plt.plot(x1, y_true2, 'b-', label='true')
        plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
        plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
        plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
        plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
        plt.legend()
        plt.show()
| bsd-3-clause | 89fc6de584dda2aea196dc3ee832ce05 | 33.308943 | 94 | 0.651896 | 2.944871 | false | false | false | false |
statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kde2.py | 3 | 3192 | # -*- coding: utf-8 -*-
from statsmodels.compat.python import lzip
import numpy as np
from statsmodels.tools.validation import array_like
from . import kernels
#TODO: should this be a function?
class KDE:
    """Kernel density estimator.

    Parameters
    ----------
    x : array_like
        Data from which the density is estimated; a 1-D array is treated
        as a single series, a 2-D array as ``nobs x n_series``.
    kernel : kernel instance, optional
        Kernel object from the ``kernels`` module; defaults to a
        Gaussian kernel (with no meaningful bandwidth set yet).
    """
    #TODO: amend docs for Nd case?
    def __init__(self, x, kernel=None):
        data = array_like(x, "x", maxdim=2, contiguous=True)
        if data.ndim == 1:
            # promote a single series to a column vector
            data = data[:, None]
        nobs, n_series = data.shape
        if kernel is None:
            kernel = kernels.Gaussian()  # no meaningful bandwidth yet
        if n_series > 1 and isinstance(kernel, kernels.CustomKernel):
            # wrap a univariate custom kernel for the multivariate case
            kernel = kernels.NdKernel(n_series, kernels=kernel)
        self.kernel = kernel
        self.n = n_series  # TODO: change attribute name
        self.x = data

    def density(self, x):
        """Evaluate the kernel density of the data at the point ``x``."""
        return self.kernel.density(self.x, x)

    def __call__(self, x, h="scott"):
        # ``h`` is currently unused; the kernel's own bandwidth applies
        return np.array([self.density(pt) for pt in x])

    def evaluate(self, x, h="silverman"):
        # ``h`` is currently unused; the kernel's own bandwidth applies
        kernel_density = self.kernel.density
        return np.array([kernel_density(pt) for pt in x])
if __name__ == "__main__":
from numpy import random
import matplotlib.pyplot as plt
import statsmodels.nonparametric.bandwidths as bw
from statsmodels.sandbox.nonparametric.testdata import kdetest
# 1-D case
random.seed(142)
x = random.standard_t(4.2, size = 50)
h = bw.bw_silverman(x)
#NOTE: try to do it with convolution
support = np.linspace(-10,10,512)
kern = kernels.Gaussian(h = h)
kde = KDE( x, kern)
print(kde.density(1.015469))
print(0.2034675)
Xs = np.arange(-10,10,0.1)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(Xs, kde(Xs), "-")
ax.set_ylim(-10, 10)
ax.set_ylim(0,0.4)
# 2-D case
x = lzip(kdetest.faithfulData["eruptions"], kdetest.faithfulData["waiting"])
x = np.array(x)
x = (x - x.mean(0))/x.std(0)
nobs = x.shape[0]
H = kdetest.Hpi
kern = kernels.NdKernel( 2 )
kde = KDE( x, kern )
print(kde.density( np.matrix( [1,2 ]))) #.T
plt.figure()
plt.plot(x[:,0], x[:,1], 'o')
n_grid = 50
xsp = np.linspace(x.min(0)[0], x.max(0)[0], n_grid)
ysp = np.linspace(x.min(0)[1], x.max(0)[1], n_grid)
# xsorted = np.sort(x)
# xlow = xsorted[nobs/4]
# xupp = xsorted[3*nobs/4]
# xsp = np.linspace(xlow[0], xupp[0], n_grid)
# ysp = np.linspace(xlow[1], xupp[1], n_grid)
xr, yr = np.meshgrid(xsp, ysp)
kde_vals = np.array([kde.density( np.matrix( [xi, yi ]) ) for xi, yi in
zip(xr.ravel(), yr.ravel())])
plt.contour(xsp, ysp, kde_vals.reshape(n_grid, n_grid))
plt.show()
# 5 D case
# random.seed(142)
# mu = [1.0, 4.0, 3.5, -2.4, 0.0]
# sigma = np.matrix(
# [[ 0.6 - 0.1*abs(i-j) if i != j else 1.0 for j in xrange(5)] for i in xrange(5)])
# x = random.multivariate_normal(mu, sigma, size = 100)
# kern = kernel.Gaussian()
# kde = KernelEstimate( x, kern )
| bsd-3-clause | 6c0196547828c51cd336ec832a0c14da | 26.756522 | 90 | 0.577068 | 2.891304 | false | false | false | false |
statsmodels/statsmodels | examples/incomplete/glsar.py | 6 | 4706 | """
Generalized Least Squares with AR Errors
6 examples for GLSAR with artificial data
"""
# .. note: These examples were written mostly to cross-check results. It is
# still being written, and GLSAR is still being worked on.
import numpy as np
import numpy.testing as npt
from scipy import signal
import statsmodels.api as sm
from statsmodels.regression.linear_model import GLSAR, yule_walker
# FIX: under Python 3, ``range`` objects do not support ``+``; the original
# ``range(10) + ['test_copy']`` raised TypeError.  Materialize the range as
# a list before concatenating the extra example name.
examples_all = list(range(10)) + ['test_copy']
examples = examples_all  # [5]
if 0 in examples:
    print('\n Example 0')
    # tiny hand-made data set; alternate manual GLSAR refitting with
    # yule_walker against the built-in iterative_fit
    X = np.arange(1, 8)
    X = sm.add_constant(X, prepend=False)
    Y = np.array((1, 3, 4, 5, 8, 10, 9))
    rho = 2
    model = GLSAR(Y, X, 2)
    for i in range(6):
        results = model.fit()
        print('AR coefficients:', model.rho)
        rho, sigma = yule_walker(results.resid, order=model.order)
        model = GLSAR(Y, X, rho)
    par0 = results.params
    print('params fit', par0)
    model0if = GLSAR(Y, X, 2)
    res = model0if.iterative_fit(6)
    print('iterativefit beta', res.params)
    results.tvalues # TODO: is this correct? it does equal params/bse
    # but is not the same as the AR example (which was wrong)
    print(results.t_test([0, 1])) # are sd and t correct? vs
    print(results.f_test(np.eye(2)))
# simulate a regression with AR(p) errors; rhotrue are the AR coefficients
rhotrue = np.array([0.5, 0.2])
nlags = np.size(rhotrue)
beta = np.array([0.1, 2])
noiseratio = 0.5
nsample = 2000
x = np.arange(nsample)
X1 = sm.add_constant(x, prepend=False)
wnoise = noiseratio * np.random.randn(nsample + nlags)
# .. noise = noise[1:] + rhotrue*noise[:-1] # wrong this is not AR
# .. find my drafts for univariate ARMA functions
# generate AR(p)
if np.size(rhotrue) == 1:
    # replace with scipy.signal.lfilter, keep for testing
    arnoise = np.zeros(nsample + 1)
    for i in range(1, nsample + 1):
        arnoise[i] = rhotrue * arnoise[i - 1] + wnoise[i]
    noise = arnoise[1:]
    an = signal.lfilter([1], np.hstack((1, -rhotrue)), wnoise[1:])
    print('simulate AR(1) difference', np.max(np.abs(noise - an)))
else:
    # AR(p) noise via an IIR filter; drop the first nlags warm-up values
    noise = signal.lfilter([1], np.hstack((1, -rhotrue)), wnoise)[nlags:]
# generate GLS model with AR noise
y1 = np.dot(X1, beta) + noise
if 1 in examples:
    print('\nExample 1: iterative_fit and repeated calls')
    mod1 = GLSAR(y1, X1, 1)
    res = mod1.iterative_fit()
    print(res.params)
    print(mod1.rho)
    # repeated short iterative fits should converge to the same rho
    mod1 = GLSAR(y1, X1, 2)
    for i in range(5):
        res1 = mod1.iterative_fit(2)
        print(mod1.rho)
        print(res1.params)
if 2 in examples:
    print('\nExample 2: iterative fitting of first model')
    print('with AR(0)', par0)
    # track how the parameter estimates move between iterations
    parold = par0
    mod0 = GLSAR(Y, X, 1)
    for i in range(5):
        res0 = mod0.iterative_fit(1)
        print('rho', mod0.rho)
        parnew = res0.params
        print('params', parnew,)
        print('params change in iteration', parnew - parold)
        parold = parnew
# generate pure AR(p) process
Y = noise
# example with no regressor,
# results now have same estimated rho as yule-walker directly
if 3 in examples:
    print('\nExample 3: pure AR(2), GLSAR versus Yule_Walker')
    model3 = GLSAR(Y, rho=2)
    for i in range(5):
        results = model3.fit()
        print("AR coefficients:", model3.rho, results.params)
        rho, sigma = yule_walker(results.resid, order=model3.order)
        model3 = GLSAR(Y, rho=rho)
if 'test_copy' in examples:
    # regression check: yule_walker must not modify its input arrays
    xx = X.copy()
    rhoyw, sigmayw = yule_walker(xx[:, 0], order=2)
    print(rhoyw, sigmayw)
    print((xx == X).all()) # test for unchanged array (fixed)
    yy = Y.copy()
    rhoyw, sigmayw = yule_walker(yy, order=2)
    print(rhoyw, sigmayw)
    print((yy == Y).all()) # test for unchanged array (fixed)
if 4 in examples:
    print('\nExample 4: demeaned pure AR(2), GLSAR versus Yule_Walker')
    # Demeaning removes the constant, making the pure AR fit comparable
    # to Yule-Walker, which assumes a zero-mean process.
    Ydemeaned = Y - Y.mean()
    model4 = GLSAR(Ydemeaned, rho=2)
    for i in range(5):
        results = model4.fit()
        # FIX: the original printed ``model3.rho`` (copy-paste from
        # example 3); report the coefficients of the model actually fit.
        print("AR coefficients:", model4.rho, results.params)
        rho, sigma = yule_walker(results.resid, order=model4.order)
        model4 = GLSAR(Ydemeaned, rho=rho)
if 5 in examples:
    print('\nExample 5: pure AR(2), GLSAR iterative_fit versus Yule_Walker')
    model3a = GLSAR(Y, rho=1)
    res3a = model3a.iterative_fit(5)
    print(res3a.params)
    print(model3a.rho)
    # with no regressors the iterated GLSAR rho should match Yule-Walker
    rhoyw, sigmayw = yule_walker(Y, order=1)
    print(rhoyw, sigmayw)
    npt.assert_array_almost_equal(model3a.rho, rhoyw, 15)
    # convergence check: increasing iteration counts from a bad start
    for i in range(6):
        model3b = GLSAR(Y, rho=0.1)
        print(i, model3b.iterative_fit(i).params, model3b.rho)
    # same, but reusing one model instance across repeated short fits
    model3b = GLSAR(Y, rho=0.1)
    for i in range(6):
        print(i, model3b.iterative_fit(2).params, model3b.rho)
    print(np.array(res.history['params']))
    print(np.array(res.history['rho']))
| bsd-3-clause | 147a764f2bb78b59d6ed3bfea6d33a59 | 29.75817 | 76 | 0.634509 | 2.857316 | false | true | false | false |
statsmodels/statsmodels | statsmodels/stats/contingency_tables.py | 3 | 44260 | """
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of 2x2 contingency tables.
Also contains functions for conducting McNemar's test and Cochran's q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
import warnings
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels import iolib
from statsmodels.tools import sm_exceptions
from statsmodels.tools.decorators import cache_readonly
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if not table.index.equals(table.columns):
ix = list(set(table.index) | set(table.columns))
ix.sort()
table = table.reindex(index=ix, columns=ix, fill_value=0)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch:
def __repr__(self):
return "<bunch containing results, print to see contents>"
def __str__(self):
ky = [k for k, _ in self.__dict__.items()]
ky.sort()
m = max([len(k) for k in ky])
tab = []
f = "{:" + str(m) + "} {}"
for k in ky:
tab.append(f.format(k, self.__dict__[k]))
return "\n".join(tab)
class Table:
    """
    A two-way contingency table.
    Parameters
    ----------
    table : array_like
        A contingency table.
    shift_zeros : bool
        If True and any cell count is zero, add 0.5 to all values
        in the table.
    Attributes
    ----------
    table_orig : array_like
        The original table is cached as `table_orig`.
    See Also
    --------
    statsmodels.graphics.mosaicplot.mosaic
    scipy.stats.chi2_contingency
    Notes
    -----
    The inference procedures used here are all based on a sampling
    model in which the units are independent and identically
    distributed, with each unit being classified with respect to two
    categorical variables.
    References
    ----------
    Definitions of residuals:
        https://onlinecourses.science.psu.edu/stat504/node/86
    """
    def __init__(self, table, shift_zeros=True):
        # keep the caller's table (possibly a labeled DataFrame) and work
        # on a float copy so zero cells can be shifted by 0.5 below
        self.table_orig = table
        self.table = np.asarray(table, dtype=np.float64)
        if shift_zeros and (self.table.min() == 0):
            self.table[self.table == 0] = 0.5
    def __str__(self):
        s = ("A %dx%d contingency table with counts:\n" %
             tuple(self.table.shape))
        s += np.array_str(self.table)
        return s
    @classmethod
    def from_data(cls, data, shift_zeros=True):
        """
        Construct a Table object from data.
        Parameters
        ----------
        data : array_like
            The raw data, from which a contingency table is constructed
            using the first two columns.
        shift_zeros : bool
            If True and any cell count is zero, add 0.5 to all values
            in the table.
        Returns
        -------
        A Table instance.
        """
        # cross-tabulate the first two columns of the raw observations
        if isinstance(data, pd.DataFrame):
            table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
        else:
            table = pd.crosstab(data[:, 0], data[:, 1])
        return cls(table, shift_zeros)
    def test_nominal_association(self):
        """
        Assess independence for nominal factors.
        Assessment of independence between rows and columns using
        chi^2 testing. The rows and columns are treated as nominal
        (unordered) categorical variables.
        Returns
        -------
        A bunch containing the following attributes:
        statistic : float
            The chi^2 test statistic.
        df : int
            The degrees of freedom of the reference distribution
        pvalue : float
            The p-value for the test.
        """
        # Pearson chi^2: sum of (observed - expected)^2 / expected
        statistic = np.asarray(self.chi2_contribs).sum()
        # df = (nrows - 1) * (ncols - 1)
        df = np.prod(np.asarray(self.table.shape) - 1)
        pvalue = 1 - stats.chi2.cdf(statistic, df)
        b = _Bunch()
        b.statistic = statistic
        b.df = df
        b.pvalue = pvalue
        return b
    def test_ordinal_association(self, row_scores=None, col_scores=None):
        """
        Assess independence between two ordinal variables.
        This is the 'linear by linear' association test, which uses
        weights or scores to target the test to have more power
        against ordered alternatives.
        Parameters
        ----------
        row_scores : array_like
            An array of numeric row scores
        col_scores : array_like
            An array of numeric column scores
        Returns
        -------
        A bunch with the following attributes:
        statistic : float
            The test statistic.
        null_mean : float
            The expected value of the test statistic under the null
            hypothesis.
        null_sd : float
            The standard deviation of the test statistic under the
            null hypothesis.
        zscore : float
            The Z-score for the test statistic.
        pvalue : float
            The p-value for the test.
        Notes
        -----
        The scores define the trend to which the test is most sensitive.
        Using the default row and column scores gives the
        Cochran-Armitage trend test.
        """
        # default scores 0, 1, 2, ... give the Cochran-Armitage test
        if row_scores is None:
            row_scores = np.arange(self.table.shape[0])
        if col_scores is None:
            col_scores = np.arange(self.table.shape[1])
        if len(row_scores) != self.table.shape[0]:
            msg = ("The length of `row_scores` must match the first " +
                   "dimension of `table`.")
            raise ValueError(msg)
        if len(col_scores) != self.table.shape[1]:
            msg = ("The length of `col_scores` must match the second " +
                   "dimension of `table`.")
            raise ValueError(msg)
        # The test statistic
        statistic = np.dot(row_scores, np.dot(self.table, col_scores))
        # Some needed quantities
        n_obs = self.table.sum()
        rtot = self.table.sum(1)
        um = np.dot(row_scores, rtot)
        u2m = np.dot(row_scores**2, rtot)
        ctot = self.table.sum(0)
        vn = np.dot(col_scores, ctot)
        v2n = np.dot(col_scores**2, ctot)
        # The null mean and variance of the test statistic
        e_stat = um * vn / n_obs
        v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
        sd_stat = np.sqrt(v_stat)
        # two-sided p-value from the normal approximation
        zscore = (statistic - e_stat) / sd_stat
        pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
        b = _Bunch()
        b.statistic = statistic
        b.null_mean = e_stat
        b.null_sd = sd_stat
        b.zscore = zscore
        b.pvalue = pvalue
        return b
    @cache_readonly
    def marginal_probabilities(self):
        """
        Estimate marginal probability distributions for the rows and columns.
        Returns
        -------
        row : ndarray
            Marginal row probabilities
        col : ndarray
            Marginal column probabilities
        """
        n = self.table.sum()
        row = self.table.sum(1) / n
        col = self.table.sum(0) / n
        # preserve labels when the original table was a DataFrame
        if isinstance(self.table_orig, pd.DataFrame):
            row = pd.Series(row, self.table_orig.index)
            col = pd.Series(col, self.table_orig.columns)
        return row, col
    @cache_readonly
    def independence_probabilities(self):
        """
        Returns fitted joint probabilities under independence.
        The returned table is outer(row, column), where row and
        column are the estimated marginal distributions
        of the rows and columns.
        """
        row, col = self.marginal_probabilities
        itab = np.outer(row, col)
        if isinstance(self.table_orig, pd.DataFrame):
            itab = pd.DataFrame(itab, self.table_orig.index,
                                self.table_orig.columns)
        return itab
    @cache_readonly
    def fittedvalues(self):
        """
        Returns fitted cell counts under independence.
        The returned cell counts are estimates under a model
        where the rows and columns of the table are independent.
        """
        probs = self.independence_probabilities
        # scale the joint probabilities back to counts
        fit = self.table.sum() * probs
        return fit
    @cache_readonly
    def resid_pearson(self):
        """
        Returns Pearson residuals.
        The Pearson residuals are calculated under a model where
        the rows and columns of the table are independent.
        """
        # (observed - expected) / sqrt(expected)
        fit = self.fittedvalues
        resids = (self.table - fit) / np.sqrt(fit)
        return resids
    @cache_readonly
    def standardized_resids(self):
        """
        Returns standardized residuals under independence.
        """
        row, col = self.marginal_probabilities
        sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
        return sresids
    @cache_readonly
    def chi2_contribs(self):
        """
        Returns the contributions to the chi^2 statistic for independence.
        The returned table contains the contribution of each cell to the chi^2
        test statistic for the null hypothesis that the rows and columns
        are independent.
        """
        return self.resid_pearson**2
    @cache_readonly
    def local_log_oddsratios(self):
        """
        Returns local log odds ratios.
        The local log odds ratios are the log odds ratios
        calculated for contiguous 2x2 sub-tables.
        """
        ta = self.table.copy()
        # a, b, c, d are the cells of every contiguous 2x2 sub-table
        a = ta[0:-1, 0:-1]
        b = ta[0:-1, 1:]
        c = ta[1:, 0:-1]
        d = ta[1:, 1:]
        tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
        # pad with NaN so the result has the same shape as the table
        rslt = np.empty(self.table.shape, np.float64)
        rslt *= np.nan
        rslt[0:-1, 0:-1] = tab
        if isinstance(self.table_orig, pd.DataFrame):
            rslt = pd.DataFrame(rslt, index=self.table_orig.index,
                                columns=self.table_orig.columns)
        return rslt
    @cache_readonly
    def local_oddsratios(self):
        """
        Returns local odds ratios.
        See documentation for local_log_oddsratios.
        """
        return np.exp(self.local_log_oddsratios)
    @cache_readonly
    def cumulative_log_oddsratios(self):
        """
        Returns cumulative log odds ratios.
        The cumulative log odds ratios for a contingency table
        with ordered rows and columns are calculated by collapsing
        all cells to the left/right and above/below a given point,
        to obtain a 2x2 table from which a log odds ratio can be
        calculated.
        """
        # partial sums over rows and columns; each interior split point
        # yields a collapsed 2x2 table with cells a, b, c, d
        ta = self.table.cumsum(0).cumsum(1)
        a = ta[0:-1, 0:-1]
        b = ta[0:-1, -1:] - a
        c = ta[-1:, 0:-1] - a
        d = ta[-1, -1] - (a + b + c)
        tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
        # pad with NaN so the result has the same shape as the table
        rslt = np.empty(self.table.shape, np.float64)
        rslt *= np.nan
        rslt[0:-1, 0:-1] = tab
        if isinstance(self.table_orig, pd.DataFrame):
            rslt = pd.DataFrame(rslt, index=self.table_orig.index,
                                columns=self.table_orig.columns)
        return rslt
    @cache_readonly
    def cumulative_oddsratios(self):
        """
        Returns the cumulative odds ratios for a contingency table.
        See documentation for cumulative_log_oddsratio.
        """
        return np.exp(self.cumulative_log_oddsratios)
class SquareTable(Table):
    """
    Methods for analyzing a square contingency table.
    Parameters
    ----------
    table : array_like
        A square contingency table, or DataFrame that is converted
        to a square form.
    shift_zeros : bool
        If True and any cell count is zero, add 0.5 to all values
        in the table.
    Notes
    -----
    These methods should only be used when the rows and columns of the
    table have the same categories. If `table` is provided as a
    Pandas DataFrame, the row and column indices will be extended to
    create a square table, inserting zeros where a row or column is
    missing. Otherwise the table should be provided in a square form,
    with the (implicit) row and column categories appearing in the
    same order.
    """
    def __init__(self, table, shift_zeros=True):
        table = _make_df_square(table) # Non-pandas passes through
        k1, k2 = table.shape
        if k1 != k2:
            raise ValueError('table must be square')
        super(SquareTable, self).__init__(table, shift_zeros)
    def symmetry(self, method="bowker"):
        """
        Test for symmetry of a joint distribution.
        This procedure tests the null hypothesis that the joint
        distribution is symmetric around the main diagonal, that is
        .. math::
        p_{i, j} = p_{j, i} for all i, j
        Returns
        -------
        Bunch
            A bunch with attributes
            * statistic : float
                chisquare test statistic
            * p-value : float
                p-value of the test statistic based on chisquare distribution
            * df : int
                degrees of freedom of the chisquare distribution
        Notes
        -----
        The implementation is based on the SAS documentation. R includes
        it in `mcnemar.test` if the table is not 2 by 2. However a more
        direct generalization of the McNemar test to larger tables is
        provided by the homogeneity test (TableSymmetry.homogeneity).
        The p-value is based on the chi-square distribution which requires
        that the sample size is not very small to be a good approximation
        of the true distribution. For 2x2 contingency tables the exact
        distribution can be obtained with `mcnemar`
        See Also
        --------
        mcnemar
        homogeneity
        """
        if method.lower() != "bowker":
            raise ValueError("method for symmetry testing must be 'bowker'")
        k = self.table.shape[0]
        # Bowker's statistic sums (n_ij - n_ji)^2 / (n_ij + n_ji) over
        # the pairs above the diagonal; 1e-20 guards against 0/0
        upp_idx = np.triu_indices(k, 1)
        tril = self.table.T[upp_idx] # lower triangle in column order
        triu = self.table[upp_idx] # upper triangle in row order
        statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
        df = k * (k-1) / 2.
        pvalue = stats.chi2.sf(statistic, df)
        b = _Bunch()
        b.statistic = statistic
        b.pvalue = pvalue
        b.df = df
        return b
    def homogeneity(self, method="stuart_maxwell"):
        """
        Compare row and column marginal distributions.
        Parameters
        ----------
        method : str
            Either 'stuart_maxwell' or 'bhapkar', leading to two different
            estimates of the covariance matrix for the estimated
            difference between the row margins and the column margins.
        Returns
        -------
        Bunch
            A bunch with attributes:
            * statistic : float
                The chi^2 test statistic
            * pvalue : float
                The p-value of the test statistic
            * df : int
                The degrees of freedom of the reference distribution
        Notes
        -----
        For a 2x2 table this is equivalent to McNemar's test. More
        generally the procedure tests the null hypothesis that the
        marginal distribution of the row factor is equal to the
        marginal distribution of the column factor. For this to be
        meaningful, the two factors must have the same sample space
        (i.e. the same categories).
        """
        if self.table.shape[0] < 1:
            raise ValueError('table is empty')
        elif self.table.shape[0] == 1:
            # a 1x1 table is trivially homogeneous
            b = _Bunch()
            b.statistic = 0
            b.pvalue = 1
            b.df = 0
            return b
        method = method.lower()
        if method not in ["bhapkar", "stuart_maxwell"]:
            raise ValueError("method '%s' for homogeneity not known" % method)
        n_obs = self.table.sum()
        pr = self.table.astype(np.float64) / n_obs
        # Compute margins, eliminate last row/column so there is no
        # degeneracy
        row = pr.sum(1)[0:-1]
        col = pr.sum(0)[0:-1]
        pr = pr[0:-1, 0:-1]
        # The estimated difference between row and column margins.
        d = col - row
        # The degrees of freedom of the chi^2 reference distribution.
        df = pr.shape[0]
        # covariance matrix of d under the null; the two methods differ
        # only in the extra -outer(d, d) / -d**2 terms
        if method == "bhapkar":
            vmat = -(pr + pr.T) - np.outer(d, d)
            dv = col + row - 2*np.diag(pr) - d**2
            np.fill_diagonal(vmat, dv)
        elif method == "stuart_maxwell":
            vmat = -(pr + pr.T)
            dv = row + col - 2*np.diag(pr)
            np.fill_diagonal(vmat, dv)
        try:
            # quadratic form n * d' V^{-1} d
            statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
        except np.linalg.LinAlgError:
            warnings.warn("Unable to invert covariance matrix",
                          sm_exceptions.SingularMatrixWarning)
            b = _Bunch()
            b.statistic = np.nan
            b.pvalue = np.nan
            b.df = df
            return b
        pvalue = 1 - stats.chi2.cdf(statistic, df)
        b = _Bunch()
        b.statistic = statistic
        b.pvalue = pvalue
        b.df = df
        return b
    def summary(self, alpha=0.05, float_format="%.3f"):
        """
        Produce a summary of the analysis.
        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the interval.
        float_format : str
            Used to format numeric values in the table.
        """
        # run both tests and lay the results out in a two-row table
        fmt = float_format
        headers = ["Statistic", "P-value", "DF"]
        stubs = ["Symmetry", "Homogeneity"]
        sy = self.symmetry()
        hm = self.homogeneity()
        data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
                [fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
        tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
                                table_dec_above='')
        return tab
class Table2x2(SquareTable):
    """
    Analyses that can be performed on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like
        A 2x2 contingency table
    shift_zeros : bool
        If true, 0.5 is added to all cells of the table if any cell is
        equal to zero.

    Notes
    -----
    The inference procedures used here are all based on a sampling
    model in which the units are independent and identically
    distributed, with each unit being classified with respect to two
    categorical variables.

    Note that for the risk ratio, the analysis is not symmetric with
    respect to the rows and columns of the contingency table. The two
    rows define population subgroups, column 0 is the number of
    'events', and column 1 is the number of 'non-events'.
    """

    def __init__(self, table, shift_zeros=True):
        # Coerce any plain (possibly nested) sequence -- list, tuple, etc. --
        # to an ndarray so the shape check below raises a clear ValueError
        # instead of an AttributeError on `.ndim`.  ndarrays and DataFrames
        # are passed through unchanged (DataFrames keep their labels).
        if not isinstance(table, (np.ndarray, pd.DataFrame)):
            table = np.asarray(table)

        if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
            raise ValueError("Table2x2 takes a 2x2 table as input.")

        super(Table2x2, self).__init__(table, shift_zeros)

    @classmethod
    def from_data(cls, data, shift_zeros=True):
        """
        Construct a Table object from data.

        Parameters
        ----------
        data : array_like
            The raw data, the first column defines the rows and the
            second column defines the columns.
        shift_zeros : bool
            If True, and if there are any zeros in the contingency
            table, add 0.5 to all four cells of the table.
        """
        if isinstance(data, pd.DataFrame):
            table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
        else:
            table = pd.crosstab(data[:, 0], data[:, 1])
        return cls(table, shift_zeros)

    @cache_readonly
    def log_oddsratio(self):
        """
        Returns the log odds ratio for a 2x2 table.
        """
        # log(ad) - log(bc) via a dot product with signs (+, -, -, +).
        f = self.table.flatten()
        return np.dot(np.log(f), np.r_[1, -1, -1, 1])

    @cache_readonly
    def oddsratio(self):
        """
        Returns the odds ratio for a 2x2 table.
        """
        return (self.table[0, 0] * self.table[1, 1] /
                (self.table[0, 1] * self.table[1, 0]))

    @cache_readonly
    def log_oddsratio_se(self):
        """
        Returns the standard error for the log odds ratio.
        """
        # Delta-method SE: sqrt(1/a + 1/b + 1/c + 1/d).
        return np.sqrt(np.sum(1 / self.table))

    def oddsratio_pvalue(self, null=1):
        """
        P-value for a hypothesis test about the odds ratio.

        Parameters
        ----------
        null : float
            The null value of the odds ratio.
        """
        # The test is carried out on the log scale, where the sampling
        # distribution is approximately normal.
        return self.log_oddsratio_pvalue(np.log(null))

    def log_oddsratio_pvalue(self, null=0):
        """
        P-value for a hypothesis test about the log odds ratio.

        Parameters
        ----------
        null : float
            The null value of the log odds ratio.
        """
        zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
        pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
        return pvalue

    def log_oddsratio_confint(self, alpha=0.05, method="normal"):
        """
        A confidence level for the log odds ratio.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            confidence interval.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.
        """
        f = -stats.norm.ppf(alpha / 2)
        lor = self.log_oddsratio
        se = self.log_oddsratio_se
        lcb = lor - f * se
        ucb = lor + f * se
        return lcb, ucb

    def oddsratio_confint(self, alpha=0.05, method="normal"):
        """
        A confidence interval for the odds ratio.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            confidence interval.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.
        """
        # Exponentiate the endpoints of the log-scale interval.
        lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
        return np.exp(lcb), np.exp(ucb)

    @cache_readonly
    def riskratio(self):
        """
        Returns the risk ratio for a 2x2 table.

        The risk ratio is calculated with respect to the rows.
        """
        p = self.table[:, 0] / self.table.sum(1)
        return p[0] / p[1]

    @cache_readonly
    def log_riskratio(self):
        """
        Returns the log of the risk ratio.
        """
        return np.log(self.riskratio)

    @cache_readonly
    def log_riskratio_se(self):
        """
        Returns the standard error of the log of the risk ratio.
        """
        n = self.table.sum(1)
        p = self.table[:, 0] / n
        va = np.sum((1 - p) / (n*p))
        return np.sqrt(va)

    def riskratio_pvalue(self, null=1):
        """
        p-value for a hypothesis test about the risk ratio.

        Parameters
        ----------
        null : float
            The null value of the risk ratio.
        """
        # As for the odds ratio, the test is performed on the log scale.
        return self.log_riskratio_pvalue(np.log(null))

    def log_riskratio_pvalue(self, null=0):
        """
        p-value for a hypothesis test about the log risk ratio.

        Parameters
        ----------
        null : float
            The null value of the log risk ratio.
        """
        zscore = (self.log_riskratio - null) / self.log_riskratio_se
        pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
        return pvalue

    def log_riskratio_confint(self, alpha=0.05, method="normal"):
        """
        A confidence interval for the log risk ratio.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            confidence interval.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.
        """
        f = -stats.norm.ppf(alpha / 2)
        lrr = self.log_riskratio
        se = self.log_riskratio_se
        lcb = lrr - f * se
        ucb = lrr + f * se
        return lcb, ucb

    def riskratio_confint(self, alpha=0.05, method="normal"):
        """
        A confidence interval for the risk ratio.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            confidence interval.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.
        """
        lcb, ucb = self.log_riskratio_confint(alpha, method=method)
        return np.exp(lcb), np.exp(ucb)

    def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
        """
        Summarizes results for a 2x2 table analysis.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the confidence
            intervals.
        float_format : str
            Used to format the numeric values in the table.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.
        """
        def fmt(x):
            if isinstance(x, str):
                return x
            return float_format % x

        headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
        stubs = ["Odds ratio", "Log odds ratio", "Risk ratio",
                 "Log risk ratio"]

        lcb1, ucb1 = self.oddsratio_confint(alpha, method)
        lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
        lcb3, ucb3 = self.riskratio_confint(alpha, method)
        lcb4, ucb4 = self.log_riskratio_confint(alpha, method)

        # The p-value is identical on the raw and log scales, so the
        # odds-ratio p-value is reused for the log odds ratio row (and
        # similarly for the risk ratio rows).
        data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1,
                                  self.oddsratio_pvalue()]],
                [fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se,
                                  lcb2, ucb2, self.oddsratio_pvalue()]],
                [fmt(x) for x in [self.riskratio, "", lcb3, ucb3,
                                  self.riskratio_pvalue()]],
                [fmt(x) for x in [self.log_riskratio, self.log_riskratio_se,
                                  lcb4, ucb4, self.riskratio_pvalue()]]]
        tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
                                table_dec_above='')
        return tab
class StratifiedTable:
    """
    Analyses for a collection of 2x2 contingency tables.

    Such a collection may arise by stratifying a single 2x2 table with
    respect to another factor. This class implements the
    'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
    analyzing collections of 2x2 contingency tables.

    Parameters
    ----------
    tables : list or ndarray
        Either a list containing several 2x2 contingency tables, or
        a 2x2xk ndarray in which each slice along the third axis is a
        2x2 contingency table.

    Notes
    -----
    These results are based on a sampling model in which the units are
    independent both within and between strata.
    """

    def __init__(self, tables, shift_zeros=False):
        if isinstance(tables, np.ndarray):
            sp = tables.shape
            if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
                raise ValueError("If an ndarray, argument must be 2x2xn")
            table = tables * 1.  # use atleast float dtype
        else:
            if any([np.asarray(x).shape != (2, 2) for x in tables]):
                m = "If `tables` is a list, all of its elements should be 2x2"
                raise ValueError(m)
            # Create a data cube
            table = np.dstack(tables).astype(np.float64)

        if shift_zeros:
            # Only shift strata that actually contain a zero cell.
            zx = (table == 0).sum(0).sum(0)
            ix = np.flatnonzero(zx > 0)
            if len(ix) > 0:
                table = table.copy()
                table[:, :, ix] += 0.5

        self.table = table
        self._cache = {}

        # Quantities to precompute.  Table entries are [[a, b], [c,
        # d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
        # etc.
        self._apb = table[0, 0, :] + table[0, 1, :]
        self._apc = table[0, 0, :] + table[1, 0, :]
        self._bpd = table[0, 1, :] + table[1, 1, :]
        self._cpd = table[1, 0, :] + table[1, 1, :]
        self._ad = table[0, 0, :] * table[1, 1, :]
        self._bc = table[0, 1, :] * table[1, 0, :]
        self._apd = table[0, 0, :] + table[1, 1, :]
        self._dma = table[1, 1, :] - table[0, 0, :]
        self._n = table.sum(0).sum(0)

    @classmethod
    def from_data(cls, var1, var2, strata, data):
        """
        Construct a StratifiedTable object from data.

        Parameters
        ----------
        var1 : int or string
            The column index or name of `data` specifying the variable
            defining the rows of the contingency table. The variable
            must have only two distinct values.
        var2 : int or string
            The column index or name of `data` specifying the variable
            defining the columns of the contingency table. The variable
            must have only two distinct values.
        strata : int or string
            The column index or name of `data` specifying the variable
            defining the strata.
        data : array_like
            The raw data. A cross-table for analysis is constructed
            from the first two columns.

        Returns
        -------
        StratifiedTable
        """
        if not isinstance(data, pd.DataFrame):
            data1 = pd.DataFrame(index=np.arange(data.shape[0]),
                                 columns=[var1, var2, strata])
            data1[data1.columns[var1]] = data[:, var1]
            data1[data1.columns[var2]] = data[:, var2]
            data1[data1.columns[strata]] = data[:, strata]
        else:
            data1 = data[[var1, var2, strata]]

        # Build one 2x2 table per stratum.
        gb = data1.groupby(strata).groups
        tables = []
        for g in gb:
            ii = gb[g]
            tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
            if (tab.shape != np.r_[2, 2]).any():
                msg = "Invalid table dimensions"
                raise ValueError(msg)
            tables.append(np.asarray(tab))
        return cls(tables)

    def test_null_odds(self, correction=False):
        """
        Test that all tables have odds ratio equal to 1.

        This is the 'Mantel-Haenszel' test.

        Parameters
        ----------
        correction : bool
            If True, use the continuity correction when calculating the
            test statistic.

        Returns
        -------
        Bunch
            A bunch containing the chi^2 test statistic and p-value.
        """
        statistic = np.sum(self.table[0, 0, :] -
                           self._apb * self._apc / self._n)
        statistic = np.abs(statistic)
        if correction:
            statistic -= 0.5
        statistic = statistic**2
        denom = self._apb * self._apc * self._bpd * self._cpd
        denom /= (self._n**2 * (self._n - 1))
        denom = np.sum(denom)
        statistic /= denom

        # df is always 1
        pvalue = 1 - stats.chi2.cdf(statistic, 1)

        b = _Bunch()
        b.statistic = statistic
        b.pvalue = pvalue
        return b

    @cache_readonly
    def oddsratio_pooled(self):
        """
        The pooled odds ratio.

        The value is an estimate of a common odds ratio across all of the
        stratified tables.
        """
        odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
        return odds_ratio

    @cache_readonly
    def logodds_pooled(self):
        """
        Returns the logarithm of the pooled odds ratio.

        See oddsratio_pooled for more information.
        """
        return np.log(self.oddsratio_pooled)

    @cache_readonly
    def riskratio_pooled(self):
        """
        Estimate of the pooled risk ratio.
        """
        acd = self.table[0, 0, :] * self._cpd
        cab = self.table[1, 0, :] * self._apb
        rr = np.sum(acd / self._n) / np.sum(cab / self._n)
        return rr

    @cache_readonly
    def logodds_pooled_se(self):
        """
        Estimated standard error of the pooled log odds ratio

        References
        ----------
        J. Robins, N. Breslow, S. Greenland. "Estimators of the
        Mantel-Haenszel Variance Consistent in Both Sparse Data and
        Large-Strata Limiting Models." Biometrics 42, no. 2 (1986): 311-23.
        """
        # Robins-Breslow-Greenland variance estimator.
        adns = np.sum(self._ad / self._n)
        bcns = np.sum(self._bc / self._n)
        lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
        mid = self._apd * self._bc / self._n**2
        mid += (1 - self._apd / self._n) * self._ad / self._n
        mid = np.sum(mid)
        mid /= (adns * bcns)
        lor_va += mid
        lor_va += np.sum((1 - self._apd / self._n) *
                         self._bc / self._n) / bcns**2
        lor_va /= 2
        lor_se = np.sqrt(lor_va)
        return lor_se

    def logodds_pooled_confint(self, alpha=0.05, method="normal"):
        """
        A confidence interval for the pooled log odds ratio.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            interval.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.

        Returns
        -------
        lcb : float
            The lower confidence limit.
        ucb : float
            The upper confidence limit.
        """
        lor = np.log(self.oddsratio_pooled)
        lor_se = self.logodds_pooled_se
        f = -stats.norm.ppf(alpha / 2)
        lcb = lor - f * lor_se
        ucb = lor + f * lor_se
        return lcb, ucb

    def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
        """
        A confidence interval for the pooled odds ratio.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            interval.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.

        Returns
        -------
        lcb : float
            The lower confidence limit.
        ucb : float
            The upper confidence limit.
        """
        lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
        lcb = np.exp(lcb)
        ucb = np.exp(ucb)
        return lcb, ucb

    def test_equal_odds(self, adjust=False):
        """
        Test that all odds ratios are identical.

        This is the 'Breslow-Day' testing procedure.

        Parameters
        ----------
        adjust : bool
            Use the 'Tarone' adjustment to achieve the chi^2
            asymptotic distribution.

        Returns
        -------
        A bunch containing the following attributes:

        statistic : float
            The chi^2 test statistic.
        p-value : float
            The p-value for the test.
        """
        table = self.table

        r = self.oddsratio_pooled
        # Coefficients of the quadratic whose root is the expected value
        # of the first cell under a common odds ratio r (renamed from
        # a/b/c to avoid shadowing the Bunch variable `b` created below).
        qa = 1 - r
        qb = r * (self._apb + self._apc) + self._dma
        qc = -r * self._apb * self._apc

        # Expected value of first cell
        dr = np.sqrt(qb**2 - 4*qa*qc)
        e11 = (-qb + dr) / (2*qa)

        # Variance of the first cell
        v11 = (1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) +
               1 / (self._dma + e11))
        v11 = 1 / v11

        statistic = np.sum((table[0, 0, :] - e11)**2 / v11)

        if adjust:
            adj = table[0, 0, :].sum() - e11.sum()
            adj = adj**2
            adj /= np.sum(v11)
            statistic -= adj

        pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)

        b = _Bunch()
        b.statistic = statistic
        b.pvalue = pvalue
        return b

    def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
        """
        A summary of all the main results.

        Parameters
        ----------
        alpha : float
            `1 - alpha` is the nominal coverage probability of the
            confidence intervals.
        float_format : str
            Used for formatting numeric values in the summary.
        method : str
            The method for producing the confidence interval. Currently
            must be 'normal' which uses the normal approximation.
        """
        def fmt(x):
            if isinstance(x, str):
                return x
            return float_format % x

        co_lcb, co_ucb = self.oddsratio_pooled_confint(
            alpha=alpha, method=method)
        clo_lcb, clo_ucb = self.logodds_pooled_confint(
            alpha=alpha, method=method)
        headers = ["Estimate", "LCB", "UCB"]
        stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
        data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
                [fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
                [fmt(x) for x in [self.riskratio_pooled, "", ""]],
                ['', '', '']]
        tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
                                 table_dec_above='')

        headers = ["Statistic", "P-value", ""]
        stubs = ["Test of OR=1", "Test constant OR"]
        rslt1 = self.test_null_odds()
        rslt2 = self.test_equal_odds()
        data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
                [fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
        tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
        tab1.extend(tab2)

        headers = ["", "", ""]
        stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
        ss = self.table.sum(0).sum(0)
        # Every row must have exactly three cells to match `headers`
        # (the last row previously carried a stray fourth cell).
        data = [["%d" % self.table.shape[2], '', ''],
                ["%d" % min(ss), '', ''],
                ["%d" % max(ss), '', ''],
                ["%.0f" % np.mean(ss), '', ''],
                ["%d" % sum(ss), '', '']]
        tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
        tab1.extend(tab3)

        return tab1
def mcnemar(table, exact=True, correction=True):
    """
    McNemar test of homogeneity.

    Parameters
    ----------
    table : array_like
        A square contingency table.
    exact : bool
        When True, the exact binomial distribution is used to compute
        the p-value.  When False, the chi-square approximation to the
        distribution of the test statistic is used instead, which is
        appropriate for large sample sizes.
    correction : bool
        When True (and `exact` is False), a continuity correction is
        applied to the chi-square statistic.

    Returns
    -------
    A bunch with attributes:

    statistic : float or int, array
        The chi-square statistic if `exact` is False.  If the exact
        binomial distribution is used, this is min(n1, n2), where n1
        and n2 are the counts of cases that are zero in one sample but
        one in the other.
    pvalue : float or array
        p-value of the null hypothesis of equal marginal distributions.

    Notes
    -----
    This is a special case of Cochran's Q test and of the homogeneity
    test.  Apart from the continuity correction, the chi-square variant
    produces identical results to those tests.
    """
    arr = np.asarray(_make_df_square(table), dtype=np.float64)
    # Only the two discordant (off-diagonal) counts enter the test.
    n_01 = arr[0, 1]
    n_10 = arr[1, 0]

    if not exact:
        # Chi-square approximation, optionally continuity-corrected.
        corr = int(correction)  # convert bool to 0 or 1
        statistic = (np.abs(n_01 - n_10) - corr)**2 / (1. * (n_01 + n_10))
        pvalue = stats.chi2.sf(statistic, 1)
    else:
        # Under H0 the smaller discordant count is Binomial(n, 0.5).
        statistic = np.minimum(n_01, n_10)
        total = int(n_01 + n_10)
        if total != (n_01 + n_10):
            raise ValueError(
                "exact can only be used with tables containing integers."
            )
        pvalue = stats.binom.cdf(statistic, total, 0.5) * 2
        pvalue = np.minimum(pvalue, 1)  # limit to 1 if n1==n2

    res = _Bunch()
    res.statistic = statistic
    res.pvalue = pvalue
    return res
def cochrans_q(x, return_object=True):
    """
    Cochran's Q test for identical binomial proportions.

    Parameters
    ----------
    x : array_like, 2d (N, k)
        data with N cases and k variables
    return_object : bool
        Return values as bunch instead of as individual values.

    Returns
    -------
    Returns a bunch containing the following attributes, or the
    individual values according to the value of `return_object`.

    statistic : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution

    Notes
    -----
    Cochran's Q is a k-sample extension of the McNemar test; with two
    groups the two tests are equivalent.  The null hypothesis is that
    the probability of success is the same in every group, against the
    alternative that at least two groups differ.

    In Wikipedia terminology, rows are blocks and columns are
    treatments, and the number of rows N should be large for the
    chisquare distribution to approximate the statistic well.

    References
    ----------
    https://en.wikipedia.org/wiki/Cochran_test
    SAS Manual for NPAR TESTS
    """
    data = np.asarray(x, dtype=np.float64)
    n_blocks, n_treat = data.shape

    # A "success" is the largest of the (two) distinct values present.
    levels = np.unique(data)
    success = (data == levels[-1])
    row_tot = success.sum(1, float)
    col_tot = success.sum(0, float)
    grand = row_tot.sum()
    assert grand == col_tot.sum()  # just a calculation check

    # Statistic from the SAS manual; the denominator is essentially k
    # times the variance of the column totals.  Wikipedia gives an
    # equivalent expression with the roles of rows and columns swapped.
    numer = (n_treat - 1) * (n_treat * np.sum(col_tot**2) - grand**2)
    denom = n_treat * grand - np.sum(row_tot**2)
    q_stat = numer / denom

    df = n_treat - 1
    pvalue = stats.chi2.sf(q_stat, df)

    if not return_object:
        return q_stat, pvalue, df

    res = _Bunch()
    res.statistic = q_stat
    res.df = df
    res.pvalue = pvalue
    return res
| bsd-3-clause | 4d4f232dbce492d2a09f2c97f89e245c | 29.92942 | 79 | 0.559218 | 3.895098 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/statespace/kalman_smoother.py | 3 | 75873 | """
State Space Representation and Kalman Filter, Smoother
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
from types import SimpleNamespace
from statsmodels.tsa.statespace.representation import OptionWrapper
from statsmodels.tsa.statespace.kalman_filter import (KalmanFilter,
FilterResults)
from statsmodels.tsa.statespace.tools import (
reorder_missing_matrix, reorder_missing_vector, copy_index_matrix)
from statsmodels.tsa.statespace import tools, initialization
# Bitmask flags selecting which smoother outputs are computed; chapter
# references are to Durbin and Koopman (2012), "Time Series Analysis by
# State Space Methods".
SMOOTHER_STATE = 0x01            # smoothed states; Chapter 4.4.2
SMOOTHER_STATE_COV = 0x02        # smoothed state covariances; Chapter 4.4.3
SMOOTHER_DISTURBANCE = 0x04      # smoothed disturbances; Chapter 4.5
SMOOTHER_DISTURBANCE_COV = 0x08  # smoothed disturbance covariances; Chapter 4.5
SMOOTHER_STATE_AUTOCOV = 0x10    # lag-one state autocovariances; Chapter 4.7
SMOOTHER_ALL = (
    SMOOTHER_STATE | SMOOTHER_STATE_COV | SMOOTHER_DISTURBANCE |
    SMOOTHER_DISTURBANCE_COV | SMOOTHER_STATE_AUTOCOV
)

# Bitmask flags selecting the smoothing algorithm; see
# KalmanSmoother.set_smooth_method for descriptions of each method.
SMOOTH_CONVENTIONAL = 0x01  # standard Durbin-Koopman smoother
SMOOTH_CLASSICAL = 0x02     # classical (e.g. Anderson and Moore) smoother
SMOOTH_ALTERNATIVE = 0x04   # modified Bryson-Frazier smoother
SMOOTH_UNIVARIATE = 0x08    # univariate smoother, modified B-F timing
class KalmanSmoother(KalmanFilter):
    r"""
    State space representation of a time series process, with Kalman filter
    and smoother.

    Parameters
    ----------
    k_endog : {array_like, int}
        The observed time-series process :math:`y` if array like or the
        number of variables in the process if an integer.
    k_states : int
        The dimension of the unobserved state process.
    k_posdef : int, optional
        The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the measurement equation. Must be less than
        or equal to `k_states`. Default is `k_states`.
    results_class : class, optional
        Default results class to use to save filtering output. Default is
        `SmootherResults`. If specified, class must extend from
        `SmootherResults`.
    **kwargs
        Keyword arguments may be used to provide default values for state space
        matrices, for Kalman filtering options, or for Kalman smoothing
        options. See `Representation` for more details.
    """

    # Names of the boolean flag attributes recognized by
    # `set_smoother_output` via keyword arguments.
    smoother_outputs = [
        'smoother_state', 'smoother_state_cov', 'smoother_state_autocov',
        'smoother_disturbance', 'smoother_disturbance_cov', 'smoother_all',
    ]

    # Each OptionWrapper exposes one bit of the `smoother_output` bitmask
    # as a boolean attribute.
    smoother_state = OptionWrapper('smoother_output', SMOOTHER_STATE)
    smoother_state_cov = OptionWrapper('smoother_output', SMOOTHER_STATE_COV)
    smoother_disturbance = (
        OptionWrapper('smoother_output', SMOOTHER_DISTURBANCE)
    )
    smoother_disturbance_cov = (
        OptionWrapper('smoother_output', SMOOTHER_DISTURBANCE_COV)
    )
    smoother_state_autocov = (
        OptionWrapper('smoother_output', SMOOTHER_STATE_AUTOCOV)
    )
    smoother_all = OptionWrapper('smoother_output', SMOOTHER_ALL)

    # Names of the boolean flag attributes recognized by
    # `set_smooth_method` via keyword arguments.
    smooth_methods = [
        'smooth_conventional', 'smooth_alternative', 'smooth_classical'
    ]

    smooth_conventional = OptionWrapper('smooth_method', SMOOTH_CONVENTIONAL)
    """
    (bool) Flag for conventional (Durbin and Koopman, 2012) Kalman smoothing.
    """
    smooth_alternative = OptionWrapper('smooth_method', SMOOTH_ALTERNATIVE)
    """
    (bool) Flag for alternative (modified Bryson-Frazier) smoothing.
    """
    smooth_classical = OptionWrapper('smooth_method', SMOOTH_CLASSICAL)
    """
    (bool) Flag for classical (see e.g. Anderson and Moore, 1979) smoothing.
    """
    smooth_univariate = OptionWrapper('smooth_method', SMOOTH_UNIVARIATE)
    """
    (bool) Flag for univariate smoothing (uses modified Bryson-Frazier timing).
    """

    # Default smoother options
    smoother_output = SMOOTHER_ALL
    smooth_method = 0

    def __init__(self, k_endog, k_states, k_posdef=None, results_class=None,
                 kalman_smoother_classes=None, **kwargs):
        # Set the default results class
        if results_class is None:
            results_class = SmootherResults

        super(KalmanSmoother, self).__init__(
            k_endog, k_states, k_posdef, results_class=results_class, **kwargs
        )

        # Options: map from BLAS prefix to the (typically Cython) smoother
        # class used for that dtype.
        self.prefix_kalman_smoother_map = (
            kalman_smoother_classes
            if kalman_smoother_classes is not None
            else tools.prefix_kalman_smoother_map.copy())

        # Setup the underlying Kalman smoother storage (one instance per
        # BLAS prefix, created lazily by `_initialize_smoother`).
        self._kalman_smoothers = {}

        # Set the smoother options
        self.set_smoother_output(**kwargs)
        self.set_smooth_method(**kwargs)

    def _clone_kwargs(self, endog, **kwargs):
        # See Representation._clone_kwargs for docstring
        kwargs = super(KalmanSmoother, self)._clone_kwargs(endog, **kwargs)

        # Get defaults for options
        kwargs.setdefault('smoother_output', self.smoother_output)
        kwargs.setdefault('smooth_method', self.smooth_method)

        return kwargs

    @property
    def _kalman_smoother(self):
        # The underlying smoother instance for the current BLAS prefix, or
        # None if it has not been created yet.
        prefix = self.prefix
        if prefix in self._kalman_smoothers:
            return self._kalman_smoothers[prefix]
        return None

    def _initialize_smoother(self, smoother_output=None, smooth_method=None,
                             prefix=None, **kwargs):
        # Create or update the dtype-specific smoother object, creating the
        # underlying filter first if necessary.
        if smoother_output is None:
            smoother_output = self.smoother_output
        if smooth_method is None:
            smooth_method = self.smooth_method

        # Make sure we have the required Kalman filter
        prefix, dtype, create_filter, create_statespace = (
            self._initialize_filter(prefix, **kwargs)
        )

        # Determine if we need to (re-)create the smoother
        # (definitely need to recreate if we recreated the filter)
        create_smoother = (create_filter or
                           prefix not in self._kalman_smoothers)
        if not create_smoother:
            kalman_smoother = self._kalman_smoothers[prefix]

            # Also recreate if the cached smoother wraps a stale filter.
            create_smoother = (kalman_smoother.kfilter is not
                               self._kalman_filters[prefix])

        # If the dtype-specific _kalman_smoother does not exist (or if we
        # need to re-create it), create it
        if create_smoother:
            # Setup the smoother
            cls = self.prefix_kalman_smoother_map[prefix]
            self._kalman_smoothers[prefix] = cls(
                self._statespaces[prefix], self._kalman_filters[prefix],
                smoother_output, smooth_method
            )
        # Otherwise, update the smoother parameters
        else:
            self._kalman_smoothers[prefix].set_smoother_output(
                smoother_output, False)
            self._kalman_smoothers[prefix].set_smooth_method(smooth_method)

        return prefix, dtype, create_smoother, create_filter, create_statespace

    def set_smoother_output(self, smoother_output=None, **kwargs):
        """
        Set the smoother output

        The smoother can produce several types of results. The smoother output
        variable controls which are calculated and returned.

        Parameters
        ----------
        smoother_output : int, optional
            Bitmask value to set the smoother output to. See notes for details.
        **kwargs
            Keyword arguments may be used to influence the smoother output by
            setting individual boolean flags. See notes for details.

        Notes
        -----
        The smoother output is defined by a collection of boolean flags, and
        is internally stored as a bitmask. The methods available are:

        SMOOTHER_STATE = 0x01
            Calculate and return the smoothed states.
        SMOOTHER_STATE_COV = 0x02
            Calculate and return the smoothed state covariance matrices.
        SMOOTHER_STATE_AUTOCOV = 0x10
            Calculate and return the smoothed state lag-one autocovariance
            matrices.
        SMOOTHER_DISTURBANCE = 0x04
            Calculate and return the smoothed state and observation
            disturbances.
        SMOOTHER_DISTURBANCE_COV = 0x08
            Calculate and return the covariance matrices for the smoothed state
            and observation disturbances.
        SMOOTHER_ALL
            Calculate and return all results.

        If the bitmask is set directly via the `smoother_output` argument, then
        the full method must be provided.

        If keyword arguments are used to set individual boolean flags, then
        the lowercase of the method must be used as an argument name, and the
        value is the desired value of the boolean flag (True or False).

        Note that the smoother output may also be specified by directly
        modifying the class attributes which are defined similarly to the
        keyword arguments.

        The default smoother output is SMOOTHER_ALL.

        If performance is a concern, only those results which are needed should
        be specified as any results that are not specified will not be
        calculated. For example, if the smoother output is set to only include
        SMOOTHER_STATE, the smoother operates much more quickly than if all
        output is required.

        Examples
        --------
        >>> import statsmodels.tsa.statespace.kalman_smoother as ks
        >>> mod = ks.KalmanSmoother(1,1)
        >>> mod.smoother_output
        15
        >>> mod.set_smoother_output(smoother_output=0)
        >>> mod.smoother_state = True
        >>> mod.smoother_output
        1
        >>> mod.smoother_state
        True
        """
        if smoother_output is not None:
            self.smoother_output = smoother_output
        # Individual boolean flags (applied after the bitmask, so they can
        # override bits set via `smoother_output`).
        for name in KalmanSmoother.smoother_outputs:
            if name in kwargs:
                setattr(self, name, kwargs[name])

    def set_smooth_method(self, smooth_method=None, **kwargs):
        r"""
        Set the smoothing method

        The smoothing method can be used to override the Kalman smoother
        approach used. By default, the Kalman smoother used depends on the
        Kalman filter method.

        Parameters
        ----------
        smooth_method : int, optional
            Bitmask value to set the filter method to. See notes for details.
        **kwargs
            Keyword arguments may be used to influence the filter method by
            setting individual boolean flags. See notes for details.

        Notes
        -----
        The smoothing method is defined by a collection of boolean flags, and
        is internally stored as a bitmask. The methods available are:

        SMOOTH_CONVENTIONAL = 0x01
            Default Kalman smoother, as presented in Durbin and Koopman, 2012
            chapter 4.
        SMOOTH_CLASSICAL = 0x02
            Classical Kalman smoother, as presented in Anderson and Moore, 1979
            or Durbin and Koopman, 2012 chapter 4.6.1.
        SMOOTH_ALTERNATIVE = 0x04
            Modified Bryson-Frazier Kalman smoother method; this is identical
            to the conventional method of Durbin and Koopman, 2012, except that
            an additional intermediate step is included.
        SMOOTH_UNIVARIATE = 0x08
            Univariate Kalman smoother, as presented in Durbin and Koopman,
            2012 chapter 6, except with modified Bryson-Frazier timing.

        Practically speaking, these methods should all produce the same output
        but different computational implications, numerical stability
        implications, or internal timing assumptions.

        Note that only the first method is available if using a Scipy version
        older than 0.16.

        If the bitmask is set directly via the `smooth_method` argument, then
        the full method must be provided.

        If keyword arguments are used to set individual boolean flags, then
        the lowercase of the method must be used as an argument name, and the
        value is the desired value of the boolean flag (True or False).

        Note that the filter method may also be specified by directly modifying
        the class attributes which are defined similarly to the keyword
        arguments.

        The default filtering method is SMOOTH_CONVENTIONAL.

        Examples
        --------
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.smooth_method
        1
        >>> mod.filter_conventional
        True
        >>> mod.filter_univariate = True
        >>> mod.smooth_method
        17
        >>> mod.set_smooth_method(filter_univariate=False,
                                  filter_collapsed=True)
        >>> mod.smooth_method
        33
        >>> mod.set_smooth_method(smooth_method=1)
        >>> mod.filter_conventional
        True
        >>> mod.filter_univariate
        False
        >>> mod.filter_collapsed
        False
        >>> mod.filter_univariate = True
        >>> mod.smooth_method
        17
        """
        if smooth_method is not None:
            self.smooth_method = smooth_method
        # Individual boolean flags (applied after the bitmask, so they can
        # override bits set via `smooth_method`).
        for name in KalmanSmoother.smooth_methods:
            if name in kwargs:
                setattr(self, name, kwargs[name])

    def _smooth(self, smoother_output=None, smooth_method=None, prefix=None,
                complex_step=False, results=None, **kwargs):
        # Run the dtype-specific smoother; the filter must already have been
        # run (via `_filter`) so that its output is available.
        # NOTE(review): the `complex_step` and `results` arguments are
        # accepted but not used in this method body — confirm whether they
        # are kept only for signature compatibility.

        # Initialize the smoother
        prefix, dtype, create_smoother, create_filter, create_statespace = (
            self._initialize_smoother(
                smoother_output, smooth_method, prefix=prefix, **kwargs
            ))

        # Check that the filter and statespace weren't just recreated
        if create_filter or create_statespace:
            raise ValueError('Passed settings forced re-creation of the'
                             ' Kalman filter. Please run `_filter` before'
                             ' running `_smooth`.')

        # Get the appropriate smoother
        smoother = self._kalman_smoothers[prefix]

        # Run the smoother
        smoother()

        return smoother

    def smooth(self, smoother_output=None, smooth_method=None, results=None,
               run_filter=True, prefix=None, complex_step=False,
               update_representation=True, update_filter=True,
               update_smoother=True, **kwargs):
        """
        Apply the Kalman smoother to the statespace model.

        Parameters
        ----------
        smoother_output : int, optional
            Determines which Kalman smoother output calculate. Default is all
            (including state, disturbances, and all covariances).
        results : class or object, optional
            If a class, then that class is instantiated and returned with the
            result of both filtering and smoothing.
            If an object, then that object is updated with the smoothing data.
            If None, then a SmootherResults object is returned with both
            filtering and smoothing results.
        run_filter : bool, optional
            Whether or not to run the Kalman filter prior to smoothing. Default
            is True.
        prefix : str
            The prefix of the datatype. Usually only used internally.

        Returns
        -------
        SmootherResults object
        """
        # NOTE(review): the `results`, `run_filter`, `prefix` and
        # `complex_step` arguments are accepted but not referenced below —
        # the filter is always run and a fresh results object is always
        # created, which does not match the `results` description in the
        # docstring; confirm the intended behavior.

        # Run the filter
        kfilter = self._filter(**kwargs)

        # Create the results object
        results = self.results_class(self)
        if update_representation:
            results.update_representation(self)
        if update_filter:
            results.update_filter(kfilter)
        else:
            # (even if we don't update all filter results, still need to
            # update this)
            results.nobs_diffuse = kfilter.nobs_diffuse

        # Run the smoother
        if smoother_output is None:
            smoother_output = self.smoother_output
        smoother = self._smooth(smoother_output, results=results, **kwargs)

        # Update the results
        if update_smoother:
            results.update_smoother(smoother)

        return results
class SmootherResults(FilterResults):
    r"""
    Results from applying the Kalman smoother and/or filter to a state space
    model.
    Parameters
    ----------
    model : Representation
        A Statespace representation
    Attributes
    ----------
    nobs : int
        Number of observations.
    k_endog : int
        The dimension of the observation series.
    k_states : int
        The dimension of the unobserved state process.
    k_posdef : int
        The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the measurement equation.
    dtype : dtype
        Datatype of representation matrices
    prefix : str
        BLAS prefix of representation matrices
    shapes : dictionary of name:tuple
        A dictionary recording the shapes of each of the representation
        matrices as tuples.
    endog : ndarray
        The observation vector.
    design : ndarray
        The design matrix, :math:`Z`.
    obs_intercept : ndarray
        The intercept for the observation equation, :math:`d`.
    obs_cov : ndarray
        The covariance matrix for the observation equation :math:`H`.
    transition : ndarray
        The transition matrix, :math:`T`.
    state_intercept : ndarray
        The intercept for the transition equation, :math:`c`.
    selection : ndarray
        The selection matrix, :math:`R`.
    state_cov : ndarray
        The covariance matrix for the state equation :math:`Q`.
    missing : array of bool
        An array of the same size as `endog`, filled with boolean values that
        are True if the corresponding entry in `endog` is NaN and False
        otherwise.
    nmissing : array of int
        An array of size `nobs`, where the ith entry is the number (between 0
        and k_endog) of NaNs in the ith row of the `endog` array.
    time_invariant : bool
        Whether or not the representation matrices are time-invariant
    initialization : str
        Kalman filter initialization method.
    initial_state : array_like
        The state vector used to initialize the Kalman filter.
    initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
    filter_method : int
        Bitmask representing the Kalman filtering method
    inversion_method : int
        Bitmask representing the method used to invert the forecast error
        covariance matrix.
    stability_method : int
        Bitmask representing the methods used to promote numerical stability in
        the Kalman filter recursions.
    conserve_memory : int
        Bitmask representing the selected memory conservation method.
    tolerance : float
        The tolerance at which the Kalman filter determines convergence to
        steady-state.
    loglikelihood_burn : int
        The number of initial periods during which the loglikelihood is not
        recorded.
    converged : bool
        Whether or not the Kalman filter converged.
    period_converged : int
        The time period in which the Kalman filter converged.
    filtered_state : ndarray
        The filtered state vector at each time period.
    filtered_state_cov : ndarray
        The filtered state covariance matrix at each time period.
    predicted_state : ndarray
        The predicted state vector at each time period.
    predicted_state_cov : ndarray
        The predicted state covariance matrix at each time period.
    kalman_gain : ndarray
        The Kalman gain at each time period.
    forecasts : ndarray
        The one-step-ahead forecasts of observations at each time period.
    forecasts_error : ndarray
        The forecast errors at each time period.
    forecasts_error_cov : ndarray
        The forecast error covariance matrices at each time period.
    loglikelihood : ndarray
        The loglikelihood values at each time period.
    collapsed_forecasts : ndarray
        If filtering using collapsed observations, stores the one-step-ahead
        forecasts of collapsed observations at each time period.
    collapsed_forecasts_error : ndarray
        If filtering using collapsed observations, stores the one-step-ahead
        forecast errors of collapsed observations at each time period.
    collapsed_forecasts_error_cov : ndarray
        If filtering using collapsed observations, stores the one-step-ahead
        forecast error covariance matrices of collapsed observations at each
        time period.
    standardized_forecast_error : ndarray
        The standardized forecast errors
    smoother_output : int
        Bitmask representing the generated Kalman smoothing output
    scaled_smoothed_estimator : ndarray
        The scaled smoothed estimator at each time period.
    scaled_smoothed_estimator_cov : ndarray
        The scaled smoothed estimator covariance matrices at each time period.
    smoothing_error : ndarray
        The smoothing error covariance matrices at each time period.
    smoothed_state : ndarray
        The smoothed state at each time period.
    smoothed_state_cov : ndarray
        The smoothed state covariance matrices at each time period.
    smoothed_state_autocov : ndarray
        The smoothed state lag-one autocovariance matrices at each time
        period: :math:`Cov(\alpha_{t+1}, \alpha_t)`.
    smoothed_measurement_disturbance : ndarray
        The smoothed measurement disturbance at each time period.
    smoothed_state_disturbance : ndarray
        The smoothed state disturbance at each time period.
    smoothed_measurement_disturbance_cov : ndarray
        The smoothed measurement disturbance covariance matrices at each time
        period.
    smoothed_state_disturbance_cov : ndarray
        The smoothed state disturbance covariance matrices at each time period.
    """
    # Names of smoother-specific outputs copied over in `update_smoother`
    _smoother_attributes = [
        'smoother_output', 'scaled_smoothed_estimator',
        'scaled_smoothed_estimator_cov', 'smoothing_error',
        'smoothed_state', 'smoothed_state_cov', 'smoothed_state_autocov',
        'smoothed_measurement_disturbance', 'smoothed_state_disturbance',
        'smoothed_measurement_disturbance_cov',
        'smoothed_state_disturbance_cov', 'innovations_transition'
    ]
    # Boolean smoother option names, mirrored from the model in
    # `update_representation`
    _smoother_options = KalmanSmoother.smoother_outputs
    # Full attribute set: the filter-level model attributes plus the
    # smoother-specific ones above
    _attributes = FilterResults._model_attributes + _smoother_attributes
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : bool, optional
If set to true, only the smoother and filter options are updated,
and the state space representation is not updated. Default is
False.
Notes
-----
This method is rarely required except for internal usage.
"""
super(SmootherResults, self).update_representation(model, only_options)
# Save the options as boolean variables
for name in self._smoother_options:
setattr(self, name, getattr(model, name, None))
# Initialize holders for smoothed forecasts
self._smoothed_forecasts = None
self._smoothed_forecasts_error = None
self._smoothed_forecasts_error_cov = None
    def update_smoother(self, smoother):
        """
        Update the smoother results

        Parameters
        ----------
        smoother : KalmanSmoother
            The model object from which to take the updated values.

        Notes
        -----
        This method is rarely required except for internal usage.
        """
        # Copy the appropriate output
        attributes = []
        # Since update_representation will already have been called, we can
        # use the boolean options smoother_* and know they match the smoother
        # itself
        if self.smoother_state or self.smoother_disturbance:
            attributes.append('scaled_smoothed_estimator')
        if self.smoother_state_cov or self.smoother_disturbance_cov:
            attributes.append('scaled_smoothed_estimator_cov')
        if self.smoother_state:
            attributes.append('smoothed_state')
        if self.smoother_state_cov:
            attributes.append('smoothed_state_cov')
        if self.smoother_state_autocov:
            attributes.append('smoothed_state_autocov')
        if self.smoother_disturbance:
            attributes += [
                'smoothing_error',
                'smoothed_measurement_disturbance',
                'smoothed_state_disturbance'
            ]
        if self.smoother_disturbance_cov:
            attributes += [
                'smoothed_measurement_disturbance_cov',
                'smoothed_state_disturbance_cov'
            ]
        # When observations were missing, measurement-dimension outputs must
        # be reordered back into the original variable ordering
        has_missing = np.sum(self.nmissing) > 0
        for name in self._smoother_attributes:
            if name == 'smoother_output':
                # `smoother_output` was already set by update_representation
                pass
            elif name in attributes:
                if name in ['smoothing_error',
                            'smoothed_measurement_disturbance']:
                    # Measurement-dimension vectors: undo the missing-data
                    # reordering applied during filtering
                    vector = getattr(smoother, name, None)
                    if vector is not None and has_missing:
                        vector = np.array(reorder_missing_vector(
                            vector, self.missing, prefix=self.prefix))
                    else:
                        # NOTE(review): assumes the smoother computed this
                        # output (non-None) whenever `name in attributes`
                        vector = np.array(vector, copy=True)
                    setattr(self, name, vector)
                elif name == 'smoothed_measurement_disturbance_cov':
                    matrix = getattr(smoother, name, None)
                    if matrix is not None and has_missing:
                        matrix = reorder_missing_matrix(
                            matrix, self.missing, reorder_rows=True,
                            reorder_cols=True, prefix=self.prefix)
                        # In the missing data case, we want to set the missing
                        # components equal to their unconditional distribution
                        copy_index_matrix(
                            self.obs_cov, matrix, self.missing,
                            index_rows=True, index_cols=True, inplace=True,
                            prefix=self.prefix)
                    else:
                        matrix = np.array(matrix, copy=True)
                    setattr(self, name, matrix)
                else:
                    # All other outputs are copied directly
                    setattr(self, name,
                            np.array(getattr(smoother, name, None), copy=True))
            else:
                # Output was not requested, so make the attribute None
                setattr(self, name, None)
        self.innovations_transition = (
            np.array(smoother.innovations_transition, copy=True))
        # Diffuse objects
        self.scaled_smoothed_diffuse_estimator = None
        self.scaled_smoothed_diffuse1_estimator_cov = None
        self.scaled_smoothed_diffuse2_estimator_cov = None
        if self.nobs_diffuse > 0:
            self.scaled_smoothed_diffuse_estimator = np.array(
                smoother.scaled_smoothed_diffuse_estimator, copy=True)
            self.scaled_smoothed_diffuse1_estimator_cov = np.array(
                smoother.scaled_smoothed_diffuse1_estimator_cov, copy=True)
            self.scaled_smoothed_diffuse2_estimator_cov = np.array(
                smoother.scaled_smoothed_diffuse2_estimator_cov, copy=True)
        # Adjustments
        # For r_t (and similarly for N_t), what was calculated was
        # r_T, ..., r_{-1}. We only want r_0, ..., r_T
        # so exclude the appropriate element so that the time index is
        # consistent with the other returned output
        # r_t stored such that scaled_smoothed_estimator[0] == r_{-1}
        start = 1
        end = None
        if 'scaled_smoothed_estimator' in attributes:
            # Keep the presample value r_{-1} separately before trimming
            self.scaled_smoothed_estimator_presample = (
                self.scaled_smoothed_estimator[:, 0])
            self.scaled_smoothed_estimator = (
                self.scaled_smoothed_estimator[:, start:end]
            )
        if 'scaled_smoothed_estimator_cov' in attributes:
            # Keep the presample value N_{-1} separately before trimming
            self.scaled_smoothed_estimator_cov_presample = (
                self.scaled_smoothed_estimator_cov[:, :, 0])
            self.scaled_smoothed_estimator_cov = (
                self.scaled_smoothed_estimator_cov[:, :, start:end]
            )
        # Clear the smoothed forecasts
        self._smoothed_forecasts = None
        self._smoothed_forecasts_error = None
        self._smoothed_forecasts_error_cov = None
        # Note: if we concentrated out the scale, need to adjust the
        # loglikelihood values and all of the covariance matrices and the
        # values that depend on the covariance matrices
        if self.filter_concentrated and self.model._scale is None:
            # Covariances scale up; estimators (which involve inverse
            # covariances) scale down
            self.smoothed_state_cov *= self.scale
            self.smoothed_state_autocov *= self.scale
            self.smoothed_state_disturbance_cov *= self.scale
            self.smoothed_measurement_disturbance_cov *= self.scale
            self.scaled_smoothed_estimator_presample /= self.scale
            self.scaled_smoothed_estimator /= self.scale
            self.scaled_smoothed_estimator_cov_presample /= self.scale
            self.scaled_smoothed_estimator_cov /= self.scale
            self.smoothing_error /= self.scale
        # Cache for smoothed_state_autocovariance (keyed by (lag, t, start,
        # end); only used when no extension kwargs are involved)
        self.__smoothed_state_autocovariance = {}
    def _smoothed_state_autocovariance(self, shift, start, end,
                                       extend_kwargs=None):
        """
        Compute "forward" autocovariances, Cov(t, t+j)

        Parameters
        ----------
        shift : int
            The number of period to shift forwards when computing the
            autocovariance. This has the opposite sign as `lag` from the
            `smoothed_state_autocovariance` method.
        start : int, optional
            The start of the interval (inclusive) of autocovariances to compute
            and return.
        end : int, optional
            The end of the interval (exclusive) autocovariances to compute and
            return. Note that since it is an exclusive endpoint, the returned
            autocovariances do not include the value at this index.
        extend_kwargs : dict, optional
            Keyword arguments containing updated state space system matrices
            for handling out-of-sample autocovariance computations in
            time-varying state space models.

        Returns
        -------
        acov : ndarray
            Autocovariance matrices with shape (end - start, k_states,
            k_states). Entries for periods before the sample start are NaN.
        """
        if extend_kwargs is None:
            extend_kwargs = {}
        # Size of returned array in the time dimension
        n = end - start
        # Get number of post-sample periods we need to create an extended
        # model to compute
        if shift == 0:
            max_insample = self.nobs - shift
        else:
            max_insample = self.nobs - shift + 1
        n_postsample = max(0, end - max_insample)
        # Get full in-sample arrays
        if shift != 0:
            # Shifted autocovariances are built from the innovations
            # transition L_t, predicted state cov P_t, and scaled smoothed
            # estimator cov N_t
            L = self.innovations_transition
            P = self.predicted_state_cov
            N = self.scaled_smoothed_estimator_cov
        else:
            # shift == 0 is just the smoothed state covariance itself
            acov = self.smoothed_state_cov
        # If applicable, append out-of-sample arrays
        if n_postsample > 0:
            # Note: we need 1 less than the number of post
            # Extend the model with all-NaN (i.e. fully missing) observations
            # and smooth, to get out-of-sample covariance quantities
            endog = np.zeros((n_postsample, self.k_endog)) * np.nan
            mod = self.model.extend(endog, start=self.nobs, **extend_kwargs)
            mod.initialize_known(self.predicted_state[..., self.nobs],
                                 self.predicted_state_cov[..., self.nobs])
            res = mod.smooth()
            if shift != 0:
                start_insample = max(0, start)
                L = np.concatenate((L[..., start_insample:],
                                    res.innovations_transition), axis=2)
                P = np.concatenate((P[..., start_insample:],
                                    res.predicted_state_cov[..., 1:]),
                                   axis=2)
                N = np.concatenate((N[..., start_insample:],
                                    res.scaled_smoothed_estimator_cov),
                                   axis=2)
                # Re-base start/end so they index into the subset arrays
                end -= start_insample
                start -= start_insample
            else:
                acov = np.concatenate((acov, res.predicted_state_cov), axis=2)
        if shift != 0:
            # Subset to appropriate start, end
            start_insample = max(0, start)
            # Transpose so that time is the leading axis for batched matmul
            LT = L[..., start_insample:end + shift - 1].T
            P = P[..., start_insample:end + shift].T
            N = N[..., start_insample:end + shift - 1].T
            # Intermediate computations
            # Build the product L_{t}' L_{t+1}' ... via repeated batched
            # matrix multiplication, starting from the identity
            tmpLT = np.eye(self.k_states)[None, :, :]
            length = P.shape[0] - shift  # this is the required length of LT
            for i in range(1, shift + 1):
                tmpLT = LT[shift - i:length + shift - i] @ tmpLT
            eye = np.eye(self.k_states)[None, ...]
            # Compute the autocovariance
            acov = np.zeros((n, self.k_states, self.k_states))
            # Periods before the sample start cannot be computed -> NaN
            acov[:start_insample - start] = np.nan
            acov[start_insample - start:] = (
                P[:-shift] @ tmpLT @ (eye - N[shift - 1:] @ P[shift:]))
        else:
            acov = acov.T[start:end]
        return acov
    def smoothed_state_autocovariance(self, lag=1, t=None, start=None,
                                      end=None, extend_kwargs=None):
        r"""
        Compute state vector autocovariances, conditional on the full dataset

        Computes:

        .. math::

            Cov(\alpha_t - \hat \alpha_t, \alpha_{t - j} - \hat \alpha_{t - j})

        where the `lag` argument gives the value for :math:`j`. Thus when
        the `lag` argument is positive, the autocovariance is between the
        current and previous periods, while if `lag` is negative the
        autocovariance is between the current and future periods.

        Parameters
        ----------
        lag : int, optional
            The number of period to shift when computing the autocovariance.
            Default is 1.
        t : int, optional
            A specific period for which to compute and return the
            autocovariance. Cannot be used in combination with `start` or
            `end`. See the Returns section for details on how this
            parameter affects what is returned.
        start : int, optional
            The start of the interval (inclusive) of autocovariances to compute
            and return. Cannot be used in combination with the `t` argument.
            See the Returns section for details on how this parameter affects
            what is returned. Default is 0.
        end : int, optional
            The end of the interval (exclusive) autocovariances to compute and
            return. Note that since it is an exclusive endpoint, the returned
            autocovariances do not include the value at this index. Cannot be
            used in combination with the `t` argument. See the Returns section
            for details on how this parameter affects what is returned
            and what the default value is.
        extend_kwargs : dict, optional
            Keyword arguments containing updated state space system matrices
            for handling out-of-sample autocovariance computations in
            time-varying state space models.

        Returns
        -------
        acov : ndarray
            Array of autocovariance matrices. If the argument `t` is not
            provided, then it is shaped `(k_states, k_states, n)`, while if `t`
            given then the third axis is dropped and the array is shaped
            `(k_states, k_states)`.

            The output under the default case differs somewhat based on the
            state space model and the sign of the lag. To see how these cases
            differ, denote the output at each time point as Cov(t, t-j). Then:

            - If `lag > 0` (and the model is either time-varying or
              time-invariant), then the returned array is shaped `(*, *, nobs)`
              and each entry [:, :, t] contains Cov(t, t-j). However, the model
              does not have enough information to compute autocovariances in
              the pre-sample period, so that we cannot compute Cov(1, 1-lag),
              Cov(2, 2-lag), ..., Cov(lag, 0). Thus the first `lag` entries
              have all values set to NaN.

            - If the model is time-invariant and `lag < -1` or if `lag` is
              0 or -1, and the model is either time-invariant or time-varying,
              then the returned array is shaped `(*, *, nobs)` and each
              entry [:, :, t] contains Cov(t, t+j). Moreover, all entries are
              available (i.e. there are no NaNs).

            - If the model is time-varying and `lag < -1` and `extend_kwargs`
              is not provided, then the returned array is shaped
              `(*, *, nobs - lag + 1)`.

            - However, if the model is time-varying and `lag < -1`, then
              `extend_kwargs` can be provided with `lag - 1` additional
              matrices so that the returned array is shaped `(*, *, nobs)` as
              usual.

            More generally, the dimension of the last axis will be
            `start - end`.

        Notes
        -----
        This method computes:

        .. math::

            Cov(\alpha_t - \hat \alpha_t, \alpha_{t - j} - \hat \alpha_{t - j})

        where the `lag` argument determines the autocovariance order :math:`j`,
        and `lag` is an integer (positive, zero, or negative). This method
        cannot compute values associated with time points prior to the sample,
        and so it returns a matrix of NaN values for these time points.
        For example, if `start=0` and `lag=2`, then assuming the output is
        assigned to the variable `acov`, we will have `acov[..., 0]` and
        `acov[..., 1]` as matrices filled with NaN values.

        Based only on the "current" results object (i.e. the Kalman smoother
        applied to the sample), there is not enough information to compute
        Cov(t, t+j) for the last `lag - 1` observations of the sample. However,
        the values can be computed for these time points using the transition
        equation of the state space representation, and so for time-invariant
        state space models we do compute these values. For time-varying models,
        this can also be done, but updated state space matrices for the
        out-of-sample time points must be provided via the `extend_kwargs`
        argument.

        See [1]_, Chapter 4.7, for all details about how these autocovariances
        are computed.

        The `t` and `start`/`end` parameters compute and return only the
        requested autocovariances. As a result, using these parameters is
        recommended to reduce the computational burden, particularly if the
        number of observations and/or the dimension of the state vector is
        large.

        References
        ----------
        .. [1] Durbin, James, and Siem Jan Koopman. 2012.
           Time Series Analysis by State Space Methods: Second Edition.
           Oxford University Press.
        """
        # We can cache the results for time-invariant models
        cache_key = None
        if extend_kwargs is None or len(extend_kwargs) == 0:
            cache_key = (lag, t, start, end)
        # Short-circuit for a cache-hit
        if (cache_key is not None and
                cache_key in self.__smoothed_state_autocovariance):
            return self.__smoothed_state_autocovariance[cache_key]
        # Switch to only positive values for `lag`
        forward_autocovariances = False
        if lag < 0:
            lag = -lag
            forward_autocovariances = True
        # Handle `t`
        if t is not None and (start is not None or end is not None):
            raise ValueError('Cannot specify both `t` and `start` or `end`.')
        if t is not None:
            start = t
            end = t + 1
        # Defaults
        if start is None:
            start = 0
        if end is None:
            if forward_autocovariances and lag > 1 and extend_kwargs is None:
                # Without extension kwargs we cannot compute the last
                # `lag - 1` forward autocovariances, so shrink the default
                end = self.nobs - lag + 1
            else:
                end = self.nobs
        if extend_kwargs is None:
            extend_kwargs = {}
        # Sanity checks
        if start < 0 or end < 0:
            raise ValueError('Negative `t`, `start`, or `end` is not allowed.')
        if end < start:
            raise ValueError('`end` must be after `start`')
        if lag == 0 and self.smoothed_state_cov is None:
            raise RuntimeError('Cannot return smoothed state covariances'
                               ' if those values have not been computed by'
                               ' Kalman smoothing.')
        # We already have in-sample (+1 out-of-sample) smoothed covariances
        if lag == 0 and end <= self.nobs + 1:
            acov = self.smoothed_state_cov
            if end == self.nobs + 1:
                # Append the final predicted state cov as the single
                # out-of-sample value
                acov = np.concatenate(
                    (acov[..., start:], self.predicted_state_cov[..., -1:]),
                    axis=2).T
            else:
                acov = acov.T[start:end]
        # In-sample, we can compute up to Cov(T, T+1) or Cov(T+1, T) and down
        # to Cov(1, 2) or Cov(2, 1). So:
        # - For lag=1 we set Cov(1, 0) = np.nan and then can compute up to T-1
        #   in-sample values Cov(2, 1), ..., Cov(T, T-1) and the first
        #   out-of-sample value Cov(T+1, T)
        elif (lag == 1 and self.smoothed_state_autocov is not None and
                not forward_autocovariances and end <= self.nobs + 1):
            if start == 0:
                # Pre-sample value Cov(1, 0) is unavailable -> NaN pad
                nans = np.zeros((self.k_states, self.k_states, lag)) * np.nan
                acov = np.concatenate(
                    (nans, self.smoothed_state_autocov[..., :end - 1]),
                    axis=2)
            else:
                acov = self.smoothed_state_autocov[..., start - 1:end - 1]
            acov = acov.transpose(2, 0, 1)
        # - For lag=-1 we can compute T in-sample values, Cov(1, 2), ...,
        #   Cov(T, T+1) but we cannot compute the first out-of-sample value
        #   Cov(T+1, T+2).
        elif (lag == 1 and self.smoothed_state_autocov is not None and
                forward_autocovariances and end < self.nobs + 1):
            acov = self.smoothed_state_autocov.T[start:end]
        # Otherwise, we need to compute additional values at the end of the
        # sample
        else:
            if forward_autocovariances:
                # Cov(t, t + lag), t = start, ..., end
                acov = self._smoothed_state_autocovariance(
                    lag, start, end, extend_kwargs=extend_kwargs)
            else:
                # Cov(t, t + lag)' = Cov(t + lag, t),
                # with t = start - lag, ..., end - lag
                out = self._smoothed_state_autocovariance(
                    lag, start - lag, end - lag, extend_kwargs=extend_kwargs)
                acov = out.transpose(0, 2, 1)
        # Squeeze the last axis or else reshape to have the same axis
        # definitions as e.g. smoothed_state_cov
        if t is not None:
            acov = acov[0]
        else:
            acov = acov.transpose(1, 2, 0)
        # Fill in the cache, if applicable
        if cache_key is not None:
            self.__smoothed_state_autocovariance[cache_key] = acov
        return acov
    def news(self, previous, t=None, start=None, end=None,
             revised=None, design=None, state_index=None):
        r"""
        Compute the news and impacts associated with a data release

        Parameters
        ----------
        previous : SmootherResults
            Prior results object relative to which to compute the news. This
            results object must have identical state space representation for
            the prior sample period so that the only difference is that this
            results object has updates to the observed data.
        t : int, optional
            A specific period for which to compute the news. Cannot be used in
            combination with `start` or `end`.
        start : int, optional
            The start of the interval (inclusive) of news to compute. Cannot be
            used in combination with the `t` argument. Default is the last
            period of the sample (`nobs - 1`).
        end : int, optional
            The end of the interval (exclusive) of news to compute. Note that
            since it is an exclusive endpoint, the returned news do not include
            the value at this index. Cannot be used in combination with the `t`
            argument.
        design : array, optional
            Design matrix for the period `t` in time-varying models. If this
            model has a time-varying design matrix, and the argument `t` is out
            of this model's sample, then a new design matrix for period `t`
            must be provided. Unused otherwise.
        state_index : array_like, optional
            An optional index specifying a subset of states to use when
            constructing the impacts of revisions and news. For example, if
            `state_index=[0, 1]` is passed, then only the impacts to the
            observed variables arising from the impacts to the first two
            states will be returned.

        Returns
        -------
        news_results : SimpleNamespace
            News and impacts associated with a data release. Includes the
            following attributes:

            - `update_impacts`: update to forecasts of impacted variables from
              the news. It is equivalent to E[y^i | post] - E[y^i | revision],
              where y^i are the variables of interest. In [1]_, this is
              described as "revision" in equation (17).
            - `revision_impacts`: update to forecasts of variables impacted
              variables from data revisions. It is
              E[y^i | revision] - E[y^i | previous], and does not have a
              specific notation in [1]_, since there for simplicity they assume
              that there are no revisions.
            - `news`: the unexpected component of the updated data. Denoted
              I = y^u - E[y^u | previous], where y^u are the data points that
              were newly incorporated in a data release (but not including
              revisions to data points that already existed in the previous
              release). In [1]_, this is described as "news" in equation (17).
            - `revisions`
            - `gain`: the gain matrix associated with the "Kalman-like" update
              from the news, E[y I'] E[I I']^{-1}. In [1]_, this can be found
              in the equation For E[y_{k,t_k} \mid I_{v+1}] in the middle of
              page 17.
            - `revision_weights`
            - `update_forecasts`: forecasts of the updated periods used to
              construct the news, E[y^u | previous].
            - `update_realized`: realizations of the updated periods used to
              construct the news, y^u.
            - `revised_prev`
            - `revised`
            - `prev_impacted_forecasts`: previous forecast of the periods of
              interest, E[y^i | previous].
            - `post_impacted_forecasts`: forecast of the periods of interest
              after taking into account both revisions and updates,
              E[y^i | post].
            - `revision_results`: results object that updates the `previous`
              results to take into account data revisions.
            - `revisions_ix`: list of `(t, i)` positions of revisions in endog
            - `updates_ix`: list of `(t, i)` positions of updates to endog

        Notes
        -----
        This method computes the effect of new data (e.g. from a new data
        release) on smoothed forecasts produced by a state space model, as
        described in [1]_. It also computes the effect of revised data on
        smoothed forecasts.

        References
        ----------
        .. [1] Bańbura, Marta and Modugno, Michele. 2010.
           "Maximum likelihood estimation of factor models on data sets
           with arbitrary pattern of missing data."
           No 1189, Working Paper Series, European Central Bank.
           https://EconPapers.repec.org/RePEc:ecb:ecbwps:20101189.
        .. [2] Bańbura, Marta, and Michele Modugno.
           "Maximum likelihood estimation of factor models on datasets with
           arbitrary pattern of missing data."
           Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
        """
        # Handle `t`
        if t is not None and (start is not None or end is not None):
            raise ValueError('Cannot specify both `t` and `start` or `end`.')
        if t is not None:
            start = t
            end = t + 1
        # Defaults
        if start is None:
            start = self.nobs - 1
        if end is None:
            end = self.nobs
        # Sanity checks
        if start < 0 or end < 0:
            raise ValueError('Negative `t`, `start`, or `end` is not allowed.')
        if end <= start:
            raise ValueError('`end` must be after `start`')
        if self.smoothed_state_cov is None:
            raise ValueError('Cannot compute news without having applied the'
                             ' Kalman smoother first.')
        # NOTE(review): "to by an extension" below should probably read "to be
        # an extension" - left unchanged here since it is a runtime string
        error_ss = ('This results object has %s and so it does not appear to'
                    ' by an extension of `previous`. Can only compute the'
                    ' news by comparing this results set to previous results'
                    ' objects.')
        if self.nobs < previous.nobs:
            raise ValueError(error_ss % 'fewer observations than'
                                        ' `previous`')
        if not (self.k_endog == previous.k_endog and
                self.k_states == previous.k_states and
                self.k_posdef == previous.k_posdef):
            raise ValueError(error_ss % 'different state space dimensions than'
                                        ' `previous`')
        # Each system matrix must be time-varying in both models or in neither
        for key in self.model.shapes.keys():
            if key == 'obs':
                continue
            tv = getattr(self, key).shape[-1] > 1
            tv_prev = getattr(previous, key).shape[-1] > 1
            if tv and not tv_prev:
                raise ValueError(error_ss % f'time-varying {key} while'
                                 ' `previous` does not')
            if not tv and tv_prev:
                raise ValueError(error_ss % f'time-invariant {key} while'
                                 ' `previous` does not')
        # Standardize
        if state_index is not None:
            state_index = np.atleast_1d(
                np.sort(np.array(state_index, dtype=int)))
        # We cannot forecast out-of-sample periods in a time-varying model
        if end > self.nobs and not self.model.time_invariant:
            raise RuntimeError('Cannot compute the impacts of news on periods'
                               ' outside of the sample in time-varying'
                               ' models.')
        # For time-varying case, figure out extension kwargs
        # (the extra trailing entries of each time-varying system matrix, to
        # extend `previous` out to this model's sample length)
        extend_kwargs = {}
        for key in self.model.shapes.keys():
            if key == 'obs':
                continue
            mat = getattr(self, key)
            prev_mat = getattr(previous, key)
            if mat.shape[-1] > prev_mat.shape[-1]:
                extend_kwargs[key] = mat[..., prev_mat.shape[-1]:]
        # Figure out which indices have changed
        revisions_ix, updates_ix = previous.model.diff_endog(self.endog.T)
        # Compute prev / post impact forecasts
        prev_impacted_forecasts = previous.predict(
            start=start, end=end, **extend_kwargs).smoothed_forecasts
        post_impacted_forecasts = self.predict(
            start=start, end=end).smoothed_forecasts
        # Get revision weights, impacts, and forecasts
        if len(revisions_ix) > 0:
            revised_endog = self.endog[:, :previous.nobs].copy()
            revised_endog[previous.missing.astype(bool)] = np.nan
            # Compute the revisions
            revised_j, revised_p = zip(*revisions_ix)
            compute_j = np.arange(revised_j[0], revised_j[-1] + 1)
            revised_prev = previous.endog.T[compute_j]
            revised = revised_endog.T[compute_j]
            revisions = (revised - revised_prev)
            # Compute the weights of the smoothed state vector
            compute_t = np.arange(start, end)
            ix = np.ix_(compute_t, compute_j)
            # Construct a model from which we can create weights for impacts
            # through `end`
            # Construct endog for the new model
            tmp_endog = revised_endog.T.copy()
            tmp_nobs = max(end, previous.nobs)
            oos_nobs = tmp_nobs - previous.nobs
            if oos_nobs > 0:
                # Pad with fully-missing observations out to `end`
                tmp_endog = np.concatenate([
                    tmp_endog, np.zeros((oos_nobs, self.k_endog)) * np.nan
                ], axis=0)
            # Copy time-varying matrices (required by clone)
            clone_kwargs = {}
            for key in self.model.shapes.keys():
                if key == 'obs':
                    continue
                mat = getattr(self, key)
                if mat.shape[-1] > 1:
                    clone_kwargs[key] = mat[..., :tmp_nobs]
            rev_mod = previous.model.clone(tmp_endog, **clone_kwargs)
            init = initialization.Initialization.from_results(self)
            rev_mod.initialize(init)
            revision_results = rev_mod.smooth()
            smoothed_state_weights, _, _ = (
                tools._compute_smoothed_state_weights(
                    rev_mod, compute_t=compute_t, compute_j=compute_j,
                    compute_prior_weights=False, scale=previous.scale))
            smoothed_state_weights = smoothed_state_weights[ix]
            # Convert the weights in terms of smoothed forecasts
            # t, j, m, p, i
            ZT = rev_mod.design.T
            if ZT.shape[0] > 1:
                ZT = ZT[compute_t]
            # Subset the states used for the impacts if applicable
            if state_index is not None:
                ZT = ZT[:, state_index, :]
                smoothed_state_weights = (
                    smoothed_state_weights[:, :, state_index])
            # Multiplication gives: t, j, m, p * t, j, m, p, k
            # Sum along axis=2 gives: t, j, p, k
            # Transpose to: t, j, k, p (i.e. like t, j, m, p but with k instead
            # of m)
            revision_weights = np.nansum(
                smoothed_state_weights[..., None]
                * ZT[:, None, :, None, :], axis=2).transpose(0, 1, 3, 2)
            # Multiplication gives: t, j, k, p * t, j, k, p
            # Sum along axes 1, 3 gives: t, k
            # This is also a valid way to compute impacts, but it employs
            # unnecessary multiplications with zeros; it is better to use the
            # below method that flattens the revision indices before computing
            # the impacts
            # revision_impacts = np.nansum(
            #     revision_weights * revisions[None, :, None, :], axis=(1, 3))
            # Flatten the weights and revisions along the revised j, k
            # dimensions so that we only retain the actual revision elements
            ix_j = revised_j - revised_j[0]
            # Shape is: t, k, j * p
            # Note: have to transpose first so that the two advanced indexes
            # are next to each other, so that "the dimensions from the
            # advanced indexing operations are inserted into the result
            # array at the same spot as they were in the initial array"
            # (see https://numpy.org/doc/stable/user/basics.indexing.html,
            # "Combining advanced and basic indexing")
            revision_weights = (
                revision_weights.transpose(0, 2, 1, 3)[:, :, ix_j, revised_p])
            # Shape is j * k
            revisions = revisions[ix_j, revised_p]
            # Shape is t, k
            revision_impacts = revision_weights @ revisions
            # Similarly, flatten the revised and revised_prev series
            revised = revised[ix_j, revised_p]
            revised_prev = revised_prev[ix_j, revised_p]
            # Squeeze if `t` argument used
            if t is not None:
                revision_weights = revision_weights[0]
                revision_impacts = revision_impacts[0]
        else:
            # No revisions: all revision-related outputs are None
            revised_endog = None
            revised = None
            revised_prev = None
            revisions = None
            revision_weights = None
            revision_impacts = None
            revision_results = None
        # Now handle updates
        if len(updates_ix) > 0:
            # Figure out which time points we need forecast errors for
            update_t, update_k = zip(*updates_ix)
            update_start_t = np.min(update_t)
            update_end_t = np.max(update_t)
            # Forecasts are conditional on the revised data when revisions
            # exist, otherwise on the previous results
            if revision_results is None:
                forecasts = previous.predict(
                    start=update_start_t, end=update_end_t + 1,
                    **extend_kwargs).smoothed_forecasts.T
            else:
                forecasts = revision_results.predict(
                    start=update_start_t,
                    end=update_end_t + 1).smoothed_forecasts.T
            realized = self.endog.T[update_start_t:update_end_t + 1]
            forecasts_error = realized - forecasts
            # Now subset forecast errors to only the (time, endog) elements
            # that are updates
            ix_t = update_t - update_start_t
            update_realized = realized[ix_t, update_k]
            update_forecasts = forecasts[ix_t, update_k]
            update_forecasts_error = forecasts_error[ix_t, update_k]
            # Get the gains associated with each of the periods
            if self.design.shape[2] == 1:
                design = self.design[..., 0][None, ...]
            elif end <= self.nobs:
                design = self.design[..., start:end].transpose(2, 0, 1)
            else:
                # Note: this case is no longer possible, since above we raise
                # ValueError for time-varying case with end > self.nobs
                if design is None:
                    raise ValueError('Model has time-varying design matrix, so'
                                     ' an updated time-varying matrix for'
                                     ' period `t` is required.')
                elif design.ndim == 2:
                    design = design[None, ...]
                else:
                    design = design.transpose(2, 0, 1)
            state_gain = previous.smoothed_state_gain(
                updates_ix, start=start, end=end, extend_kwargs=extend_kwargs)
            # Subset the states used for the impacts if applicable
            if state_index is not None:
                design = design[:, :, state_index]
                state_gain = state_gain[:, state_index]
            # Compute the gain in terms of observed variables
            obs_gain = design @ state_gain
            # Get the news
            update_impacts = obs_gain @ update_forecasts_error
            # Squeeze if `t` argument used
            if t is not None:
                obs_gain = obs_gain[0]
                update_impacts = update_impacts[0]
        else:
            # No updates: all news-related outputs are None
            update_impacts = None
            update_forecasts = None
            update_realized = None
            update_forecasts_error = None
            obs_gain = None
        # Results
        out = SimpleNamespace(
            # update to forecast of impacted variables from news
            # = E[y^i | post] - E[y^i | revision] = weight @ news
            update_impacts=update_impacts,
            # update to forecast of variables of interest from revisions
            # = E[y^i | revision] - E[y^i | previous]
            revision_impacts=revision_impacts,
            # news = A = y^u - E[y^u | previous]
            news=update_forecasts_error,
            # revisions y^r(updated) - y^r(previous)
            revisions=revisions,
            # gain matrix = E[y A'] E[A A']^{-1}
            gain=obs_gain,
            # weights on observations for the smoothed signal
            revision_weights=revision_weights,
            # forecasts of the updated periods used to construct the news
            # = E[y^u | revised]
            update_forecasts=update_forecasts,
            # realizations of the updated periods used to construct the news
            # = y^u
            update_realized=update_realized,
            # revised observations of the periods that were revised
            # = y^r_{revised}
            revised=revised,
            # previous observations of the periods that were revised
            # = y^r_{previous}
            revised_prev=revised_prev,
            # previous forecast of the periods of interest, E[y^i | previous]
            prev_impacted_forecasts=prev_impacted_forecasts,
            # post. forecast of the periods of interest, E[y^i | post]
            post_impacted_forecasts=post_impacted_forecasts,
            # results object associated with the revision
            revision_results=revision_results,
            # list of (x, y) positions of revisions to endog
            revisions_ix=revisions_ix,
            # list of (x, y) positions of updates to endog
            updates_ix=updates_ix,
            # index of state variables used to compute impacts
            state_index=state_index)
        return out
    def smoothed_state_gain(self, updates_ix, t=None, start=None,
                            end=None, extend_kwargs=None):
        r"""
        Cov(\tilde \alpha_{t}, I) Var(I, I)^{-1}

        where I is a vector of forecast errors associated with
        `update_indices`.

        Parameters
        ----------
        updates_ix : list
            List of indices `(t, i)`, where `t` denotes a zero-indexed time
            location and `i` denotes a zero-indexed endog variable.
        t : int, optional
            A single zero-indexed period for which to compute the gain.
            Cannot be combined with `start` or `end`.
        start : int, optional
            First period (inclusive) for which to compute the gain.
            Default is the last in-sample period, `nobs - 1`.
        end : int, optional
            Last period (exclusive) for which to compute the gain.
            Default is `nobs`.
        extend_kwargs : dict, optional
            Updated time-varying system matrices for periods at or beyond
            `nobs`, keyed by matrix name (e.g. 'design', 'obs_cov').

        Returns
        -------
        gain : ndarray
            Gain matrices, shaped `(end - start, k_states, len(updates_ix))`,
            or `(k_states, len(updates_ix))` if the `t` argument was used.
        """
        # Handle `t`: shorthand for the single-period range [t, t + 1)
        if t is not None and (start is not None or end is not None):
            raise ValueError('Cannot specify both `t` and `start` or `end`.')
        if t is not None:
            start = t
            end = t + 1
        # Defaults: only the final in-sample period
        if start is None:
            start = self.nobs - 1
        if end is None:
            end = self.nobs
        if extend_kwargs is None:
            extend_kwargs = {}
        # Sanity checks
        if start < 0 or end < 0:
            raise ValueError('Negative `t`, `start`, or `end` is not allowed.')
        if end <= start:
            raise ValueError('`end` must be after `start`')
        # Dimensions
        n_periods = end - start
        n_updates = len(updates_ix)

        # Helper to get a system matrix that is possibly time-varying; for
        # periods at/after `nobs` the extension matrices must be supplied
        # through `extend_kwargs`.
        def get_mat(which, t):
            mat = getattr(self, which)
            if mat.shape[-1] > 1:
                if t < self.nobs:
                    out = mat[..., t]
                else:
                    if (which not in extend_kwargs or
                            extend_kwargs[which].shape[-1] <= t - self.nobs):
                        raise ValueError(f'Model has time-varying {which}'
                                         ' matrix, so an updated time-varying'
                                         ' matrix for the extension period is'
                                         ' required.')
                    out = extend_kwargs[which][..., t - self.nobs]
            else:
                # Time-invariant matrix: always use the single stored slice
                out = mat[..., 0]
            return out

        # Helper to get Cov(\tilde \alpha_{t}, I)
        def get_cov_state_revision(t):
            tmp1 = np.zeros((self.k_states, n_updates))
            for i in range(n_updates):
                t_i, k_i = updates_ix[i]
                acov = self.smoothed_state_autocovariance(
                    lag=t - t_i, t=t, extend_kwargs=extend_kwargs)
                Z_i = get_mat('design', t_i)
                tmp1[:, i:i + 1] = acov @ Z_i[k_i:k_i + 1].T
            return tmp1

        # Compute Cov(\tilde \alpha_{t}, I), one slice per requested period
        tmp1 = np.zeros((n_periods, self.k_states, n_updates))
        for s in range(start, end):
            tmp1[s - start] = get_cov_state_revision(s)

        # Compute Var(I); symmetric, so only the lower triangle is iterated
        tmp2 = np.zeros((n_updates, n_updates))
        for i in range(n_updates):
            t_i, k_i = updates_ix[i]
            for j in range(i + 1):
                t_j, k_j = updates_ix[j]
                Z_i = get_mat('design', t_i)
                Z_j = get_mat('design', t_j)
                acov = self.smoothed_state_autocovariance(
                    lag=t_i - t_j, t=t_i, extend_kwargs=extend_kwargs)
                tmp2[i, j] = tmp2[j, i] = (
                    Z_i[k_i:k_i + 1] @ acov @ Z_j[k_j:k_j + 1].T)
                # Forecast errors from the same period also share the
                # observation-disturbance covariance term
                if t_i == t_j:
                    H = get_mat('obs_cov', t_i)
                    if i == j:
                        tmp2[i, j] += H[k_i, k_j]
                    else:
                        tmp2[i, j] += H[k_i, k_j]
                        tmp2[j, i] += H[k_i, k_j]

        # Gain = Cov(state, I) @ Var(I)^{-1}
        gain = tmp1 @ np.linalg.inv(tmp2)
        # Squeeze out the period axis if the `t` argument was used
        if t is not None:
            gain = gain[0]

        return gain
def _get_smoothed_forecasts(self):
if self._smoothed_forecasts is None:
# Initialize empty arrays
self._smoothed_forecasts = np.zeros(self.forecasts.shape,
dtype=self.dtype)
self._smoothed_forecasts_error = (
np.zeros(self.forecasts_error.shape, dtype=self.dtype)
)
self._smoothed_forecasts_error_cov = (
np.zeros(self.forecasts_error_cov.shape, dtype=self.dtype)
)
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
self._smoothed_forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.smoothed_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
if self.nmissing[t] > 0:
self._smoothed_forecasts_error[:, t] = np.nan
self._smoothed_forecasts_error[mask, t] = (
self.endog[mask, t] - self._smoothed_forecasts[mask, t]
)
self._smoothed_forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.smoothed_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
return (
self._smoothed_forecasts,
self._smoothed_forecasts_error,
self._smoothed_forecasts_error_cov
)
@property
def smoothed_forecasts(self):
return self._get_smoothed_forecasts()[0]
@property
def smoothed_forecasts_error(self):
return self._get_smoothed_forecasts()[1]
@property
def smoothed_forecasts_error_cov(self):
return self._get_smoothed_forecasts()[2]
    def get_smoothed_decomposition(self, decomposition_of='smoothed_state',
                                   state_index=None):
        r"""
        Decompose smoothed output into contributions from observations

        Parameters
        ----------
        decomposition_of : {"smoothed_state", "smoothed_signal"}
            The object to perform a decomposition of. If it is set to
            "smoothed_state", then the elements of the smoothed state vector
            are decomposed into the contributions of each observation. If it
            is set to "smoothed_signal", then the predictions of the
            observation vector based on the smoothed state vector are
            decomposed. Default is "smoothed_state".
        state_index : array_like, optional
            An optional index specifying a subset of states to use when
            constructing the decomposition of the "smoothed_signal". For
            example, if `state_index=[0, 1]` is passed, then only the
            contributions of observed variables to the smoothed signal arising
            from the first two states will be returned. Note that if not all
            states are used, the contributions will not sum to the smoothed
            signal. Default is to use all states.

        Returns
        -------
        data_contributions : array
            Contributions of observations to the decomposed object. If the
            smoothed state is being decomposed, then `data_contributions` are
            shaped `(nobs, k_states, nobs, k_endog)`, where the
            `(t, m, j, p)`-th element is the contribution of the `p`-th
            observation at time `j` to the `m`-th state at time `t`. If the
            smoothed signal is being decomposed, then `data_contributions` are
            shaped `(nobs, k_endog, nobs, k_endog)`, where the
            `(t, k, j, p)`-th element is the contribution of the `p`-th
            observation at time `j` to the smoothed prediction of the `k`-th
            observation at time `t`.
        obs_intercept_contributions : array
            Contributions of the observation intercept to the decomposed
            object. If the smoothed state is being decomposed, then
            `obs_intercept_contributions` are shaped
            `(nobs, k_states, nobs, k_endog)`, where the `(t, m, j, p)`-th
            element is the contribution of the `p`-th observation intercept at
            time `j` to the `m`-th state at time `t`. If the smoothed signal
            is being decomposed, then `obs_intercept_contributions` are shaped
            `(nobs, k_endog, nobs, k_endog)`, where the `(t, k, j, p)`-th
            element is the contribution of the `p`-th observation at time `j`
            to the smoothed prediction of the `k`-th observation at time `t`.
        state_intercept_contributions : array
            Contributions of the state intercept to the decomposed object. If
            the smoothed state is being decomposed, then
            `state_intercept_contributions` are shaped
            `(nobs, k_states, nobs, k_states)`, where the `(t, m, j, l)`-th
            element is the contribution of the `l`-th state intercept at
            time `j` to the `m`-th state at time `t`. If the smoothed signal
            is being decomposed, then `state_intercept_contributions` are
            shaped `(nobs, k_endog, nobs, k_endog)`, where the
            `(t, k, j, l)`-th element is the contribution of the `l`-th
            state intercept at time `j` to the smoothed prediction of the
            `k`-th observation at time `t`.
        prior_contributions : array
            Contributions of the prior to the decomposed object. If the
            smoothed state is being decomposed, then `prior_contributions` are
            shaped `(nobs, k_states, k_states)`, where the `(t, m, l)`-th
            element is the contribution of the `l`-th element of the prior
            mean to the `m`-th state at time `t`. If the smoothed signal is
            being decomposed, then `prior_contributions` are shaped
            `(nobs, k_endog, k_states)`, where the `(t, k, l)`-th
            element is the contribution of the `l`-th element of the prior mean
            to the smoothed prediction of the `k`-th observation at time `t`.

        Notes
        -----
        Denote the smoothed state at time :math:`t` by :math:`\alpha_t`. Then
        the smoothed signal is :math:`Z_t \alpha_t`, where :math:`Z_t` is the
        design matrix operative at time :math:`t`.
        """
        if decomposition_of not in ['smoothed_state', 'smoothed_signal']:
            raise ValueError('Invalid value for `decomposition_of`. Must be'
                             ' one of "smoothed_state" or "smoothed_signal".')

        # Weights of each observation, each intercept, and the prior in the
        # smoothed state vector (computed by the smoother weight machinery)
        weights, state_intercept_weights, prior_weights = (
            tools._compute_smoothed_state_weights(
                self.model, compute_prior_weights=True, scale=self.scale))

        # Get state space objects
        ZT = self.model.design.T  # t, m, p
        dT = self.model.obs_intercept.T  # t, p
        cT = self.model.state_intercept.T  # t, m

        # Subset the states used for the impacts if applicable
        if decomposition_of == 'smoothed_signal' and state_index is not None:
            ZT = ZT[:, state_index, :]
            weights = weights[:, :, state_index]
            prior_weights = prior_weights[:, state_index, :]

        # Convert the weights in terms of smoothed signal
        # t, j, m, p, i
        if decomposition_of == 'smoothed_signal':
            # Multiplication gives: t, j, m, p * t, j, m, p, k
            # Sum along axis=2 gives: t, j, p, k
            # Transpose to: t, j, k, p (i.e. like t, j, m, p but with k instead
            # of m)
            weights = np.nansum(weights[..., None] * ZT[:, None, :, None, :],
                                axis=2).transpose(0, 1, 3, 2)
            # Multiplication gives: t, j, m, l * t, j, m, l, k
            # Sum along axis=2 gives: t, j, l, k
            # Transpose to: t, j, k, l (i.e. like t, j, m, p but with k instead
            # of m and l instead of p)
            state_intercept_weights = np.nansum(
                state_intercept_weights[..., None] * ZT[:, None, :, None, :],
                axis=2).transpose(0, 1, 3, 2)
            # Multiplication gives: t, m, l * t, m, l, k = t, m, l, k
            # Sum along axis=1 gives: t, l, k
            # Transpose to: t, k, l (i.e. like t, m, l but with k instead of m)
            prior_weights = np.nansum(
                prior_weights[..., None] * ZT[:, :, None, :],
                axis=1).transpose(0, 2, 1)

        # Contributions of observations: multiply weights by observations
        # Multiplication gives t, j, {m,k}, p
        data_contributions = weights * self.model.endog.T[None, :, None, :]
        # Transpose to: t, {m,k}, j, p
        data_contributions = data_contributions.transpose(0, 2, 1, 3)

        # Contributions of obs intercept: multiply data weights by obs
        # intercept (negated, since the intercept is subtracted from endog
        # when forming the smoothing errors)
        # Multiplication gives t, j, {m,k}, p
        obs_intercept_contributions = -weights * dT[None, :, None, :]
        # Transpose to: t, {m,k}, j, p
        obs_intercept_contributions = (
            obs_intercept_contributions.transpose(0, 2, 1, 3))

        # Contributions of state intercept: multiply state intercept weights
        # by state intercept
        # Multiplication gives t, j, {m,k}, l
        state_intercept_contributions = (
            state_intercept_weights * cT[None, :, None, :])
        # Transpose to: t, {m,k}, j, l
        state_intercept_contributions = (
            state_intercept_contributions.transpose(0, 2, 1, 3))

        # Contributions of prior: multiply weights by prior
        # Multiplication gives t, {m, k}, l
        prior_contributions = prior_weights * self.initial_state[None, None, :]

        return (data_contributions, obs_intercept_contributions,
                state_intercept_contributions, prior_contributions)
| bsd-3-clause | 5a35a23b2c9fe047603a03ab35ebac0a | 42.330097 | 79 | 0.580683 | 4.174011 | false | false | false | false |
statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | 1 | 197877 | """
Limited dependent variable and qualitative variables.
Includes binary outcomes, count data, (ordered) ordinal data and limited
dependent variables.
General References
--------------------
A.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`.
Cambridge, 1998
G.S. Maddala. `Limited-Dependent and Qualitative Variables in Econometrics`.
Cambridge, 1983.
W. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.
"""
# Public API of this module; other names are implementation details.
__all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial",
           "GeneralizedPoisson", "NegativeBinomialP", "CountModel"]
from statsmodels.compat.pandas import Appender
import warnings
import numpy as np
from pandas import MultiIndex, get_dummies
from scipy import special, stats
from scipy.special import digamma, gammaln, loggamma, polygamma
from scipy.stats import nbinom
from statsmodels.base.data import handle_data # for mnlogit
from statsmodels.base.l1_slsqp import fit_l1_slsqp
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.base._constraints import fit_constrained_wrap
import statsmodels.base._parameter_inference as pinfer
from statsmodels.base import _prediction_inference as pred
from statsmodels.distributions import genpoisson_p
import statsmodels.regression.linear_model as lm
from statsmodels.tools import data as data_tools, tools
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.numdiff import approx_fprime_cs
from statsmodels.tools.sm_exceptions import (
PerfectSeparationError,
SpecificationWarning,
)
try:
import cvxopt # noqa:F401
have_cvxopt = True
except ImportError:
have_cvxopt = False
# TODO: When we eventually get user-settable precision, we need to change
# this
FLOAT_EPS = np.finfo(float).eps
# Limit for exponentials to avoid overflow
EXP_UPPER_LIMIT = np.log(np.finfo(np.float64).max) - 1.0
# TODO: add options for the parameter covariance/variance
# ie., OIM, EIM, and BHHH see Green 21.4
_discrete_models_docs = """
"""
_discrete_results_docs = """
%(one_line_description)s
Parameters
----------
model : A DiscreteModel instance
params : array_like
The parameters of a fitted model.
hessian : array_like
The hessian of the fitted model.
scale : float
A scale parameter for the covariance matrix.
Attributes
----------
df_resid : float
See model definition.
df_model : float
See model definition.
llf : float
Value of the loglikelihood
%(extra_attr)s"""
_l1_results_attr = """ nnz_params : int
The number of nonzero parameters in the model. Train with
trim_params == True or else numerical error will distort this.
trimmed : bool array
trimmed[i] == True if the ith parameter was trimmed from the model."""
_get_start_params_null_docs = """
Compute one-step moment estimator for null (constant-only) model
This is a preliminary estimator used as start_params.
Returns
-------
params : ndarray
parameter estimate based one one-step moment matching
"""
_check_rank_doc = """
check_rank : bool
Check exog rank to determine model degrees of freedom. Default is
True. Setting to False reduces model initialization time when
exog.shape[1] is large.
"""
# helper for MNLogit (will be generally useful later)
def _numpy_to_dummies(endog):
if endog.ndim == 2 and endog.dtype.kind not in ["S", "O"]:
endog_dummies = endog
ynames = range(endog.shape[1])
else:
dummies = get_dummies(endog, drop_first=False)
ynames = {i: dummies.columns[i] for i in range(dummies.shape[1])}
endog_dummies = np.asarray(dummies, dtype=float)
return endog_dummies, ynames
return endog_dummies, ynames
def _pandas_to_dummies(endog):
if endog.ndim == 2:
if endog.shape[1] == 1:
yname = endog.columns[0]
endog_dummies = get_dummies(endog.iloc[:, 0])
else: # series
yname = 'y'
endog_dummies = endog
else:
yname = endog.name
endog_dummies = get_dummies(endog)
ynames = endog_dummies.columns.tolist()
return endog_dummies, ynames, yname
def _validate_l1_method(method):
"""
As of 0.10.0, the supported values for `method` in `fit_regularized`
are "l1" and "l1_cvxopt_cp". If an invalid value is passed, raise
with a helpful error message
Parameters
----------
method : str
Raises
------
ValueError
"""
if method not in ['l1', 'l1_cvxopt_cp']:
raise ValueError('`method` = {method} is not supported, use either '
'"l1" or "l1_cvxopt_cp"'.format(method=method))
#### Private Model Classes ####
class DiscreteModel(base.LikelihoodModel):
    """
    Abstract class for discrete choice models.

    This class does not do anything itself but lays out the methods and
    call signature expected of child classes in addition to those of
    statsmodels.model.LikelihoodModel.
    """

    def __init__(self, endog, exog, check_rank=True, **kwargs):
        # If False, skip the (potentially slow) rank computation on exog
        # in `initialize`.
        self._check_rank = check_rank
        super().__init__(endog, exog, **kwargs)
        # When True, the fitting callback raises PerfectSeparationError if
        # fitted probabilities exactly reproduce endog
        # (see `_check_perfect_pred`).
        self.raise_on_perfect_prediction = True
        # Number of parameters beyond the exog coefficients; subclasses with
        # extra shape parameters override this.
        self.k_extra = 0

    def initialize(self):
        """
        Initialize is called by
        statsmodels.model.LikelihoodModel.__init__
        and should contain any preprocessing that needs to be done for a model.
        """
        if self._check_rank:
            # assumes constant
            rank = tools.matrix_rank(self.exog, method="qr")
        else:
            # If rank check is skipped, assume full
            rank = self.exog.shape[1]
        self.df_model = float(rank - 1)
        self.df_resid = float(self.exog.shape[0] - rank)

    def cdf(self, X):
        """
        The cumulative distribution function of the model.
        """
        raise NotImplementedError

    def pdf(self, X):
        """
        The probability density (mass) function of the model.
        """
        raise NotImplementedError

    def _check_perfect_pred(self, params, *args):
        # Fitting callback: raise when the current parameters perfectly
        # separate the data (fitted probabilities equal endog exactly), in
        # which case the MLE is not identified.  Only the leading
        # exog-coefficient slice of `params` is used.
        endog = self.endog
        fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]]))
        if (self.raise_on_perfect_prediction and
                np.allclose(fittedvalues - endog, 0)):
            msg = "Perfect separation detected, results not available"
            raise PerfectSeparationError(msg)

    @Appender(base.LikelihoodModel.fit.__doc__)
    def fit(self, start_params=None, method='newton', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        """
        Fit the model using maximum likelihood.

        The rest of the docstring is from
        statsmodels.base.model.LikelihoodModel.fit
        """
        # Default callback enforces the perfect-separation check.
        if callback is None:
            callback = self._check_perfect_pred
        else:
            pass  # TODO: make a function factory to have multiple call-backs

        mlefit = super().fit(start_params=start_params,
                             method=method,
                             maxiter=maxiter,
                             full_output=full_output,
                             disp=disp,
                             callback=callback,
                             **kwargs)

        return mlefit  # It is up to subclasses to wrap results

    def fit_regularized(self, start_params=None, method='l1',
                        maxiter='defined_by_method', full_output=1, disp=True,
                        callback=None, alpha=0, trim_mode='auto',
                        auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,
                        qc_verbose=False, **kwargs):
        """
        Fit the model using a regularized maximum likelihood.

        The regularization method AND the solver used is determined by the
        argument method.

        Parameters
        ----------
        start_params : array_like, optional
            Initial guess of the solution for the loglikelihood maximization.
            The default is an array of zeros.
        method : 'l1' or 'l1_cvxopt_cp'
            See notes for details.
        maxiter : {int, 'defined_by_method'}
            Maximum number of iterations to perform.
            If 'defined_by_method', then use method defaults (see notes).
        full_output : bool
            Set to True to have all available output in the Results object's
            mle_retvals attribute. The output is dependent on the solver.
            See LikelihoodModelResults notes section for more information.
        disp : bool
            Set to True to print convergence messages.
        fargs : tuple
            Extra arguments passed to the likelihood function, i.e.,
            loglike(x,*args).
        callback : callable callback(xk)
            Called after each iteration, as callback(xk), where xk is the
            current parameter vector.
        retall : bool
            Set to True to return list of solutions at each iteration.
            Available in Results object's mle_retvals attribute.
        alpha : non-negative scalar or numpy array (same size as parameters)
            The weight multiplying the l1 penalty term.
        trim_mode : 'auto, 'size', or 'off'
            If not 'off', trim (set to zero) parameters that would have been
            zero if the solver reached the theoretical minimum.
            If 'auto', trim params using the Theory above.
            If 'size', trim params if they have very small absolute value.
        size_trim_tol : float or 'auto' (default = 'auto')
            Tolerance used when trim_mode == 'size'.
        auto_trim_tol : float
            Tolerance used when trim_mode == 'auto'.
        qc_tol : float
            Print warning and do not allow auto trim when (ii) (above) is
            violated by this much.
        qc_verbose : bool
            If true, print out a full QC report upon failure.
        **kwargs
            Additional keyword arguments used when fitting the model.

        Returns
        -------
        Results
            A results instance.

        Notes
        -----
        Using 'l1_cvxopt_cp' requires the cvxopt module.

        Extra parameters are not penalized if alpha is given as a scalar.
        An example is the shape parameter in NegativeBinomial `nb1` and `nb2`.

        Optional arguments for the solvers (available in Results.mle_settings)::

            'l1'
                acc : float (default 1e-6)
                    Requested accuracy as used by slsqp
            'l1_cvxopt_cp'
                abstol : float
                    absolute accuracy (default: 1e-7).
                reltol : float
                    relative accuracy (default: 1e-6).
                feastol : float
                    tolerance for feasibility conditions (default: 1e-7).
                refinement : int
                    number of iterative refinement steps when solving KKT
                    equations (default: 1).

        Optimization methodology

        With :math:`L` the negative log likelihood, we solve the convex but
        non-smooth problem

        .. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k|

        via the transformation to the smooth, convex, constrained problem
        in twice as many variables (adding the "added variables" :math:`u_k`)

        .. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k,

        subject to

        .. math:: -u_k \\leq \\beta_k \\leq u_k.

        With :math:`\\partial_k L` the derivative of :math:`L` in the
        :math:`k^{th}` parameter direction, theory dictates that, at the
        minimum, exactly one of two conditions holds:

        (i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0`
        (ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0`
        """
        _validate_l1_method(method)
        # Set attributes based on method
        cov_params_func = self.cov_params_func_l1

        ### Bundle up extra kwargs for the dictionary kwargs. These are
        ### passed through super(...).fit() as kwargs and unpacked at
        ### appropriate times
        alpha = np.array(alpha)
        assert alpha.min() >= 0
        try:
            kwargs['alpha'] = alpha
        except TypeError:
            # Defensive: `kwargs` from **kwargs is always a dict, so this
            # branch should be unreachable in practice.
            kwargs = dict(alpha=alpha)
        kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0])
        kwargs['trim_mode'] = trim_mode
        kwargs['size_trim_tol'] = size_trim_tol
        kwargs['auto_trim_tol'] = auto_trim_tol
        kwargs['qc_tol'] = qc_tol
        kwargs['qc_verbose'] = qc_verbose

        ### Define default keyword arguments to be passed to super(...).fit()
        if maxiter == 'defined_by_method':
            if method == 'l1':
                maxiter = 1000
            elif method == 'l1_cvxopt_cp':
                maxiter = 70

        ## Parameters to pass to super(...).fit()
        # For the 'extra' parameters, pass all that are available,
        # even if we know (at this point) we will only use one.
        extra_fit_funcs = {'l1': fit_l1_slsqp}
        if have_cvxopt and method == 'l1_cvxopt_cp':
            from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp
            extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp
        elif method.lower() == 'l1_cvxopt_cp':
            raise ValueError("Cannot use l1_cvxopt_cp as cvxopt "
                             "was not found (install it, or use method='l1' instead)")

        if callback is None:
            callback = self._check_perfect_pred
        else:
            pass  # make a function factory to have multiple call-backs

        mlefit = super().fit(start_params=start_params,
                             method=method,
                             maxiter=maxiter,
                             full_output=full_output,
                             disp=disp,
                             callback=callback,
                             extra_fit_funcs=extra_fit_funcs,
                             cov_params_func=cov_params_func,
                             **kwargs)

        return mlefit  # up to subclasses to wrap results

    def cov_params_func_l1(self, likelihood_model, xopt, retvals):
        """
        Computes cov_params on a reduced parameter space
        corresponding to the nonzero parameters resulting from the
        l1 regularized fit.

        Returns a full cov_params matrix, with entries corresponding
        to zero'd values set to np.nan.
        """
        H = likelihood_model.hessian(xopt)
        trimmed = retvals['trimmed']
        nz_idx = np.nonzero(~trimmed)[0]
        nnz_params = (~trimmed).sum()
        if nnz_params > 0:
            # Restrict the Hessian to the non-trimmed parameter subspace
            H_restricted = H[nz_idx[:, None], nz_idx]
            # Covariance estimate for the nonzero params
            H_restricted_inv = np.linalg.inv(-H_restricted)
        else:
            H_restricted_inv = np.zeros(0)
        # Trimmed entries are NaN (not zero) to signal "not estimated"
        cov_params = np.nan * np.ones(H.shape)
        cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv
        return cov_params

    def predict(self, params, exog=None, which="mean", linear=None):
        """
        Predict response variable of a model given exogenous variables.
        """
        raise NotImplementedError

    def _derivative_exog(self, params, exog=None, dummy_idx=None,
                         count_idx=None):
        """
        This should implement the derivative of the non-linear function
        """
        raise NotImplementedError

    def _derivative_exog_helper(self, margeff, params, exog, dummy_idx,
                                count_idx, transform):
        """
        Helper for _derivative_exog to wrap results appropriately
        """
        from .discrete_margins import _get_count_effects, _get_dummy_effects

        if count_idx is not None:
            margeff = _get_count_effects(margeff, exog, count_idx, transform,
                                         self, params)
        if dummy_idx is not None:
            margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
                                         self, params)

        return margeff
class BinaryModel(DiscreteModel):
    # If True, endog may be a continuous proportion in [0, 1] rather than
    # strictly 0/1 (set by subclasses).
    _continuous_ok = False

    def __init__(self, endog, exog, offset=None, check_rank=True, **kwargs):
        # unconditional check, requires no extra kwargs added by subclasses
        self._check_kwargs(kwargs)
        super().__init__(endog, exog, offset=offset, check_rank=check_rank,
                         **kwargs)
        if not issubclass(self.__class__, MultinomialModel):
            if not np.all((self.endog >= 0) & (self.endog <= 1)):
                raise ValueError("endog must be in the unit interval.")

        # Remove the offset attribute entirely when no offset was given, so
        # that `hasattr(self, 'offset')` can be used as the test downstream.
        if offset is None:
            delattr(self, 'offset')

        if (not self._continuous_ok and
                np.any(self.endog != np.round(self.endog))):
            raise ValueError("endog must be binary, either 0 or 1")

    def predict(self, params, exog=None, which="mean", linear=None,
                offset=None):
        """
        Predict response variable of a model given exogenous variables.

        Parameters
        ----------
        params : array_like
            Fitted parameters of the model.
        exog : array_like
            1d or 2d array of exogenous values. If not supplied, the
            whole exog attribute of the model is used.
        which : {'mean', 'linear', 'var'}, optional
            Statistic to predict. Default is 'mean'.

            - 'mean' returns the conditional expectation of endog E(y | x).
            - 'linear' returns the linear predictor of the mean function.
            - 'var' returns the estimated variance of endog implied by the
              model.

            Any other value raises ValueError.
        linear : bool
            If True, returns the linear predicted values. If False or None,
            then the statistic specified by ``which`` will be returned.

            .. deprecated: 0.14

               The ``linear` keyword is deprecated and will be removed,
               use ``which`` keyword instead.
        offset : array_like, optional
            Offset added to the linear predictor. Defaults to the model's
            offset when predicting on the model's own exog, otherwise zero.

        Returns
        -------
        array
            Fitted values at exog.
        """
        if linear is not None:
            msg = 'linear keyword is deprecated, use which="linear"'
            warnings.warn(msg, DeprecationWarning)
            if linear is True:
                which = "linear"

        # Use fit offset if appropriate
        if offset is None and exog is None and hasattr(self, 'offset'):
            offset = self.offset
        elif offset is None:
            offset = 0.

        if exog is None:
            exog = self.exog

        linpred = np.dot(exog, params) + offset
        if which == "mean":
            return self.cdf(linpred)
        elif which == "linear":
            return linpred
        if which == "var":
            # Bernoulli variance: mu * (1 - mu)
            mu = self.cdf(linpred)
            var_ = mu * (1 - mu)
            return var_
        else:
            raise ValueError('Only `which` is "mean", "linear" or "var" are'
                             ' available.')

    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
                        maxiter='defined_by_method', full_output=1, disp=1,
                        callback=None, alpha=0, trim_mode='auto',
                        auto_trim_tol=0.01, size_trim_tol=1e-4,
                        qc_tol=0.03, **kwargs):
        _validate_l1_method(method)

        bnryfit = super().fit_regularized(start_params=start_params,
                                          method=method,
                                          maxiter=maxiter,
                                          full_output=full_output,
                                          disp=disp,
                                          callback=callback,
                                          alpha=alpha,
                                          trim_mode=trim_mode,
                                          auto_trim_tol=auto_trim_tol,
                                          size_trim_tol=size_trim_tol,
                                          qc_tol=qc_tol,
                                          **kwargs)

        discretefit = L1BinaryResults(self, bnryfit)
        return L1BinaryResultsWrapper(discretefit)

    def fit_constrained(self, constraints, start_params=None, **fit_kwds):
        # Delegates to the shared constrained-fit wrapper; docstring is
        # attached from it below.
        res = fit_constrained_wrap(self, constraints, start_params=None,
                                   **fit_kwds)
        return res

    fit_constrained.__doc__ = fit_constrained_wrap.__doc__

    def _derivative_predict(self, params, exog=None, transform='dydx',
                            offset=None):
        """
        For computing marginal effects standard errors.

        This is used only in the case of discrete and count regressors to
        get the variance-covariance of the marginal effects. It returns
        [d F / d params] where F is the predict.

        Transform can be 'dydx' or 'eydx'. Checking is done in margeff
        computations for appropriate transform.
        """
        if exog is None:
            exog = self.exog
        linpred = self.predict(params, exog, offset=offset, which="linear")
        dF = self.pdf(linpred)[:, None] * exog
        if 'ey' in transform:
            # Semi-elasticity: scale by the predicted mean
            dF /= self.predict(params, exog, offset=offset)[:, None]
        return dF

    def _derivative_exog(self, params, exog=None, transform='dydx',
                         dummy_idx=None, count_idx=None, offset=None):
        """
        For computing marginal effects returns dF(XB) / dX where F(.) is
        the predicted probabilities

        transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.

        Not all of these make sense in the presence of discrete regressors,
        but checks are done in the results in get_margeff.
        """
        # Note: this form should be appropriate for
        # group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
        if exog is None:
            exog = self.exog
        linpred = self.predict(params, exog, offset=offset, which="linear")
        margeff = np.dot(self.pdf(linpred)[:, None],
                         params[None, :])

        if 'ex' in transform:
            margeff *= exog
        if 'ey' in transform:
            margeff /= self.predict(params, exog)[:, None]

        return self._derivative_exog_helper(margeff, params, exog,
                                            dummy_idx, count_idx, transform)

    def _deriv_mean_dparams(self, params):
        """
        Derivative of the expected endog with respect to the parameters.

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated

        Returns
        -------
        The value of the derivative of the expected endog with respect
        to the parameter vector.
        """
        # NOTE(review): `self.link` is not set in this class; presumably it is
        # provided by subclasses (e.g. Logit/Probit) — confirm before reuse.
        link = self.link
        lin_pred = self.predict(params, which="linear")
        idl = link.inverse_deriv(lin_pred)
        dmat = self.exog * idl[:, None]
        return dmat

    def get_distribution(self, params, exog=None, offset=None):
        """Get frozen instance of distribution based on predicted parameters.

        Parameters
        ----------
        params : array_like
            The parameters of the model.
        exog : ndarray, optional
            Explanatory variables for the main count model.
            If ``exog`` is None, then the data from the model will be used.
        offset : ndarray, optional
            Offset is added to the linear predictor of the mean function with
            coefficient equal to 1.
            Default is zero if exog is not None, and the model offset if exog
            is None.

        Returns
        -------
        Instance of frozen scipy distribution.
        """
        mu = self.predict(params, exog=exog, offset=offset)
        # distr arguments are column-shaped so the frozen distribution
        # broadcasts per-observation
        distr = stats.bernoulli(mu[:, None])
        return distr
class MultinomialModel(BinaryModel):
    def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
        """Convert endog (labels or an indicator array) to a dummy matrix,
        recording the mapping from column index to original category name in
        ``self._ynames_map`` and keeping the dummy matrix as ``self.wendog``.
        """
        if data_tools._is_using_ndarray_type(endog, None):
            endog_dummies, ynames = _numpy_to_dummies(endog)
            yname = 'y'
        elif data_tools._is_using_pandas(endog, None):
            endog_dummies, ynames, yname = _pandas_to_dummies(endog)
        else:
            endog = np.asarray(endog)
            endog_dummies, ynames = _numpy_to_dummies(endog)
            yname = 'y'

        # Normalize ynames to a dict mapping column index -> label
        if not isinstance(ynames, dict):
            ynames = dict(zip(range(endog_dummies.shape[1]), ynames))

        self._ynames_map = ynames
        data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs)
        data.ynames = yname  # overwrite this to single endog name
        data.orig_endog = endog
        # "wide" endog: the dummy-encoded observation matrix
        self.wendog = data.endog

        # repeating from upstream...
        # Move remaining kwargs from the data object onto the model instance.
        for key in kwargs:
            if key in ['design_info', 'formula']:  # leave attached to data
                continue
            try:
                setattr(self, key, data.__dict__.pop(key))
            except KeyError:
                pass

        return data
def initialize(self):
"""
Preprocesses the data for MNLogit.
"""
super().initialize()
# This is also a "whiten" method in other models (eg regression)
self.endog = self.endog.argmax(1) # turn it into an array of col idx
self.J = self.wendog.shape[1]
self.K = self.exog.shape[1]
self.df_model *= (self.J-1) # for each J - 1 equation.
self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1)
def predict(self, params, exog=None, which="mean", linear=None):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array_like
2d array of fitted parameters of the model. Should be in the
order returned from the model.
exog : array_like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used. If a 1d array is given
it assumed to be 1 row of exogenous variables. If you only have
one regressor and would like to do prediction, you must provide
a 2d array with shape[1] == 1.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Notes
-----
Column 0 is the base case, the rest conform to the rows of params
shifted up one for the base case.
"""
if linear is not None:
msg = 'linear keyword is deprecated, use which="linear"'
warnings.warn(msg, DeprecationWarning)
if linear is True:
which = "linear"
if exog is None: # do here to accommodate user-given exog
exog = self.exog
if exog.ndim == 1:
exog = exog[None]
pred = super().predict(params, exog, which=which)
if which == "linear":
pred = np.column_stack((np.zeros(len(exog)), pred))
return pred
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
callback = lambda x : None # placeholder until check_perfect_pred
# skip calling super to handle results from LikelihoodModel
mnfit = base.LikelihoodModel.fit(self, start_params = start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = MultinomialResults(self, mnfit)
return MultinomialResultsWrapper(mnfit)
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
mnfit = DiscreteModel.fit_regularized(
self, start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = L1MultinomialResults(self, mnfit)
return L1MultinomialResultsWrapper(mnfit)
    def _derivative_predict(self, params, exog=None, transform='dydx'):
        """
        For computing marginal effects standard errors.

        This is used only in the case of discrete and count regressors to
        get the variance-covariance of the marginal effects. It returns
        [d F / d params] where F is the predicted probabilities for each
        choice. dFdparams is of shape nobs x (J*K) x (J-1)*K.
        The zero derivatives for the base category are not included.

        Transform can be 'dydx' or 'eydx'. Checking is done in margeff
        computations for appropriate transform.
        """
        if exog is None:
            exog = self.exog
        if params.ndim == 1: # will get flatted from approx_fprime
            params = params.reshape(self.K, self.J-1, order='F')
        # exp of the linear predictor for each non-base alternative
        eXB = np.exp(np.dot(exog, params))
        # softmax denominator; the base category contributes the implicit 1
        sum_eXB = (1 + eXB.sum(1))[:,None]
        J = int(self.J)
        K = int(self.K)
        repeat_eXB = np.repeat(eXB, J, axis=1)
        X = np.tile(exog, J-1)
        # this is the derivative wrt the base level
        F0 = -repeat_eXB * X / sum_eXB ** 2
        # this is the derivative wrt the other levels when
        # dF_j / dParams_j (ie., own equation)
        #NOTE: this computes too much, any easy way to cut down?
        F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2)
        F1 = F1.transpose((1,0,2)) # put the nobs index first
        # other equation index
        other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool)
        # overwrite the cross-equation entries dF_j / dParams_m, j != m
        F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \
                            (sum_eXB**2)).transpose((1,0,2))[:, other_idx]
        dFdX = np.concatenate((F0[:, None,:], F1), axis=1)
        if 'ey' in transform:
            # semi-elasticity: rescale by 1 / predicted probability
            dFdX /= self.predict(params, exog)[:, :, None]
        return dFdX
    def _derivative_exog(self, params, exog=None, transform='dydx',
                         dummy_idx=None, count_idx=None):
        """
        For computing marginal effects returns dF(XB) / dX where F(.) is
        the predicted probabilities.

        transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
        Not all of these make sense in the presence of discrete regressors,
        but checks are done in the results in get_margeff.

        For Multinomial models the marginal effects are

        P[j] * (params[j] - sum_k P[k]*params[k])

        It is returned unshaped, so that each row contains each of the J
        equations. This makes it easier to take derivatives of this for
        standard errors. If you want average marginal effects you can do
        margeff.reshape(nobs, K, J, order='F).mean(0) and the marginal effects
        for choice J are in column J
        """
        J = int(self.J)  # number of alternative choices
        K = int(self.K)  # number of variables
        # Note: this form should be appropriate for
        # group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
        if exog is None:
            exog = self.exog
        if params.ndim == 1: # will get flatted from approx_fprime
            params = params.reshape(K, J-1, order='F')
        # prepend a zero column so the base category has explicit parameters
        zeroparams = np.c_[np.zeros(K), params] # add base in
        cdf = self.cdf(np.dot(exog, params))
        # probability-weighted average of the parameters, sum_k P[k]*params[k]
        # TODO: meaningful interpretation for `iterm`?
        iterm = np.array([cdf[:, [i]] * zeroparams[:, i]
                          for i in range(int(J))]).sum(0)
        margeff = np.array([cdf[:, [j]] * (zeroparams[:, j] - iterm)
                            for j in range(J)])
        # swap the axes to make sure margeff are in order nobs, K, J
        margeff = np.transpose(margeff, (1, 2, 0))
        if 'ex' in transform:
            margeff *= exog
        if 'ey' in transform:
            margeff /= self.predict(params, exog)[:,None,:]
        # zero-out / difference dummy and count regressors as requested
        margeff = self._derivative_exog_helper(margeff, params, exog,
                                               dummy_idx, count_idx, transform)
        return margeff.reshape(len(exog), -1, order='F')
def get_distribution(self, params, exog=None, offset=None):
"""get frozen instance of distribution
"""
raise NotImplementedError
class CountModel(DiscreteModel):
    """
    Base class for models of count data.

    Handles the optional ``offset`` and ``exposure`` terms: ``exposure`` is
    logged at construction and both enter the linear predictor with
    coefficient equal to 1 (see ``predict``). Subclasses provide the
    likelihood and results classes.
    """
    def __init__(self, endog, exog, offset=None, exposure=None, missing='none',
                 check_rank=True, **kwargs):
        self._check_kwargs(kwargs)
        super().__init__(endog, exog, check_rank, missing=missing,
                         offset=offset, exposure=exposure, **kwargs)
        if exposure is not None:
            # store log(exposure); all downstream code uses the log scale
            self.exposure = np.asarray(self.exposure)
            self.exposure = np.log(self.exposure)
        if offset is not None:
            self.offset = np.asarray(self.offset)
        self._check_inputs(self.offset, self.exposure, self.endog)
        # drop the attributes again when the user did not supply them, so
        # `hasattr`/`getattr` checks elsewhere see their true absence
        if offset is None:
            delattr(self, 'offset')
        if exposure is None:
            delattr(self, 'exposure')
        # promote dtype to float64 if needed
        dt = np.promote_types(self.endog.dtype, np.float64)
        self.endog = np.asarray(self.endog, dt)
        dt = np.promote_types(self.exog.dtype, np.float64)
        self.exog = np.asarray(self.exog, dt)
    def _check_inputs(self, offset, exposure, endog):
        # offset and exposure must align one-to-one with observations
        if offset is not None and offset.shape[0] != endog.shape[0]:
            raise ValueError("offset is not the same length as endog")
        if exposure is not None and exposure.shape[0] != endog.shape[0]:
            raise ValueError("exposure is not the same length as endog")
    def _get_init_kwds(self):
        # this is a temporary fixup because exposure has been transformed
        # see #1609; undo the log taken in __init__
        kwds = super()._get_init_kwds()
        if 'exposure' in kwds and kwds['exposure'] is not None:
            kwds['exposure'] = np.exp(kwds['exposure'])
        return kwds
    def _get_predict_arrays(self, exog=None, offset=None, exposure=None):
        # Normalize the (exog, offset, exposure) triple for prediction:
        # user-supplied exposure is logged here; with no exog the model's
        # stored (already logged) values are the defaults, otherwise 0.
        # convert extras if not None
        if exposure is not None:
            exposure = np.log(np.asarray(exposure))
        if offset is not None:
            offset = np.asarray(offset)
        # get defaults
        if exog is None:
            # prediction is in-sample
            exog = self.exog
            if exposure is None:
                exposure = getattr(self, 'exposure', 0)
            if offset is None:
                offset = getattr(self, 'offset', 0)
        else:
            # user specified
            exog = np.asarray(exog)
            if exposure is None:
                exposure = 0
            if offset is None:
                offset = 0
        return exog, offset, exposure
    def predict(self, params, exog=None, exposure=None, offset=None,
                which='mean', linear=None):
        """
        Predict response variable of a count model given exogenous variables

        Parameters
        ----------
        params : array_like
            Model parameters
        exog : array_like, optional
            Design / exogenous data. Is exog is None, model exog is used.
        exposure : array_like, optional
            Log(exposure) is added to the linear prediction with
            coefficient equal to 1. If exposure is not provided and exog
            is None, uses the model's exposure if present.  If not, uses
            0 as the default value.
        offset : array_like, optional
            Offset is added to the linear prediction with coefficient
            equal to 1. If offset is not provided and exog
            is None, uses the model's offset if present.  If not, uses
            0 as the default value.
        which : 'mean', 'linear', 'var', 'prob' (optional)
            Statitistic to predict. Default is 'mean'.

            - 'mean' returns the conditional expectation of endog E(y | x),
              i.e. exp of linear predictor.
            - 'linear' returns the linear predictor of the mean function.
            - 'var' variance of endog implied by the likelihood model
            - 'prob' predicted probabilities for counts.

        linear : bool
            The ``linear` keyword is deprecated and will be removed,
            use ``which`` keyword instead.
            If True, returns the linear predicted values.  If False or None,
            then the statistic specified by ``which`` will be returned.

        Notes
        -----
        If exposure is specified, then it will be logged by the method.
        The user does not need to log it first.
        """
        if linear is not None:
            msg = 'linear keyword is deprecated, use which="linear"'
            warnings.warn(msg, DeprecationWarning)
            if linear is True:
                which = "linear"
        # the following is copied from GLM predict (without family/link check)
        # Use fit offset if appropriate
        if offset is None and exog is None and hasattr(self, 'offset'):
            offset = self.offset
        elif offset is None:
            offset = 0.
        # Use fit exposure if appropriate
        if exposure is None and exog is None and hasattr(self, 'exposure'):
            # Already logged
            exposure = self.exposure
        elif exposure is None:
            exposure = 0.
        else:
            exposure = np.log(exposure)
        if exog is None:
            exog = self.exog
        # only the leading exog.shape[1] params are regression coefficients;
        # any extra (e.g. dispersion) parameters are ignored here
        fitted = np.dot(exog, params[:exog.shape[1]])
        linpred = fitted + exposure + offset
        if which == "mean":
            return np.exp(linpred)
        elif which.startswith("lin"):
            return linpred
        else:
            # NOTE(review): message wording — should read '"mean" or "linear"'
            raise ValueError('keyword which has to be "mean" and "linear"')
    def _derivative_predict(self, params, exog=None, transform='dydx'):
        """
        For computing marginal effects standard errors.

        This is used only in the case of discrete and count regressors to
        get the variance-covariance of the marginal effects. It returns
        [d F / d params] where F is the predict.

        Transform can be 'dydx' or 'eydx'. Checking is done in margeff
        computations for appropriate transform.
        """
        if exog is None:
            exog = self.exog
        #NOTE: this handles offset and exposure
        dF = self.predict(params, exog)[:,None] * exog
        if 'ey' in transform:
            dF /= self.predict(params, exog)[:,None]
        return dF
    def _derivative_exog(self, params, exog=None, transform="dydx",
                         dummy_idx=None, count_idx=None):
        """
        For computing marginal effects. These are the marginal effects
        d F(XB) / dX
        For the Poisson model F(XB) is the predicted counts rather than
        the probabilities.

        transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
        Not all of these make sense in the presence of discrete regressors,
        but checks are done in the results in get_margeff.
        """
        # group 3 poisson, nbreg, zip, zinb
        if exog is None:
            exog = self.exog
        # strip any extra (non-exog) parameters before the chain rule
        k_extra = getattr(self, 'k_extra', 0)
        params_exog = params if k_extra == 0 else params[:-k_extra]
        margeff = self.predict(params, exog)[:,None] * params_exog[None,:]
        if 'ex' in transform:
            margeff *= exog
        if 'ey' in transform:
            margeff /= self.predict(params, exog)[:,None]
        return self._derivative_exog_helper(margeff, params, exog,
                                            dummy_idx, count_idx, transform)
    def _deriv_mean_dparams(self, params):
        """
        Derivative of the expected endog with respect to the parameters.

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated

        Returns
        -------
        The value of the derivative of the expected endog with respect
        to the parameter vector.
        """
        from statsmodels.genmod.families import links
        # count models here use a log mean function
        link = links.Log()
        lin_pred = self.predict(params, which="linear")
        idl = link.inverse_deriv(lin_pred)
        dmat = self.exog * idl[:, None]
        if self.k_extra > 0:
            # mean does not depend on the extra (e.g. dispersion) params
            dmat_extra = np.zeros((dmat.shape[0], self.k_extra))
            dmat = np.column_stack((dmat, dmat_extra))
        return dmat
    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='newton', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        cntfit = super().fit(start_params=start_params,
                             method=method,
                             maxiter=maxiter,
                             full_output=full_output,
                             disp=disp,
                             callback=callback,
                             **kwargs)
        # wrap the generic fit in count-model specific results classes
        discretefit = CountResults(self, cntfit)
        return CountResultsWrapper(discretefit)
    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
            maxiter='defined_by_method', full_output=1, disp=1, callback=None,
            alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
            qc_tol=0.03, **kwargs):
        _validate_l1_method(method)
        cntfit = super().fit_regularized(start_params=start_params,
                                         method=method,
                                         maxiter=maxiter,
                                         full_output=full_output,
                                         disp=disp,
                                         callback=callback,
                                         alpha=alpha,
                                         trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
                                         size_trim_tol=size_trim_tol,
                                         qc_tol=qc_tol,
                                         **kwargs)
        # wrap the regularized fit in L1 count-model results classes
        discretefit = L1CountResults(self, cntfit)
        return L1CountResultsWrapper(discretefit)
# Public Model Classes
class Poisson(CountModel):
    __doc__ = """
    Poisson Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    """ % {'params': base._model_params_doc,
           'extra_params':
           """offset : array_like
        Offset is added to the linear prediction with coefficient equal to 1.
    exposure : array_like
        Log(exposure) is added to the linear prediction with coefficient
        equal to 1.
    """ + base._missing_param_doc + _check_rank_doc}
    @cache_readonly
    def family(self):
        # corresponding GLM family; local import, presumably to avoid an
        # import cycle — keep lazy
        from statsmodels.genmod import families
        return families.Poisson()
    def cdf(self, X):
        """
        Poisson model cumulative distribution function

        Parameters
        ----------
        X : array_like
            `X` is the linear predictor of the model.  See notes.

        Returns
        -------
        The value of the Poisson CDF at each point.

        Notes
        -----
        The CDF is defined as

        .. math:: \\exp\\left(-\\lambda\\right)\\sum_{i=0}^{y}\\frac{\\lambda^{i}}{i!}

        where :math:`\\lambda` assumes the loglinear model. I.e.,

        .. math:: \\ln\\lambda_{i}=X\\beta

        The parameter `X` is :math:`X\\beta` in the above formula.
        """
        y = self.endog
        return stats.poisson.cdf(y, np.exp(X))
    def pdf(self, X):
        """
        Poisson model probability mass function

        Parameters
        ----------
        X : array_like
            `X` is the linear predictor of the model.  See notes.

        Returns
        -------
        pdf : ndarray
            The value of the Poisson probability mass function, PMF, for each
            point of X.

        Notes
        -----
        The PMF is defined as

        .. math:: \\frac{e^{-\\lambda_{i}}\\lambda_{i}^{y_{i}}}{y_{i}!}

        where :math:`\\lambda` assumes the loglinear model. I.e.,

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta

        The parameter `X` is :math:`x_{i}\\beta` in the above formula.
        """
        y = self.endog
        # evaluate via logpmf for numerical stability, then exponentiate
        return np.exp(stats.poisson.logpmf(y, np.exp(X)))
    def loglike(self, params):
        """
        Loglikelihood of Poisson model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : float
            The log-likelihood function of the model evaluated at `params`.
            See notes.

        Notes
        -----
        .. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        XB = np.dot(self.exog, params) + offset + exposure
        endog = self.endog
        # clip the linear predictor before exp to avoid overflow to inf
        return np.sum(
            -np.exp(np.clip(XB, None, EXP_UPPER_LIMIT))
            + endog * XB
            - gammaln(endog + 1)
        )
    def loglikeobs(self, params):
        """
        Loglikelihood for observations of Poisson model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : array_like
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes

        Notes
        -----
        .. math:: \\ln L_{i}=\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]

        for observations :math:`i=1,...,n`
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        XB = np.dot(self.exog, params) + offset + exposure
        endog = self.endog
        #np.sum(stats.poisson.logpmf(endog, np.exp(XB)))
        return -np.exp(XB) + endog*XB - gammaln(endog+1)
    @Appender(_get_start_params_null_docs)
    def _get_start_params_null(self):
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        # constant-only model: the MLE of the mean is the (rate-adjusted)
        # sample average, and the constant is its log
        const = (self.endog / np.exp(offset + exposure)).mean()
        params = [np.log(const)]
        return params
    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='newton', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        if start_params is None and self.data.const_idx is not None:
            # use the null model to seed the constant, small values elsewhere
            # k_params or k_exog not available?
            start_params = 0.001 * np.ones(self.exog.shape[1])
            start_params[self.data.const_idx] = self._get_start_params_null()[0]
        # forward cov_type/cov_kwds to the results class when given
        kwds = {}
        if kwargs.get('cov_type') is not None:
            kwds['cov_type'] = kwargs.get('cov_type')
            kwds['cov_kwds'] = kwargs.get('cov_kwds', {})
        # skip CountModel.fit and go straight to DiscreteModel/LikelihoodModel
        cntfit = super(CountModel, self).fit(start_params=start_params,
                                             method=method,
                                             maxiter=maxiter,
                                             full_output=full_output,
                                             disp=disp,
                                             callback=callback,
                                             **kwargs)
        discretefit = PoissonResults(self, cntfit, **kwds)
        return PoissonResultsWrapper(discretefit)
    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
            maxiter='defined_by_method', full_output=1, disp=1, callback=None,
            alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
            qc_tol=0.03, **kwargs):
        _validate_l1_method(method)
        # skip CountModel.fit_regularized; wrap in Poisson-specific results
        cntfit = super(CountModel, self).fit_regularized(
            start_params=start_params, method=method, maxiter=maxiter,
            full_output=full_output, disp=disp, callback=callback,
            alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
            size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
        discretefit = L1PoissonResults(self, cntfit)
        return L1PoissonResultsWrapper(discretefit)
    def fit_constrained(self, constraints, start_params=None, **fit_kwds):
        """fit the model subject to linear equality constraints

        The constraints are of the form   `R params = q`
        where R is the constraint_matrix and q is the vector of
        constraint_values.

        The estimation creates a new model with transformed design matrix,
        exog, and converts the results back to the original parameterization.

        Parameters
        ----------
        constraints : formula expression or tuple
            If it is a tuple, then the constraint needs to be given by two
            arrays (constraint_matrix, constraint_value), i.e. (R, q).
            Otherwise, the constraints can be given as strings or list of
            strings.
            see t_test for details
        start_params : None or array_like
            starting values for the optimization. `start_params` needs to be
            given in the original parameter space and are internally
            transformed.
        **fit_kwds : keyword arguments
            fit_kwds are used in the optimization of the transformed model.

        Returns
        -------
        results : Results instance
        """
        #constraints = (R, q)
        # TODO: temporary trailing underscore to not overwrite the monkey
        # patched version
        # TODO: decide whether to move the imports
        from patsy import DesignInfo
        from statsmodels.base._constraints import (fit_constrained,
                                                   LinearConstraints)
        # same pattern as in base.LikelihoodModel.t_test
        lc = DesignInfo(self.exog_names).linear_constraint(constraints)
        R, q = lc.coefs, lc.constants
        # TODO: add start_params option, need access to tranformation
        #       fit_constrained needs to do the transformation
        params, cov, res_constr = fit_constrained(self, R, q,
                                                  start_params=start_params,
                                                  fit_kwds=fit_kwds)
        #create dummy results Instance, TODO: wire up properly
        # maxiter=0 fit gives a results object shell without re-estimating
        res = self.fit(maxiter=0, method='nm', disp=0,
                       warn_convergence=False)  # we get a wrapper back
        # copy optimizer bookkeeping from the constrained fit
        res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)
        res.mle_retvals['iterations'] = res_constr.mle_retvals.get(
            'iterations', np.nan)
        res.mle_retvals['converged'] = res_constr.mle_retvals['converged']
        res._results.params = params
        res._results.cov_params_default = cov
        cov_type = fit_kwds.get('cov_type', 'nonrobust')
        if cov_type != 'nonrobust':
            res._results.normalized_cov_params = cov  # assume scale=1
        else:
            res._results.normalized_cov_params = None
        # each constraint trades one model df for one residual df
        k_constr = len(q)
        res._results.df_resid += k_constr
        res._results.df_model -= k_constr
        res._results.constraints = LinearConstraints.from_patsy(lc)
        res._results.k_constr = k_constr
        res._results.results_constrained = res_constr
        return res
    def score(self, params):
        """
        Poisson model score (gradient) vector of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\lambda_{i}\\right)x_{i}

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X,params) + offset + exposure)
        return np.dot(self.endog - L, X)
    def score_obs(self, params):
        """
        Poisson model Jacobian of the log-likelihood for each observation

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : array_like
            The score vector (nobs, k_vars) of the model evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)x_{i}

        for observations :math:`i=1,...,n`

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X,params) + offset + exposure)
        return (self.endog - L)[:,None] * X
    def score_factor(self, params):
        """
        Poisson model score_factor for each observation

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : array_like
            The score factor (nobs, ) of the model evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)

        for observations :math:`i=1,...,n`

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X,params) + offset + exposure)
        # residual y - lambda; score_obs = score_factor[:, None] * exog
        return (self.endog - L)
    def hessian(self, params):
        """
        Poisson model Hessian matrix of the loglikelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (k_vars, k_vars)
            The Hessian, second derivative of loglikelihood function,
            evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}x_{i}x_{i}^{\\prime}

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X,params) + exposure + offset)
        return -np.dot(L*X.T, X)
    def hessian_factor(self, params):
        """
        Poisson model Hessian factor

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (nobs,)
            The Hessian factor, second derivative of loglikelihood function
            with respect to the linear predictor evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X,params) + exposure + offset)
        return -L
    def _deriv_score_obs_dendog(self, params, scale=None):
        """derivative of score_obs w.r.t. endog

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated
        scale : None or float
            If scale is None, then the default scale will be calculated.
            Default scale is defined by `self.scaletype` and set in fit.
            If scale is not None, then it is used as a fixed scale.

        Returns
        -------
        derivative : ndarray_2d
            The derivative of the score_obs with respect to endog. This
            can is given by `score_factor0[:, None] * exog` where
            `score_factor0` is the score_factor without the residual.
        """
        # score_obs is (endog - lambda) * exog, so d/d endog is just exog
        return self.exog
    def predict(self, params, exog=None, exposure=None, offset=None,
                which='mean', linear=None, y_values=None):
        """
        Predict response variable of a model given exogenous variables.

        Parameters
        ----------
        params : array_like
            2d array of fitted parameters of the model. Should be in the
            order returned from the model.
        exog : array_like, optional
            1d or 2d array of exogenous values.  If not supplied, then the
            exog attribute of the model is used. If a 1d array is given
            it assumed to be 1 row of exogenous variables. If you only have
            one regressor and would like to do prediction, you must provide
            a 2d array with shape[1] == 1.
        offset : array_like, optional
            Offset is added to the linear predictor with coefficient equal
            to 1.
            Default is zero if exog is not None, and the model offset if exog
            is None.
        exposure : array_like, optional
            Log(exposure) is added to the linear prediction with coefficient
            equal to 1.
            Default is one if exog is is not None, and is the model exposure
            if exog is None.
        which : 'mean', 'linear', 'var', 'prob' (optional)
            Statitistic to predict. Default is 'mean'.

            - 'mean' returns the conditional expectation of endog E(y | x),
              i.e. exp of linear predictor.
            - 'linear' returns the linear predictor of the mean function.
            - 'var' returns the estimated variance of endog implied by the
              model.
            - 'prob' return probabilities for counts from 0 to max(endog) or
              for y_values if those are provided.

        linear : bool
            The ``linear` keyword is deprecated and will be removed,
            use ``which`` keyword instead.
            If True, returns the linear predicted values.  If False or None,
            then the statistic specified by ``which`` will be returned.
        y_values : array_like
            Values of the random variable endog at which pmf is evaluated.
            Only used if ``which="prob"``
        """
        # Note docstring is reused by other count models
        if linear is not None:
            msg = 'linear keyword is deprecated, use which="linear"'
            warnings.warn(msg, DeprecationWarning)
            if linear is True:
                which = "linear"
        if which.startswith("lin"):
            which = "linear"
        if which in ["mean", "linear"]:
            # delegate to CountModel.predict for the common statistics
            return super().predict(params, exog=exog, exposure=exposure,
                                   offset=offset,
                                   which=which, linear=linear)
        # TODO: add full set of which
        elif which == "var":
            # Poisson: variance equals the mean
            mu = self.predict(params, exog=exog,
                              exposure=exposure, offset=offset,
                              )
            return mu
        elif which == "prob":
            if y_values is not None:
                y_values = np.atleast_2d(y_values)
            else:
                # default support: all counts observed in the sample
                y_values = np.atleast_2d(
                    np.arange(0, np.max(self.endog) + 1))
            mu = self.predict(params, exog=exog,
                              exposure=exposure, offset=offset,
                              )[:, None]
            # uses broadcasting
            return stats.poisson.pmf(y_values, mu)
        else:
            raise ValueError('Value of the `which` option is not recognized')
    def _prob_nonzero(self, mu, params=None):
        """Probability that count is not zero

        internal use in Censored model, will be refactored or removed
        """
        # 1 - P(y=0) = 1 - exp(-mu), computed stably via expm1
        prob_nz = - np.expm1(-mu)
        return prob_nz
    def _var(self, mu, params=None):
        """variance implied by the distribution

        internal use, will be refactored or removed
        """
        # equidispersion: Var(y) = E(y) = mu for Poisson
        return mu
    def get_distribution(self, params, exog=None, exposure=None, offset=None):
        """Get frozen instance of distribution based on predicted parameters.

        Parameters
        ----------
        params : array_like
            The parameters of the model.
        exog : ndarray, optional
            Explanatory variables for the main count model.
            If ``exog`` is None, then the data from the model will be used.
        offset : ndarray, optional
            Offset is added to the linear predictor of the mean function with
            coefficient equal to 1.
            Default is zero if exog is not None, and the model offset if exog
            is None.
        exposure : ndarray, optional
            Log(exposure) is added to the linear predictor  of the mean
            function with coefficient equal to 1. If exposure is specified,
            then it will be logged by the method. The user does not need to
            log it first.
            Default is one if exog is is not None, and it is the model exposure
            if exog is None.

        Returns
        -------
        Instance of frozen scipy distribution subclass.
        """
        mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)
        distr = stats.poisson(mu)
        return distr
class GeneralizedPoisson(CountModel):
__doc__ = """
Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
""" % {'params': base._model_params_doc,
'extra_params':
"""
p : scalar
P denotes parameterizations for GP regression. p=1 for GP-1 and
p=2 for GP-2. Default is p=1.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.""" + base._missing_param_doc + _check_rank_doc}
def __init__(self, endog, exog, p=1, offset=None,
exposure=None, missing='none', check_rank=True, **kwargs):
super().__init__(endog,
exog,
offset=offset,
exposure=exposure,
missing=missing,
check_rank=check_rank,
**kwargs)
self.parameterization = p - 1
self.exog_names.append('alpha')
self.k_extra = 1
self._transparams = False
def _get_init_kwds(self):
kwds = super()._get_init_kwds()
kwds['p'] = self.parameterization + 1
return kwds
def _get_exogs(self):
return (self.exog, None)
def loglike(self, params):
"""
Loglikelihood of Generalized Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\mu_{i}+(y_{i}-1)*ln(\\mu_{i}+
\\alpha*\\mu_{i}^{p-1}*y_{i})-y_{i}*ln(1+\\alpha*\\mu_{i}^{p-1})-
ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha*\\mu_{i}^{p-1}*y_{i}}{1+\\alpha*
\\mu_{i}^{p-1}}\\right]
"""
return np.sum(self.loglikeobs(params))
    def loglikeobs(self, params):
        """
        Loglikelihood for observations of Generalized Poisson model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : ndarray
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes

        Notes
        -----
        .. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\mu_{i}+(y_{i}-1)*ln(\\mu_{i}+
            \\alpha*\\mu_{i}^{p-1}*y_{i})-y_{i}*ln(1+\\alpha*\\mu_{i}^{p-1})-
            ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha*\\mu_{i}^{p-1}*y_{i}}{1+\\alpha*
            \\mu_{i}^{p-1}}\\right]

        for observations :math:`i=1,...,n`
        """
        if self._transparams:
            # optimizer works with log(alpha) to keep alpha positive
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        # remaining entries are the regression coefficients
        params = params[:-1]
        p = self.parameterization
        endog = self.endog
        mu = self.predict(params)
        mu_p = np.power(mu, p)
        a1 = 1 + alpha * mu_p
        a2 = mu + (a1 - 1) * endog
        # floor both terms to avoid log(0) / division by zero for extreme
        # parameter values during optimization
        a1 = np.maximum(1e-20, a1)
        a2 = np.maximum(1e-20, a2)
        return (np.log(mu) + (endog - 1) * np.log(a2) - endog *
                np.log(a1) - gammaln(endog + 1) - a2 / a1)
@Appender(_get_start_params_null_docs)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
def _estimate_dispersion(self, mu, resid, df_resid=None):
q = self.parameterization
if df_resid is None:
df_resid = resid.shape[0]
a = ((np.abs(resid) / np.sqrt(mu) - 1) * mu**(-q)).sum() / df_resid
return a
    @Appender(
        """
        use_transparams : bool
            This parameter enable internal transformation to impose
            non-negativity. True to enable. Default is False.
            use_transparams=True imposes the no underdispersion (alpha > 0)
            constraint. In case use_transparams=True and method="newton" or
            "ncg" transformation is ignored.
        """)
    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='bfgs', maxiter=35,
            full_output=1, disp=1, callback=None, use_transparams=False,
            cov_type='nonrobust', cov_kwds=None, use_t=None, optim_kwds_prelim=None,
            **kwargs):
        # optimize over log(alpha) to enforce alpha > 0, except for the
        # Hessian-based methods where the transformation is ignored
        if use_transparams and method not in ['newton', 'ncg']:
            self._transparams = True
        else:
            if use_transparams:
                warnings.warn('Parameter "use_transparams" is ignored',
                              RuntimeWarning)
            self._transparams = False
        if start_params is None:
            # warm start: fit a plain Poisson model first, then estimate the
            # dispersion from its residuals
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            kwds_prelim = {'disp': 0, 'skip_hessian': True,
                           'warn_convergence': False}
            if optim_kwds_prelim is not None:
                kwds_prelim.update(optim_kwds_prelim)
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                res_poi = mod_poi.fit(**kwds_prelim)
            start_params = res_poi.params
            a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
                                          df_resid=res_poi.df_resid)
            # keep the dispersion start away from strong underdispersion
            start_params = np.append(start_params, max(-0.1, a))
        if callback is None:
            # work around perfect separation callback #3895
            callback = lambda *x: x
        mlefit = super().fit(start_params=start_params,
                             maxiter=maxiter,
                             method=method,
                             disp=disp,
                             full_output=full_output,
                             callback=callback,
                             **kwargs)
        if optim_kwds_prelim is not None:
            mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim
        if use_transparams and method not in ["newton", "ncg"]:
            # back-transform: parameters were estimated on the log(alpha) scale
            self._transparams = False
            mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
        gpfit = GeneralizedPoissonResults(self, mlefit._results)
        result = GeneralizedPoissonResultsWrapper(gpfit)
        if cov_kwds is None:
            cov_kwds = {}
        result._get_robustcov_results(cov_type=cov_type,
                                      use_self=True, use_t=use_t, **cov_kwds)
        return result
    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
            maxiter='defined_by_method', full_output=1, disp=1, callback=None,
            alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
            qc_tol=0.03, **kwargs):
        # L1-regularized fit.  The dispersion parameter (last position) is
        # never penalized.
        _validate_l1_method(method)
        if np.size(alpha) == 1 and alpha != 0:
            # Broadcast a scalar penalty to all mean parameters, leaving the
            # dispersion parameter unpenalized.
            k_params = self.exog.shape[1] + self.k_extra
            alpha = alpha * np.ones(k_params)
            alpha[-1] = 0
        # Penalty vector for the preliminary Poisson fit, which has no
        # dispersion parameter.
        alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
        self._transparams = False
        if start_params is None:
            # Start from a regularized Poisson fit plus a small positive
            # dispersion start value.
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                start_params = mod_poi.fit_regularized(
                    start_params=start_params, method=method, maxiter=maxiter,
                    full_output=full_output, disp=0, callback=callback,
                    alpha=alpha_p, trim_mode=trim_mode,
                    auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
                    qc_tol=qc_tol, **kwargs).params
            start_params = np.append(start_params, 0.1)
        cntfit = super(CountModel, self).fit_regularized(
            start_params=start_params, method=method, maxiter=maxiter,
            full_output=full_output, disp=disp, callback=callback,
            alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
            size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
        discretefit = L1GeneralizedPoissonResults(self, cntfit)
        return L1GeneralizedPoissonResultsWrapper(discretefit)
    def score_obs(self, params):
        """
        Generalized Poisson model Jacobian of the log-likelihood for each
        observation.

        Parameters
        ----------
        params : array_like
            The parameters of the model; the last entry is the dispersion
            parameter (``ln(alpha)`` when ``self._transparams`` is True).

        Returns
        -------
        jac : ndarray, (nobs, k_vars + 1)
            The derivative of the loglikelihood for each observation; the
            last column is the derivative w.r.t. the dispersion parameter.
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = self.parameterization
        exog = self.exog
        y = self.endog[:,None]
        mu = self.predict(params)[:,None]
        mu_p = np.power(mu, p)
        # a1..a4 are recurring building blocks of the GP-P score; they also
        # appear in score_factor and hessian.
        a1 = 1 + alpha * mu_p
        a2 = mu + alpha * mu_p * y
        a3 = alpha * p * mu ** (p - 1)
        a4 = a3 * y
        dmudb = mu * exog
        dalpha = (mu_p * (y * ((y - 1) / a2 - 2 / a1) + a2 / a1**2))
        dparams = dmudb * (-a4 / a1 +
                           a3 * a2 / (a1 ** 2) +
                           (1 + a4) * ((y - 1) / a2 - 1 / a1) +
                           1 / mu)
        return np.concatenate((dparams, np.atleast_2d(dalpha)),
                              axis=1)
def score(self, params):
score = np.sum(self.score_obs(params), axis=0)
if self._transparams:
score[-1] == score[-1] ** 2
return score
else:
return score
def score_factor(self, params, endog=None):
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
y = self.endog if endog is None else endog
mu = self.predict(params)
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
a3 = alpha * p * mu ** (p - 1)
a4 = a3 * y
dmudb = mu
dalpha = (mu_p * (y * ((y - 1) / a2 - 2 / a1) + a2 / a1**2))
dparams = dmudb * (-a4 / a1 +
a3 * a2 / (a1 ** 2) +
(1 + a4) * ((y - 1) / a2 - 1 / a1) +
1 / mu)
return dparams, dalpha
def _score_p(self, params):
"""
Generalized Poisson model derivative of the log-likelihood by p-parameter
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
dldp : float
dldp is first derivative of the loglikelihood function,
evaluated at `p-parameter`.
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
dp = np.sum((np.log(mu) * ((a2 - mu) * ((y - 1) / a2 - 2 / a1) +
(a1 - 1) * a2 / a1 ** 2)))
return dp
    def hessian(self, params):
        """
        Generalized Poisson model Hessian matrix of the loglikelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (k_vars, k_vars)
            The Hessian, second derivative of loglikelihood function,
            evaluated at `params`
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = self.parameterization
        exog = self.exog
        y = self.endog[:,None]
        mu = self.predict(params)[:,None]
        mu_p = np.power(mu, p)
        # a1..a5 are the recurring building blocks of the GP-P derivatives;
        # compare with score_obs / hessian_factor.
        a1 = 1 + alpha * mu_p
        a2 = mu + alpha * mu_p * y
        a3 = alpha * p * mu ** (p - 1)
        a4 = a3 * y
        a5 = p * mu ** (p - 1)
        dmudb = mu * exog
        # for dl/dparams dparams
        # Fill only the lower triangle of the (beta, beta) block, then mirror
        # it into the upper triangle below.
        dim = exog.shape[1]
        hess_arr = np.empty((dim+1,dim+1))
        for i in range(dim):
            for j in range(i + 1):
                hess_arr[i,j] = np.sum(mu * exog[:,i,None] * exog[:,j,None] *
                                       (mu * (a3 * a4 / a1**2 -
                                              2 * a3**2 * a2 / a1**3 +
                                              2 * a3 * (a4 + 1) / a1**2 -
                                              a4 * p / (mu * a1) +
                                              a3 * p * a2 / (mu * a1**2) +
                                              (y - 1) * a4 * (p - 1) / (a2 * mu) -
                                              (y - 1) * (1 + a4)**2 / a2**2 -
                                              a4 * (p - 1) / (a1 * mu)) +
                                        ((y - 1) * (1 + a4) / a2 -
                                         (1 + a4) / a1)), axis=0)
        tri_idx = np.triu_indices(dim, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]
        # for dl/dparams dalpha: cross derivatives fill the last row/column.
        dldpda = np.sum((2 * a4 * mu_p / a1**2 -
                         2 * a3 * mu_p * a2 / a1**3 -
                         mu_p * y * (y - 1) * (1 + a4) / a2**2 +
                         mu_p * (1 + a4) / a1**2 +
                         a5 * y * (y - 1) / a2 -
                         2 * a5 * y / a1 +
                         a5 * a2 / a1**2) * dmudb,
                        axis=0)
        hess_arr[-1,:-1] = dldpda
        hess_arr[:-1,-1] = dldpda
        # for dl/dalpha dalpha
        dldada = mu_p**2 * (3 * y / a1**2 -
                            (y / a2)**2. * (y - 1) -
                            2 * a2 / a1**3)
        hess_arr[-1,-1] = dldada.sum()
        return hess_arr
    def hessian_factor(self, params):
        """
        Generalized Poisson model Hessian factors of the loglikelihood

        Parameters
        ----------
        params : array-like
            The parameters of the model

        Returns
        -------
        dbb, dba, daa : ndarrays, each (nobs,)
            Hessian factors: second derivative of the loglikelihood with
            respect to the linear predictor (``dbb``), the cross derivative
            w.r.t. linear predictor and dispersion parameter (``dba``), and
            the second derivative w.r.t. the dispersion parameter (``daa``),
            all evaluated at `params`.
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = self.parameterization
        y = self.endog
        mu = self.predict(params)
        mu_p = np.power(mu, p)
        # a1..a5 are the same recurring building blocks used by hessian();
        # this method works with 1-D arrays (no exog outer products).
        a1 = 1 + alpha * mu_p
        a2 = mu + alpha * mu_p * y
        a3 = alpha * p * mu ** (p - 1)
        a4 = a3 * y
        a5 = p * mu ** (p - 1)
        dmudb = mu
        # second derivative w.r.t. the linear predictor
        dbb = mu * (
            mu * (a3 * a4 / a1**2 -
                  2 * a3**2 * a2 / a1**3 +
                  2 * a3 * (a4 + 1) / a1**2 -
                  a4 * p / (mu * a1) +
                  a3 * p * a2 / (mu * a1**2) +
                  a4 / (mu * a1) -
                  a3 * a2 / (mu * a1**2) +
                  (y - 1) * a4 * (p - 1) / (a2 * mu) -
                  (y - 1) * (1 + a4)**2 / a2**2 -
                  a4 * (p - 1) / (a1 * mu) -
                  1 / mu**2) +
            (-a4 / a1 +
             a3 * a2 / a1**2 +
             (y - 1) * (1 + a4) / a2 -
             (1 + a4) / a1 +
             1 / mu))
        # for dl/dlinpred dalpha
        dba = ((2 * a4 * mu_p / a1**2 -
                2 * a3 * mu_p * a2 / a1**3 -
                mu_p * y * (y - 1) * (1 + a4) / a2**2 +
                mu_p * (1 + a4) / a1**2 +
                a5 * y * (y - 1) / a2 -
                2 * a5 * y / a1 +
                a5 * a2 / a1**2) * dmudb)
        # for dl/dalpha dalpha
        daa = mu_p**2 * (3 * y / a1**2 -
                         (y / a2)**2. * (y - 1) -
                         2 * a2 / a1**3)
        return dbb, dba, daa
    @Appender(Poisson.predict.__doc__)
    def predict(self, params, exog=None, exposure=None, offset=None,
                which='mean', y_values=None):
        # Dispatch on ``which``: 'mean', 'linear', 'var' or 'prob'.
        if exog is None:
            exog = self.exog
        if exposure is None:
            exposure = getattr(self, 'exposure', 0)
        elif exposure != 0:
            # NOTE(review): this comparison assumes a scalar ``exposure``;
            # an array argument would make the truth value ambiguous --
            # confirm expected caller usage.
            exposure = np.log(exposure)
        if offset is None:
            offset = getattr(self, 'offset', 0)
        # Only the first exog.shape[1] params belong to the mean equation;
        # the trailing entry is the dispersion parameter.
        fitted = np.dot(exog, params[:exog.shape[1]])
        linpred = fitted + exposure + offset
        if which == 'mean':
            return np.exp(linpred)
        elif which == 'linear':
            return linpred
        elif which == 'var':
            mean = np.exp(linpred)
            alpha = params[-1]
            pm1 = self.parameterization  # `p - 1` in GPP
            var_ = mean * (1 + alpha * mean**pm1)**2
            return var_
        elif which == 'prob':
            # Probability mass over ``y_values`` (defaults to 0..max(endog)).
            if y_values is None:
                y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
            # NOTE(review): ``exposure`` may already be log-transformed at
            # this point, and the recursive call logs a nonzero exposure
            # again -- looks like a possible double transform; confirm.
            mu = self.predict(params, exog=exog, exposure=exposure,
                              offset=offset)[:, None]
            return genpoisson_p.pmf(y_values, mu, params[-1],
                                    self.parameterization + 1)
        else:
            raise ValueError('keyword \'which\' not recognized')
    def _deriv_score_obs_dendog(self, params):
        """derivative of score_obs w.r.t. endog

        Computed by complex-step differentiation of `score_factor` in the
        endog argument.

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated

        Returns
        -------
        derivative : ndarray_2d
            The derivative of the score_obs with respect to endog.
        """
        # code duplication with NegativeBinomialP
        from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
        def f(y):
            # score_factor expects a 1-D endog; drop the trailing axis
            # added for the complex-step helper.
            if y.ndim == 2 and y.shape[1] == 1:
                y = y[:, 0]
            sf = self.score_factor(params, endog=y)
            return np.column_stack(sf)
        dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
        # deriv is 2d vector
        d1 = dsf[:, :1] * self.exog
        d2 = dsf[:, 1:2]
        return np.column_stack((d1, d2))
def _var(self, mu, params=None):
"""variance implied by the distribution
internal use, will be refactored or removed
"""
alpha = params[-1]
pm1 = self.parameterization # `p-1` in GPP
var_ = mu * (1 + alpha * mu**pm1)**2
return var_
def _prob_nonzero(self, mu, params):
"""Probability that count is not zero
internal use in Censored model, will be refactored or removed
"""
alpha = params[-1]
pm1 = self.parameterization # p-1 in GPP
prob_zero = np.exp(- mu / (1 + alpha * mu**pm1))
prob_nz = 1 - prob_zero
return prob_nz
    @Appender(Poisson.get_distribution.__doc__)
    def get_distribution(self, params, exog=None, exposure=None, offset=None):
        """get frozen instance of distribution
        """
        mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)
        # ``self.parameterization`` stores ``p - 1``; ``genpoisson_p``
        # expects the power ``p`` itself.
        p = self.parameterization + 1
        distr = genpoisson_p(mu[:, None], params[-1], p)
        return distr
class Logit(BinaryModel):
    __doc__ = """
    Logit Model

    %(params)s
    offset : array_like
        Offset is added to the linear prediction with coefficient equal to 1.
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    """ % {'params': base._model_params_doc,
           'extra_params': base._missing_param_doc + _check_rank_doc}

    # NOTE(review): flag read by the base class; presumably allows
    # continuous responses in (0, 1) rather than strictly binary endog --
    # confirm against the BinaryModel checks.
    _continuous_ok = True

    @cache_readonly
    def link(self):
        # GLM-style link object corresponding to this model.
        from statsmodels.genmod.families import links
        link = links.Logit()
        return link

    def cdf(self, X):
        """
        The logistic cumulative distribution function

        Parameters
        ----------
        X : array_like
            `X` is the linear predictor of the logit model. See notes.

        Returns
        -------
        1/(1 + exp(-X))

        Notes
        -----
        In the logit model,

        .. math:: \\Lambda\\left(x^{\\prime}\\beta\\right)=
            \\text{Prob}\\left(Y=1|x\\right)=
            \\frac{e^{x^{\\prime}\\beta}}{1+e^{x^{\\prime}\\beta}}
        """
        X = np.asarray(X)
        return 1/(1+np.exp(-X))

    def pdf(self, X):
        """
        The logistic probability density function

        Parameters
        ----------
        X : array_like
            `X` is the linear predictor of the logit model. See notes.

        Returns
        -------
        pdf : ndarray
            The value of the Logit probability mass function, PMF, for each
            point of X. ``np.exp(-X)/(1+np.exp(-X))**2``

        Notes
        -----
        In the logit model,

        .. math:: \\lambda\\left(x^{\\prime}\\beta\\right)=\\frac{e^{-x^{\\prime}\\beta}}{\\left(1+e^{-x^{\\prime}\\beta}\\right)^{2}}
        """
        X = np.asarray(X)
        return np.exp(-X)/(1+np.exp(-X))**2

    @cache_readonly
    def family(self):
        # GLM family equivalent of this model.
        from statsmodels.genmod import families
        return families.Binomial()

    def loglike(self, params):
        """
        Log-likelihood of logit model.

        Parameters
        ----------
        params : array_like
            The parameters of the logit model.

        Returns
        -------
        loglike : float
            The log-likelihood function of the model evaluated at `params`.
            See notes.

        Notes
        -----
        .. math::

           \\ln L=\\sum_{i}\\ln\\Lambda
           \\left(q_{i}x_{i}^{\\prime}\\beta\\right)

        Where :math:`q=2y-1`. This simplification comes from the fact that the
        logistic distribution is symmetric.
        """
        q = 2*self.endog - 1
        linpred = self.predict(params, which="linear")
        return np.sum(np.log(self.cdf(q * linpred)))

    def loglikeobs(self, params):
        """
        Log-likelihood of logit model for each observation.

        Parameters
        ----------
        params : array_like
            The parameters of the logit model.

        Returns
        -------
        loglike : ndarray
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes

        Notes
        -----
        .. math::

           \\ln L=\\sum_{i}\\ln\\Lambda
           \\left(q_{i}x_{i}^{\\prime}\\beta\\right)

        for observations :math:`i=1,...,n`

        where :math:`q=2y-1`. This simplification comes from the fact that the
        logistic distribution is symmetric.
        """
        q = 2*self.endog - 1
        linpred = self.predict(params, which="linear")
        return np.log(self.cdf(q * linpred))

    def score(self, params):
        """
        Logit model score (gradient) vector of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
        """
        y = self.endog
        X = self.exog
        fitted = self.predict(params)
        return np.dot(y - fitted, X)

    def score_obs(self, params):
        """
        Logit model Jacobian of the log-likelihood for each observation

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        jac : array_like
            The derivative of the loglikelihood for each observation evaluated
            at `params`.

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\Lambda_{i}\\right)x_{i}

        for observations :math:`i=1,...,n`
        """
        y = self.endog
        X = self.exog
        fitted = self.predict(params)
        return (y - fitted)[:,None] * X

    def score_factor(self, params):
        """
        Logit model derivative of the log-likelihood with respect to linpred.

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score_factor : array_like
            The derivative of the loglikelihood for each observation evaluated
            at `params`.

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)

        for observations :math:`i=1,...,n`

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        y = self.endog
        fitted = self.predict(params)
        return (y - fitted)

    def hessian(self, params):
        """
        Logit model Hessian matrix of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (k_vars, k_vars)
            The Hessian, second derivative of loglikelihood function,
            evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i}\\Lambda_{i}\\left(1-\\Lambda_{i}\\right)x_{i}x_{i}^{\\prime}
        """
        X = self.exog
        L = self.predict(params)
        # L * (1 - L) is the logistic variance weight Lambda_i(1 - Lambda_i).
        return -np.dot(L*(1-L)*X.T,X)

    def hessian_factor(self, params):
        """
        Logit model Hessian factor

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (nobs,)
            The Hessian factor, second derivative of loglikelihood function
            with respect to the linear predictor evaluated at `params`
        """
        L = self.predict(params)
        return -L * (1 - L)

    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='newton', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        # Delegate the optimization to the base class, then wrap the raw
        # results in Logit-specific results/wrapper classes.
        bnryfit = super().fit(start_params=start_params,
                              method=method,
                              maxiter=maxiter,
                              full_output=full_output,
                              disp=disp,
                              callback=callback,
                              **kwargs)
        discretefit = LogitResults(self, bnryfit)
        return BinaryResultsWrapper(discretefit)

    def _deriv_score_obs_dendog(self, params):
        """derivative of score_obs w.r.t. endog

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated

        Returns
        -------
        derivative : ndarray_2d
            The derivative of the score_obs with respect to endog. This
            can is given by `score_factor0[:, None] * exog` where
            `score_factor0` is the score_factor without the residual.
        """
        # For the logit, score_obs = (y - Lambda) * x, so d/dy is simply x.
        return self.exog
class Probit(BinaryModel):
    __doc__ = """
    Probit Model

    %(params)s
    offset : array_like
        Offset is added to the linear prediction with coefficient equal to 1.
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    """ % {'params': base._model_params_doc,
           'extra_params': base._missing_param_doc + _check_rank_doc}

    @cache_readonly
    def link(self):
        # GLM-style link object corresponding to this model.
        from statsmodels.genmod.families import links
        link = links.probit()
        return link

    def cdf(self, X):
        """
        Probit (Normal) cumulative distribution function

        Parameters
        ----------
        X : array_like
            The linear predictor of the model (XB).

        Returns
        -------
        cdf : ndarray
            The cdf evaluated at `X`.

        Notes
        -----
        This function is just an alias for scipy.stats.norm.cdf
        """
        # Calls scipy's private ``_cdf`` -- presumably to skip argument
        # validation overhead.  NOTE(review): relies on scipy internals.
        return stats.norm._cdf(X)

    def pdf(self, X):
        """
        Probit (Normal) probability density function

        Parameters
        ----------
        X : array_like
            The linear predictor of the model (XB).

        Returns
        -------
        pdf : ndarray
            The value of the normal density function for each point of X.

        Notes
        -----
        This function is just an alias for scipy.stats.norm.pdf
        """
        X = np.asarray(X)
        # Private scipy fast path; see the note in ``cdf``.
        return stats.norm._pdf(X)

    def loglike(self, params):
        """
        Log-likelihood of probit model (i.e., the normal distribution).

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : float
            The log-likelihood function of the model evaluated at `params`.
            See notes.

        Notes
        -----
        .. math:: \\ln L=\\sum_{i}\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)

        Where :math:`q=2y-1`. This simplification comes from the fact that the
        normal distribution is symmetric.
        """
        q = 2*self.endog - 1
        linpred = self.predict(params, which="linear")
        # Clip the cdf away from 0 to avoid log(0) for extreme predictors.
        return np.sum(np.log(np.clip(self.cdf(q * linpred), FLOAT_EPS, 1)))

    def loglikeobs(self, params):
        """
        Log-likelihood of probit model for each observation

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : array_like
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes

        Notes
        -----
        .. math:: \\ln L_{i}=\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)

        for observations :math:`i=1,...,n`

        where :math:`q=2y-1`. This simplification comes from the fact that the
        normal distribution is symmetric.
        """
        q = 2*self.endog - 1
        linpred = self.predict(params, which="linear")
        return np.log(np.clip(self.cdf(q*linpred), FLOAT_EPS, 1))

    def score(self, params):
        """
        Probit model score (gradient) vector

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}

        Where :math:`q=2y-1`. This simplification comes from the fact that the
        normal distribution is symmetric.
        """
        y = self.endog
        X = self.exog
        XB = self.predict(params, which="linear")
        q = 2*y - 1
        # clip to get rid of invalid divide complaint
        L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
        return np.dot(L,X)

    def score_obs(self, params):
        """
        Probit model Jacobian for each observation

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        jac : array_like
            The derivative of the loglikelihood for each observation evaluated
            at `params`.

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}

        for observations :math:`i=1,...,n`

        Where :math:`q=2y-1`. This simplification comes from the fact that the
        normal distribution is symmetric.
        """
        y = self.endog
        X = self.exog
        XB = self.predict(params, which="linear")
        q = 2*y - 1
        # clip to get rid of invalid divide complaint
        L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
        return L[:,None] * X

    def score_factor(self, params):
        """
        Probit model Jacobian for each observation

        Parameters
        ----------
        params : array-like
            The parameters of the model

        Returns
        -------
        score_factor : array_like (nobs,)
            The derivative of the loglikelihood function for each observation
            with respect to linear predictor evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}

        for observations :math:`i=1,...,n`

        Where :math:`q=2y-1`. This simplification comes from the fact that the
        normal distribution is symmetric.
        """
        y = self.endog
        XB = self.predict(params, which="linear")
        q = 2*y - 1
        # clip to get rid of invalid divide complaint
        L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
        return L

    def hessian(self, params):
        """
        Probit model Hessian matrix of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (k_vars, k_vars)
            The Hessian, second derivative of loglikelihood function,
            evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime}

        where

        .. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}

        and :math:`q=2y-1`
        """
        X = self.exog
        XB = self.predict(params, which="linear")
        q = 2*self.endog - 1
        # NOTE(review): unlike score()/score_obs(), the cdf is not clipped
        # here, so extreme linear predictors can yield non-finite values.
        L = q*self.pdf(q*XB)/self.cdf(q*XB)
        return np.dot(-L*(L+XB)*X.T,X)

    def hessian_factor(self, params):
        """
        Probit model Hessian factor of the log-likelihood

        Parameters
        ----------
        params : array-like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (nobs,)
            The Hessian factor, second derivative of loglikelihood function
            with respect to linear predictor evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime}

        where

        .. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}

        and :math:`q=2y-1`
        """
        XB = self.predict(params, which="linear")
        q = 2 * self.endog - 1
        # See the clipping note in ``hessian``.
        L = q * self.pdf(q * XB) / self.cdf(q * XB)
        return -L * (L + XB)

    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='newton', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        # Delegate the optimization to the base class, then wrap the raw
        # results in Probit-specific results/wrapper classes.
        bnryfit = super().fit(start_params=start_params,
                              method=method,
                              maxiter=maxiter,
                              full_output=full_output,
                              disp=disp,
                              callback=callback,
                              **kwargs)
        discretefit = ProbitResults(self, bnryfit)
        return BinaryResultsWrapper(discretefit)

    def _deriv_score_obs_dendog(self, params):
        """derivative of score_obs w.r.t. endog

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated

        Returns
        -------
        derivative : ndarray_2d
            The derivative of the score_obs with respect to endog. This
            can is given by `score_factor0[:, None] * exog` where
            `score_factor0` is the score_factor without the residual.
        """
        linpred = self.predict(params, which="linear")
        pdf_ = self.pdf(linpred)
        # clip to get rid of invalid divide complaint
        cdf_ = np.clip(self.cdf(linpred), FLOAT_EPS, 1 - FLOAT_EPS)
        deriv = pdf_ / cdf_ / (1 - cdf_)  # deriv factor
        return deriv[:, None] * self.exog
class MNLogit(MultinomialModel):
    __doc__ = """
    Multinomial Logit Model

    Parameters
    ----------
    endog : array_like
        `endog` is an 1-d vector of the endogenous response. `endog` can
        contain strings, ints, or floats or may be a pandas Categorical Series.
        Note that if it contains strings, every distinct string will be a
        category. No stripping of whitespace is done.
    exog : array_like
        A nobs x k array where `nobs` is the number of observations and `k`
        is the number of regressors. An intercept is not included by default
        and should be added by the user. See `statsmodels.tools.add_constant`.
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    J : float
        The number of choices for the endogenous variable. Note that this
        is zero-indexed.
    K : float
        The actual number of parameters for the exogenous design. Includes
        the constant if the design has one.
    names : dict
        A dictionary mapping the column number in `wendog` to the variables
        in `endog`.
    wendog : ndarray
        An n x j array where j is the number of unique categories in `endog`.
        Each column of j is a dummy variable indicating the category of
        each observation. See `names` for a dictionary mapping each column to
        its category.

    Notes
    -----
    See developer notes for further information on `MNLogit` internals.
    """ % {'extra_params': base._missing_param_doc + _check_rank_doc}

    def __init__(self, endog, exog, check_rank=True, **kwargs):
        super().__init__(endog, exog, check_rank=check_rank, **kwargs)
        # Override cov_names since multivariate model
        yname = self.endog_names
        ynames = self._ynames_map
        ynames = MultinomialResults._maybe_convert_ynames_int(ynames)
        # use range below to ensure sortedness
        ynames = [ynames[key] for key in range(int(self.J))]
        # The first category is the reference and carries no parameters,
        # hence ynames[1:] in the index.
        idx = MultiIndex.from_product((ynames[1:], self.data.xnames),
                                      names=(yname, None))
        self.data.cov_names = idx

    def pdf(self, eXB):
        """
        NotImplemented
        """
        raise NotImplementedError

    def cdf(self, X):
        """
        Multinomial logit cumulative distribution function.

        Parameters
        ----------
        X : ndarray
            The linear predictor of the model XB.

        Returns
        -------
        cdf : ndarray
            The cdf evaluated at `X`.

        Notes
        -----
        In the multinomial logit model.

        .. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}
        """
        # Prepend a column of ones (= exp(0)) for the reference category,
        # then normalize rows to probabilities.
        eXB = np.column_stack((np.ones(len(X)), np.exp(X)))
        return eXB/eXB.sum(1)[:,None]

    def loglike(self, params):
        """
        Log-likelihood of the multinomial logit model.

        Parameters
        ----------
        params : array_like
            The parameters of the multinomial logit model.

        Returns
        -------
        loglike : float
            The log-likelihood function of the model evaluated at `params`.
            See notes.

        Notes
        -----
        .. math::

           \\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln
           \\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
           {\\sum_{k=0}^{J}
           \\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)

        where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
        if not.
        """
        # params arrives flattened; restore the (K, J-1) shape in Fortran
        # (column-major) order.
        params = params.reshape(self.K, -1, order='F')
        d = self.wendog
        logprob = np.log(self.cdf(np.dot(self.exog,params)))
        return np.sum(d * logprob)

    def loglikeobs(self, params):
        """
        Log-likelihood of the multinomial logit model for each observation.

        Parameters
        ----------
        params : array_like
            The parameters of the multinomial logit model.

        Returns
        -------
        loglike : array_like
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes

        Notes
        -----
        .. math::

           \\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln
           \\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
           {\\sum_{k=0}^{J}
           \\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)

        for observations :math:`i=1,...,n`

        where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
        if not.
        """
        params = params.reshape(self.K, -1, order='F')
        d = self.wendog
        logprob = np.log(self.cdf(np.dot(self.exog,params)))
        return d * logprob

    def score(self, params):
        """
        Score matrix for multinomial logit model log-likelihood

        Parameters
        ----------
        params : ndarray
            The parameters of the multinomial logit model.

        Returns
        -------
        score : ndarray, (K * (J-1),)
            The 2-d score vector, i.e. the first derivative of the
            loglikelihood function, of the multinomial logit model evaluated at
            `params`.

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}

        for :math:`j=1,...,J`

        In the multinomial model the score matrix is K x J-1 but is returned
        as a flattened array to work with the solvers.
        """
        params = params.reshape(self.K, -1, order='F')
        # Residuals for the non-reference categories (column 0 dropped).
        firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
                                                  params))[:,1:]
        #NOTE: might need to switch terms if params is reshaped
        return np.dot(firstterm.T, self.exog).flatten()

    def loglike_and_score(self, params):
        """
        Returns log likelihood and score, efficiently reusing calculations.

        Note that both of these returned quantities will need to be negated
        before being minimized by the maximum likelihood fitting machinery.
        """
        params = params.reshape(self.K, -1, order='F')
        # Compute probabilities once and share them between loglike and score.
        cdf_dot_exog_params = self.cdf(np.dot(self.exog, params))
        loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params))
        firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:]
        score_array = np.dot(firstterm.T, self.exog).flatten()
        return loglike_value, score_array

    def score_obs(self, params):
        """
        Jacobian matrix for multinomial logit model log-likelihood

        Parameters
        ----------
        params : ndarray
            The parameters of the multinomial logit model.

        Returns
        -------
        jac : array_like
            The derivative of the loglikelihood for each observation evaluated
            at `params` .

        Notes
        -----
        .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}

        for :math:`j=1,...,J`, for observations :math:`i=1,...,n`

        In the multinomial model the score vector is K x (J-1) but is returned
        as a flattened array. The Jacobian has the observations in rows and
        the flattened array of derivatives in columns.
        """
        params = params.reshape(self.K, -1, order='F')
        firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
                                                  params))[:,1:]
        #NOTE: might need to switch terms if params is reshaped
        return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1)

    def hessian(self, params):
        """
        Multinomial logit Hessian matrix of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (J*K, J*K)
            The Hessian, second derivative of loglikelihood function with
            respect to the flattened parameters, evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime}

        where
        :math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0
        otherwise.

        The actual Hessian matrix has J**2 * K x K elements. Our Hessian
        is reshaped to be square (J*K, J*K) so that the solvers can use it.

        This implementation does not take advantage of the symmetry of
        the Hessian and could probably be refactored for speed.
        """
        params = params.reshape(self.K, -1, order='F')
        X = self.exog
        pr = self.cdf(np.dot(X,params))
        partials = []
        J = self.J
        K = self.K
        # Build the (J-1)**2 K-by-K blocks; diagonal blocks use
        # p_j (1 - p_j), off-diagonal blocks use -p_i p_j.
        for i in range(J-1):
            for j in range(J-1): # this loop assumes we drop the first col.
                if i == j:
                    partials.append(\
                        -np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X))
                else:
                    partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X))
        H = np.array(partials)
        # the developer's notes on multinomial should clear this math up
        H = np.transpose(H.reshape(J-1, J-1, K, K), (0, 2, 1, 3)).reshape((J-1)*K, (J-1)*K)
        return H
#TODO: Weibull can be replaced by a survival analysis function
# like Stata's streg (the Cox model as well)
#class Weibull(DiscreteModel):
# """
# Binary choice Weibull model
#
# Notes
# ------
# This is unfinished and untested.
# """
##TODO: add analytic hessian for Weibull
# def initialize(self):
# pass
#
# def cdf(self, X):
# """
# Gumbell (Log Weibull) cumulative distribution function
# """
## return np.exp(-np.exp(-X))
# return stats.gumbel_r.cdf(X)
# # these two are equivalent.
# # Greene table and discussion is incorrect.
#
# def pdf(self, X):
# """
# Gumbell (LogWeibull) probability distribution function
# """
# return stats.gumbel_r.pdf(X)
#
# def loglike(self, params):
# """
# Loglikelihood of Weibull distribution
# """
# X = self.exog
# cdf = self.cdf(np.dot(X,params))
# y = self.endog
# return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf))
#
# def score(self, params):
# y = self.endog
# X = self.exog
# F = self.cdf(np.dot(X,params))
# f = self.pdf(np.dot(X,params))
# term = (y*f/F + (1 - y)*-f/(1-F))
# return np.dot(term,X)
#
# def hessian(self, params):
# hess = nd.Jacobian(self.score)
# return hess(params)
#
# def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
## The example had problems with all zero start values, Hessian = 0
# if start_params is None:
# start_params = OLS(self.endog, self.exog).fit().params
# mlefit = super(Weibull, self).fit(start_params=start_params,
# method=method, maxiter=maxiter, tol=tol)
# return mlefit
#
class NegativeBinomial(CountModel):
__doc__ = """
Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
References
----------
Greene, W. 2008. "Functional forms for the negative binomial model
for count data". Economics Letters. Volume 99, Number 3, pp.585-590.
Hilbe, J.M. 2011. "Negative binomial regression". Cambridge University
Press.
""" % {'params': base._model_params_doc,
'extra_params':
"""loglike_method : str
Log-likelihood type. 'nb2','nb1', or 'geometric'.
Fitted value :math:`\\mu`
Heterogeneity parameter :math:`\\alpha`
- nb2: Variance equal to :math:`\\mu + \\alpha\\mu^2` (most common)
- nb1: Variance equal to :math:`\\mu + \\alpha\\mu`
- geometric: Variance equal to :math:`\\mu + \\mu^2`
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc + _check_rank_doc}
def __init__(self, endog, exog, loglike_method='nb2', offset=None,
exposure=None, missing='none', check_rank=True, **kwargs):
super().__init__(endog,
exog,
offset=offset,
exposure=exposure,
missing=missing,
check_rank=check_rank,
**kwargs)
self.loglike_method = loglike_method
self._initialize()
if loglike_method in ['nb2', 'nb1']:
self.exog_names.append('alpha')
self.k_extra = 1
else:
self.k_extra = 0
# store keys for extras if we need to recreate model instance
# we need to append keys that do not go to super
self._init_keys.append('loglike_method')
def _initialize(self):
if self.loglike_method == 'nb2':
self.hessian = self._hessian_nb2
self.score = self._score_nbin
self.loglikeobs = self._ll_nb2
self._transparams = True # transform lnalpha -> alpha in fit
elif self.loglike_method == 'nb1':
self.hessian = self._hessian_nb1
self.score = self._score_nb1
self.loglikeobs = self._ll_nb1
self._transparams = True # transform lnalpha -> alpha in fit
elif self.loglike_method == 'geometric':
self.hessian = self._hessian_geom
self.score = self._score_geom
self.loglikeobs = self._ll_geometric
else:
raise ValueError('Likelihood type must "nb1", "nb2" '
'or "geometric"')
# Workaround to pickle instance methods
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
del odict['hessian']
del odict['score']
del odict['loglikeobs']
return odict
    def __setstate__(self, indict):
        # restore the plain state, then re-bind the variant-specific
        # loglikeobs/score/hessian methods removed for pickling
        self.__dict__.update(indict)
        self._initialize()
def _ll_nbin(self, params, alpha, Q=0):
if np.any(np.iscomplex(params)) or np.iscomplex(alpha):
gamma_ln = loggamma
else:
gamma_ln = gammaln
endog = self.endog
mu = self.predict(params)
size = 1/alpha * mu**Q
prob = size/(size+mu)
coeff = (gamma_ln(size+endog) - gamma_ln(endog+1) -
gamma_ln(size))
llf = coeff + size*np.log(prob) + endog*np.log(1-prob)
return llf
def _ll_nb2(self, params):
if self._transparams: # got lnalpha during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
return self._ll_nbin(params[:-1], alpha, Q=0)
def _ll_nb1(self, params):
if self._transparams: # got lnalpha during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
return self._ll_nbin(params[:-1], alpha, Q=1)
def _ll_geometric(self, params):
# we give alpha of 1 because it's actually log(alpha) where alpha=0
return self._ll_nbin(params, 1, 0)
    def loglike(self, params):
        r"""
        Loglikelihood for negative binomial model

        Parameters
        ----------
        params : array_like
            The parameters of the model. If `loglike_method` is nb1 or
            nb2, then the ancillary parameter is expected to be the
            last element.

        Returns
        -------
        llf : float
            The loglikelihood value at `params`

        Notes
        -----
        Following notation in Greene (2008), with negative binomial
        heterogeneity parameter :math:`\alpha`:

        .. math::

           \lambda_i &= exp(X\beta) \\
           \theta &= 1 / \alpha \\
           g_i &= \theta \lambda_i^Q \\
           w_i &= g_i/(g_i + \lambda_i) \\
           r_i &= \theta / (\theta+\lambda_i) \\
           ln \mathcal{L}_i &= ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)

        where :math:`Q=0` for NB2 and geometric and :math:`Q=1` for NB1.
        For the geometric, :math:`\alpha=0` as well.
        """
        # loglikeobs was bound to the variant-specific version in _initialize
        llf = np.sum(self.loglikeobs(params))
        return llf
def _score_geom(self, params):
exog = self.exog
y = self.endog[:, None]
mu = self.predict(params)[:, None]
dparams = exog * (y-mu)/(mu+1)
return dparams.sum(0)
    def _score_nbin(self, params, Q=0):
        """
        Score vector for the NB model: NB2 when Q=0, NB1 when Q=1.

        The last entry of ``params`` is alpha (or ln(alpha) while
        ``_transparams`` is active during fitting); the returned vector has
        the gradient w.r.t. the coefficients followed by d loglike/d alpha
        (chain-rule scaled to lnalpha space when ``_transparams`` is set).
        """
        if self._transparams: # lnalpha came in during fit
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        exog = self.exog
        y = self.endog[:,None]
        mu = self.predict(params)[:,None]
        a1 = 1/alpha * mu**Q
        prob = a1 / (a1 + mu)  # a1 aka "size" in _ll_nbin
        if Q == 1:  # nb1
            # Q == 1 --> a1 = mu / alpha --> prob = 1 / (alpha + 1)
            dgpart = digamma(y + a1) - digamma(a1)
            dparams = exog * a1 * (np.log(prob) +
                                   dgpart)
            dalpha = ((alpha * (y - mu * np.log(prob) -
                                mu*(dgpart + 1)) -
                       mu * (np.log(prob) +
                             dgpart))/
                      (alpha**2*(alpha + 1))).sum()
        elif Q == 0:  # nb2
            dgpart = digamma(y + a1) - digamma(a1)
            dparams = exog*a1 * (y-mu)/(mu+a1)
            da1 = -alpha**-2
            dalpha = (dgpart + np.log(a1)
                      - np.log(a1+mu) - (y-mu)/(a1+mu)).sum() * da1
        #multiply above by constant outside sum to reduce rounding error
        if self._transparams:
            # chain rule: d loglike / d lnalpha = alpha * d loglike / d alpha
            return np.r_[dparams.sum(0), dalpha*alpha]
        else:
            return np.r_[dparams.sum(0), dalpha]
    def _score_nb1(self, params):
        # NB1 score: delegate to the shared NB score with Q=1
        return self._score_nbin(params, Q=1)
def _hessian_geom(self, params):
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim, dim))
const_arr = mu*(1+y)/(mu+1)**2
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
const_arr, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
    def _hessian_nb1(self, params):
        """
        Hessian of NB1 model.

        Returns the (k+1, k+1) matrix of second derivatives w.r.t. the
        coefficients and alpha; the last row/column is the alpha block.
        """
        if self._transparams: # lnalpha came in during fit
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        exog = self.exog
        y = self.endog[:,None]
        mu = self.predict(params)[:,None]
        a1 = mu/alpha
        dgpart = digamma(y + a1) - digamma(a1)
        prob = 1 / (1 + alpha)  # equiv: a1 / (a1 + mu)
        # for dl/dparams dparams
        dim = exog.shape[1]
        hess_arr = np.empty((dim+1,dim+1))
        #const_arr = a1*mu*(a1+y)/(mu+a1)**2
        # not all of dparams
        dparams = exog / alpha * (np.log(prob) +
                                  dgpart)
        dmudb = exog*mu
        xmu_alpha = exog * a1
        trigamma = (special.polygamma(1, a1 + y) -
                    special.polygamma(1, a1))
        # lower triangle of the coefficient block; mirrored below
        for i in range(dim):
            for j in range(dim):
                if j > i:
                    continue
                hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] +
                                       xmu_alpha[:,i,None] * xmu_alpha[:,j,None] *
                                       trigamma, axis=0)
        tri_idx = np.triu_indices(dim, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]
        # for dl/dparams dalpha
        # da1 = -alpha**-2
        dldpda = np.sum(-a1 * dparams + exog * a1 *
                        (-trigamma*mu/alpha**2 - prob), axis=0)
        hess_arr[-1,:-1] = dldpda
        hess_arr[:-1,-1] = dldpda
        # d^2 loglike / d alpha^2 (scalar, summed over observations)
        log_alpha = np.log(prob)
        alpha3 = alpha**3
        alpha2 = alpha**2
        mu2 = mu**2
        dada = ((alpha3*mu*(2*log_alpha + 2*dgpart + 3) -
                 2*alpha3*y +
                 4*alpha2*mu*(log_alpha + dgpart) +
                 alpha2 * (2*mu - y) +
                 2*alpha*mu2*trigamma + mu2 * trigamma + alpha2 * mu2 * trigamma +
                 2*alpha*mu*(log_alpha + dgpart)
                 )/(alpha**4*(alpha2 + 2*alpha + 1)))
        hess_arr[-1,-1] = dada.sum()
        return hess_arr
    def _hessian_nb2(self, params):
        """
        Hessian of NB2 model.

        Returns the (k+1, k+1) matrix of second derivatives w.r.t. the
        coefficients and alpha; the last row/column is the alpha block.
        """
        if self._transparams: # lnalpha came in during fit
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        a1 = 1/alpha
        params = params[:-1]
        exog = self.exog
        y = self.endog[:,None]
        mu = self.predict(params)[:,None]
        prob = a1 / (a1 + mu)
        dgpart = digamma(a1 + y) - digamma(a1)
        # for dl/dparams dparams
        dim = exog.shape[1]
        hess_arr = np.empty((dim+1,dim+1))
        const_arr = a1*mu*(a1+y)/(mu+a1)**2
        # lower triangle of the coefficient block; mirrored below
        for i in range(dim):
            for j in range(dim):
                if j > i:
                    continue
                hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
                                       const_arr, axis=0)
        tri_idx = np.triu_indices(dim, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]
        # for dl/dparams dalpha
        da1 = -alpha**-2
        dldpda = -np.sum(mu*exog*(y-mu)*a1**2/(mu+a1)**2 , axis=0)
        hess_arr[-1,:-1] = dldpda
        hess_arr[:-1,-1] = dldpda
        # for dl/dalpha dalpha
        #NOTE: polygamma(1,x) is the trigamma function
        da2 = 2*alpha**-3
        dalpha = da1 * (dgpart +
                        np.log(prob) - (y - mu)/(a1+mu))
        dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) -
                    special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) +
                    (y - mu)/(mu + a1)**2)).sum()
        hess_arr[-1,-1] = dada
        return hess_arr
    #TODO: replace this with an analytic version; where is it used?
def score_obs(self, params):
sc = approx_fprime_cs(params, self.loglikeobs)
return sc
@Appender(Poisson.predict.__doc__)
def predict(self, params, exog=None, exposure=None, offset=None,
which='mean', linear=None, y_values=None):
if linear is not None:
msg = 'linear keyword is deprecated, use which="linear"'
warnings.warn(msg, DeprecationWarning)
if linear is True:
which = "linear"
# avoid duplicate computation for get-distribution
if which == "prob":
distr = self.get_distribution(
params,
exog=exog,
exposure=exposure,
offset=offset
)
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
return distr.pmf(y_values)
exog, offset, exposure = self._get_predict_arrays(
exog=exog,
offset=offset,
exposure=exposure
)
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
if which == "mean":
return np.exp(linpred)
elif which.startswith("lin"):
return linpred
elif which == "var":
mu = np.exp(linpred)
if self.loglike_method == 'geometric':
var_ = mu * (1 + mu)
else:
if self.loglike_method == 'nb2':
p = 2
elif self.loglike_method == 'nb1':
p = 1
alpha = params[-1]
var_ = mu * (1 + alpha * mu**(p - 1))
return var_
else:
raise ValueError('keyword which has to be "mean" and "linear"')
@Appender(_get_start_params_null_docs)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
def _estimate_dispersion(self, mu, resid, df_resid=None):
if df_resid is None:
df_resid = resid.shape[0]
if self.loglike_method == 'nb2':
#params.append(np.linalg.pinv(mu[:,None]).dot(resid**2 / mu - 1))
a = ((resid**2 / mu - 1) / mu).sum() / df_resid
else: #self.loglike_method == 'nb1':
a = (resid**2 / mu - 1).sum() / df_resid
return a
    def fit(self, start_params=None, method='bfgs', maxiter=35,
            full_output=1, disp=1, callback=None,
            cov_type='nonrobust', cov_kwds=None, use_t=None,
            optim_kwds_prelim=None, **kwargs):
        """Fit the model; gradient-based methods optimize in ln(alpha) space.

        A preliminary Poisson fit provides starting values when
        ``start_params`` is None; ``optim_kwds_prelim`` are passed to that
        preliminary fit.  The returned results have alpha (not lnalpha).
        """
        # Note: do not let super handle robust covariance because it has
        # transformed params
        self._transparams = False # always define attribute
        if self.loglike_method.startswith('nb') and method not in ['newton',
                                                                   'ncg']:
            self._transparams = True # in case same Model instance is refit
        elif self.loglike_method.startswith('nb'): # method is newton/ncg
            self._transparams = False # because we need to step in alpha space
        if start_params is None:
            # Use poisson fit as first guess.
            #TODO, Warning: this assumes exposure is logged
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            kwds_prelim = {'disp': 0, 'skip_hessian': True, 'warn_convergence': False}
            if optim_kwds_prelim is not None:
                kwds_prelim.update(optim_kwds_prelim)
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                res_poi = mod_poi.fit(**kwds_prelim)
            start_params = res_poi.params
            if self.loglike_method.startswith('nb'):
                # append a moment estimate of alpha, floored at 0.05
                a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
                                              df_resid=res_poi.df_resid)
                start_params = np.append(start_params, max(0.05, a))
        else:
            if self._transparams is True:
                # transform user provided start_params dispersion, see #3918
                start_params = np.array(start_params, copy=True)
                start_params[-1] = np.log(start_params[-1])
        if callback is None:
            # work around perfect separation callback #3895
            callback = lambda *x: x
        mlefit = super().fit(start_params=start_params,
                             maxiter=maxiter, method=method, disp=disp,
                             full_output=full_output, callback=callback,
                             **kwargs)
        if optim_kwds_prelim is not None:
            mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim
        # TODO: Fix NBin _check_perfect_pred
        if self.loglike_method.startswith('nb'):
            # mlefit is a wrapped counts results
            self._transparams = False # do not need to transform anymore now
            # change from lnalpha to alpha
            if method not in ["newton", "ncg"]:
                mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
            nbinfit = NegativeBinomialResults(self, mlefit._results)
            result = NegativeBinomialResultsWrapper(nbinfit)
        else:
            result = mlefit
        if cov_kwds is None:
            cov_kwds = {} #TODO: make this unnecessary ?
        result._get_robustcov_results(cov_type=cov_type, use_self=True, use_t=use_t, **cov_kwds)
        return result
    def fit_regularized(self, start_params=None, method='l1',
            maxiter='defined_by_method', full_output=1, disp=1, callback=None,
            alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
            qc_tol=0.03, **kwargs):
        """L1-regularized fit; the dispersion parameter is never penalized."""
        _validate_l1_method(method)
        if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and
                                                     alpha != 0):
            # do not penalize alpha if alpha is scalar
            k_params = self.exog.shape[1] + self.k_extra
            alpha = alpha * np.ones(k_params)
            alpha[-1] = 0
        # alpha for regularized poisson to get starting values
        alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
        self._transparams = False
        if start_params is None:
            # Use poisson fit as first guess.
            #TODO, Warning: this assumes exposure is logged
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                start_params = mod_poi.fit_regularized(
                    start_params=start_params, method=method, maxiter=maxiter,
                    full_output=full_output, disp=0, callback=callback,
                    alpha=alpha_p, trim_mode=trim_mode,
                    auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
                    qc_tol=qc_tol, **kwargs).params
            if self.loglike_method.startswith('nb'):
                # small positive starting value for alpha
                start_params = np.append(start_params, 0.1)
        # skip CountModel and use DiscreteModel's regularized fit directly
        cntfit = super(CountModel, self).fit_regularized(
                start_params=start_params, method=method, maxiter=maxiter,
                full_output=full_output, disp=disp, callback=callback,
                alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
                size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
        discretefit = L1NegativeBinomialResults(self, cntfit)
        return L1NegativeBinomialResultsWrapper(discretefit)
@Appender(Poisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exposure=None, offset=None):
"""get frozen instance of distribution
"""
mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)
if self.loglike_method == 'geometric':
distr = stats.geom(1 / (1 + mu[:, None]), loc=-1)
else:
if self.loglike_method == 'nb2':
p = 2
elif self.loglike_method == 'nb1':
p = 1
alpha = params[-1]
q = 2 - p
size = 1. / alpha * mu**q
prob = size / (size + mu)
distr = nbinom(size[:, None], prob[:, None])
return distr
class NegativeBinomialP(CountModel):
__doc__ = """
Generalized Negative Binomial (NB-P) Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
p : scalar
P denotes parameterizations for NB-P regression. p=1 for NB-1 and
p=2 for NB-2. Default is p=1.
""" % {'params': base._model_params_doc,
'extra_params':
"""p : scalar
P denotes parameterizations for NB regression. p=1 for NB-1 and
p=2 for NB-2. Default is p=2.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc + _check_rank_doc}
def __init__(self, endog, exog, p=2, offset=None,
exposure=None, missing='none', check_rank=True,
**kwargs):
super().__init__(endog,
exog,
offset=offset,
exposure=exposure,
missing=missing,
check_rank=check_rank,
**kwargs)
self.parameterization = p
self.exog_names.append('alpha')
self.k_extra = 1
self._transparams = False
    def _get_init_kwds(self):
        """Include the NB-P power ``p`` in the model re-creation keywords."""
        kwds = super()._get_init_kwds()
        kwds['p'] = self.parameterization
        return kwds
    def _get_exogs(self):
        # design matrix for the mean; no separate design for dispersion
        return (self.exog, None)
def loglike(self, params):
"""
Loglikelihood of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
y = self.endog
mu = self.predict(params)
mu_p = mu**(2 - p)
a1 = mu_p / alpha
a2 = mu + a1
llf = (gammaln(y + a1) - gammaln(y + 1) - gammaln(a1) +
a1 * np.log(a1) + y * np.log(mu) -
(y + a1) * np.log(a2))
return llf
    def score_obs(self, params):
        """
        Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observations.

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = 2 - self.parameterization
        y = self.endog
        mu = self.predict(params)
        # a1 is the NB "size" parameter mu**(2-p)/alpha; a2..a4 are
        # recurring subexpressions of the derivatives
        mu_p = mu**p
        a1 = mu_p / alpha
        a2 = mu + a1
        a3 = y + a1
        a4 = p * a1 / mu
        dgpart = digamma(a3) - digamma(a1)
        dgterm = dgpart + np.log(a1 / a2) + 1 - a3 / a2
        # TODO: better name/interpretation for dgterm?
        dparams = (a4 * dgterm -
                   a3 / a2 +
                   y / mu)
        dparams = (self.exog.T * mu * dparams).T
        dalpha = -a1 / alpha * dgterm
        # columns: d/d beta_j for each coefficient, then d/d alpha
        return np.concatenate((dparams, np.atleast_2d(dalpha).T),
                              axis=1)
def score(self, params):
"""
Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
score = np.sum(self.score_obs(params), axis=0)
if self._transparams:
score[-1] == score[-1] ** 2
return score
else:
return score
    def score_factor(self, params, endog=None):
        """
        Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observations.

        Parameters
        ----------
        params : array_like
            The parameters of the model
        endog : ndarray, optional
            Replacement for ``self.endog`` (used by numeric derivatives
            w.r.t. the response).

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = 2 - self.parameterization
        y = self.endog if endog is None else endog
        mu = self.predict(params)
        # a1 is the NB "size" parameter mu**(2-p)/alpha
        mu_p = mu**p
        a1 = mu_p / alpha
        a2 = mu + a1
        a3 = y + a1
        a4 = p * a1 / mu
        dgpart = digamma(a3) - digamma(a1)
        dparams = ((a4 * dgpart -
                    a3 / a2) +
                   y / mu + a4 * (1 - a3 / a2 + np.log(a1 / a2)))
        dparams = (mu * dparams).T
        dalpha = (-a1 / alpha * (dgpart +
                                 np.log(a1 / a2) +
                                 1 - a3 / a2))
        # factors are per-observation; multiply by exog to get score_obs
        return dparams, dalpha
    def hessian(self, params):
        """
        Generalized Negative Binomial (NB-P) model hessian maxtrix of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hessian : ndarray, 2-D
            The hessian matrix of the model.
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = 2 - self.parameterization
        y = self.endog
        exog = self.exog
        mu = self.predict(params)
        # a1 is the NB "size" parameter mu**(2-p)/alpha
        mu_p = mu**p
        a1 = mu_p / alpha
        a2 = mu + a1
        a3 = y + a1
        a4 = p * a1 / mu
        prob = a1 / a2
        lprob = np.log(prob)
        dgpart = digamma(a3) - digamma(a1)
        pgpart = polygamma(1, a3) - polygamma(1, a1)
        dim = exog.shape[1]
        hess_arr = np.zeros((dim + 1, dim + 1))
        # per-observation weight for the coefficient block
        coeff = mu**2 * (((1 + a4)**2 * a3 / a2**2 -
                          a3 / a2 * (p - 1) * a4 / mu -
                          y / mu**2 -
                          2 * a4 * (1 + a4) / a2 +
                          p * a4 / mu * (lprob + dgpart + 2) -
                          a4 / mu * (lprob + dgpart + 1) +
                          a4**2 * pgpart) +
                         (-(1 + a4) * a3 / a2 +
                          y / mu +
                          a4 * (lprob + dgpart + 1)) / mu)
        for i in range(dim):
            hess_arr[i, :-1] = np.sum(self.exog[:, :].T * self.exog[:, i] * coeff, axis=1)
        # cross derivatives d^2 loglike / d beta d alpha
        hess_arr[-1,:-1] = (self.exog[:, :].T * mu * a1 *
                            ((1 + a4) * (1 - a3 / a2) / a2 -
                             p * (lprob + dgpart + 2) / mu +
                             p / mu * (a3 + p * a1) / a2 -
                             a4 * pgpart) / alpha).sum(axis=1)
        # d^2 loglike / d alpha^2
        da2 = (a1 * (2 * lprob +
                     2 * dgpart + 3 -
                     2 * a3 / a2
                     + a1 * pgpart
                     - 2 * prob +
                     prob * a3 / a2) / alpha**2)
        hess_arr[-1, -1] = da2.sum()
        # symmetrize from the computed lower triangle
        tri_idx = np.triu_indices(dim + 1, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]
        return hess_arr
    def hessian_factor(self, params):
        """
        Generalized Negative Binomial (NB-P) model hessian maxtrix of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hessian : ndarray, 2-D
            The hessian matrix of the model.
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]
        params = params[:-1]
        p = 2 - self.parameterization
        y = self.endog
        mu = self.predict(params)
        # a1 is the NB "size" parameter mu**(2-p)/alpha
        mu_p = mu**p
        a1 = mu_p / alpha
        a2 = mu + a1
        a3 = y + a1
        a4 = p * a1 / mu
        a5 = a4 * p / mu
        dgpart = digamma(a3) - digamma(a1)
        # per-observation factors: beta-beta, beta-alpha, alpha-alpha
        coeff = mu**2 * (((1 + a4)**2 * a3 / a2**2 -
                          a3 * (a5 - a4 / mu) / a2 -
                          y / mu**2 -
                          2 * a4 * (1 + a4) / a2 +
                          a5 * (np.log(a1) - np.log(a2) + dgpart + 2) -
                          a4 * (np.log(a1) - np.log(a2) + dgpart + 1) / mu -
                          a4**2 * (polygamma(1, a1) - polygamma(1, a3))) +
                         (-(1 + a4) * a3 / a2 +
                          y / mu +
                          a4 * (np.log(a1) - np.log(a2) + dgpart + 1)) / mu)
        hfbb = coeff
        hfba = (mu * a1 *
                ((1 + a4) * (1 - a3 / a2) / a2 -
                 p * (np.log(a1 / a2) + dgpart + 2) / mu +
                 p * (a3 / mu + a4) / a2 +
                 a4 * (polygamma(1, a1) - polygamma(1, a3))) / alpha)
        hfaa = (a1 * (2 * np.log(a1 / a2) +
                      2 * dgpart + 3 -
                      2 * a3 / a2 - a1 * polygamma(1, a1) +
                      a1 * polygamma(1, a3) - 2 * a1 / a2 +
                      a1 * a3 / a2**2) / alpha**2)
        return hfbb, hfba, hfaa
@Appender(_get_start_params_null_docs)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
def _estimate_dispersion(self, mu, resid, df_resid=None):
q = self.parameterization - 1
if df_resid is None:
df_resid = resid.shape[0]
a = ((resid**2 / mu - 1) * mu**(-q)).sum() / df_resid
return a
    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='bfgs', maxiter=35,
            full_output=1, disp=1, callback=None, use_transparams=False,
            cov_type='nonrobust', cov_kwds=None, use_t=None,
            optim_kwds_prelim=None, **kwargs):
        # TODO: Fix doc string
        """
        use_transparams : bool
            This parameter enable internal transformation to impose
            non-negativity. True to enable. Default is False.
            use_transparams=True imposes the no underdispersion (alpha > 0)
            constraint. In case use_transparams=True and method="newton" or
            "ncg" transformation is ignored.
        """
        # the lnalpha transformation is incompatible with newton/ncg,
        # which need to step in alpha space
        if use_transparams and method not in ['newton', 'ncg']:
            self._transparams = True
        else:
            if use_transparams:
                warnings.warn('Parameter "use_transparams" is ignored',
                              RuntimeWarning)
            self._transparams = False
        if start_params is None:
            # preliminary Poisson fit supplies coefficient starting values
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            kwds_prelim = {'disp': 0, 'skip_hessian': True, 'warn_convergence': False}
            if optim_kwds_prelim is not None:
                kwds_prelim.update(optim_kwds_prelim)
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                res_poi = mod_poi.fit(**kwds_prelim)
            start_params = res_poi.params
            # append a moment estimate of alpha, floored at 0.05
            a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
                                          df_resid=res_poi.df_resid)
            start_params = np.append(start_params, max(0.05, a))
        if callback is None:
            # work around perfect separation callback #3895
            callback = lambda *x: x
        mlefit = super(NegativeBinomialP, self).fit(start_params=start_params,
                        maxiter=maxiter, method=method, disp=disp,
                        full_output=full_output, callback=callback,
                        **kwargs)
        if optim_kwds_prelim is not None:
            mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim
        if use_transparams and method not in ["newton", "ncg"]:
            # convert the estimate back from lnalpha to alpha
            self._transparams = False
            mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
        nbinfit = NegativeBinomialPResults(self, mlefit._results)
        result = NegativeBinomialPResultsWrapper(nbinfit)
        if cov_kwds is None:
            cov_kwds = {}
        result._get_robustcov_results(cov_type=cov_type,
                                      use_self=True, use_t=use_t, **cov_kwds)
        return result
    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
            maxiter='defined_by_method', full_output=1, disp=1, callback=None,
            alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
            qc_tol=0.03, **kwargs):
        _validate_l1_method(method)
        # expand a scalar penalty to a vector so the dispersion parameter
        # (last entry) is not penalized
        if np.size(alpha) == 1 and alpha != 0:
            k_params = self.exog.shape[1] + self.k_extra
            alpha = alpha * np.ones(k_params)
            alpha[-1] = 0
        # penalty for the preliminary Poisson fit (no dispersion entry)
        alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
        self._transparams = False
        if start_params is None:
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                start_params = mod_poi.fit_regularized(
                    start_params=start_params, method=method, maxiter=maxiter,
                    full_output=full_output, disp=0, callback=callback,
                    alpha=alpha_p, trim_mode=trim_mode,
                    auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
                    qc_tol=qc_tol, **kwargs).params
            # small positive starting value for alpha
            start_params = np.append(start_params, 0.1)
        # skip CountModel and use DiscreteModel's regularized fit directly
        cntfit = super(CountModel, self).fit_regularized(
                start_params=start_params, method=method, maxiter=maxiter,
                full_output=full_output, disp=disp, callback=callback,
                alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
                size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
        discretefit = L1NegativeBinomialResults(self, cntfit)
        return L1NegativeBinomialResultsWrapper(discretefit)
@Appender(Poisson.predict.__doc__)
def predict(self, params, exog=None, exposure=None, offset=None,
which='mean', y_values=None):
if exog is None:
exog = self.exog
if exposure is None:
exposure = getattr(self, 'exposure', 0)
elif exposure != 0:
exposure = np.log(exposure)
if offset is None:
offset = getattr(self, 'offset', 0)
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
if which == 'mean':
return np.exp(linpred)
elif which == 'linear':
return linpred
elif which == 'var':
mean = np.exp(linpred)
alpha = params[-1]
p = self.parameterization # no `-1` as in GPP
var_ = mean * (1 + alpha * mean**(p - 1))
return var_
elif which == 'prob':
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
mu = self.predict(params, exog, exposure, offset)
size, prob = self.convert_params(params, mu)
return nbinom.pmf(y_values, size[:, None], prob[:, None])
else:
raise ValueError('keyword "which" = %s not recognized' % which)
def convert_params(self, params, mu):
alpha = params[-1]
p = 2 - self.parameterization
size = 1. / alpha * mu**p
prob = size / (size + mu)
return (size, prob)
    def _deriv_score_obs_dendog(self, params):
        """derivative of score_obs w.r.t. endog

        Parameters
        ----------
        params : ndarray
            parameter at which score is evaluated

        Returns
        -------
        derivative : ndarray_2d
            The derivative of the score_obs with respect to endog.
        """
        # function-scope import; presumably avoids a circular import at
        # module load — TODO confirm
        from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
        def f(y):
            # complex-step passes a column vector; score_factor expects 1-D
            if y.ndim == 2 and y.shape[1] == 1:
                y = y[:, 0]
            sf = self.score_factor(params, endog=y)
            return np.column_stack(sf)
        dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
        # deriv is 2d vector
        d1 = dsf[:, :1] * self.exog
        d2 = dsf[:, 1:2]
        return np.column_stack((d1, d2))
def _var(self, mu, params=None):
"""variance implied by the distribution
internal use, will be refactored or removed
"""
alpha = params[-1]
p = self.parameterization # no `-1` as in GPP
var_ = mu * (1 + alpha * mu**(p - 1))
return var_
def _prob_nonzero(self, mu, params):
"""Probability that count is not zero
internal use in Censored model, will be refactored or removed
"""
alpha = params[-1]
p = self.parameterization
prob_nz = 1 - (1 + alpha * mu**(p-1))**(- 1 / alpha)
return prob_nz
@Appender(Poisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exposure=None, offset=None):
"""get frozen instance of distribution
"""
mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)
size, prob = self.convert_params(params, mu)
distr = nbinom(size[:, None], prob[:, None])
return distr
### Results Class ###
class DiscreteResults(base.LikelihoodModelResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for the discrete dependent variable models.",
"extra_attr" : ""}
    def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None,
                 use_t=None):
        """Wrap a raw MLE fit; absorbs mlefit's attributes into self."""
        #super(DiscreteResults, self).__init__(model, params,
        #        np.linalg.inv(-hessian), scale=1.)
        self.model = model
        self.method = "MLE"
        self.df_model = model.df_model
        self.df_resid = model.df_resid
        self._cache = {}  # backing store for cache_readonly attributes
        self.nobs = model.exog.shape[0]
        # absorb everything the optimizer attached (params, mle_retvals, ...)
        self.__dict__.update(mlefit.__dict__)
        self.converged = mlefit.mle_retvals["converged"]
        if not hasattr(self, 'cov_type'):
            # do this only if super, i.e. mlefit did not already add cov_type
            # robust covariance
            if use_t is not None:
                self.use_t = use_t
            if cov_type == 'nonrobust':
                self.cov_type = 'nonrobust'
                self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
                                 'covariance matrix of the errors is correctly ' +
                                 'specified.'}
            else:
                if cov_kwds is None:
                    cov_kwds = {}
                from statsmodels.base.covtype import get_robustcov_results
                get_robustcov_results(self, cov_type=cov_type, use_self=True,
                                      **cov_kwds)
def __getstate__(self):
# remove unpicklable methods
mle_settings = getattr(self, 'mle_settings', None)
if mle_settings is not None:
if 'callback' in mle_settings:
mle_settings['callback'] = None
if 'cov_params_func' in mle_settings:
mle_settings['cov_params_func'] = None
return self.__dict__
    @cache_readonly
    def prsquared(self):
        """
        McFadden's pseudo-R-squared. `1 - (llf / llnull)`
        """
        # compares the model log-likelihood to the constant-only model
        return 1 - self.llf/self.llnull
    @cache_readonly
    def llr(self):
        """
        Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
        """
        # LR test of the model against the constant-only null model
        return -2*(self.llnull - self.llf)
    @cache_readonly
    def llr_pvalue(self):
        """
        The chi-squared probability of getting a log-likelihood ratio
        statistic greater than llr. llr has a chi-squared distribution
        with degrees of freedom `df_model`.
        """
        # upper tail probability of chi2(df_model) at the observed llr
        return stats.distributions.chi2.sf(self.llr, self.df_model)
def set_null_options(self, llnull=None, attach_results=True, **kwargs):
"""
Set the fit options for the Null (constant-only) model.
This resets the cache for related attributes which is potentially
fragile. This only sets the option, the null model is estimated
when llnull is accessed, if llnull is not yet in cache.
Parameters
----------
llnull : {None, float}
If llnull is not None, then the value will be directly assigned to
the cached attribute "llnull".
attach_results : bool
Sets an internal flag whether the results instance of the null
model should be attached. By default without calling this method,
thenull model results are not attached and only the loglikelihood
value llnull is stored.
**kwargs
Additional keyword arguments used as fit keyword arguments for the
null model. The override and model default values.
Notes
-----
Modifies attributes of this instance, and so has no return.
"""
# reset cache, note we need to add here anything that depends on
# llnullor the null model. If something is missing, then the attribute
# might be incorrect.
self._cache.pop('llnull', None)
self._cache.pop('llr', None)
self._cache.pop('llr_pvalue', None)
self._cache.pop('prsquared', None)
if hasattr(self, 'res_null'):
del self.res_null
if llnull is not None:
self._cache['llnull'] = llnull
self._attach_nullmodel = attach_results
self._optim_kwds_null = kwargs
@cache_readonly
def llnull(self):
"""
Value of the constant-only loglikelihood
"""
model = self.model
kwds = model._get_init_kwds().copy()
for key in getattr(model, '_null_drop_keys', []):
del kwds[key]
# TODO: what parameters to pass to fit?
mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
# TODO: consider catching and warning on convergence failure?
# in the meantime, try hard to converge. see
# TestPoissonConstrained1a.test_smoke
optim_kwds = getattr(self, '_optim_kwds_null', {}).copy()
if 'start_params' in optim_kwds:
# user provided
sp_null = optim_kwds.pop('start_params')
elif hasattr(model, '_get_start_params_null'):
# get moment estimates if available
sp_null = model._get_start_params_null()
else:
sp_null = None
opt_kwds = dict(method='bfgs', warn_convergence=False, maxiter=10000,
disp=0)
opt_kwds.update(optim_kwds)
if optim_kwds:
res_null = mod_null.fit(start_params=sp_null, **opt_kwds)
else:
# this should be a reasonably method case across versions
res_null = mod_null.fit(start_params=sp_null, method='nm',
warn_convergence=False,
maxiter=10000, disp=0)
res_null = mod_null.fit(start_params=res_null.params, method='bfgs',
warn_convergence=False,
maxiter=10000, disp=0)
if getattr(self, '_attach_nullmodel', False) is not False:
self.res_null = res_null
return res_null.llf
@cache_readonly
def fittedvalues(self):
"""
Linear predictor XB.
"""
return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]])
@cache_readonly
def resid_response(self):
"""
Respnose residuals. The response residuals are defined as
`endog - fittedvalues`
"""
return self.model.endog - self.predict()
@cache_readonly
def resid_pearson(self):
"""
Pearson residuals defined as response residuals divided by standard
deviation implied by the model.
"""
var_ = self.predict(which="var")
return self.resid_response / np.sqrt(var_)
@cache_readonly
def aic(self):
"""
Akaike information criterion. `-2*(llf - p)` where `p` is the number
of regressors including the intercept.
"""
k_extra = getattr(self.model, 'k_extra', 0)
return -2*(self.llf - (self.df_model + 1 + k_extra))
@cache_readonly
def bic(self):
"""
Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the
number of regressors including the intercept.
"""
k_extra = getattr(self.model, 'k_extra', 0)
return -2*self.llf + np.log(self.nobs)*(self.df_model + 1 + k_extra)
@cache_readonly
def im_ratio(self):
return pinfer.im_ratio(self)
def info_criteria(self, crit, dk_params=0):
"""Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'tic' or 'gbic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion. By default, only mean parameters are included, the
scale parameter is not included in the parameter count.
Use ``dk_params=1`` to include scale in the parameter count.
Returns the given information criterion value.
Notes
-----
Tic and bbic
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York.
"""
crit = crit.lower()
k_extra = getattr(self.model, 'k_extra', 0)
k_params = self.df_model + 1 + k_extra + dk_params
if crit == "aic":
return -2 * self.llf + 2 * k_params
elif crit == "bic":
nobs = self.df_model + self.df_resid + 1
bic = -2*self.llf + k_params*np.log(nobs)
return bic
elif crit == "tic":
return pinfer.tic(self)
elif crit == "gbic":
return pinfer.gbic(self)
else:
raise ValueError("Name of information criterion not recognized.")
def score_test(self, exog_extra=None, params_constrained=None,
hypothesis='joint', cov_type=None, cov_kwds=None,
k_constraints=None, observed=True):
res = pinfer.score_test(self, exog_extra=exog_extra,
params_constrained=params_constrained,
hypothesis=hypothesis,
cov_type=cov_type, cov_kwds=cov_kwds,
k_constraints=k_constraints,
observed=observed)
return res
score_test.__doc__ = pinfer.score_test.__doc__
def get_prediction(self, exog=None,
transform=True, which="mean", linear=None,
row_labels=None, average=False,
agg_weights=None, y_values=None,
**kwargs):
"""
Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : str
Which statistic is to be predicted. Default is "mean".
The available statistics and options depend on the model.
see the model.predict docstring
linear : bool
Linear has been replaced by the `which` keyword and will be
deprecated.
If linear is True, then `which` is ignored and the linear
prediction is returned.
row_labels : list of str or None
If row_lables are provided, then they will replace the generated
labels.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observation is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
y_values : None or nd_array
Some predictive statistics like which="prob" are computed at
values of the response variable. If y_values is not None, then
it will be used instead of the default set of y_values.
**Warning:** ``which="prob"`` for count models currently computes
the pmf for all y=k up to max(endog). This can be a large array if
the observed endog values are large.
This will likely change so that the set of y_values will be chosen
to limit the array size.
**kwargs :
Some models can take additional keyword arguments, such as offset,
exposure or additional exog in multi-part models like zero inflated
models.
See the predict method of the model for the details.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary dataframe for the prediction.
Notes
-----
Status: new in 0.14, experimental
"""
if linear is True:
# compatibility with old keyword
which = "linear"
pred_kwds = kwargs
# y_values is explicit so we can add it to the docstring
if y_values is not None:
pred_kwds["y_values"] = y_values
res = pred.get_prediction(
self,
exog=exog,
which=which,
transform=transform,
row_labels=row_labels,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds
)
return res
def get_distribution(self, exog=None, transform=True, **kwargs):
exog, _ = self._transform_predict_exog(exog, transform=transform)
if exog is not None:
exog = np.asarray(exog)
distr = self.model.get_distribution(self.params,
exog=exog,
**kwargs
)
return distr
def _get_endog_name(self, yname, yname_list):
if yname is None:
yname = self.model.endog_names
if yname_list is None:
yname_list = self.model.endog_names
return yname, yname_list
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available from the returned object.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
Note that tranformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables. For interpretations of these methods
see notes below.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the key
as the zero-indexed column number and the value of the dictionary.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
DiscreteMargins : marginal effects instance
Returns an object that holds the marginal effects, standard
errors, confidence intervals, etc. See
`statsmodels.discrete.discrete_margins.DiscreteMargins` for more
information.
Notes
-----
Interpretations of methods:
- 'dydx' - change in `endog` for a change in `exog`.
- 'eyex' - proportional change in `endog` for a proportional change
in `exog`.
- 'dyex' - change in `endog` for a proportional change in `exog`.
- 'eydx' - proportional change in `endog` for a change in `exog`.
When using after Poisson, returns the expected number of events per
period, assuming that the model is loglinear.
"""
from statsmodels.discrete.discrete_margins import DiscreteMargins
return DiscreteMargins(self, (at, method, atexog, dummy, count))
def get_influence(self):
"""
Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
"""
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that hold summary results.
"""
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', [self.method]),
('Date:', None),
('Time:', None),
('converged:', ["%s" % self.mle_retvals['converged']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]),
('Log-Likelihood:', None),
('LL-Null:', ["%#8.5g" % self.llnull]),
('LLR p-value:', ["%#6.4g" % self.llr_pvalue])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
yname, yname_list = self._get_endog_name(yname, yname_list)
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""
Experimental function to summarize regression results.
Parameters
----------
yname : str
Name of the dependent variable (optional).
xname : list[str], optional
List of strings of length equal to the number of parameters
Names of the independent variables (optional).
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The print format for floats in parameters summary.
Returns
-------
Summary
Instance that contains the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : Class that holds summary results.
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
if hasattr(self, 'constraints'):
smry.add_text('Model has been estimated subject to linear '
'equality constraints.')
return smry
class CountResults(DiscreteResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for count data",
        "extra_attr": ""}

    @cache_readonly
    def resid(self):
        """
        Residuals

        Notes
        -----
        The residuals for Count models are defined as

        .. math:: y - p

        where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables
        are also handled.
        """
        return self.model.endog - self.predict()

    def get_diagnostic(self, y_max=None):
        """
        Get instance of class with specification and diagnostic methods.

        experimental, API of Diagnostic classes will change

        Returns
        -------
        CountDiagnostic instance
            The instance has methods to perform specification and diagnostic
            tests and plots

        See Also
        --------
        statsmodels.statsmodels.discrete.diagnostic.CountDiagnostic
        """
        from statsmodels.discrete.diagnostic import CountDiagnostic
        return CountDiagnostic(self, y_max=y_max)
class NegativeBinomialResults(CountResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for NegativeBinomial 1 and 2",
        "extra_attr": ""}

    @cache_readonly
    def lnalpha(self):
        """Natural log of alpha, the dispersion parameter (last element of
        ``params``)."""
        return np.log(self.params[-1])

    @cache_readonly
    def lnalpha_std_err(self):
        """Standard error of log(alpha), obtained from the standard error of
        alpha by the delta method (``bse[-1] / alpha``)."""
        return self.bse[-1] / self.params[-1]

    @cache_readonly
    def aic(self):
        # + 1 because we estimate alpha in addition to the mean parameters
        k_extra = getattr(self.model, 'k_extra', 0)
        return -2*(self.llf - (self.df_model + self.k_constant + k_extra))

    @cache_readonly
    def bic(self):
        # + 1 because we estimate alpha in addition to the mean parameters
        k_extra = getattr(self.model, 'k_extra', 0)
        return -2*self.llf + np.log(self.nobs)*(self.df_model +
                                                self.k_constant + k_extra)
# Same behavior as NegativeBinomialResults; exists to give NegativeBinomialP
# its own results type (and wrapper pairing below).
class NegativeBinomialPResults(NegativeBinomialResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for NegativeBinomialP",
        "extra_attr": ""}
class GeneralizedPoissonResults(NegativeBinomialResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Generalized Poisson",
        "extra_attr": ""}

    @cache_readonly
    def _dispersion_factor(self):
        """Multiplicative dispersion factor ``(1 + alpha * mu**p)**2`` where
        ``p`` is the model parameterization and ``alpha = params[-1]``."""
        p = getattr(self.model, 'parameterization', 0)
        mu = self.predict()
        return (1 + self.params[-1] * mu**p)**2
class L1CountResults(DiscreteResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
            "A results class for count data fit by l1 regularization",
            "extra_attr" : _l1_results_attr}

    def __init__(self, model, cntfit):
        """Initialize from an l1-regularized count-model fit.

        Parameters
        ----------
        model : DiscreteModel
            The fitted model instance.
        cntfit : LikelihoodModelResults
            Optimizer result; its ``mle_retvals['trimmed']`` marks the
            parameters zeroed out by the l1 penalty.
        """
        super().__init__(model, cntfit)
        # self.trimmed is a boolean array with T/F telling whether or not that
        # entry in params has been set zero'd out.
        self.trimmed = cntfit.mle_retvals['trimmed']
        self.nnz_params = (~self.trimmed).sum()
        # Set degrees of freedom. In doing so,
        # adjust for extra parameter in NegativeBinomial nb1 and nb2
        # extra parameter is not included in df_model
        k_extra = getattr(self.model, 'k_extra', 0)
        self.df_model = self.nnz_params - 1 - k_extra
        self.df_resid = (float(self.model.endog.shape[0] - self.nnz_params)
                         + k_extra)
class PoissonResults(CountResults):

    def predict_prob(self, n=None, exog=None, exposure=None, offset=None,
                     transform=True):
        """
        Return predicted probability of each count level for each observation

        Parameters
        ----------
        n : array_like or int
            The counts for which you want the probabilities. If n is None
            then the probabilities for each count from 0 to max(y) are
            given.

        Returns
        -------
        ndarray
            A nobs x n array where len(`n`) columns are indexed by the count
            n. If n is None, then column 0 is the probability that each
            observation is 0, column 1 is the probability that each
            observation is 1, etc.
        """
        if n is not None:
            counts = np.atleast_2d(n)
        else:
            counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1))
        mu = self.predict(exog=exog, exposure=exposure, offset=offset,
                          transform=transform, which="mean")[:,None]
        # uses broadcasting: (nobs, 1) mean against (1, n) counts
        return stats.poisson.pmf(counts, mu)

    @property
    def resid_pearson(self):
        """
        Pearson residuals

        Notes
        -----
        Pearson residuals are defined to be

        .. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}

        where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
        observations sharing the covariate pattern :math:`j`.

        For now :math:`M_j` is always set to 1.
        """
        # Pearson residuals; for Poisson the variance equals the mean
        p = self.predict()  # fittedvalues is still linear
        return (self.model.endog - p)/np.sqrt(p)

    def get_influence(self):
        """
        Get an instance of MLEInfluence with influence and outlier measures

        Returns
        -------
        infl : MLEInfluence instance
            The instance has methods to calculate the main influence and
            outlier measures as attributes.

        See Also
        --------
        statsmodels.stats.outliers_influence.MLEInfluence
        """
        from statsmodels.stats.outliers_influence import MLEInfluence
        return MLEInfluence(self)

    def get_diagnostic(self, y_max=None):
        """
        Get instance of class with specification and diagnostic methods

        experimental, API of Diagnostic classes will change

        Returns
        -------
        PoissonDiagnostic instance
            The instance has methods to perform specification and diagnostic
            tests and plots

        See Also
        --------
        statsmodels.statsmodels.discrete.diagnostic.PoissonDiagnostic
        """
        from statsmodels.discrete.diagnostic import (
            PoissonDiagnostic)
        return PoissonDiagnostic(self, y_max=y_max)
# Combines l1 df/trimming bookkeeping with Poisson-specific results methods.
class L1PoissonResults(L1CountResults, PoissonResults):
    pass
# Combines l1 df/trimming bookkeeping with NegativeBinomial results methods.
class L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults):
    pass
# Combines l1 df/trimming bookkeeping with GeneralizedPoisson results methods.
class L1GeneralizedPoissonResults(L1CountResults, GeneralizedPoissonResults):
    pass
class OrderedResults(DiscreteResults):
    # The __doc__ assignment is the (only) class-body statement, so the
    # previous trailing ``pass`` was redundant and has been removed.
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for ordered discrete data.",
        "extra_attr": ""}
class BinaryResults(DiscreteResults):
    __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for binary data", "extra_attr" : ""}

    def pred_table(self, threshold=.5):
        """
        Prediction table

        Parameters
        ----------
        threshold : scalar
            Number between 0 and 1. Threshold above which a prediction is
            considered 1 and below which a prediction is considered 0.

        Notes
        -----
        pred_table[i,j] refers to the number of times "i" was observed and
        the model predicted "j". Correct predictions are along the diagonal.
        """
        model = self.model
        actual = model.endog
        pred = np.array(self.predict() > threshold, dtype=float)
        bins = np.array([0, 0.5, 1])
        return np.histogram2d(actual, pred, bins=bins)[0]

    @Appender(DiscreteResults.summary.__doc__)
    def summary(self, yname=None, xname=None, title=None, alpha=.05,
                yname_list=None):
        smry = super().summary(yname, xname, title, alpha, yname_list)

        # diagnose (quasi-)separation from near-perfect predictions
        fittedvalues = self.model.cdf(self.fittedvalues)
        absprederror = np.abs(self.model.endog - fittedvalues)
        predclose_sum = (absprederror < 1e-4).sum()
        predclose_frac = predclose_sum / len(fittedvalues)

        # add warnings/notes
        etext = []
        if predclose_sum == len(fittedvalues):  # TODO: nobs?
            # BUG FIX: the first fragment previously ended "...there is"
            # without a trailing space, rendering "iscomplete".
            wstr = "Complete Separation: The results show that there is "
            wstr += "complete separation.\n"
            wstr += "In this case the Maximum Likelihood Estimator does "
            wstr += "not exist and the parameters\n"
            wstr += "are not identified."
            etext.append(wstr)
        elif predclose_frac > 0.1:  # TODO: get better diagnosis
            wstr = "Possibly complete quasi-separation: A fraction "
            wstr += "%4.2f of observations can be\n" % predclose_frac
            wstr += "perfectly predicted. This might indicate that there "
            wstr += "is complete\nquasi-separation. In this case some "
            wstr += "parameters will not be identified."
            etext.append(wstr)
        if etext:
            smry.add_extra_txt(etext)
        return smry

    @cache_readonly
    def resid_dev(self):
        """
        Deviance residuals

        Notes
        -----
        Deviance residuals are defined

        .. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j)\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)} \\right) \\right] \\right)^{1/2}

        where

        :math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of
        observations sharing the covariate pattern :math:`j`.

        For now :math:`M_j` is always set to 1.
        """
        #These are the deviance residuals
        #model = self.model
        endog = self.model.endog
        #exog = model.exog
        # M = # of individuals that share a covariate pattern
        # so M[i] = 2 for i = two share a covariate pattern
        M = 1
        p = self.predict()
        #Y_0 = np.where(exog == 0)
        #Y_M = np.where(exog == M)
        #NOTE: Common covariate patterns are not yet handled
        res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \
                endog*np.sqrt(2*M*np.abs(np.log(p)))
        return res

    @cache_readonly
    def resid_pearson(self):
        """
        Pearson residuals

        Notes
        -----
        Pearson residuals are defined to be

        .. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}

        where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
        observations sharing the covariate pattern :math:`j`.

        For now :math:`M_j` is always set to 1.
        """
        # Pearson residuals
        #model = self.model
        endog = self.model.endog
        #exog = model.exog
        # M = # of individuals that share a covariate pattern
        # so M[i] = 2 for i = two share a covariate pattern
        # use unique row pattern?
        M = 1
        p = self.predict()
        return (endog - M*p)/np.sqrt(M*p*(1-p))

    @cache_readonly
    def resid_response(self):
        """
        The response residuals

        Notes
        -----
        Response residuals are defined to be

        .. math:: y - p

        where :math:`p=cdf(X\\beta)`.
        """
        return self.model.endog - self.predict()
class LogitResults(BinaryResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Logit Model",
        "extra_attr": ""}

    @cache_readonly
    def resid_generalized(self):
        """
        Generalized residuals

        Notes
        -----
        The generalized residuals for the Logit model are defined

        .. math:: y - p

        where :math:`p=cdf(X\\beta)`. This is the same as the `resid_response`
        for the Logit model.
        """
        # Generalized residuals
        return self.model.endog - self.predict()

    def get_influence(self):
        """
        Get an instance of MLEInfluence with influence and outlier measures

        Returns
        -------
        infl : MLEInfluence instance
            The instance has methods to calculate the main influence and
            outlier measures as attributes.

        See Also
        --------
        statsmodels.stats.outliers_influence.MLEInfluence
        """
        from statsmodels.stats.outliers_influence import MLEInfluence
        return MLEInfluence(self)
class ProbitResults(BinaryResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Probit Model",
        "extra_attr": ""}

    @cache_readonly
    def resid_generalized(self):
        """
        Generalized residuals

        Notes
        -----
        The generalized residuals for the Probit model are defined

        .. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)}
        """
        # score-type residuals: the normal density at the linear predictor,
        # scaled by the probability mass on the observed side of the outcome
        y = self.model.endog
        linpred = self.predict(which="linear")
        dens = self.model.pdf(linpred)
        prob = self.model.cdf(linpred)
        return y * dens / prob - (1 - y) * dens / (1 - prob)
class L1BinaryResults(BinaryResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
            "Results instance for binary data fit by l1 regularization",
            "extra_attr" : _l1_results_attr}

    def __init__(self, model, bnryfit):
        """Initialize from an l1-regularized binary-model fit.

        Parameters
        ----------
        model : DiscreteModel
            The fitted model instance.
        bnryfit : LikelihoodModelResults
            Optimizer result; its ``mle_retvals['trimmed']`` marks the
            parameters zeroed out by the l1 penalty.
        """
        super().__init__(model, bnryfit)
        # self.trimmed is a boolean array with T/F telling whether or not that
        # entry in params has been set zero'd out.
        self.trimmed = bnryfit.mle_retvals['trimmed']
        self.nnz_params = (~self.trimmed).sum()
        # degrees of freedom count only the surviving (nonzero) parameters
        self.df_model = self.nnz_params - 1
        self.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
class MultinomialResults(DiscreteResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
            "A results class for multinomial data", "extra_attr" : ""}

    def __init__(self, model, mlefit):
        super().__init__(model, mlefit)
        self.J = model.J  # number of alternatives / choices
        self.K = model.K  # number of exog variables per equation

    @staticmethod
    def _maybe_convert_ynames_int(ynames):
        """Convert int-like endog level names to str; warn otherwise.

        Mutates and returns `ynames` (a mapping of level index -> name).
        """
        # see if they're integers
        issue_warning = False
        # BUG FIX: message previously read "values are that not int-like".
        msg = ('endog contains values that are not int-like. Uses string '
               'representation of value. Use integer-valued endog to '
               'suppress this warning.')
        for i in ynames:
            try:
                if ynames[i] % 1 == 0:
                    ynames[i] = str(int(ynames[i]))
                else:
                    issue_warning = True
                    ynames[i] = str(ynames[i])
            except TypeError:
                # non-numeric name, fall back to plain str without warning
                ynames[i] = str(ynames[i])
        if issue_warning:
            warnings.warn(msg, SpecificationWarning)
        return ynames

    def _get_endog_name(self, yname, yname_list, all=False):
        """
        If all is False, the first variable name is dropped
        """
        model = self.model
        if yname is None:
            yname = model.endog_names
        if yname_list is None:
            ynames = model._ynames_map
            ynames = self._maybe_convert_ynames_int(ynames)
            # use range below to ensure sortedness
            ynames = [ynames[key] for key in range(int(model.J))]
            ynames = ['='.join([yname, name]) for name in ynames]
            if not all:
                yname_list = ynames[1:]  # assumes first variable is dropped
            else:
                yname_list = ynames
        return yname, yname_list

    def pred_table(self):
        """
        Returns the J x J prediction table.

        Notes
        -----
        pred_table[i,j] refers to the number of times "i" was observed and
        the model predicted "j". Correct predictions are along the diagonal.
        """
        ju = self.model.J - 1  # highest index
        # these are the actual, predicted indices
        #idx = lzip(self.model.endog, self.predict().argmax(1))
        bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju]))
        return np.histogram2d(self.model.endog, self.predict().argmax(1),
                              bins=bins)[0]

    @cache_readonly
    def bse(self):
        # reshape flat std. errors back to (K, J-1), Fortran order to match
        # the params layout
        bse = np.sqrt(np.diag(self.cov_params()))
        return bse.reshape(self.params.shape, order='F')

    @cache_readonly
    def aic(self):
        # J-1 intercepts are estimated in addition to df_model slopes
        return -2*(self.llf - (self.df_model+self.model.J-1))

    @cache_readonly
    def bic(self):
        return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1)

    def conf_int(self, alpha=.05, cols=None):
        # deliberately skip DiscreteResults in the MRO and use the base
        # LikelihoodModelResults implementation, then move the equation
        # axis to the front
        confint = super(DiscreteResults, self).conf_int(alpha=alpha,
                                                        cols=cols)
        return confint.transpose(2,0,1)

    def get_prediction(self):
        """Not implemented for Multinomial
        """
        raise NotImplementedError

    def margeff(self):
        raise NotImplementedError("Use get_margeff instead")

    @cache_readonly
    def resid_misclassified(self):
        """
        Residuals indicating which observations are misclassified.

        Notes
        -----
        The residuals for the multinomial model are defined as

        .. math:: argmax(y_i) \\neq argmax(p_i)

        where :math:`argmax(y_i)` is the index of the category for the
        endogenous variable and :math:`argmax(p_i)` is the index of the
        predicted probabilities for each category. That is, the residual
        is a binary indicator that is 0 if the category with the highest
        predicted probability is the same as that of the observed variable
        and 1 otherwise.
        """
        # it's 0 or 1 - 0 for correct prediction and 1 for a missed one
        return (self.model.wendog.argmax(1) !=
                self.predict().argmax(1)).astype(float)

    def summary2(self, alpha=0.05, float_format="%.4f"):
        """Experimental function to summarize regression results

        Parameters
        ----------
        alpha : float
            significance level for the confidence intervals
        float_format : str
            print format for floats in parameters summary

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary2.Summary : class to hold summary results
        """
        from statsmodels.iolib import summary2
        smry = summary2.Summary()
        smry.add_dict(summary2.summary_model(self))
        # One data frame per value of endog
        eqn = self.params.shape[1]
        confint = self.conf_int(alpha)
        for i in range(eqn):
            coefs = summary2.summary_params((self, self.params[:, i],
                                             self.bse[:, i],
                                             self.tvalues[:, i],
                                             self.pvalues[:, i],
                                             confint[i]),
                                            alpha=alpha)
            # Header must show value of endog
            level_str = self.model.endog_names + ' = ' + str(i)
            coefs[level_str] = coefs.index
            coefs = coefs.iloc[:, [-1, 0, 1, 2, 3, 4, 5]]
            smry.add_df(coefs, index=False, header=True,
                        float_format=float_format)
        smry.add_title(results=self)
        return smry
class L1MultinomialResults(MultinomialResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
            "A results class for multinomial data fit by l1 regularization",
            "extra_attr" : _l1_results_attr}

    def __init__(self, model, mlefit):
        """Initialize from an l1-regularized multinomial fit.

        Parameters
        ----------
        model : MNLogit
            The fitted model instance.
        mlefit : LikelihoodModelResults
            Optimizer result; its ``mle_retvals['trimmed']`` marks the
            parameters zeroed out by the l1 penalty.
        """
        super().__init__(model, mlefit)
        # self.trimmed is a boolean array with T/F telling whether or not that
        # entry in params has been set zero'd out.
        self.trimmed = mlefit.mle_retvals['trimmed']
        self.nnz_params = (~self.trimmed).sum()
        # Note: J-1 constants
        self.df_model = self.nnz_params - (self.model.J - 1)
        self.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
#### Results Wrappers ####
# Each results class above gets a thin wrapper that attaches pandas metadata
# (index/column labels) to array-valued attributes; populate_wrapper wires the
# wrapper to its results class.

class OrderedResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(OrderedResultsWrapper, OrderedResults)


class CountResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(CountResultsWrapper, CountResults)


class NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(NegativeBinomialResultsWrapper,
                      NegativeBinomialResults)


class NegativeBinomialPResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(NegativeBinomialPResultsWrapper,
                      NegativeBinomialPResults)


class GeneralizedPoissonResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(GeneralizedPoissonResultsWrapper,
                      GeneralizedPoissonResults)


class PoissonResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(PoissonResultsWrapper, PoissonResults)


# NOTE(review): L1CountResultsWrapper has no populate_wrapper call in this
# chunk — presumably it is unused or wired elsewhere; confirm before removal.
class L1CountResultsWrapper(lm.RegressionResultsWrapper):
    pass


class L1PoissonResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults)


class L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1NegativeBinomialResultsWrapper,
                      L1NegativeBinomialResults)


class L1GeneralizedPoissonResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1GeneralizedPoissonResultsWrapper,
                      L1GeneralizedPoissonResults)


class BinaryResultsWrapper(lm.RegressionResultsWrapper):
    # residual attributes get row (observation) labels attached
    _attrs = {"resid_dev": "rows",
              "resid_generalized": "rows",
              "resid_pearson": "rows",
              "resid_response": "rows"
              }
    _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
                                   _attrs)
wrap.populate_wrapper(BinaryResultsWrapper, BinaryResults)


class L1BinaryResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults)


class MultinomialResultsWrapper(lm.RegressionResultsWrapper):
    _attrs = {"resid_misclassified": "rows"}
    _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
                                   _attrs)
    # conf_int returns a 3-d array (equation, param, bound)
    _methods = {'conf_int': 'multivariate_confint'}
    _wrap_methods = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_methods,
                                     _methods)
wrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults)


class L1MultinomialResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults)
| bsd-3-clause | 3fe921ab2960276cd2b862ad44204351 | 34.259622 | 401 | 0.541094 | 3.948222 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/ardl/pss_critical_values.py | 3 | 22444 | #!/usr/bin/env python
# coding: utf-8
"""
Critical value polynomials and related quantities for the bounds test of
Pesaran, M. H., Shin, Y., & Smith, R. J. (2001). Bounds testing approaches
to the analysis of level relationships. Journal of applied econometrics,
16(3), 289-326.
These were computed using 32,000,000 simulations for each key using the
methodology of PSS, who only used 40,000. The asymptotic P-value response
functions were computed based on the simulated value. Critical values
are the point estimates for the respective quantiles. The simulation code
is contained in pss.py. The output files from this function are then
transformed using pss-process.py.
The format of the keys are (k, case, I1) where
* k is is the number of x variables included in the model (0 is an ADF)
* case is 1, 2, 3, 4 or 5 and corresponds to the PSS paper
* I1 is True if X contains I1 variables and False if X is stationary
The parameters are for polynomials of order 3 (large) or 2 (small).
stat_star is the value where the switch between large and small occurs.
Stat values less then stat_star use large_p, while values above use
small_p. In all cases the stat is logged prior to computing the p-value
so that the p-value is
1 - Phi(c[0] + c[1] * x + c[2] * x**2 + c[3] * x**3)
where x = np.log(stat) and Phi() is the normal cdf.
When this the models, the polynomial is evaluated at the natural log of the
test statistic and then the normal CDF of this value is computed to produce
the p-value.
"""
__all__ = ["large_p", "small_p", "crit_vals", "crit_percentiles", "stat_star"]
large_p = {
(1, 1, False): [0.2231, 0.91426, 0.10102, 0.00569],
(1, 1, True): [-0.21766, 0.85933, 0.10411, 0.00661],
(1, 2, False): [-0.60796, 1.48713, 0.15076, 0.04453],
(1, 2, True): [-0.96204, 1.52593, 0.15996, 0.04166],
(1, 3, False): [-0.62883, 0.78991, 0.1, 0.00693],
(1, 3, True): [-0.91895, 0.82086, 0.12921, 0.01076],
(1, 4, False): [-1.50546, 1.79052, 0.05488, 0.06801],
(1, 4, True): [-1.79654, 1.8048, 0.06573, 0.06768],
(1, 5, False): [-1.36367, 0.94126, 0.21556, 0.02473],
(1, 5, True): [-1.60554, 0.93305, 0.2422, 0.03241],
(2, 1, False): [0.20576, 1.18914, 0.15731, 0.01144],
(2, 1, True): [-0.49024, 1.16958, 0.20564, 0.02008],
(2, 2, False): [-0.51799, 1.6368, 0.18955, 0.04317],
(2, 2, True): [-1.13394, 1.71056, 0.20442, 0.04195],
(2, 3, False): [-0.51712, 1.12963, 0.18936, 0.01808],
(2, 3, True): [-1.07441, 1.14964, 0.26066, 0.03338],
(2, 4, False): [-1.29895, 1.88501, 0.11734, 0.06615],
(2, 4, True): [-1.82455, 1.92207, 0.13753, 0.06269],
(2, 5, False): [-1.22263, 1.23208, 0.31401, 0.04495],
(2, 5, True): [-1.67689, 1.17567, 0.33606, 0.05898],
(3, 1, False): [0.1826, 1.39275, 0.19774, 0.01647],
(3, 1, True): [-0.71889, 1.39726, 0.29712, 0.03794],
(3, 2, False): [-0.45864, 1.77632, 0.22125, 0.04372],
(3, 2, True): [-1.28619, 1.88107, 0.23969, 0.04414],
(3, 3, False): [-0.45093, 1.38824, 0.26556, 0.03063],
(3, 3, True): [-1.22712, 1.36564, 0.34942, 0.05555],
(3, 4, False): [-1.15886, 1.99182, 0.16358, 0.06392],
(3, 4, True): [-1.88388, 2.05362, 0.18349, 0.06501],
(3, 5, False): [-1.11221, 1.44327, 0.3547, 0.05263],
(3, 5, True): [-1.75354, 1.37461, 0.3882, 0.07239],
(4, 1, False): [0.16431, 1.56391, 0.22944, 0.02067],
(4, 1, True): [-0.90799, 1.56908, 0.34763, 0.04814],
(4, 2, False): [-0.41568, 1.90715, 0.24783, 0.04407],
(4, 2, True): [-1.42373, 2.03902, 0.26907, 0.04755],
(4, 3, False): [-0.41104, 1.5716, 0.3066, 0.03842],
(4, 3, True): [-1.36194, 1.54043, 0.40145, 0.06846],
(4, 4, False): [-1.05651, 2.10007, 0.20201, 0.06129],
(4, 4, True): [-1.95474, 2.18305, 0.22527, 0.06441],
(4, 5, False): [-1.02502, 1.62605, 0.38203, 0.05565],
(4, 5, True): [-1.83458, 1.555, 0.42888, 0.07459],
(5, 1, False): [0.15015, 1.71718, 0.2584, 0.02507],
(5, 1, True): [-1.0707, 1.72829, 0.39037, 0.05468],
(5, 2, False): [-0.38277, 2.02985, 0.27139, 0.04513],
(5, 2, True): [-1.54974, 2.18631, 0.29592, 0.04967],
(5, 3, False): [-0.38023, 1.72586, 0.33033, 0.04188],
(5, 3, True): [-1.48415, 1.70271, 0.44016, 0.07248],
(5, 4, False): [-0.97676, 2.20429, 0.23233, 0.06543],
(5, 4, True): [-2.03144, 2.31343, 0.25394, 0.0675],
(5, 5, False): [-0.95421, 1.78775, 0.40239, 0.05642],
(5, 5, True): [-1.91679, 1.72031, 0.46434, 0.06641],
(6, 1, False): [0.13913, 1.8581, 0.28528, 0.02931],
(6, 1, True): [-1.21438, 1.87638, 0.42416, 0.05485],
(6, 2, False): [-0.35664, 2.14606, 0.29484, 0.04728],
(6, 2, True): [-1.66532, 2.32448, 0.31723, 0.05528],
(6, 3, False): [-0.35498, 1.86634, 0.35087, 0.04455],
(6, 3, True): [-1.59785, 1.85278, 0.47304, 0.07114],
(6, 4, False): [-0.91274, 2.30752, 0.26053, 0.0644],
(6, 4, True): [-2.10956, 2.43721, 0.2852, 0.06694],
(6, 5, False): [-0.89553, 1.9318, 0.41381, 0.05292],
(6, 5, True): [-1.99931, 1.87789, 0.49842, 0.04135],
(7, 1, False): [0.12974, 1.98503, 0.30606, 0.03218],
(7, 1, True): [-1.34555, 2.01647, 0.45456, 0.05018],
(7, 2, False): [-0.33519, 2.25631, 0.31659, 0.05016],
(7, 2, True): [-1.77496, 2.45806, 0.3372, 0.05741],
(7, 3, False): [-0.33377, 1.99554, 0.36742, 0.04624],
(7, 3, True): [-1.70381, 1.99863, 0.49883, 0.05092],
(7, 4, False): [-0.8596, 2.40762, 0.28334, 0.06401],
(7, 4, True): [-2.18704, 2.55828, 0.30627, 0.07091],
(7, 5, False): [-0.84606, 2.06291, 0.42505, 0.05152],
(7, 5, True): [-2.08097, 2.02139, 0.5348, 0.02343],
(8, 1, False): [0.12244, 2.10698, 0.32849, 0.03596],
(8, 1, True): [-1.46632, 2.1505, 0.48168, 0.04116],
(8, 2, False): [-0.31707, 2.36107, 0.33198, 0.04953],
(8, 2, True): [-1.87722, 2.58105, 0.35963, 0.05848],
(8, 3, False): [-0.31629, 2.11679, 0.38514, 0.04868],
(8, 3, True): [-1.80483, 2.13412, 0.52935, 0.03618],
(8, 4, False): [-0.81509, 2.50518, 0.30456, 0.06388],
(8, 4, True): [-2.26501, 2.67227, 0.33843, 0.06554],
(8, 5, False): [-0.80333, 2.18457, 0.42995, 0.0463],
(8, 5, True): [-2.16125, 2.15208, 0.58319, 0.0],
(9, 1, False): [0.11562, 2.22037, 0.34907, 0.03968],
(9, 1, True): [-1.57878, 2.27626, 0.5124, 0.03164],
(9, 2, False): [-0.30188, 2.46235, 0.35132, 0.05209],
(9, 2, True): [-1.97465, 2.70256, 0.37466, 0.06205],
(9, 3, False): [-0.30097, 2.23118, 0.39976, 0.05001],
(9, 3, True): [-1.90164, 2.26261, 0.56431, 0.0175],
(9, 4, False): [-0.77664, 2.59712, 0.32618, 0.06452],
(9, 4, True): [-2.33996, 2.78253, 0.36072, 0.06644],
(9, 5, False): [-0.76631, 2.2987, 0.43834, 0.04274],
(9, 5, True): [-2.23753, 2.27521, 0.60763, 0.0],
(10, 1, False): [0.10995, 2.3278, 0.36567, 0.04153],
(10, 1, True): [-1.6849, 2.39419, 0.5433, 0.02457],
(10, 2, False): [-0.28847, 2.55819, 0.36959, 0.05499],
(10, 2, True): [-2.06725, 2.81756, 0.38761, 0.0676],
(10, 3, False): [-0.28748, 2.33948, 0.41398, 0.05101],
(10, 3, True): [-1.99259, 2.38061, 0.59433, 0.01114],
(10, 4, False): [-0.74317, 2.68624, 0.345, 0.07032],
(10, 4, True): [-2.41409, 2.8931, 0.37487, 0.07102],
(10, 5, False): [-0.73464, 2.40692, 0.45153, 0.0434],
(10, 5, True): [-2.31364, 2.39092, 0.64313, -0.01012],
}
small_p = {
(1, 1, False): [0.2585, 0.92944, 0.25921],
(1, 1, True): [-0.17399, 0.88425, 0.29947],
(1, 2, False): [-0.45787, 1.15813, 0.37268],
(1, 2, True): [-0.76388, 1.13438, 0.39908],
(1, 3, False): [-0.57887, 0.87657, 0.32929],
(1, 3, True): [-0.88284, 0.81513, 0.366],
(1, 4, False): [-1.1926, 1.21061, 0.40386],
(1, 4, True): [-1.42909, 1.16607, 0.42899],
(1, 5, False): [-1.34428, 0.8756, 0.37809],
(1, 5, True): [-1.56285, 0.80464, 0.40703],
(2, 1, False): [0.23004, 1.12045, 0.31791],
(2, 1, True): [-0.45371, 1.06577, 0.38144],
(2, 2, False): [-0.41191, 1.36838, 0.39668],
(2, 2, True): [-0.9488, 1.32707, 0.44808],
(2, 3, False): [-0.49166, 1.11266, 0.36824],
(2, 3, True): [-1.03636, 1.04019, 0.42589],
(2, 4, False): [-1.08188, 1.42797, 0.42653],
(2, 4, True): [-1.52152, 1.36, 0.47256],
(2, 5, False): [-1.12408, 1.0565, 0.43505],
(2, 5, True): [-1.58614, 1.01208, 0.46796],
(3, 1, False): [0.20945, 1.29304, 0.36292],
(3, 1, True): [-0.60112, 1.139, 0.47837],
(3, 2, False): [-0.37491, 1.53959, 0.42397],
(3, 2, True): [-1.11163, 1.50639, 0.48662],
(3, 3, False): [-0.41411, 1.27093, 0.41524],
(3, 3, True): [-1.14285, 1.18673, 0.4906],
(3, 4, False): [-0.9946, 1.60793, 0.44771],
(3, 4, True): [-1.62609, 1.54566, 0.50619],
(3, 5, False): [-1.04988, 1.31372, 0.44802],
(3, 5, True): [-1.68976, 1.25316, 0.49896],
(4, 1, False): [0.18839, 1.46484, 0.39125],
(4, 1, True): [-0.81822, 1.35949, 0.50619],
(4, 2, False): [-0.35123, 1.705, 0.44075],
(4, 2, True): [-1.2591, 1.67286, 0.52021],
(4, 3, False): [-0.34716, 1.39436, 0.46391],
(4, 3, True): [-1.30728, 1.41428, 0.51292],
(4, 4, False): [-0.92783, 1.77056, 0.46587],
(4, 4, True): [-1.71493, 1.69609, 0.54221],
(4, 5, False): [-0.97468, 1.50704, 0.46661],
(4, 5, True): [-1.7783, 1.4453, 0.53112],
(5, 1, False): [0.17584, 1.60806, 0.424],
(5, 1, True): [-1.00705, 1.5668, 0.52487],
(5, 2, False): [-0.32186, 1.82909, 0.47183],
(5, 2, True): [-1.39492, 1.83145, 0.54756],
(5, 3, False): [-0.32204, 1.55407, 0.4884],
(5, 3, True): [-1.43499, 1.58772, 0.54359],
(5, 4, False): [-0.87005, 1.9128, 0.48361],
(5, 4, True): [-1.81929, 1.8594, 0.56629],
(5, 5, False): [-0.91534, 1.6826, 0.47972],
(5, 5, True): [-1.86297, 1.61238, 0.56196],
(6, 1, False): [0.16642, 1.7409, 0.45235],
(6, 1, True): [-1.15641, 1.72534, 0.55469],
(6, 2, False): [-0.31023, 1.97806, 0.47892],
(6, 2, True): [-1.52248, 1.98657, 0.56855],
(6, 3, False): [-0.30333, 1.70462, 0.50703],
(6, 3, True): [-1.5521, 1.74539, 0.57191],
(6, 4, False): [-0.82345, 2.04624, 0.50026],
(6, 4, True): [-1.90659, 1.99476, 0.59394],
(6, 5, False): [-0.85675, 1.81838, 0.50387],
(6, 5, True): [-1.92708, 1.73629, 0.60069],
(7, 1, False): [0.15013, 1.88779, 0.46397],
(7, 1, True): [-1.28169, 1.85521, 0.58877],
(7, 2, False): [-0.2904, 2.09042, 0.50233],
(7, 2, True): [-1.62626, 2.10378, 0.6013],
(7, 3, False): [-0.29138, 1.8506, 0.52083],
(7, 3, True): [-1.64831, 1.87115, 0.60523],
(7, 4, False): [-0.78647, 2.1757, 0.51247],
(7, 4, True): [-1.98344, 2.10977, 0.62411],
(7, 5, False): [-0.81099, 1.95374, 0.51949],
(7, 5, True): [-1.99875, 1.86512, 0.63051],
(8, 1, False): [0.14342, 2.00691, 0.48514],
(8, 1, True): [-1.3933, 1.97361, 0.62074],
(8, 2, False): [-0.27952, 2.20983, 0.51721],
(8, 2, True): [-1.74485, 2.25435, 0.61354],
(8, 3, False): [-0.28049, 1.98611, 0.53286],
(8, 3, True): [-1.74116, 1.99245, 0.63511],
(8, 4, False): [-0.74797, 2.28202, 0.53356],
(8, 4, True): [-2.07764, 2.25027, 0.64023],
(8, 5, False): [-0.76505, 2.06317, 0.54393],
(8, 5, True): [-2.04872, 1.95334, 0.67177],
(9, 1, False): [0.13505, 2.12341, 0.50439],
(9, 1, True): [-1.49339, 2.07805, 0.65464],
(9, 2, False): [-0.26881, 2.32256, 0.53025],
(9, 2, True): [-1.82677, 2.34223, 0.65004],
(9, 3, False): [-0.26657, 2.09906, 0.55384],
(9, 3, True): [-1.80085, 2.06043, 0.68234],
(9, 4, False): [-0.71672, 2.38896, 0.54931],
(9, 4, True): [-2.17306, 2.39146, 0.65252],
(9, 5, False): [-0.70907, 2.13027, 0.58668],
(9, 5, True): [-2.14411, 2.10595, 0.68478],
(10, 1, False): [0.12664, 2.23871, 0.51771],
(10, 1, True): [-1.59784, 2.19509, 0.67874],
(10, 2, False): [-0.25969, 2.4312, 0.54096],
(10, 2, True): [-1.93843, 2.48708, 0.65741],
(10, 3, False): [-0.25694, 2.21617, 0.56619],
(10, 3, True): [-1.89772, 2.1894, 0.70143],
(10, 4, False): [-0.69126, 2.49776, 0.5583],
(10, 4, True): [-2.24685, 2.4968, 0.67598],
(10, 5, False): [-0.6971, 2.28206, 0.57816],
(10, 5, True): [-2.21015, 2.208, 0.71379],
}
stat_star = {
(1, 1, False): 0.855423425047013,
(1, 1, True): 0.9074438436193457,
(1, 2, False): 2.3148213273461034,
(1, 2, True): 2.727010046970744,
(1, 3, False): 0.846390593107207,
(1, 3, True): 1.157556027201022,
(1, 4, False): 3.220377136548005,
(1, 4, True): 3.6108265020012418,
(1, 5, False): 1.7114703606421378,
(1, 5, True): 2.066325210881278,
(2, 1, False): 1.1268996107665314,
(2, 1, True): 1.3332514927355072,
(2, 2, False): 2.0512213167246456,
(2, 2, True): 2.656191837644102,
(2, 3, False): 1.058908331354388,
(2, 3, True): 1.5313322825819844,
(2, 4, False): 2.7213091542989725,
(2, 4, True): 3.2984645209852856,
(2, 5, False): 2.6006009671146497,
(2, 5, True): 2.661856653261213,
(3, 1, False): 1.263159095916295,
(3, 1, True): 2.4151349732452863,
(3, 2, False): 1.8886043232371843,
(3, 2, True): 2.6028096820968405,
(3, 3, False): 1.4879903191884682,
(3, 3, True): 2.2926969339773926,
(3, 4, False): 2.418527659154858,
(3, 4, True): 3.1039322592065988,
(3, 5, False): 1.9523612040944802,
(3, 5, True): 2.2115727453490757,
(4, 1, False): 1.290890114741129,
(4, 1, True): 2.1296963408410905,
(4, 2, False): 1.7770902061605607,
(4, 2, True): 2.5611885327765402,
(4, 3, False): 1.9340163095801728,
(4, 3, True): 1.9141318638062572,
(4, 4, False): 2.2146739201335466,
(4, 4, True): 2.9701790485477932,
(4, 5, False): 1.7408452994169448,
(4, 5, True): 2.1047247176583914,
(5, 1, False): 1.336967174239227,
(5, 1, True): 1.9131415178585627,
(5, 2, False): 1.6953274259688569,
(5, 2, True): 2.52745981091846,
(5, 3, False): 1.8124340908468068,
(5, 3, True): 1.8520883187848405,
(5, 4, False): 2.0675009559739297,
(5, 4, True): 2.8728076833515552,
(5, 5, False): 1.5978968362839456,
(5, 5, True): 2.1017517002543418,
(6, 1, False): 1.3810422398306446,
(6, 1, True): 1.8993612909227247,
(6, 2, False): 1.6324374150719114,
(6, 2, True): 2.498801004400209,
(6, 3, False): 1.72340094901749,
(6, 3, True): 1.8586513178563737,
(6, 4, False): 1.955819927102859,
(6, 4, True): 2.797145060481245,
(6, 5, False): 1.578613967104358,
(6, 5, True): 2.356249534336445,
(7, 1, False): 1.319436681229134,
(7, 1, True): 1.9955849619883248,
(7, 2, False): 1.5822190052675569,
(7, 2, True): 2.4744987764453055,
(7, 3, False): 1.65578510076754,
(7, 3, True): 2.046536484369615,
(7, 4, False): 1.8684573094851133,
(7, 4, True): 2.737241392502754,
(7, 5, False): 1.571855677342554,
(7, 5, True): 2.6006325210258505,
(8, 1, False): 1.3413558170956845,
(8, 1, True): 2.182981174661154,
(8, 2, False): 1.5416965902808288,
(8, 2, True): 2.4538471213095594,
(8, 3, False): 1.6021238307647196,
(8, 3, True): 2.2031866832480778,
(8, 4, False): 1.797595752125897,
(8, 4, True): 2.688099837236925,
(8, 5, False): 1.6561231184668357,
(8, 5, True): 2.883361281576836,
(9, 1, False): 1.3260368480749927,
(9, 1, True): 2.359689612641543,
(9, 2, False): 1.5074890058192492,
(9, 2, True): 2.435592395931648,
(9, 3, False): 1.5584090417965821,
(9, 3, True): 2.586293446202391,
(9, 4, False): 1.7393454428092985,
(9, 4, True): 2.6470908946956655,
(9, 5, False): 1.8180517504983742,
(9, 5, True): 2.818161371392247,
(10, 1, False): 1.3126519241806318,
(10, 1, True): 2.3499432601613885,
(10, 2, False): 1.4785447632683744,
(10, 2, True): 2.4199239298786215,
(10, 3, False): 1.5219767684407846,
(10, 3, True): 2.55484741648857,
(10, 4, False): 1.6902675233415512,
(10, 4, True): 2.6119272436084637,
(10, 5, False): 1.7372865030759366,
(10, 5, True): 2.7644864472524904,
}
crit_percentiles = (90, 95, 99, 99.9)
crit_vals = {
(1, 1, False): [2.4170317, 3.119659, 4.7510799, 7.0838335],
(1, 1, True): [3.2538509, 4.0643748, 5.8825257, 8.4189144],
(1, 2, False): [3.0235968, 3.6115364, 4.9094056, 6.6859696],
(1, 2, True): [3.4943406, 4.1231394, 5.4961076, 7.3531815],
(1, 3, False): [4.044319, 4.9228967, 6.8609106, 9.5203666],
(1, 3, True): [4.7771822, 5.7217442, 7.7821227, 10.557471],
(1, 4, False): [4.0317707, 4.6921341, 6.1259225, 8.0467248],
(1, 4, True): [4.4725009, 5.169214, 6.668854, 8.6632132],
(1, 5, False): [5.5958071, 6.586727, 8.7355157, 11.6171903],
(1, 5, True): [6.2656898, 7.3133165, 9.5652229, 12.5537707],
(2, 1, False): [2.1562308, 2.6846692, 3.8773621, 5.5425892],
(2, 1, True): [3.1684785, 3.8003954, 5.177742, 7.0453814],
(2, 2, False): [2.6273503, 3.0998243, 4.1327001, 5.528847],
(2, 2, True): [3.3084134, 3.8345125, 4.9642009, 6.4657839],
(2, 3, False): [3.1741284, 3.8022629, 5.1722882, 7.0241224],
(2, 3, True): [4.108262, 4.8116858, 6.3220548, 8.322478],
(2, 4, False): [3.3668869, 3.8887628, 5.0115801, 6.5052326],
(2, 4, True): [4.0126604, 4.5835675, 5.7968684, 7.3887863],
(2, 5, False): [4.1863149, 4.8834936, 6.3813095, 8.3781415],
(2, 5, True): [5.053508, 5.8168869, 7.4384998, 9.565425],
(3, 1, False): [1.998571, 2.4316514, 3.3919322, 4.709226],
(3, 1, True): [3.0729965, 3.6016775, 4.7371358, 6.2398661],
(3, 2, False): [2.3813866, 2.7820412, 3.6486786, 4.8089784],
(3, 2, True): [3.1778198, 3.6364094, 4.6114583, 5.8888408],
(3, 3, False): [2.7295224, 3.2290217, 4.3110408, 5.7599206],
(3, 3, True): [3.7471556, 4.3222818, 5.5425521, 7.1435458],
(3, 4, False): [2.9636218, 3.4007434, 4.3358236, 5.5729155],
(3, 4, True): [3.7234883, 4.2135706, 5.247283, 6.5911207],
(3, 5, False): [3.4742551, 4.0219835, 5.1911046, 6.7348191],
(3, 5, True): [4.4323554, 5.0480574, 6.3448127, 8.0277313],
(4, 1, False): [1.8897829, 2.2616928, 3.0771215, 4.1837434],
(4, 1, True): [2.9925753, 3.4545032, 4.4326745, 5.7123835],
(4, 2, False): [2.2123295, 2.5633388, 3.3177874, 4.321218],
(4, 2, True): [3.0796353, 3.4898084, 4.3536497, 5.4747288],
(4, 3, False): [2.4565534, 2.877209, 3.7798528, 4.9852682],
(4, 3, True): [3.516144, 4.0104999, 5.0504684, 6.4022435],
(4, 4, False): [2.6902225, 3.0699099, 3.877333, 4.9405835],
(4, 4, True): [3.5231152, 3.9578931, 4.867071, 6.0403311],
(4, 5, False): [3.0443998, 3.5009718, 4.4707539, 5.7457746],
(4, 5, True): [4.0501255, 4.5739556, 5.6686684, 7.0814031],
(5, 1, False): [1.8104326, 2.1394999, 2.8541086, 3.8114409],
(5, 1, True): [2.9267613, 3.3396521, 4.2078599, 5.3342038],
(5, 2, False): [2.0879588, 2.40264, 3.0748083, 3.9596152],
(5, 2, True): [3.002768, 3.3764374, 4.1585099, 5.1657752],
(5, 3, False): [2.2702787, 2.6369717, 3.4203738, 4.4521021],
(5, 3, True): [3.3535243, 3.7914038, 4.7060983, 5.8841151],
(5, 4, False): [2.4928973, 2.831033, 3.5478855, 4.4836677],
(5, 4, True): [3.3756681, 3.7687148, 4.587147, 5.6351487],
(5, 5, False): [2.7536425, 3.149282, 3.985975, 5.0799181],
(5, 5, True): [3.7890425, 4.2501858, 5.2074857, 6.4355821],
(6, 1, False): [1.7483313, 2.0453753, 2.685931, 3.5375009],
(6, 1, True): [2.8719403, 3.2474515, 4.0322637, 5.0451946],
(6, 2, False): [1.9922451, 2.2792144, 2.8891314, 3.690865],
(6, 2, True): [2.9399824, 3.2851357, 4.0031551, 4.9247226],
(6, 3, False): [2.1343676, 2.4620175, 3.1585901, 4.0720179],
(6, 3, True): [3.2311014, 3.6271964, 4.4502999, 5.5018575],
(6, 4, False): [2.3423792, 2.6488947, 3.2947623, 4.1354724],
(6, 4, True): [3.2610813, 3.6218989, 4.3702232, 5.3232767],
(6, 5, False): [2.5446232, 2.8951601, 3.633989, 4.5935586],
(6, 5, True): [3.5984454, 4.0134462, 4.8709448, 5.9622726],
(7, 1, False): [1.6985327, 1.9707636, 2.5536649, 3.3259272],
(7, 1, True): [2.825928, 3.1725169, 3.8932738, 4.8134085],
(7, 2, False): [1.9155946, 2.1802812, 2.7408759, 3.4710326],
(7, 2, True): [2.8879427, 3.2093335, 3.8753322, 4.724748],
(7, 3, False): [2.0305429, 2.3281704, 2.9569345, 3.7788337],
(7, 3, True): [3.136325, 3.4999128, 4.2519893, 5.2075305],
(7, 4, False): [2.2246175, 2.5055486, 3.0962182, 3.86164],
(7, 4, True): [3.1695552, 3.5051856, 4.1974421, 5.073436],
(7, 5, False): [2.3861201, 2.7031072, 3.3680435, 4.2305443],
(7, 5, True): [3.4533491, 3.8323234, 4.613939, 5.6044399],
(8, 1, False): [1.6569223, 1.9092423, 2.4470718, 3.1537838],
(8, 1, True): [2.7862884, 3.1097259, 3.7785302, 4.6293176],
(8, 2, False): [1.8532862, 2.0996872, 2.6186041, 3.2930359],
(8, 2, True): [2.8435812, 3.1459955, 3.769165, 4.5623681],
(8, 3, False): [1.9480198, 2.2215083, 2.7979659, 3.54771],
(8, 3, True): [3.0595184, 3.3969531, 4.0923089, 4.9739178],
(8, 4, False): [2.1289147, 2.3893773, 2.9340882, 3.6390988],
(8, 4, True): [3.094188, 3.4085297, 4.0545165, 4.8699787],
(8, 5, False): [2.2616596, 2.5515168, 3.1586476, 3.9422645],
(8, 5, True): [3.3374076, 3.6880139, 4.407457, 5.3152095],
(9, 1, False): [1.6224492, 1.8578787, 2.3580077, 3.0112501],
(9, 1, True): [2.7520721, 3.0557346, 3.6811682, 4.4739536],
(9, 2, False): [1.8008993, 2.0320841, 2.5170871, 3.1451424],
(9, 2, True): [2.8053707, 3.091422, 3.6784683, 4.4205306],
(9, 3, False): [1.8811231, 2.1353897, 2.6683796, 3.358463],
(9, 3, True): [2.9957112, 3.3114482, 3.9596061, 4.7754473],
(9, 4, False): [2.0498497, 2.2930641, 2.8018384, 3.4543646],
(9, 4, True): [3.0308611, 3.3269185, 3.9347618, 4.6993614],
(9, 5, False): [2.1610306, 2.4296727, 2.98963, 3.7067719],
(9, 5, True): [3.2429533, 3.5699095, 4.2401975, 5.0823119],
(10, 1, False): [1.5927907, 1.8145253, 2.2828013, 2.8927966],
(10, 1, True): [2.7222721, 3.009471, 3.5990544, 4.3432975],
(10, 2, False): [1.756145, 1.9744492, 2.4313123, 3.0218681],
(10, 2, True): [2.7724339, 3.0440412, 3.6004793, 4.3015151],
(10, 3, False): [1.8248841, 2.0628201, 2.5606728, 3.2029316],
(10, 3, True): [2.9416094, 3.239357, 3.8484916, 4.6144906],
(10, 4, False): [1.9833587, 2.2124939, 2.690228, 3.3020807],
(10, 4, True): [2.9767752, 3.2574924, 3.8317161, 4.5512138],
(10, 5, False): [2.0779589, 2.3285481, 2.8499681, 3.5195753],
(10, 5, True): [3.1649384, 3.4725945, 4.1003673, 4.8879723],
}
| bsd-3-clause | bb4764ff697ab2f46e9113d1fc021e69 | 48.545254 | 78 | 0.569952 | 2.066667 | false | false | false | false |
statsmodels/statsmodels | statsmodels/distributions/bernstein.py | 3 | 7397 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 15:35:23 2021
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from statsmodels.tools.decorators import cache_readonly
from statsmodels.distributions.tools import (
_Grid, cdf2prob_grid, prob2cdf_grid,
_eval_bernstein_dd, _eval_bernstein_2d, _eval_bernstein_1d)
class BernsteinDistribution:
"""Distribution based on Bernstein Polynomials on unit hypercube.
Parameters
----------
cdf_grid : array_like
cdf values on a equal spaced grid of the unit hypercube [0, 1]^d.
The dimension of the arrays define how many random variables are
included in the multivariate distribution.
Attributes
----------
cdf_grid : grid of cdf values
prob_grid : grid of cell or bin probabilities
k_dim : (int) number of components, dimension of random variable
k_grid : (tuple) shape of cdf_grid
k_grid_product : (int) total number of bins in grid
_grid : Grid instance with helper methods and attributes
"""
def __init__(self, cdf_grid):
self.cdf_grid = cdf_grid = np.asarray(cdf_grid)
self.k_dim = cdf_grid.ndim
self.k_grid = cdf_grid.shape
self.k_grid_product = np.product([i-1 for i in self.k_grid])
self._grid = _Grid(self.k_grid)
@classmethod
def from_data(cls, data, k_bins):
"""Create distribution instance from data using histogram binning.
Classmethod to construct a distribution instance.
Parameters
----------
data : array_like
Data with observation in rows and random variables in columns.
Data can be 1-dimensional in the univariate case.
k_bins : int or list
Number or edges of bins to be used in numpy histogramdd.
If k_bins is a scalar int, then the number of bins of each
component will be equal to it.
Returns
-------
Instance of a Bernstein distribution
"""
data = np.asarray(data)
if np.any(data < 0) or np.any(data > 1):
raise ValueError("data needs to be in [0, 1]")
if data.ndim == 1:
data = data[:, None]
k_dim = data.shape[1]
if np.size(k_bins) == 1:
k_bins = [k_bins] * k_dim
bins = [np.linspace(-1 / ni, 1, ni + 2) for ni in k_bins]
c, e = np.histogramdd(data, bins=bins, density=False)
# TODO: check when we have zero observations, which bin?
# check bins start at 0 exept leading bin
assert all([ei[1] == 0 for ei in e])
c /= len(data)
cdf_grid = prob2cdf_grid(c)
return cls(cdf_grid)
@cache_readonly
def prob_grid(self):
return cdf2prob_grid(self.cdf_grid, prepend=None)
def cdf(self, x):
"""cdf values evaluated at x.
Parameters
----------
x : array_like
Points of multivariate random variable at which cdf is evaluated.
This can be a single point with length equal to the dimension of
the random variable, or two dimensional with points (observations)
in rows and random variables in columns.
In the univariate case, a 1-dimensional x will be interpreted as
different points for evaluation.
Returns
-------
pdf values
Notes
-----
Warning: 2-dim x with many points can be memory intensive because
currently the bernstein polynomials will be evaluated in a fully
vectorized computation.
"""
x = np.asarray(x)
if x.ndim == 1 and self.k_dim == 1:
x = x[:, None]
cdf_ = _eval_bernstein_dd(x, self.cdf_grid)
return cdf_
def pdf(self, x):
"""pdf values evaluated at x.
Parameters
----------
x : array_like
Points of multivariate random variable at which pdf is evaluated.
This can be a single point with length equal to the dimension of
the random variable, or two dimensional with points (observations)
in rows and random variables in columns.
In the univariate case, a 1-dimensional x will be interpreted as
different points for evaluation.
Returns
-------
cdf values
Notes
-----
Warning: 2-dim x with many points can be memory intensive because
currently the bernstein polynomials will be evaluated in a fully
vectorized computation.
"""
x = np.asarray(x)
if x.ndim == 1 and self.k_dim == 1:
x = x[:, None]
# TODO: check usage of k_grid_product. Should this go into eval?
pdf_ = self.k_grid_product * _eval_bernstein_dd(x, self.prob_grid)
return pdf_
def get_marginal(self, idx):
"""Get marginal BernsteinDistribution.
Parameters
----------
idx : int or list of int
Index or indices of the component for which the marginal
distribution is returned.
Returns
-------
BernsteinDistribution instance for the marginal distribution.
"""
# univariate
if self.k_dim == 1:
return self
sl = [-1] * self.k_dim
if np.shape(idx) == ():
idx = [idx]
for ii in idx:
sl[ii] = slice(None, None, None)
cdf_m = self.cdf_grid[tuple(sl)]
bpd_marginal = BernsteinDistribution(cdf_m)
return bpd_marginal
def rvs(self, nobs):
"""Generate random numbers from distribution.
Parameters
----------
nobs : int
Number of random observations to generate.
"""
rvs_mnl = np.random.multinomial(nobs, self.prob_grid.flatten())
k_comp = self.k_dim
rvs_m = []
for i in range(len(rvs_mnl)):
if rvs_mnl[i] != 0:
idx = np.unravel_index(i, self.prob_grid.shape)
rvsi = []
for j in range(k_comp):
n = self.k_grid[j]
xgi = self._grid.x_marginal[j][idx[j]]
# Note: x_marginal starts at 0
# x_marginal ends with 1 but that is not used by idx
rvsi.append(stats.beta.rvs(n * xgi + 1, n * (1-xgi) + 0,
size=rvs_mnl[i]))
rvs_m.append(np.column_stack(rvsi))
rvsm = np.concatenate(rvs_m)
return rvsm
class BernsteinDistributionBV(BernsteinDistribution):
def cdf(self, x):
cdf_ = _eval_bernstein_2d(x, self.cdf_grid)
return cdf_
def pdf(self, x):
# TODO: check usage of k_grid_product. Should this go into eval?
pdf_ = self.k_grid_product * _eval_bernstein_2d(x, self.prob_grid)
return pdf_
class BernsteinDistributionUV(BernsteinDistribution):
def cdf(self, x, method="binom"):
cdf_ = _eval_bernstein_1d(x, self.cdf_grid, method=method)
return cdf_
def pdf(self, x, method="binom"):
# TODO: check usage of k_grid_product. Should this go into eval?
pdf_ = self.k_grid_product * _eval_bernstein_1d(x, self.prob_grid,
method=method)
return pdf_
| bsd-3-clause | b602d611ff16ae4994e02a9da133bb92 | 31.585903 | 78 | 0.567122 | 3.96622 | false | false | false | false |
statsmodels/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_1.py | 6 | 2569 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.
I have not figured out storage, so the download happens at each run
of the script.
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import os
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
def getquotes(symbol, start, end):
# Taken from the no-longer-existent pandas.examples.finance
quotes = fin.quotes_historical_yahoo(symbol, start, end)
dates, open, close, high, low, volume = lzip(*quotes)
data = {
'open' : open,
'close' : close,
'high' : high,
'low' : low,
'volume' : volume
}
dates = pd.Index([dt.datetime.fromordinal(int(d)) for d in dates])
return pd.DataFrame(data, index=dates)
start_date = dt.datetime(2007, 1, 1)
end_date = dt.datetime(2009, 12, 31)
dj30 = ['MMM', 'AA', 'AXP', 'T', 'BAC', 'BA', 'CAT', 'CVX', 'CSCO',
'KO', 'DD', 'XOM', 'GE', 'HPQ', 'HD', 'INTC', 'IBM', 'JNJ',
'JPM', 'KFT', 'MCD', 'MRK', 'MSFT', 'PFE', 'PG', 'TRV',
'UTX', 'VZ', 'WMT', 'DIS']
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in dj30:
dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pd.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
if not os.path.exists('dj30rr'):
#if pandas is updated, then sometimes unpickling fails, and need to save again
paclose_ratereturn.save('dj30rr')
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
plt.title('volatility (with 5 day moving average')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
#plt.show()
| bsd-3-clause | bfe8d89aba42efc4661399f06816d792 | 26.623656 | 82 | 0.678474 | 3.036643 | false | false | false | false |
statsmodels/statsmodels | statsmodels/regression/tests/results/results_grunfeld_ols_robust_cluster.py | 6 | 23552 | import numpy as np
from statsmodels.tools.testing import ParamsTableTestBunch
# ----------------------------------------------------------------
# Reference output from Stata, collected into ParamsTableTestBunch
# instances for the robust/cluster covariance tests.  ``params_table``
# rows are (mvalue, kstock, _cons); the column meaning is recorded in
# ``params_table_colnames``.  The numbers below are copied verbatim from
# the Stata log (see ``cmdline`` in each ``est`` dict).

# Stata: regress invest mvalue kstock, vce(cluster company)
est = dict(
    N_clust=10,
    N=200,
    df_m=2,
    df_r=9,
    F=51.59060716590177,
    r2=.8124080178314147,
    rmse=94.40840193979599,
    mss=7604093.484267689,
    rss=1755850.432294737,
    r2_a=.8105035307027997,
    ll=-1191.80235741801,
    ll_0=-1359.150955647688,
    rank=3,
    cmdline="regress invest mvalue kstock, vce(cluster company)",
    title="Linear regression",
    marginsok="XB default",
    vce="cluster",
    depvar="invest",
    cmd="regress",
    properties="b V",
    predict="regres_p",
    model="ols",
    estat_cmd="regress_estat",
    vcetype="Robust",
    clustvar="company",
)

params_table = np.array([
    .11556215606596, .01589433647768, 7.2706499090564, .00004710548549,
    .07960666895505, .15151764317688, 9, 2.2621571627982,
    0, .23067848754982, .08496711097464, 2.7149150406994,
    .02380515903536, .03846952885627, .42288744624337, 9,
    2.2621571627982, 0, -42.714369016733, 20.425202580078,
    -2.0912580352272, .06604843284516, -88.919387334862, 3.4906493013959,
    9, 2.2621571627982, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00025262993207, -.00065043385106, .20961897960949, -.00065043385106,
    .00721940994738, -1.2171040967615, .20961897960949, -1.2171040967615,
    417.18890043724]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_cluster = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: ivreg2 invest mvalue kstock, cluster(company)
# (large-sample / z-based inference, hence no df_r)
est = dict(
    N=200,
    inexog_ct=2,
    exexog_ct=0,
    endog_ct=0,
    partial_ct=0,
    N_clust=10,
    df_m=2,
    sdofminus=0,
    dofminus=0,
    r2=.8124080178314146,
    rmse=93.69766358599176,
    rss=1755850.432294737,
    mss=7604093.484267682,
    r2_a=.8105035307027995,
    F=51.59060716590192,
    Fp=.0000117341240941,
    Fdf1=2,
    Fdf2=9,
    yy=13620706.07273678,
    yyc=9359943.916562419,
    partialcons=0,
    cons=1,
    jdf=0,
    j=0,
    ll=-1191.802357418011,
    rankV=3,
    rankS=3,
    rankxx=3,
    rankzz=3,
    r2c=.8124080178314146,
    r2u=.8710896173136538,
    clustvar="company",
    hacsubtitleV="Statistics robust to heteroskedasticity and clustering on company",  # noqa:E501
    hacsubtitleB="Estimates efficient for homoskedasticity only",
    title="OLS estimation",
    predict="ivreg2_p",
    version="03.1.07",
    cmdline="ivreg2 invest mvalue kstock, cluster(company)",
    cmd="ivreg2",
    model="ols",
    depvar="invest",
    vcetype="Robust",
    vce="robust cluster",
    partialsmall="small",
    inexog="mvalue kstock",
    insts="mvalue kstock",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .01500272788516, 7.7027429245215, 1.331761148e-14,
    .08615734974119, .14496696239074, np.nan, 1.9599639845401,
    0, .23067848754982, .08020079648691, 2.8762618035529,
    .00402415789383, .07348781490405, .38786916019559, np.nan,
    1.9599639845401, 0, -42.714369016733, 19.27943055305,
    -2.2155410088072, .02672295281194, -80.501358543152, -4.9273794903145,
    np.nan, 1.9599639845401, 0]).reshape(3, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .000225081844, -.00057950714469, .1867610305767, -.00057950714469,
    .00643216775713, -1.0843847053056, .1867610305767, -1.0843847053056,
    371.69644244987]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_cluster_large = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata (xtscc): pooled OLS with Driscoll-Kraay standard errors, lag(4)
est = dict(
    N=200,
    N_g=10,
    df_m=2,
    df_r=9,
    F=97.97910905239282,
    r2=.8124080178314147,
    rmse=94.40840193979599,
    lag=4,
    cmd="xtscc",
    predict="xtscc_p",
    method="Pooled OLS",
    depvar="invest",
    vcetype="Drisc/Kraay",
    title="Regression with Driscoll-Kraay standard errors",
    groupvar="company",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .0134360177573, 8.6009231420662, .00001235433261,
    .08516777225681, .14595653987512, 9, 2.2621571627982,
    0, .23067848754982, .04930800664089, 4.678317037431,
    .00115494570515, .11913602714384, .3422209479558, 9,
    2.2621571627982, 0, -42.714369016733, 12.190347184209,
    -3.5039501641153, .0066818746948, -70.290850216489, -15.137887816977,
    9, 2.2621571627982, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00018052657317, -.00035661054613, -.06728261073866, -.00035661054613,
    .0024312795189, -.32394785247278, -.06728261073866, -.32394785247278,
    148.60456447156]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_nw_groupsum4 = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: newey invest mvalue kstock, lag(4) force
est = dict(
    df_m=2,
    df_r=197,
    F=73.07593045506036,
    N=200,
    lag=4,
    rank=3,
    title="Regression with Newey-West standard errors",
    cmd="newey",
    cmdline="newey invest mvalue kstock, lag(4) force",
    estat_cmd="newey_estat",
    predict="newey_p",
    vcetype="Newey-West",
    depvar="invest",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .01142785251475, 10.112324771147, 1.251631065e-19,
    .0930255277205, .13809878441142, 197, 1.9720790337785,
    0, .23067848754982, .06842168281423, 3.3714237660029,
    .00089998163666, .09574552141602, .36561145368361, 197,
    1.9720790337785, 0, -42.714369016733, 16.179042041128,
    -2.6401049523298, .00895205094219, -74.620718612662, -10.808019420804,
    197, 1.9720790337785, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .0001305958131, -.00022910455176, .00889686530849, -.00022910455176,
    .00468152667913, -.88403667445531, .00889686530849, -.88403667445531,
    261.76140136858]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_nw_panel4 = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# Stata: ivreg2 invest mvalue kstock, cluster(company time) small
# (two-way clustering, small-sample / t-based inference)
est = dict(
    N=200,
    inexog_ct=2,
    exexog_ct=0,
    endog_ct=0,
    partial_ct=0,
    df_r=9,
    N_clust=10,
    N_clust1=10,
    N_clust2=20,
    df_m=2,
    sdofminus=0,
    dofminus=0,
    r2=.8124080178314146,
    rmse=94.40840193979601,
    rss=1755850.432294737,
    mss=7604093.484267682,
    r2_a=.8105035307027995,
    F=57.99124535923564,
    Fp=7.21555935862e-06,
    Fdf1=2,
    partialcons=0,
    cons=1,
    jdf=0,
    j=0,
    ll=-1191.802357418011,
    rankV=3,
    rankS=3,
    rankxx=3,
    rankzz=3,
    r2c=.8124080178314146,
    r2u=.8710896173136538,
    yyc=9359943.916562419,
    yy=13620706.07273678,
    Fdf2=9,
    clustvar="company time",
    hacsubtitleV="Statistics robust to heteroskedasticity and clustering on company and time",  # noqa:E501
    hacsubtitleB="Estimates efficient for homoskedasticity only",
    title="OLS estimation",
    predict="ivreg2_p",
    version="03.1.07",
    cmdline="ivreg2 invest mvalue kstock, cluster(company time) small",
    cmd="ivreg2",
    model="ols",
    depvar="invest",
    vcetype="Robust",
    clustvar2="time",
    clustvar1="company",
    vce="robust two-way cluster",
    partialsmall="small",
    small="small",
    inexog="mvalue kstock",
    insts="mvalue kstock",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .01635175387097, 7.0672636695645, .00005873628221,
    .07857191892244, .15255239320949, 9, 2.2621571627982,
    0, .23067848754982, .07847391274682, 2.9395563375824,
    .01649863150032, .05315816373679, .40819881136285, 9,
    2.2621571627982, 0, -42.714369016733, 19.505607409785,
    -2.189850750062, .05626393734425, -86.839118533508, 1.4103805000422,
    9, 2.2621571627982, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00026737985466, -.00070163493529, .19641438763743, -.00070163493529,
    .0061581549818, -.99627581152391, .19641438763743, -.99627581152391,
    380.46872042467]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_cluster_2groups_small = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: ivreg2 invest mvalue kstock, cluster(company time)
# (two-way clustering, large-sample / z-based inference)
est = dict(
    N=200,
    inexog_ct=2,
    exexog_ct=0,
    endog_ct=0,
    partial_ct=0,
    N_clust=10,
    N_clust1=10,
    N_clust2=20,
    df_m=2,
    sdofminus=0,
    dofminus=0,
    r2=.8124080178314146,
    rmse=93.69766358599176,
    rss=1755850.432294737,
    mss=7604093.484267682,
    r2_a=.8105035307027995,
    F=57.99124535923565,
    Fp=7.21555935862e-06,
    Fdf1=2,
    Fdf2=9,
    partialcons=0,
    cons=1,
    jdf=0,
    j=0,
    ll=-1191.802357418011,
    rankV=3,
    rankS=3,
    rankxx=3,
    rankzz=3,
    r2c=.8124080178314146,
    r2u=.8710896173136538,
    yyc=9359943.916562419,
    yy=13620706.07273678,
    clustvar="company time",
    hacsubtitleV="Statistics robust to heteroskedasticity and clustering on company and time",  # noqa:E501
    hacsubtitleB="Estimates efficient for homoskedasticity only",
    title="OLS estimation",
    predict="ivreg2_p",
    version="03.1.07",
    cmdline="ivreg2 invest mvalue kstock, cluster(company time)",
    cmd="ivreg2",
    model="ols",
    depvar="invest",
    vcetype="Robust",
    clustvar2="time",
    clustvar1="company",
    vce="robust two-way cluster",
    partialsmall="small",
    inexog="mvalue kstock",
    insts="mvalue kstock",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .01543448599542, 7.487269488613, 7.032121917e-14,
    .08531111939505, .14581319273688, np.nan, 1.9599639845401,
    0, .23067848754982, .07407184066336, 3.1142534799181,
    .00184410987255, .08550034758104, .3758566275186, np.nan,
    1.9599639845401, 0, -42.714369016733, 18.411420987265,
    -2.319993065515, .02034125246974, -78.800091055978, -6.6286469774879,
    np.nan, 1.9599639845401, 0]).reshape(3, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00023822335794, -.00062512499511, .17499633632219, -.00062512499511,
    .00548663757926, -.88763669036779, .17499633632219, -.88763669036779,
    338.98042277032]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_cluster_2groups_large = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: ivreg2 invest mvalue kstock, dkraay(5)
# (Driscoll-Kraay kernel-robust, large-sample / z-based inference)
est = dict(
    N=200,
    inexog_ct=2,
    exexog_ct=0,
    endog_ct=0,
    partial_ct=0,
    bw=5,
    N_clust=20,
    df_m=2,
    sdofminus=0,
    dofminus=0,
    r2=.8124080178314146,
    rmse=93.69766358599176,
    rss=1755850.432294737,
    mss=7604093.484267682,
    r2_a=.8105035307027995,
    F=92.14467466912147,
    Fp=1.66368179227e-10,
    Fdf1=2,
    Fdf2=19,
    yy=13620706.07273678,
    partialcons=0,
    cons=1,
    jdf=0,
    j=0,
    ll=-1191.802357418011,
    rankV=3,
    rankS=3,
    rankxx=3,
    rankzz=3,
    r2c=.8124080178314146,
    r2u=.8710896173136538,
    yyc=9359943.916562419,
    clustvar="year",
    hacsubtitleV2="and kernel-robust to common correlated disturbances (Driscoll-Kraay)",  # noqa:E501
    hacsubtitleV="Statistics robust to heteroskedasticity and clustering on year",  # noqa:E501
    hacsubtitleB="Estimates efficient for homoskedasticity only",
    title="OLS estimation",
    predict="ivreg2_p",
    version="03.1.07",
    cmdline="ivreg2 invest mvalue kstock, dkraay(5)",
    cmd="ivreg2",
    model="ols",
    depvar="invest",
    vcetype="Robust",
    vce="cluster ac bartlett bw=5",
    partialsmall="small",
    ivar="company",
    tvar="year",
    kernel="Bartlett",
    inexog="mvalue kstock",
    insts="mvalue kstock",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .0134360177573, 8.6009231420662, 7.907743030e-18,
    .08922804516602, .14189626696591, np.nan, 1.9599639845401,
    0, .23067848754982, .04930800664089, 4.678317037431,
    2.892390940e-06, .13403657038422, .32732040471542, np.nan,
    1.9599639845401, 0, -42.714369016733, 12.190347184209,
    -3.5039501641153, .00045841113727, -66.607010456823, -18.821727576643,
    np.nan, 1.9599639845401, 0]).reshape(3, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00018052657317, -.00035661054613, -.06728261073866, -.00035661054613,
    .0024312795189, -.32394785247278, -.06728261073866, -.32394785247278,
    148.60456447156]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_nw_groupsum4_ivreg_large = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: ivreg2 invest mvalue kstock, dkraay(5) small
# (Driscoll-Kraay kernel-robust, small-sample / t-based inference)
est = dict(
    N=200,
    inexog_ct=2,
    exexog_ct=0,
    endog_ct=0,
    partial_ct=0,
    bw=5,
    df_r=19,
    N_clust=20,
    df_m=2,
    sdofminus=0,
    dofminus=0,
    r2=.8124080178314146,
    rmse=94.40840193979601,
    rss=1755850.432294737,
    mss=7604093.484267682,
    r2_a=.8105035307027995,
    F=92.14467466912149,
    Fp=1.66368179227e-10,
    Fdf1=2,
    Fdf2=19,
    partialcons=0,
    cons=1,
    jdf=0,
    j=0,
    ll=-1191.802357418011,
    rankV=3,
    rankS=3,
    rankxx=3,
    rankzz=3,
    r2c=.8124080178314146,
    r2u=.8710896173136538,
    yyc=9359943.916562419,
    yy=13620706.07273678,
    clustvar="year",
    hacsubtitleV2="and kernel-robust to common correlated disturbances (Driscoll-Kraay)",  # noqa:E501
    hacsubtitleV="Statistics robust to heteroskedasticity and clustering on year",  # noqa:E501
    hacsubtitleB="Estimates efficient for homoskedasticity only",
    title="OLS estimation",
    predict="ivreg2_p",
    version="03.1.07",
    cmdline="ivreg2 invest mvalue kstock, dkraay(5) small",
    cmd="ivreg2",
    model="ols",
    depvar="invest",
    vcetype="Robust",
    vce="cluster ac bartlett bw=5",
    partialsmall="small",
    small="small",
    ivar="company",
    tvar="year",
    kernel="Bartlett",
    inexog="mvalue kstock",
    insts="mvalue kstock",
    properties="b V",
)

params_table = np.array([
    .11556215606596, .0138548615926, 8.3409101775303, 8.967911239e-08,
    .08656359748216, .14456071464977, 19, 2.0930240544083,
    0, .23067848754982, .0508450956047, 4.5368876743442,
    .00022550505646, .12425847940049, .33709849569915, 19,
    2.0930240544083, 0, -42.714369016733, 12.570359466158,
    -3.3980228752988, .00301793225123, -69.02443375196, -16.404304281506,
    19, 2.0930240544083, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00019195718975, -.00037919048186, -.07154282413568, -.00037919048186,
    .00258522374705, -.34445964542925, -.07154282413568, -.34445964542925,
    158.01393710842]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_nw_groupsum4_ivreg_small = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# ----------------------------------------------------------------
# WLS
# Weighted least squares (analytic weights aw=1/mvalue) reference output.

# Stata: regress invest mvalue kstock [aw=1/mvalue], robust  (HC1)
est = dict(
    N=200,
    df_m=2,
    df_r=197,
    F=158.2726503915062,
    r2=.7728224625923459,
    rmse=35.1783035325949,
    mss=829335.6968772264,
    rss=243790.0687679817,
    r2_a=.7705160916541971,
    ll=-994.3622459900876,
    ll_0=-1142.564592396746,
    rank=3,
    cmdline="regress invest mvalue kstock [aw=1/mvalue], robust",
    title="Linear regression",
    marginsok="XB default",
    vce="robust",
    depvar="invest",
    cmd="regress",
    properties="b V",
    predict="regres_p",
    model="ols",
    estat_cmd="regress_estat",
    wexp="= 1/mvalue",
    wtype="aweight",
    vcetype="Robust",
)

params_table = np.array([
    .11694307068216, .00768545583365, 15.2161528494, 4.371656843e-35,
    .10178674436759, .13209939699674, 197, 1.9720790337785,
    0, .10410756769914, .00986959606725, 10.548310892334,
    6.565731752e-21, .08464394422305, .12357119117523, 197,
    1.9720790337785, 0, -9.2723336171089, 2.3458404391932,
    -3.9526702081656, .00010767530575, -13.898516363832, -4.6461508703863,
    197, 1.9720790337785, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00005906623137, 6.805470065e-06, -.01210153268743, 6.805470065e-06,
    .00009740892653, -.01511046663892, -.01210153268743, -.01511046663892,
    5.502967366154]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_hc1_wls_small = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: regress invest mvalue kstock [aw=1/mvalue], vce(cluster company)
est = dict(
    N_clust=10,
    N=200,
    df_m=2,
    df_r=9,
    F=22.90591346432732,
    r2=.7728224625923459,
    rmse=35.1783035325949,
    mss=829335.6968772264,
    rss=243790.0687679817,
    r2_a=.7705160916541971,
    ll=-994.3622459900876,
    ll_0=-1142.564592396746,
    rank=3,
    cmdline="regress invest mvalue kstock[aw=1/mvalue], vce(cluster company)",
    title="Linear regression",
    marginsok="XB default",
    vce="cluster",
    depvar="invest",
    cmd="regress",
    properties="b V",
    predict="regres_p",
    model="ols",
    estat_cmd="regress_estat",
    wexp="= 1/mvalue",
    wtype="aweight",
    vcetype="Robust",
    clustvar="company",
)

params_table = np.array([
    .11694307068216, .02609630113434, 4.4812124936848, .00152974827456,
    .05790913614858, .17597700521575, 9, 2.2621571627982,
    0, .10410756769914, .02285882773869, 4.5543703679489,
    .00137730504553, .05239730679689, .15581782860139, 9,
    2.2621571627982, 0, -9.2723336171089, 5.7204731422962,
    -1.6209032690934, .13948922172294, -22.212942910549, 3.6682756763312,
    9, 2.2621571627982, 0]).reshape(3, 9)

params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00068101693289, -.00006496077364, -.08926939086077, -.00006496077364,
    .00052252600559, -.0697116307149, -.08926939086077, -.0697116307149,
    32.723812971732]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_cluster_wls_small = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)

# Stata: ivreg2 invest mvalue kstock [aw=1/mvalue], cluster(company)
# (large-sample / z-based inference)
est = dict(
    N=200,
    inexog_ct=2,
    exexog_ct=0,
    endog_ct=0,
    partial_ct=0,
    N_clust=10,
    df_m=2,
    sdofminus=0,
    dofminus=0,
    r2=.772822462592346,
    rmse=34.91346937558495,
    rss=243790.0687679817,
    mss=829335.6968772268,
    r2_a=.7705160916541972,
    F=22.9059134643273,
    Fp=.000294548654088,
    Fdf1=2,
    Fdf2=9,
    yy=1401938.856802022,
    yyc=1073125.765645209,
    partialcons=0,
    cons=1,
    jdf=0,
    j=0,
    ll=-994.3622459900874,
    rankV=3,
    rankS=3,
    rankxx=3,
    rankzz=3,
    r2c=.772822462592346,
    r2u=.8261050632949187,
    clustvar="company",
    hacsubtitleV="Statistics robust to heteroskedasticity and clustering on company",  # noqa:E501
    hacsubtitleB="Estimates efficient for homoskedasticity only",
    title="OLS estimation",
    predict="ivreg2_p",
    version="03.1.07",
    cmdline="ivreg2 invest mvalue kstock [aw=1/mvalue], cluster(company)",
    cmd="ivreg2",
    wtype="aweight",
    wexp="=1/mvalue",
    model="ols",
    depvar="invest",
    vcetype="Robust",
    vce="robust cluster",
    partialsmall="small",
    inexog="mvalue kstock",
    insts="mvalue kstock",
    properties="b V",
)

params_table = np.array([
    .11694307068216, .02463240320082, 4.7475298990826, 2.059159576e-06,
    .06866444755588, .16522169380844, np.nan, 1.9599639845401,
    0, .10410756769914, .02157653909108, 4.8250355286218,
    1.399783125e-06, .06181832816961, .14639680722867, np.nan,
    1.9599639845401, 0, -9.2723336171089, 5.3995775192484,
    -1.7172331694572, .08593657730569, -19.855311086568, 1.31064385235,
    np.nan, 1.9599639845401, 0]).reshape(3, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()

cov = np.array([
    .00060675528745, -.00005787711139, -.07953498994782, -.00005787711139,
    .00046554703915, -.06210991017966, -.07953498994782, -.06210991017966,
    29.155437386372]).reshape(3, 3)

cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()

results_cluster_wls_large = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
| bsd-3-clause | ccf4b17b838e859f694e48ec01895e3e | 27.827417 | 107 | 0.654254 | 2.531384 | false | false | false | false |
statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | 6 | 5604 | """
Holds files for l1 regularization of LikelihoodModel, using
scipy.optimize.slsqp
"""
import numpy as np
from scipy.optimize import fmin_slsqp
import statsmodels.base.l1_solvers_common as l1_solvers_common
def fit_l1_slsqp(
        f, score, start_params, args, kwargs, disp=False, maxiter=1000,
        callback=None, retall=False, full_output=False, hess=None):
    """
    Solve the l1 regularized problem using scipy.optimize.fmin_slsqp().

    Specifically: We convert the convex but non-smooth problem

    .. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|

    via the transformation to the smooth, convex, constrained problem in twice
    as many variables (adding the "added variables" :math:`u_k`)

    .. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,

    subject to

    .. math:: -u_k \\leq \\beta_k \\leq u_k.

    Parameters
    ----------
    All the usual parameters from LikelhoodModel.fit
    alpha : non-negative scalar or numpy array (same size as parameters)
        The weight multiplying the l1 penalty term
    trim_mode : 'auto', 'size', or 'off'
        If not 'off', trim (set to zero) parameters that would have been zero
        if the solver reached the theoretical minimum.
        If 'auto', trim params using the Theory above.
        If 'size', trim params if they have very small absolute value
    size_trim_tol : float or 'auto' (default = 'auto')
        For use when trim_mode == 'size'
    auto_trim_tol : float
        For use when trim_mode == 'auto'
    qc_tol : float
        Print warning and do not allow auto trim when (ii) in "Theory" (above)
        is violated by this much.
    qc_verbose : bool
        If true, print out a full QC report upon failure
    acc : float (default 1e-10)
        Requested accuracy as used by slsqp
    """
    start_params = np.array(start_params).ravel('F')

    ### Extract values
    # k_params is total number of covariates,
    # possibly including a leading constant.
    k_params = len(start_params)
    # The start point: parameters followed by the added variables u_k,
    # initialized at |beta_k| so the constraints hold with equality.
    x0 = np.append(start_params, np.fabs(start_params))
    # alpha is the regularization parameter
    alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
    # Make sure it's a vector
    alpha = alpha * np.ones(k_params)
    assert alpha.min() >= 0
    # Convert display parameters to scipy.optimize form
    disp_slsqp = _get_disp_slsqp(disp, retall)
    # Set/retrieve the desired accuracy
    acc = kwargs.setdefault('acc', 1e-10)

    ### Wrap up for use in fmin_slsqp
    func = lambda x_full: _objective_func(f, x_full, k_params, alpha, *args)
    f_ieqcons_wrap = lambda x_full: _f_ieqcons(x_full, k_params)
    fprime_wrap = lambda x_full: _fprime(score, x_full, k_params, alpha)
    fprime_ieqcons_wrap = lambda x_full: _fprime_ieqcons(x_full, k_params)

    ### Call the solver
    results = fmin_slsqp(
        func, x0, f_ieqcons=f_ieqcons_wrap, fprime=fprime_wrap, acc=acc,
        iter=maxiter, disp=disp_slsqp, full_output=full_output,
        fprime_ieqcons=fprime_ieqcons_wrap)
    # fmin_slsqp returns the tuple (x, fx, its, imode, smode) only when
    # full_output is requested; otherwise it returns the solution array
    # itself, in which case ``results[0][:k_params]`` would wrongly slice
    # a scalar.
    x_opt = results[0] if full_output else results
    params = np.asarray(x_opt[:k_params])

    ### Post-process
    # QC
    qc_tol = kwargs['qc_tol']
    qc_verbose = kwargs['qc_verbose']
    passed = l1_solvers_common.qc_results(
        params, alpha, score, qc_tol, qc_verbose)
    # Possibly trim
    trim_mode = kwargs['trim_mode']
    size_trim_tol = kwargs['size_trim_tol']
    auto_trim_tol = kwargs['auto_trim_tol']
    params, trimmed = l1_solvers_common.do_trim_params(
        params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
        auto_trim_tol)

    ### Pack up return values for statsmodels optimizers
    # TODO These retvals are returned as mle_retvals...but the fit was not ML.
    # This could be confusing someday.
    if full_output:
        x_full, fx, its, imode, smode = results
        fopt = func(np.asarray(x_full))
        converged = (imode == 0)
        warnflag = str(imode) + ' ' + smode
        iterations = its
        gopt = float('nan')  # Objective is non-differentiable
        hopt = float('nan')
        retvals = {
            'fopt': fopt, 'converged': converged, 'iterations': iterations,
            'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed,
            'warnflag': warnflag}

    ### Return
    if full_output:
        return params, retvals
    else:
        return params
def _get_disp_slsqp(disp, retall):
if disp or retall:
if disp:
disp_slsqp = 1
if retall:
disp_slsqp = 2
else:
disp_slsqp = 0
return disp_slsqp
def _objective_func(f, x_full, k_params, alpha, *args):
"""
The regularized objective function
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
## Return
return f(x_params, *args) + (alpha * x_added).sum()
def _fprime(score, x_full, k_params, alpha):
"""
The regularized derivative
"""
x_params = x_full[:k_params]
# The derivative just appends a vector of constants
return np.append(score(x_params), alpha)
def _f_ieqcons(x_full, k_params):
"""
The inequality constraints.
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
# All entries in this vector must be \geq 0 in a feasible solution
return np.append(x_params + x_added, x_added - x_params)
def _fprime_ieqcons(x_full, k_params):
"""
Derivative of the inequality constraints
"""
I = np.eye(k_params) # noqa:E741
A = np.concatenate((I, I), axis=1)
B = np.concatenate((-I, I), axis=1)
C = np.concatenate((A, B), axis=0)
## Return
return C
| bsd-3-clause | b39c7652ad07f4f45637132831d2f435 | 32.357143 | 78 | 0.62384 | 3.277193 | false | false | false | false |
statsmodels/statsmodels | statsmodels/distributions/copula/elliptical.py | 3 | 8894 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 19:19:45 2021
Author: Josef Perktold
Author: Pamphile Roy
License: BSD-3
"""
import numpy as np
from scipy import stats
# scipy compat:
from statsmodels.compat.scipy import multivariate_t
from statsmodels.distributions.copula.copulas import Copula
class EllipticalCopula(Copula):
    """Base class for elliptical copulas.

    Subclasses must provide ``distr_uv`` (the univariate marginal
    distribution) and ``distr_mv`` (the matching multivariate
    distribution); this base class does not implement generic methods
    based on an elliptical generator.

    Notes
    -----
    Elliptical copulas require that copula parameters are set when the
    instance is created.  Those parameters currently cannot be provided
    in the call to methods.  If non-empty ``args`` are provided in
    methods, then a ValueError is raised; the ``args`` keyword exists
    only for a consistent interface across copulas.
    """

    def _handle_args(self, args):
        # Copula parameters live on the instance; reject any attempt to
        # pass them per call.
        if args == () or args is None:
            return args
        msg = ("Methods in elliptical copulas use copula parameters in"
               " attributes. `arg` in the method is ignored")
        raise ValueError(msg)

    def rvs(self, nobs=1, args=(), random_state=None):
        self._handle_args(args)
        draws = self.distr_mv.rvs(size=nobs, random_state=random_state)
        # Probability integral transform maps the draws onto the unit cube.
        return self.distr_uv.cdf(draws)

    def pdf(self, u, args=()):
        self._handle_args(args)
        x = self.distr_uv.ppf(u)
        joint_dens = self.distr_mv.pdf(x)
        marginal_dens = np.prod(self.distr_uv.pdf(x), axis=-1)
        return joint_dens / marginal_dens

    def cdf(self, u, args=()):
        self._handle_args(args)
        return self.distr_mv.cdf(self.distr_uv.ppf(u))

    def tau(self, corr=None):
        """Bivariate kendall's tau based on correlation coefficient.

        Parameters
        ----------
        corr : None or float
            Pearson correlation. If corr is None, then the correlation will be
            taken from the copula attribute.

        Returns
        -------
        Kendall's tau that corresponds to pearson correlation in the
        elliptical copula.
        """
        if corr is None:
            corr = self.corr
        if corr.shape == (2, 2):
            corr = corr[0, 1]
        return 2 * np.arcsin(corr) / np.pi

    def corr_from_tau(self, tau):
        """Pearson correlation from kendall's tau.

        Parameters
        ----------
        tau : array_like
            Kendall's tau correlation coefficient.

        Returns
        -------
        Pearson correlation coefficient for given tau in elliptical
        copula. This can be used as parameter for an elliptical copula.
        """
        return np.sin(tau * np.pi / 2)
class GaussianCopula(EllipticalCopula):
    r"""Gaussian copula.

    It is constructed from a multivariate normal distribution over
    :math:`\mathbb{R}^d` by using the probability integral transform.

    For a given correlation matrix :math:`R \in[-1, 1]^{d \times d}`,
    the Gaussian copula with parameter matrix :math:`R` can be written
    as:

    .. math::

        C_R^{\text{Gauss}}(u) = \Phi_R\left(\Phi^{-1}(u_1),\dots,
        \Phi^{-1}(u_d) \right),

    where :math:`\Phi^{-1}` is the inverse cumulative distribution function
    of a standard normal and :math:`\Phi_R` is the joint cumulative
    distribution function of a multivariate normal distribution with mean
    vector zero and covariance matrix equal to the correlation
    matrix :math:`R`.

    Parameters
    ----------
    corr : scalar or array_like
        Correlation or scatter matrix for the elliptical copula. In the
        bivariate case, ``corr`` can be a scalar and is then considered as
        the correlation coefficient. If ``corr`` is None, then the scatter
        matrix is the identity matrix.
    k_dim : int
        Dimension, number of components in the multivariate random variable.

    Notes
    -----
    Elliptical copulas require that copula parameters are set when the
    instance is created. Those parameters currently cannot be provided in the
    call to methods. If non-empty ``args`` are provided in methods, then a
    ValueError is raised; the ``args`` keyword is provided only for a
    consistent interface across copulas.

    References
    ----------
    .. [1] Joe, Harry, 2014, Dependence modeling with copulas. CRC press.
        p. 163
    """

    def __init__(self, corr=None, k_dim=2):
        super().__init__(k_dim=k_dim)
        if corr is None:
            corr = np.eye(k_dim)
        elif k_dim == 2 and np.size(corr) == 1:
            # Scalar correlation given for the bivariate case.
            corr = np.array([[1., corr], [corr, 1.]])
        self.corr = np.asarray(corr)
        # Standard normal marginals with a multivariate normal joint.
        self.distr_uv = stats.norm
        self.distr_mv = stats.multivariate_normal(cov=corr)

    def dependence_tail(self, corr=None):
        """
        Bivariate tail dependence parameter.

        Joe (2014) p. 182

        Parameters
        ----------
        corr : any
            Tail dependence for Gaussian copulas is always zero.
            Argument will be ignored

        Returns
        -------
        Lower and upper tail dependence coefficients of the copula with given
        Pearson correlation coefficient.
        """
        # The Gaussian copula is asymptotically tail independent.
        return 0, 0

    def _arg_from_tau(self, tau):
        # for generic compat
        return self.corr_from_tau(tau)
class StudentTCopula(EllipticalCopula):
    """Student t copula.

    Parameters
    ----------
    corr : scalar or array_like
        Correlation or scatter matrix for the elliptical copula. In the
        bivariate case, ``corr`` can be a scalar and is then considered as
        the correlation coefficient. If ``corr`` is None, then the scatter
        matrix is the identity matrix.
    df : float (optional)
        Degrees of freedom of the multivariate t distribution.
    k_dim : int
        Dimension, number of components in the multivariate random variable.

    Notes
    -----
    Elliptical copulas require that copula parameters are set when the
    instance is created. Those parameters currently cannot be provided in the
    call to methods. (This will most likely change in future versions.)
    If non-empty ``args`` are provided in methods, then a ValueError is raised.
    The ``args`` keyword is provided for a consistent interface across
    copulas.

    References
    ----------
    .. [1] Joe, Harry, 2014, Dependence modeling with copulas. CRC press.
        p. 181
    """

    def __init__(self, corr=None, df=None, k_dim=2):
        super().__init__(k_dim=k_dim)
        if corr is None:
            corr = np.eye(k_dim)
        elif k_dim == 2 and np.size(corr) == 1:
            # Scalar correlation given for the bivariate case.
            corr = np.array([[1., corr], [corr, 1.]])
        self.df = df
        self.corr = np.asarray(corr)
        # both uv and mv are frozen distributions
        self.distr_uv = stats.t(df=df)
        self.distr_mv = multivariate_t(shape=corr, df=df)

    def cdf(self, u, args=()):
        # The multivariate t CDF is not available in closed form.
        raise NotImplementedError("CDF not available in closed form.")

    def spearmans_rho(self, corr=None):
        """
        Bivariate Spearman's rho based on correlation coefficient.

        Joe (2014) p. 182

        Parameters
        ----------
        corr : None or float
            Pearson correlation. If corr is None, then the correlation will be
            taken from the copula attribute.

        Returns
        -------
        Spearman's rho that corresponds to pearson correlation in the
        elliptical copula.
        """
        if corr is None:
            corr = self.corr
        if corr.shape == (2, 2):
            corr = corr[0, 1]
        rho = 6 * np.arcsin(corr / 2) / np.pi
        return rho

    def dependence_tail(self, corr=None):
        """
        Bivariate tail dependence parameter.

        Joe (2014) p. 182

        Parameters
        ----------
        corr : None or float
            Pearson correlation. If corr is None, then the correlation will be
            taken from the copula attribute.

        Returns
        -------
        Lower and upper tail dependence coefficients of the copula with given
        Pearson correlation coefficient.
        """
        if corr is None:
            corr = self.corr
        if corr.shape == (2, 2):
            corr = corr[0, 1]
        df = self.df
        # lambda = 2 * T_{df+1}(-sqrt((df + 1) * (1 - corr) / (1 + corr)))
        # Note the parenthesized (1 + corr) denominator; dividing by 1 and
        # then adding corr would be incorrect.
        t = -np.sqrt((df + 1) * (1 - corr) / (1 + corr))
        # Note self.distr_uv is frozen, df cannot change, use stats.t instead
        lam = 2 * stats.t.cdf(t, df + 1)
        return lam, lam

    def _arg_from_tau(self, tau):
        # for generic compat; this does not provide an estimate of df
        return self.corr_from_tau(tau)
| bsd-3-clause | d17a4955f8ff5d26a7d2a12db188b0f1 | 30.207018 | 79 | 0.60018 | 4.020796 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/statespace/dynamic_factor_mq.py | 3 | 195420 | # -*- coding: utf-8 -*-
"""
Dynamic factor model.
Author: Chad Fulton
License: BSD-3
"""
from collections import OrderedDict
from warnings import warn
import numpy as np
import pandas as pd
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.validation import int_like
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.multivariate.pca import PCA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace._quarterly_ar1 import QuarterlyAR1
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.tools.tools import Bunch
from statsmodels.tools.validation import string_like
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tsa.statespace import mlemodel, initialization
from statsmodels.tsa.statespace.tools import (
companion_matrix, is_invertible, constrain_stationary_univariate,
constrain_stationary_multivariate, unconstrain_stationary_univariate,
unconstrain_stationary_multivariate)
from statsmodels.tsa.statespace.kalman_smoother import (
SMOOTHER_STATE, SMOOTHER_STATE_COV, SMOOTHER_STATE_AUTOCOV)
from statsmodels.base.data import PandasData
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.tableformatting import fmt_params
class FactorBlock(dict):
    """
    Helper for describing / indexing one block of factors in the state vector.

    Parameters
    ----------
    factor_names : tuple of str
        Names of the factors in this block, in the order in which they appear
        in the state vector.
    factor_order : int
        Order of the vector autoregression governing the block's dynamics.
    endog_factor_map : pd.DataFrame
        Mapping from endog variable names to factor names.
    state_offset : int
        Position of the block's first state within the full state vector.
    k_endog_Q : int
        Number of quarterly endogenous variables in the model.

    Notes
    -----
    When quarterly data are present, the state vector carries
    ``max(5, factor_order)`` lags of the factors (five lags are required for
    the monthly/quarterly aggregation); otherwise only ``factor_order`` lags
    are kept.

    The indexing helpers exposed both as properties and as dictionary
    entries are:

    - ``factors_ix`` : (k_factors x lags) integer index matrix; entry
      [i, j] is the state position of lag j+1 of factor i.
    - ``factors`` : slice over the factors and every lag kept in the state
      vector; equivalent to ``factors_ix.ravel(order='F')``.
    - ``factors_ar`` : slice over the lags that carry nonzero autoregressive
      coefficients (up to ``factor_order``).
    - ``factors_L1`` : slice over the first lag only.
    - ``factors_L1_5`` : slice over lags one through five.
    """

    def __init__(self, factor_names, factor_order, endog_factor_map,
                 state_offset, k_endog_Q):
        self.factor_names = factor_names
        self.k_factors = len(factor_names)
        self.factor_order = factor_order
        self.endog_factor_map = endog_factor_map.loc[:, factor_names]
        self.state_offset = state_offset
        self.k_endog_Q = k_endog_Q

        # Quarterly series force at least five lags into the state vector.
        self._factor_order = (max(5, factor_order) if k_endog_Q > 0
                              else factor_order)
        self.k_states = self.k_factors * self._factor_order

        # Mirror each index helper as a dictionary entry (same key order as
        # attribute definitions).
        for key in ['factors', 'factors_ar', 'factors_ix', 'factors_L1',
                    'factors_L1_5']:
            self[key] = getattr(self, key)

    @property
    def factors_ix(self):
        """Factor state index array, shaped (k_factors, lags)."""
        # e.g. the state position of the second lag of the third factor is
        # factors_ix[2, 1].
        # ravel(order='F') gives e.g (f0.L1, f1.L1, f0.L2, f1.L2, ...)
        # while ravel(order='C') gives e.g (f0.L1, f0.L2, f0.L3, f1.L1, ...)
        n = self.k_factors * self._factor_order
        grid = np.arange(self.state_offset, self.state_offset + n)
        return grid.reshape(self._factor_order, self.k_factors).T

    @property
    def factors(self):
        """Factors and all lags in the state vector (max(5, p))."""
        # Equivalent to factors_ix with ravel(order='F').
        start = self.state_offset
        stop = start + self.k_factors * self._factor_order
        return np.s_[start:stop]

    @property
    def factors_ar(self):
        """Factors and all lags used in the factor autoregression (p)."""
        start = self.state_offset
        stop = start + self.k_factors * self.factor_order
        return np.s_[start:stop]

    @property
    def factors_L1(self):
        """Factors (first block / lag only)."""
        start = self.state_offset
        return np.s_[start:start + self.k_factors]

    @property
    def factors_L1_5(self):
        """Factors plus four lags."""
        start = self.state_offset
        return np.s_[start:start + self.k_factors * 5]
class DynamicFactorMQStates(dict):
"""
Helper class for describing and indexing the state vector.
Parameters
----------
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
endog_names : list
Names of the endogenous variables.
factors : int, list, or dict
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, ....
factor_orders : int or dict
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
Attributes
----------
k_endog : int
Total number of endogenous variables.
k_states : int
Total number of state variables (those associated with the factors and
those associated with the idiosyncratic disturbances).
k_posdef : int
Total number of state disturbance terms (those associated with the
factors and those associated with the idiosyncratic disturbances).
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
k_factors : int
Total number of factors. Note that factor multiplicities will have
already been expanded.
k_states_factors : int
The number of state variables associated with factors (includes both
factors and lags of factors included in the state vector).
k_posdef_factors : int
The number of state disturbance terms associated with factors.
k_states_idio : int
Total number of state variables associated with idiosyncratic
disturbances.
k_posdef_idio : int
Total number of state disturbance terms associated with idiosyncratic
disturbances.
k_states_idio_M : int
The number of state variables associated with idiosyncratic
disturbances for monthly (or non-time-specific if there are no
quarterly variables) variables. If the disturbances are AR(1), then
this will be equal to `k_endog_M`, otherwise it will be equal to zero.
k_states_idio_Q : int
The number of state variables associated with idiosyncratic
disturbances for quarterly variables. This will always be equal to
`k_endog_Q * 5`, even if the disturbances are not AR(1).
k_posdef_idio_M : int
The number of state disturbance terms associated with idiosyncratic
disturbances for monthly (or non-time-specific if there are no
quarterly variables) variables. If the disturbances are AR(1), then
this will be equal to `k_endog_M`, otherwise it will be equal to zero.
k_posdef_idio_Q : int
The number of state disturbance terms associated with idiosyncratic
disturbances for quarterly variables. This will always be equal to
`k_endog_Q`, even if the disturbances are not AR(1).
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process.
factor_blocks : list of FactorBlock
List of `FactorBlock` helper instances for each factor block.
factor_names : list of str
List of factor names.
factors : dict
Dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
Note that factor multiplicities will have already been expanded.
factor_orders : dict
Dictionary with:
- keys : tuple of factor names
- values : integer describing autoregression order
Note that factor multiplicities will have already been expanded.
max_factor_order : int
Maximum autoregression order across all factor blocks.
factor_block_orders : pd.Series
Series containing lag orders, with the factor block (a tuple of factor
names) as the index.
factor_multiplicities : dict
Dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
endog_factor_map : dict
Dictionary with:
- keys : endog name
- values : list of factor names
loading_counts : pd.Series
Series containing number of endogenous variables loading on each
factor, with the factor name as the index.
block_loading_counts : dict
Dictionary with:
- keys : tuple of factor names
- values : average number of endogenous variables loading on the block
(note that average is over the factors in the block)
Notes
-----
The goal of this class is, in particular, to make it easier to retrieve
indexes of subsets of the state vector.
Note that the ordering of the factor blocks in the state vector is
determined by the `factor_orders` argument if a dictionary. Otherwise,
factors are ordered so that factors that load on more variables come first
(and then alphabetically, to break ties).
- `factors_L1` is an array with the indexes of first lag of the factors
from each block. Ordered first by block, and then by lag.
- `factors_L1_5` is an array with the indexes contains the first - fifth
lags of the factors from each block. Ordered first by block, and then by
lag.
- `factors_L1_5_ix` is an array shaped (5, k_factors) with the indexes
of the first - fifth lags of the factors from each block.
- `idio_ar_L1` is an array with the indexes of the first lag of the
idiosyncratic AR states, both monthly (if appliable) and quarterly.
- `idio_ar_M` is a slice with the indexes of the idiosyncratic disturbance
states for the monthly (or non-time-specific if there are no quarterly
variables) variables. It is an empty slice if
`idiosyncratic_ar1 = False`.
- `idio_ar_Q` is a slice with the indexes of the idiosyncratic disturbance
states and all lags, for the quarterly variables. It is an empty slice if
there are no quarterly variable.
- `idio_ar_Q_ix` is an array shaped (k_endog_Q, 5) with the indexes of the
first - fifth lags of the idiosyncratic disturbance states for the
quarterly variables.
- `endog_factor_iloc` is a list of lists, with entries for each endogenous
variable. The entry for variable `i`, `endog_factor_iloc[i]` is a list of
indexes of the factors that variable `i` loads on. This does not include
any lags, but it can be used with e.g. `factors_L1_5_ix` to get lags.
"""
def __init__(self, k_endog_M, k_endog_Q, endog_names, factors,
factor_orders, factor_multiplicities, idiosyncratic_ar1):
# Save model parameterization
self.k_endog_M = k_endog_M
self.k_endog_Q = k_endog_Q
self.k_endog = self.k_endog_M + self.k_endog_Q
self.idiosyncratic_ar1 = idiosyncratic_ar1
# Validate factor-related inputs
factors_is_int = np.issubdtype(type(factors), np.integer)
factors_is_list = isinstance(factors, (list, tuple))
orders_is_int = np.issubdtype(type(factor_orders), np.integer)
if factor_multiplicities is None:
factor_multiplicities = 1
mult_is_int = np.issubdtype(type(factor_multiplicities), np.integer)
if not (factors_is_int or factors_is_list or
isinstance(factors, dict)):
raise ValueError('`factors` argument must an integer number of'
' factors, a list of global factor names, or a'
' dictionary, mapping observed variables to'
' factors.')
if not (orders_is_int or isinstance(factor_orders, dict)):
raise ValueError('`factor_orders` argument must either be an'
' integer or a dictionary.')
if not (mult_is_int or isinstance(factor_multiplicities, dict)):
raise ValueError('`factor_multiplicities` argument must either be'
' an integer or a dictionary.')
# Expand integers
# If `factors` is an integer, we assume that it denotes the number of
# global factors (factors that load on each variable)
if factors_is_int or factors_is_list:
# Validate this here for a more informative error message
if ((factors_is_int and factors == 0) or
(factors_is_list and len(factors) == 0)):
raise ValueError('The model must contain at least one factor.')
if factors_is_list:
factor_names = list(factors)
else:
factor_names = [f'{i}' for i in range(factors)]
factors = {name: factor_names[:] for name in endog_names}
_factor_names = []
for val in factors.values():
_factor_names.extend(val)
factor_names = set(_factor_names)
if orders_is_int:
factor_orders = {factor_name: factor_orders
for factor_name in factor_names}
if mult_is_int:
factor_multiplicities = {factor_name: factor_multiplicities
for factor_name in factor_names}
# Apply the factor multiplicities
factors, factor_orders = self._apply_factor_multiplicities(
factors, factor_orders, factor_multiplicities)
# Save the (potentially expanded) variables
self.factors = factors
self.factor_orders = factor_orders
self.factor_multiplicities = factor_multiplicities
# Get the mapping between endog and factors
self.endog_factor_map = self._construct_endog_factor_map(
factors, endog_names)
self.k_factors = self.endog_factor_map.shape[1]
# Validate number of factors
# TODO: could do more extensive validation here.
if self.k_factors > self.k_endog_M:
raise ValueError(f'Number of factors ({self.k_factors}) cannot be'
' greater than the number of monthly endogenous'
f' variables ({self.k_endog_M}).')
# Get `loading_counts`: factor -> # endog loading on the factor
self.loading_counts = (
self.endog_factor_map.sum(axis=0).rename('count')
.reset_index().sort_values(['count', 'factor'],
ascending=[False, True])
.set_index('factor'))
# `block_loading_counts`: block -> average of (# loading on factor)
# across each factor in the block
block_loading_counts = {
block: np.atleast_1d(
self.loading_counts.loc[list(block), 'count']).mean(axis=0)
for block in factor_orders.keys()}
ix = pd.Index(block_loading_counts.keys(), tupleize_cols=False,
name='block')
self.block_loading_counts = pd.Series(
list(block_loading_counts.values()),
index=ix, name='count').to_frame().sort_values(
['count', 'block'], ascending=[False, True])['count']
# Get the mapping between factor blocks and VAR order
# `factor_block_orders`: pd.Series of factor block -> lag order
ix = pd.Index(factor_orders.keys(), tupleize_cols=False, name='block')
self.factor_block_orders = pd.Series(
list(factor_orders.values()), index=ix, name='order')
# If the `factor_orders` variable was an integer, then it did not
# define an ordering for the factor blocks. In this case, we use the
# loading counts to do so. This ensures that e.g. global factors are
# listed first.
if orders_is_int:
keys = self.block_loading_counts.keys()
self.factor_block_orders = self.factor_block_orders.loc[keys]
self.factor_block_orders.index.name = 'block'
# Define factor_names based on factor_block_orders (instead of on those
# from `endog_factor_map`) to (a) make sure that factors are allocated
# to only one block, and (b) order the factor names to be consistent
# with the block definitions.
factor_names = pd.Series(
np.concatenate(list(self.factor_block_orders.index)))
missing = [name for name in self.endog_factor_map.columns
if name not in factor_names.tolist()]
if len(missing):
ix = pd.Index([(factor_name,) for factor_name in missing],
tupleize_cols=False, name='block')
default_block_orders = pd.Series(np.ones(len(ix), dtype=int),
index=ix, name='order')
self.factor_block_orders = (
self.factor_block_orders.append(default_block_orders))
factor_names = pd.Series(
np.concatenate(list(self.factor_block_orders.index)))
duplicates = factor_names.duplicated()
if duplicates.any():
duplicate_names = set(factor_names[duplicates])
raise ValueError('Each factor can be assigned to at most one'
' block of factors in `factor_orders`.'
f' Duplicate entries for {duplicate_names}')
self.factor_names = factor_names.tolist()
self.max_factor_order = np.max(self.factor_block_orders)
# Re-order the columns of the endog factor mapping to reflect the
# orderings of endog_names and factor_names
self.endog_factor_map = (
self.endog_factor_map.loc[endog_names, factor_names])
# Create factor block helpers, and get factor-related state and posdef
# dimensions
self.k_states_factors = 0
self.k_posdef_factors = 0
state_offset = 0
self.factor_blocks = []
for factor_names, factor_order in self.factor_block_orders.items():
block = FactorBlock(factor_names, factor_order,
self.endog_factor_map, state_offset,
self.k_endog_Q)
self.k_states_factors += block.k_states
self.k_posdef_factors += block.k_factors
state_offset += block.k_states
self.factor_blocks.append(block)
# Idiosyncratic state dimensions
self.k_states_idio_M = self.k_endog_M if idiosyncratic_ar1 else 0
self.k_states_idio_Q = self.k_endog_Q * 5
self.k_states_idio = self.k_states_idio_M + self.k_states_idio_Q
# Idiosyncratic posdef dimensions
self.k_posdef_idio_M = self.k_endog_M if self.idiosyncratic_ar1 else 0
self.k_posdef_idio_Q = self.k_endog_Q
self.k_posdef_idio = self.k_posdef_idio_M + self.k_posdef_idio_Q
# Total states, posdef
self.k_states = self.k_states_factors + self.k_states_idio
self.k_posdef = self.k_posdef_factors + self.k_posdef_idio
# Cache
self._endog_factor_iloc = None
def _apply_factor_multiplicities(self, factors, factor_orders,
factor_multiplicities):
"""
Expand `factors` and `factor_orders` to account for factor multiplity.
For example, if there is a `global` factor with multiplicity 2, then
this method expands that into `global.1` and `global.2` in both the
`factors` and `factor_orders` dictionaries.
Parameters
----------
factors : dict
Dictionary of {endog_name: list of factor names}
factor_orders : dict
Dictionary of {tuple of factor names: factor order}
factor_multiplicities : dict
Dictionary of {factor name: factor multiplicity}
Returns
-------
new_factors : dict
Dictionary of {endog_name: list of factor names}, with factor names
expanded to incorporate multiplicities.
new_factors : dict
Dictionary of {tuple of factor names: factor order}, with factor
names in each tuple expanded to incorporate multiplicities.
"""
# Expand the factors to account for the multiplicities
new_factors = {}
for endog_name, factors_list in factors.items():
new_factor_list = []
for factor_name in factors_list:
n = factor_multiplicities.get(factor_name, 1)
if n > 1:
new_factor_list += [f'{factor_name}.{i + 1}'
for i in range(n)]
else:
new_factor_list.append(factor_name)
new_factors[endog_name] = new_factor_list
# Expand the factor orders to account for the multiplicities
new_factor_orders = {}
for block, factor_order in factor_orders.items():
if not isinstance(block, tuple):
block = (block,)
new_block = []
for factor_name in block:
n = factor_multiplicities.get(factor_name, 1)
if n > 1:
new_block += [f'{factor_name}.{i + 1}'
for i in range(n)]
else:
new_block += [factor_name]
new_factor_orders[tuple(new_block)] = factor_order
return new_factors, new_factor_orders
def _construct_endog_factor_map(self, factors, endog_names):
"""
Construct mapping of observed variables to factors.
Parameters
----------
factors : dict
Dictionary of {endog_name: list of factor names}
endog_names : list of str
List of the names of the observed variables.
Returns
-------
endog_factor_map : pd.DataFrame
Boolean dataframe with `endog_names` as the index and the factor
names (computed from the `factors` input) as the columns. Each cell
is True if the associated factor is allowed to load on the
associated observed variable.
"""
# Validate that all entries in the factors dictionary have associated
# factors
missing = []
for key, value in factors.items():
if not isinstance(value, (list, tuple)) or len(value) == 0:
missing.append(key)
if len(missing):
raise ValueError('Each observed variable must be mapped to at'
' least one factor in the `factors` dictionary.'
f' Variables missing factors are: {missing}.')
# Validate that we have been told about the factors for each endog
# variable. This is because it doesn't make sense to include an
# observed variable that doesn't load on any factor
missing = set(endog_names).difference(set(factors.keys()))
if len(missing):
raise ValueError('If a `factors` dictionary is provided, then'
' it must include entries for each observed'
f' variable. Missing variables are: {missing}.')
# Figure out the set of factor names
# (0 is just a dummy value for the dict - we just do it this way to
# collect the keys, in order, without duplicates.)
factor_names = {}
for key, value in factors.items():
if isinstance(value, str):
factor_names[value] = 0
else:
factor_names.update({v: 0 for v in value})
factor_names = list(factor_names.keys())
k_factors = len(factor_names)
endog_factor_map = pd.DataFrame(
np.zeros((self.k_endog, k_factors), dtype=bool),
index=pd.Index(endog_names, name='endog'),
columns=pd.Index(factor_names, name='factor'))
for key, value in factors.items():
endog_factor_map.loc[key, value] = True
return endog_factor_map
@property
def factors_L1(self):
"""Factors."""
ix = np.arange(self.k_states_factors)
iloc = tuple(ix[block.factors_L1] for block in self.factor_blocks)
return np.concatenate(iloc)
@property
def factors_L1_5_ix(self):
"""Factors plus any lags, index shaped (5, k_factors)."""
ix = np.arange(self.k_states_factors)
iloc = []
for block in self.factor_blocks:
iloc.append(ix[block.factors_L1_5].reshape(5, block.k_factors))
return np.concatenate(iloc, axis=1)
@property
def idio_ar_L1(self):
"""Idiosyncratic AR states, (first block / lag only)."""
ix1 = self.k_states_factors
if self.idiosyncratic_ar1:
ix2 = ix1 + self.k_endog
else:
ix2 = ix1 + self.k_endog_Q
return np.s_[ix1:ix2]
@property
def idio_ar_M(self):
"""Idiosyncratic AR states for monthly variables."""
ix1 = self.k_states_factors
ix2 = ix1
if self.idiosyncratic_ar1:
ix2 += self.k_endog_M
return np.s_[ix1:ix2]
@property
def idio_ar_Q(self):
"""Idiosyncratic AR states and all lags for quarterly variables."""
# Note that this is equivalent to idio_ar_Q_ix with ravel(order='F')
ix1 = self.k_states_factors
if self.idiosyncratic_ar1:
ix1 += self.k_endog_M
ix2 = ix1 + self.k_endog_Q * 5
return np.s_[ix1:ix2]
@property
def idio_ar_Q_ix(self):
"""Idiosyncratic AR (quarterly) state index, (k_endog_Q, lags)."""
# i.e. the position in the state vector of the second lag of the third
# quarterly variable is idio_ar_Q_ix[2, 1]
# ravel(order='F') gives e.g (y1.L1, y2.L1, y1.L2, y2.L3, y1.L3, ...)
# while
# ravel(order='C') gives e.g (y1.L1, y1.L2, y1.L3, y2.L1, y2.L2, ...)
start = self.k_states_factors
if self.idiosyncratic_ar1:
start += self.k_endog_M
return (start + np.reshape(
np.arange(5 * self.k_endog_Q), (5, self.k_endog_Q)).T)
@property
def endog_factor_iloc(self):
"""List of list of int, factor indexes for each observed variable."""
# i.e. endog_factor_iloc[i] is a list of integer locations of the
# factors that load on the ith observed variable
if self._endog_factor_iloc is None:
ilocs = []
for i in range(self.k_endog):
ilocs.append(np.where(self.endog_factor_map.iloc[i])[0])
self._endog_factor_iloc = ilocs
return self._endog_factor_iloc
def __getitem__(self, key):
"""
Use square brackets to access index / slice elements.
This is convenient in highlighting the indexing / slice quality of
these attributes in the code below.
"""
if key in ['factors_L1', 'factors_L1_5_ix', 'idio_ar_L1', 'idio_ar_M',
'idio_ar_Q', 'idio_ar_Q_ix']:
return getattr(self, key)
else:
raise KeyError(key)
class DynamicFactorMQ(mlemodel.MLEModel):
r"""
Dynamic factor model with EM algorithm; option for monthly/quarterly data.
Implementation of the dynamic factor model of Bańbura and Modugno (2014)
([1]_) and Bańbura, Giannone, and Reichlin (2011) ([2]_). Uses the EM
algorithm for parameter fitting, and so can accommodate a large number of
left-hand-side variables. Specifications can include any collection of
blocks of factors, including different factor autoregression orders, and
can include AR(1) processes for idiosyncratic disturbances. Can
incorporate monthly/quarterly mixed frequency data along the lines of
Mariano and Murasawa (2011) ([4]_). A special case of this model is the
Nowcasting model of Bok et al. (2017) ([3]_). Moreover, this model can be
used to compute the news associated with updated data releases.
Parameters
----------
endog : array_like
Observed time-series process :math:`y`. See the "Notes" section for
details on how to set up a model with monthly/quarterly mixed frequency
data.
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which the
provided `endog` dataset contains both the monthly and quarterly data,
this variable should be used to indicate how many of the variables
are monthly. Note that when using the `k_endog_monthly` argument, the
columns with monthly variables in `endog` should be ordered first, and
the columns with quarterly variables should come afterwards. See the
"Notes" section for details on how to set up a model with
monthly/quarterly mixed frequency data.
factors : int, list, or dict, optional
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, .... The
default is a single factor that loads on all variables. Note that there
cannot be more factors specified than there are monthly variables.
factor_orders : int or dict, optional
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict, optional
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
standardize : bool or tuple, optional
If a boolean, whether or not to standardize each endogenous variable to
have mean zero and standard deviation 1 before fitting the model. See
"Notes" for details about how this option works with postestimation
output. If a tuple (usually only used internally), then the tuple must
have length 2, with each element containing a Pandas series with index
equal to the names of the endogenous variables. The first element
should contain the mean values and the second element should contain
the standard deviations. Default is True.
endog_quarterly : pandas.Series or pandas.DataFrame
Observed quarterly variables. If provided, must be a Pandas Series or
DataFrame with a DatetimeIndex or PeriodIndex at the quarterly
frequency. See the "Notes" section for details on how to set up a model
with monthly/quarterly mixed frequency data.
init_t0 : bool, optional
If True, this option initializes the Kalman filter with the
distribution for :math:`\alpha_0` rather than :math:`\alpha_1`. See
the "Notes" section for more details. This option is rarely used except
for testing. Default is False.
obs_cov_diag : bool, optional
If True and if `idiosyncratic_ar1 is True`, then this option puts small
positive values in the observation disturbance covariance matrix. This
is not required for estimation and is rarely used except for testing.
(It is sometimes used to prevent numerical errors, for example those
associated with a positive semi-definite forecast error covariance
matrix at the first time step when using EM initialization, but state
space models in Statsmodels switch to the univariate approach in those
cases, and so do not need to use this trick). Default is False.
Notes
-----
The basic model is:
.. math::
y_t & = \Lambda f_t + \epsilon_t \\
f_t & = A_1 f_{t-1} + \dots + A_p f_{t-p} + u_t
where:
- :math:`y_t` is observed data at time t
- :math:`\epsilon_t` is idiosyncratic disturbance at time t (see below for
details, including modeling serial correlation in this term)
- :math:`f_t` is the unobserved factor at time t
- :math:`u_t \sim N(0, Q)` is the factor disturbance at time t
and:
- :math:`\Lambda` is referred to as the matrix of factor loadings
- :math:`A_i` are matrices of autoregression coefficients
Furthermore, we allow the idiosyncratic disturbances to be serially
correlated, so that, if `idiosyncratic_ar1=True`,
:math:`\epsilon_{i,t} = \rho_i \epsilon_{i,t-1} + e_{i,t}`, where
:math:`e_{i,t} \sim N(0, \sigma_i^2)`. If `idiosyncratic_ar1=False`,
then we instead have :math:`\epsilon_{i,t} = e_{i,t}`.
This basic setup can be found in [1]_, [2]_, [3]_, and [4]_.
We allow for two generalizations of this model:
1. Following [2]_, we allow multiple "blocks" of factors, which are
independent from the other blocks of factors. Different blocks can be
set to load on different subsets of the observed variables, and can be
specified with different lag orders.
2. Following [4]_ and [2]_, we allow mixed frequency models in which both
monthly and quarterly data are used. See the section on "Mixed frequency
models", below, for more details.
Additional notes:
- The observed data may contain arbitrary patterns of missing entries.
**EM algorithm**
This model contains a potentially very large number of parameters, and it
can be difficult and take a prohibitively long time to numerically optimize
the likelihood function using quasi-Newton methods. Instead, the default
fitting method in this model uses the EM algorithm, as detailed in [1]_.
As a result, the model can accommodate datasets with hundreds of
observed variables.
**Mixed frequency data**
This model can handle mixed frequency data in two ways. In this section,
we only briefly describe this, and refer readers to [2]_ and [4]_ for all
details.
First, because there can be arbitrary patterns of missing data in the
observed vector, one can simply include lower frequency variables as
observed in a particular higher frequency period, and missing otherwise.
For example, in a monthly model, one could include quarterly data as
occurring on the third month of each quarter. To use this method, one
simply needs to combine the data into a single dataset at the higher
frequency that can be passed to this model as the `endog` argument.
However, depending on the type of variables used in the analysis and the
assumptions about the data generating process, this approach may not be
valid.
For example, suppose that we are interested in the growth rate of real GDP,
which is measured at a quarterly frequency. If the basic factor model is
specified at a monthly frequency, then the quarterly growth rate in the
third month of each quarter -- which is what we actually observe -- is
approximated by a particular weighted average of unobserved monthly growth
    rates. We need to take this particular weighted moving average into
    account in constructing our model, and this is what the second approach
    does.
The second approach follows [2]_ and [4]_ in constructing a state space
form to explicitly model the quarterly growth rates in terms of the
unobserved monthly growth rates. To use this approach, there are two
methods:
1. Combine the monthly and quarterly data into a single dataset at the
monthly frequency, with the monthly data in the first columns and the
quarterly data in the last columns. Pass this dataset to the model as
the `endog` argument and give the number of the variables that are
monthly as the `k_endog_monthly` argument.
2. Construct a monthly dataset as a Pandas DataFrame with a DatetimeIndex
or PeriodIndex at the monthly frequency and separately construct a
quarterly dataset as a Pandas DataFrame with a DatetimeIndex or
PeriodIndex at the quarterly frequency. Pass the monthly DataFrame to
the model as the `endog` argument and pass the quarterly DataFrame to
the model as the `endog_quarterly` argument.
Note that this only incorporates one particular type of mixed frequency
data. See also Banbura et al. (2013). "Now-Casting and the Real-Time Data
Flow." for discussion about other types of mixed frequency data that are
not supported by this framework.
**Nowcasting and the news**
Through its support for monthly/quarterly mixed frequency data, this model
can allow for the nowcasting of quarterly variables based on monthly
observations. In particular, [2]_ and [3]_ use this model to construct
nowcasts of real GDP and analyze the impacts of "the news", derived from
incoming data on a real-time basis. This latter functionality can be
accessed through the `news` method of the results object.
**Standardizing data**
As is often the case in formulating a dynamic factor model, we do not
explicitly account for the mean of each observed variable. Instead, the
default behavior is to standardize each variable prior to estimation. Thus
if :math:`y_t` are the given observed data, the dynamic factor model is
actually estimated on the standardized data defined by:
.. math::
x_{i, t} = (y_{i, t} - \bar y_i) / s_i
where :math:`\bar y_i` is the sample mean and :math:`s_i` is the sample
standard deviation.
By default, if standardization is applied prior to estimation, results such
as in-sample predictions, out-of-sample forecasts, and the computation of
the "news" are reported in the scale of the original data (i.e. the model
output has the reverse transformation applied before it is returned to the
user).
Standardization can be disabled by passing `standardization=False` to the
model constructor.
**Identification of factors and loadings**
The estimated factors and the factor loadings in this model are only
identified up to an invertible transformation. As described in (the working
    paper version of) [2]_, while it is possible to impose normalizations to
    achieve identification, the EM algorithm will converge regardless.
Moreover, for nowcasting and forecasting purposes, identification is not
required. This model does not impose any normalization to identify the
factors and the factor loadings.
**Miscellaneous**
There are two arguments available in the model constructor that are rarely
used but which deserve a brief mention: `init_t0` and `obs_cov_diag`. These
arguments are provided to allow exactly matching the output of other
packages that have slight differences in how the underlying state space
model is set up / applied.
- `init_t0`: state space models in Statsmodels follow Durbin and Koopman in
initializing the model with :math:`\alpha_1 \sim N(a_1, P_1)`. Other
implementations sometimes initialize instead with
:math:`\alpha_0 \sim N(a_0, P_0)`. We can accommodate this by prepending
a row of NaNs to the observed dataset.
- `obs_cov_diag`: the state space form in [1]_ incorporates non-zero (but
very small) diagonal elements for the observation disturbance covariance
matrix.
Examples
--------
Constructing and fitting a `DynamicFactorMQ` model.
>>> data = sm.datasets.macrodata.load_pandas().data.iloc[-100:]
>>> data.index = pd.period_range(start='1984Q4', end='2009Q3', freq='Q')
>>> endog = data[['infl', 'tbilrate']].resample('M').last()
>>> endog_Q = np.log(data[['realgdp', 'realcons']]).diff().iloc[1:] * 400
**Basic usage**
In the simplest case, passing only the `endog` argument results in a model
with a single factor that follows an AR(1) process. Note that because we
are not also providing an `endog_quarterly` dataset, `endog` can be a numpy
array or Pandas DataFrame with any index (it does not have to be monthly).
The `summary` method can be useful in checking the model specification.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of factors: 1
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
**Factors**
With `factors=2`, there will be two independent factors that will each
evolve according to separate AR(1) processes.
>>> mod = sm.tsa.DynamicFactorMQ(endog, factors=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0 1
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0 1
1 1
=====================
**Factor multiplicities**
By instead specifying `factor_multiplicities=2`, we would still have two
factors, but they would be dependent and would evolve jointly according
to a VAR(1) process.
>>> mod = sm.tsa.DynamicFactorMQ(endog, factor_multiplicities=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 1 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0.1 0.2
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0.1, 0.2 1
=====================
**Factor orders**
In either of the above cases, we could extend the order of the (vector)
autoregressions by using the `factor_orders` argument. For example, the
below model would contain two independent factors that each evolve
according to a separate AR(2) process:
>>> mod = sm.tsa.DynamicFactorMQ(endog, factors=2, factor_orders=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0 1
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0 2
1 2
=====================
**Serial correlation in the idiosyncratic disturbances**
By default, the model allows each idiosyncratic disturbance terms to evolve
according to an AR(1) process. If preferred, they can instead be specified
    to be serially independent by passing `idiosyncratic_ar1=False`.
>>> mod = sm.tsa.DynamicFactorMQ(endog, idiosyncratic_ar1=False)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of factors: 1
+ iid idiosyncratic Idiosyncratic disturbances: iid
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
*Monthly / Quarterly mixed frequency*
    To specify a monthly / quarterly mixed frequency model (see the Notes
    section for more details about these models):
>>> mod = sm.tsa.DynamicFactorMQ(endog, endog_quarterly=endog_Q)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of quarterly variables: 2
+ Mixed frequency (M/Q) # of factors: 1
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
realgdp X
realcons X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
    *Customize observed variable / factor loadings*
    To specify that certain observed variables only load on certain factors,
    it is possible to pass a dictionary to the `factors` argument.
    >>> factors = {'infl': ['global'],
    ...            'tbilrate': ['global'],
    ...            'realgdp': ['global', 'real'],
    ...            'realcons': ['global', 'real']}
    >>> mod = sm.tsa.DynamicFactorMQ(endog, factors=factors,
    ...                              endog_quarterly=endog_Q)
    >>> print(mod.summary())
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of quarterly variables: 2
+ Mixed frequency (M/Q) # of factor blocks: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable global real
-----------------------------------
infl X
tbilrate X
realgdp X X
realcons X X
Factor blocks:
=====================
block order
---------------------
global 1
real 1
=====================
**Fitting parameters**
To fit the model, use the `fit` method. This method uses the EM algorithm
by default.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.summary())
Dynamic Factor Results
==========================================================================
Dep. Variable: ['infl', 'tbilrate'] No. Observations: 300
Model: Dynamic Factor Model Log Likelihood -127.909
+ 1 factors in 1 blocks AIC 271.817
+ AR(1) idiosyncratic BIC 301.447
Date: Tue, 04 Aug 2020 HQIC 283.675
Time: 15:59:11 EM Iterations 83
Sample: 10-31-1984
- 09-30-2009
Covariance Type: Not computed
Observation equation:
==============================================================
Factor loadings: 0 idiosyncratic: AR(1) var.
--------------------------------------------------------------
infl -0.67 0.39 0.73
tbilrate -0.63 0.99 0.01
Transition: Factor block 0
=======================================
L1.0 error variance
---------------------------------------
0 0.98 0.01
=======================================
Warnings:
[1] Covariance matrix not calculated.
*Displaying iteration progress*
To display information about the EM iterations, use the `disp` argument.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit(disp=10)
EM start iterations, llf=-291.21
EM iteration 10, llf=-157.17, convergence criterion=0.053801
EM iteration 20, llf=-128.99, convergence criterion=0.0035545
EM iteration 30, llf=-127.97, convergence criterion=0.00010224
EM iteration 40, llf=-127.93, convergence criterion=1.3281e-05
EM iteration 50, llf=-127.92, convergence criterion=5.4725e-06
EM iteration 60, llf=-127.91, convergence criterion=2.8665e-06
EM iteration 70, llf=-127.91, convergence criterion=1.6999e-06
EM iteration 80, llf=-127.91, convergence criterion=1.1085e-06
EM converged at iteration 83, llf=-127.91,
convergence criterion=9.9004e-07 < tolerance=1e-06
    **Results: forecasting, impulse responses, and more**
    Once the model is fitted, there are a number of methods available from the
    results object. Some examples include:
*Forecasting*
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.forecast(steps=5))
infl tbilrate
2009-10 1.784169 0.260401
2009-11 1.735848 0.305981
2009-12 1.730674 0.350968
2010-01 1.742110 0.395369
2010-02 1.759786 0.439194
*Impulse responses*
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.impulse_responses(steps=5))
infl tbilrate
0 -1.511956 -1.341498
1 -1.483172 -1.315960
2 -1.454937 -1.290908
3 -1.427240 -1.266333
4 -1.400069 -1.242226
5 -1.373416 -1.218578
For other available methods (including in-sample prediction, simulation of
time series, extending the results to incorporate new data, and the news),
see the documentation for state space models.
References
----------
.. [1] Bańbura, Marta, and Michele Modugno.
"Maximum likelihood estimation of factor models on datasets with
arbitrary pattern of missing data."
Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
.. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
"Nowcasting."
The Oxford Handbook of Economic Forecasting. July 8, 2011.
.. [3] Bok, Brandyn, Daniele Caratelli, Domenico Giannone,
Argia M. Sbordone, and Andrea Tambalotti. 2018.
"Macroeconomic Nowcasting and Forecasting with Big Data."
Annual Review of Economics 10 (1): 615-43.
https://doi.org/10.1146/annurev-economics-080217-053214.
.. [4] Mariano, Roberto S., and Yasutomo Murasawa.
"A coincident index, common factors, and monthly real GDP."
Oxford Bulletin of Economics and Statistics 72, no. 1 (2010): 27-46.
"""
    def __init__(self, endog, k_endog_monthly=None, factors=1, factor_orders=1,
                 factor_multiplicities=None, idiosyncratic_ar1=True,
                 standardize=True, endog_quarterly=None, init_t0=False,
                 obs_cov_diag=False, **kwargs):
        """
        Initialize the dynamic factor model.

        See the class docstring for a full description of the parameters.
        This constructor combines the monthly/quarterly data (if applicable),
        optionally standardizes the observations, builds the fixed parts of
        the state space representation (design, transition, and selection
        matrices), and sets up the parameter bookkeeping used in estimation.
        """
        # Handle endog variables
        if endog_quarterly is not None:
            if k_endog_monthly is not None:
                raise ValueError('If `endog_quarterly` is specified, then'
                                 ' `endog` must contain only monthly'
                                 ' variables, and so `k_endog_monthly` cannot'
                                 ' be specified since it will be inferred from'
                                 ' the shape of `endog`.')
            endog, k_endog_monthly = self.construct_endog(
                endog, endog_quarterly)
        endog_is_pandas = _is_using_pandas(endog, None)
        # Ensure `endog` is two-dimensional (a DataFrame or a 2-d ndarray)
        if endog_is_pandas:
            if isinstance(endog, pd.Series):
                endog = endog.to_frame()
        else:
            if np.ndim(endog) < 2:
                endog = np.atleast_2d(endog).T
        # If no quarterly data were given, all variables are monthly
        if k_endog_monthly is None:
            k_endog_monthly = endog.shape[1]
        if endog_is_pandas:
            endog_names = endog.columns.tolist()
        else:
            if endog.shape[1] == 1:
                endog_names = ['y']
            else:
                endog_names = [f'y{i + 1}' for i in range(endog.shape[1])]
        self.k_endog_M = int_like(k_endog_monthly, 'k_endog_monthly')
        self.k_endog_Q = endog.shape[1] - self.k_endog_M
        # Compute helper for handling factors / state indexing
        s = self._s = DynamicFactorMQStates(
            self.k_endog_M, self.k_endog_Q, endog_names, factors,
            factor_orders, factor_multiplicities, idiosyncratic_ar1)
        # Save parameterization
        self.factors = factors
        self.factor_orders = factor_orders
        self.factor_multiplicities = factor_multiplicities
        self.endog_factor_map = self._s.endog_factor_map
        self.factor_block_orders = self._s.factor_block_orders
        self.factor_names = self._s.factor_names
        self.k_factors = self._s.k_factors
        self.k_factor_blocks = len(self.factor_block_orders)
        self.max_factor_order = self._s.max_factor_order
        self.idiosyncratic_ar1 = idiosyncratic_ar1
        self.init_t0 = init_t0
        self.obs_cov_diag = obs_cov_diag
        if self.init_t0:
            # TODO: test each of these options
            # Prepend a row of NaNs so that the Kalman filter initialization
            # effectively applies to alpha_0 rather than alpha_1 (see the
            # `init_t0` discussion in the class docstring).
            if endog_is_pandas:
                ix = pd.period_range(endog.index[0] - 1, endog.index[-1],
                                     freq=endog.index.freq)
                endog = endog.reindex(ix)
            else:
                endog = np.c_[[np.nan] * endog.shape[1], endog.T].T
        # Standardize endog, if requested
        # Note: endog_mean and endog_std will always each be 1-dimensional with
        # length equal to the number of endog variables
        if isinstance(standardize, tuple) and len(standardize) == 2:
            # Tuple form: user-supplied (mean, std), e.g. when cloning a
            # model with `retain_standardization=True`
            endog_mean, endog_std = standardize
            # Validate the input
            n = endog.shape[1]
            if (isinstance(endog_mean, pd.Series) and not
                    endog_mean.index.equals(pd.Index(endog_names))):
                raise ValueError('Invalid value passed for `standardize`:'
                                 ' if a Pandas Series, must have index'
                                 f' {endog_names}. Got {endog_mean.index}.')
            else:
                endog_mean = np.atleast_1d(endog_mean)
            if (isinstance(endog_std, pd.Series) and not
                    endog_std.index.equals(pd.Index(endog_names))):
                raise ValueError('Invalid value passed for `standardize`:'
                                 ' if a Pandas Series, must have index'
                                 f' {endog_names}. Got {endog_std.index}.')
            else:
                endog_std = np.atleast_1d(endog_std)
            if (np.shape(endog_mean) != (n,) or np.shape(endog_std) != (n,)):
                raise ValueError('Invalid value passed for `standardize`: each'
                                 f' element must be shaped ({n},).')
            standardize = True
            # Make sure we have Pandas if endog is Pandas
            if endog_is_pandas:
                endog_mean = pd.Series(endog_mean, index=endog_names)
                endog_std = pd.Series(endog_std, index=endog_names)
        elif standardize in [1, True]:
            endog_mean = endog.mean(axis=0)
            endog_std = endog.std(axis=0)
        elif standardize in [0, False]:
            # No-op standardization: subtract zero, divide by one
            endog_mean = np.zeros(endog.shape[1])
            endog_std = np.ones(endog.shape[1])
        else:
            raise ValueError('Invalid value passed for `standardize`.')
        self._endog_mean = endog_mean
        self._endog_std = endog_std
        self.standardize = standardize
        # A (near-)zero standard deviation indicates a constant variable,
        # which cannot be standardized and is not supported by this model
        if np.any(self._endog_std < 1e-10):
            ix = np.where(self._endog_std < 1e-10)
            names = np.array(endog_names)[ix[0]].tolist()
            raise ValueError('Constant variable(s) found in observed'
                             ' variables, but constants cannot be included'
                             f' in this model. These variables are: {names}.')
        if self.standardize:
            endog = (endog - self._endog_mean) / self._endog_std
        # Observation / states slices
        o = self._o = {
            'M': np.s_[:self.k_endog_M],
            'Q': np.s_[self.k_endog_M:]}
        # Construct the basic state space representation
        super().__init__(endog, k_states=s.k_states, k_posdef=s.k_posdef,
                         **kwargs)
        # Revert the standardization for orig_endog
        if self.standardize:
            self.data.orig_endog = (
                self.data.orig_endog * self._endog_std + self._endog_mean)
        # State initialization
        # Note: we could just initialize the entire thing as stationary, but
        # doing each block separately should be faster and avoid numerical
        # issues
        if 'initialization' not in kwargs:
            self.ssm.initialize(self._default_initialization())
        # Fixed components of the state space representation
        # > design
        if self.idiosyncratic_ar1:
            self['design', o['M'], s['idio_ar_M']] = np.eye(self.k_endog_M)
        # Weights (1, 2, 3, 2, 1) over the five lagged monthly states map
        # to each observed quarterly variable (see the mixed-frequency
        # discussion and reference [4] in the class docstring)
        multipliers = [1, 2, 3, 2, 1]
        for i in range(len(multipliers)):
            m = multipliers[i]
            self['design', o['Q'], s['idio_ar_Q_ix'][:, i]] = (
                m * np.eye(self.k_endog_Q))
        # > obs cov
        if self.obs_cov_diag:
            self['obs_cov'] = np.eye(self.k_endog) * 1e-4
        # > transition
        # Each factor block gets a (companion-form) VAR transition; the
        # placeholder coefficient blocks are filled in from the parameters
        for block in s.factor_blocks:
            if block.k_factors == 1:
                tmp = 0
            else:
                tmp = np.zeros((block.k_factors, block.k_factors))
            self['transition', block['factors'], block['factors']] = (
                companion_matrix([1] + [tmp] * block._factor_order).T)
        if self.k_endog_Q == 1:
            tmp = 0
        else:
            tmp = np.zeros((self.k_endog_Q, self.k_endog_Q))
        self['transition', s['idio_ar_Q'], s['idio_ar_Q']] = (
            companion_matrix([1] + [tmp] * 5).T)
        # > selection
        ix1 = ix2 = 0
        for block in s.factor_blocks:
            ix2 += block.k_factors
            self['selection', block['factors_ix'][:, 0], ix1:ix2] = (
                np.eye(block.k_factors))
            ix1 = ix2
        if self.idiosyncratic_ar1:
            ix2 = ix1 + self.k_endog_M
            self['selection', s['idio_ar_M'], ix1:ix2] = np.eye(self.k_endog_M)
            ix1 = ix2
        ix2 = ix1 + self.k_endog_Q
        self['selection', s['idio_ar_Q_ix'][:, 0], ix1:ix2] = (
            np.eye(self.k_endog_Q))
        # Parameters
        # Counts of free parameters per group, in estimation order
        self.params = OrderedDict([
            ('loadings', np.sum(self.endog_factor_map.values)),
            ('factor_ar', np.sum([block.k_factors**2 * block.factor_order
                                  for block in s.factor_blocks])),
            ('factor_cov', np.sum([block.k_factors * (block.k_factors + 1) // 2
                                   for block in s.factor_blocks])),
            ('idiosyncratic_ar1',
                self.k_endog if self.idiosyncratic_ar1 else 0),
            ('idiosyncratic_var', self.k_endog)])
        self.k_params = np.sum(list(self.params.values()))
        # Parameter slices
        # self._p[name] gives the integer indexes of that parameter group
        # within the full parameter vector
        ix = np.split(np.arange(self.k_params),
                      np.cumsum(list(self.params.values()))[:-1])
        self._p = dict(zip(self.params.keys(), ix))
        # Cache
        self._loading_constraints = {}
        # Initialization kwarg keys, e.g. for cloning
        self._init_keys += [
            'factors', 'factor_orders', 'factor_multiplicities',
            'idiosyncratic_ar1', 'standardize', 'init_t0',
            'obs_cov_diag'] + list(kwargs.keys())
@classmethod
def construct_endog(cls, endog_monthly, endog_quarterly):
"""
Construct a combined dataset from separate monthly and quarterly data.
Parameters
----------
endog_monthly : array_like
Monthly dataset. If a quarterly dataset is given, then this must
be a Pandas object with a PeriodIndex or DatetimeIndex at a monthly
frequency.
endog_quarterly : array_like or None
Quarterly dataset. If not None, then this must be a Pandas object
with a PeriodIndex or DatetimeIndex at a quarterly frequency.
Returns
-------
endog : array_like
If both endog_monthly and endog_quarterly were given, this is a
Pandas DataFrame with a PeriodIndex at the monthly frequency, with
all of the columns from `endog_monthly` ordered first and the
columns from `endog_quarterly` ordered afterwards. Otherwise it is
simply the input `endog_monthly` dataset.
k_endog_monthly : int
The number of monthly variables (which are ordered first) in the
returned `endog` dataset.
"""
# Create combined dataset
if endog_quarterly is not None:
# Validate endog_monthly
base_msg = ('If given both monthly and quarterly data'
' then the monthly dataset must be a Pandas'
' object with a date index at a monthly frequency.')
if not isinstance(endog_monthly, (pd.Series, pd.DataFrame)):
raise ValueError('Given monthly dataset is not a'
' Pandas object. ' + base_msg)
elif endog_monthly.index.inferred_type not in ("datetime64",
"period"):
raise ValueError('Given monthly dataset has an'
' index with non-date values. ' + base_msg)
elif not getattr(endog_monthly.index, 'freqstr', 'N')[0] == 'M':
freqstr = getattr(endog_monthly.index, 'freqstr', 'None')
raise ValueError('Index of given monthly dataset has a'
' non-monthly frequency (to check this,'
' examine the `freqstr` attribute of the'
' index of the dataset - it should start with'
' M if it is monthly).'
f' Got {freqstr}. ' + base_msg)
# Validate endog_quarterly
base_msg = ('If a quarterly dataset is given, then it must be a'
' Pandas object with a date index at a quarterly'
' frequency.')
if not isinstance(endog_quarterly, (pd.Series, pd.DataFrame)):
raise ValueError('Given quarterly dataset is not a'
' Pandas object. ' + base_msg)
elif endog_quarterly.index.inferred_type not in ("datetime64",
"period"):
raise ValueError('Given quarterly dataset has an'
' index with non-date values. ' + base_msg)
elif not getattr(endog_quarterly.index, 'freqstr', 'N')[0] == 'Q':
freqstr = getattr(endog_quarterly.index, 'freqstr', 'None')
raise ValueError('Index of given quarterly dataset'
' has a non-quarterly frequency (to check'
' this, examine the `freqstr` attribute of'
' the index of the dataset - it should start'
' with Q if it is quarterly).'
f' Got {freqstr}. ' + base_msg)
# Convert to PeriodIndex, if applicable
if hasattr(endog_monthly.index, 'to_period'):
endog_monthly = endog_monthly.to_period('M')
if hasattr(endog_quarterly.index, 'to_period'):
endog_quarterly = endog_quarterly.to_period('Q')
# Combine the datasets
endog = pd.concat([
endog_monthly,
endog_quarterly.resample('M', convention='end').first()],
axis=1)
# Make sure we didn't accidentally get duplicate column names
column_counts = endog.columns.value_counts()
if column_counts.max() > 1:
columns = endog.columns.values.astype(object)
for name in column_counts.index:
count = column_counts.loc[name]
if count == 1:
continue
mask = columns == name
columns[mask] = [f'{name}{i + 1}' for i in range(count)]
endog.columns = columns
else:
endog = endog_monthly.copy()
shape = endog_monthly.shape
k_endog_monthly = shape[1] if len(shape) == 2 else 1
return endog, k_endog_monthly
def clone(self, endog, k_endog_monthly=None, endog_quarterly=None,
retain_standardization=False, **kwargs):
"""
Clone state space model with new data and optionally new specification.
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which
the provided `endog` dataset contains both the monthly and
quarterly data, this variable should be used to indicate how many
of the variables are monthly.
endog_quarterly : array_like, optional
Observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
kwargs
Keyword arguments to pass to the new model class to change the
model specification.
Returns
-------
model : DynamicFactorMQ instance
"""
if retain_standardization and self.standardize:
kwargs['standardize'] = (self._endog_mean, self._endog_std)
mod = self._clone_from_init_kwds(
endog, k_endog_monthly=k_endog_monthly,
endog_quarterly=endog_quarterly, **kwargs)
return mod
@property
def _res_classes(self):
return {'fit': (DynamicFactorMQResults, mlemodel.MLEResultsWrapper)}
def _default_initialization(self):
s = self._s
init = initialization.Initialization(self.k_states)
for block in s.factor_blocks:
init.set(block['factors'], 'stationary')
if self.idiosyncratic_ar1:
for i in range(s['idio_ar_M'].start, s['idio_ar_M'].stop):
init.set(i, 'stationary')
init.set(s['idio_ar_Q'], 'stationary')
return init
def _get_endog_names(self, truncate=None, as_string=None):
if truncate is None:
truncate = False if as_string is False or self.k_endog == 1 else 24
if as_string is False and truncate is not False:
raise ValueError('Can only truncate endog names if they'
' are returned as a string.')
if as_string is None:
as_string = truncate is not False
# The base `endog_names` property is only a list if there are at least
# two variables; often, we need it to be a list
endog_names = self.endog_names
if not isinstance(endog_names, list):
endog_names = [endog_names]
if as_string:
endog_names = [str(name) for name in endog_names]
if truncate is not False:
n = truncate
endog_names = [name if len(name) <= n else name[:n] + '...'
for name in endog_names]
return endog_names
@property
def _model_name(self):
model_name = [
'Dynamic Factor Model',
f'{self.k_factors} factors in {self.k_factor_blocks} blocks']
if self.k_endog_Q > 0:
model_name.append('Mixed frequency (M/Q)')
error_type = 'AR(1)' if self.idiosyncratic_ar1 else 'iid'
model_name.append(f'{error_type} idiosyncratic')
return model_name
def summary(self, truncate_endog_names=None):
"""
Create a summary table describing the model.
Parameters
----------
truncate_endog_names : int, optional
The number of characters to show for names of observed variables.
Default is 24 if there is more than one observed variable, or
an unlimited number of there is only one.
"""
# Get endog names
endog_names = self._get_endog_names(truncate=truncate_endog_names,
as_string=True)
title = 'Model Specification: Dynamic Factor Model'
if self._index_dates:
ix = self._index
d = ix[0]
sample = ['%s' % d]
d = ix[-1]
sample += ['- ' + '%s' % d]
else:
sample = [str(0), ' - ' + str(self.nobs)]
# Standardize the model name as a list of str
model_name = self._model_name
# - Top summary table ------------------------------------------------
top_left = []
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Sample:', [sample[0]]),
('', [sample[1]])]
top_right = []
if self.k_endog_Q > 0:
top_right += [
('# of monthly variables:', [self.k_endog_M]),
('# of quarterly variables:', [self.k_endog_Q])]
else:
top_right += [('# of observed variables:', [self.k_endog])]
if self.k_factor_blocks == 1:
top_right += [('# of factors:', [self.k_factors])]
else:
top_right += [('# of factor blocks:', [self.k_factor_blocks])]
top_right += [('Idiosyncratic disturbances:',
['AR(1)' if self.idiosyncratic_ar1 else 'iid']),
('Standardize variables:', [self.standardize])]
summary = Summary()
self.model = self
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
table_ix = 1
del self.model
# - Endog / factor map -----------------------------------------------
data = self.endog_factor_map.replace({True: 'X', False: ''})
data.index = endog_names
try:
items = data.items()
except AttributeError:
# Remove after pandas 1.5 is minimum
items = data.iteritems()
for name, col in items:
data[name] = data[name] + (' ' * (len(name) // 2))
data.index.name = 'Dep. variable'
data = data.reset_index()
params_data = data.values
params_header = data.columns.map(str).tolist()
params_stubs = None
title = 'Observed variables / factor loadings'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
# - Factor blocks summary table --------------------------------------
data = self.factor_block_orders.reset_index()
data['block'] = data['block'].map(
lambda factor_names: ', '.join(factor_names))
data[['order']] = (
data[['order']].applymap(str))
params_data = data.values
params_header = data.columns.map(str).tolist()
params_stubs = None
title = 'Factor blocks:'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
return summary
def __str__(self):
"""Summary tables showing model specification."""
return str(self.summary())
@property
def state_names(self):
"""(list of str) List of human readable names for unobserved states."""
# Factors
state_names = []
for block in self._s.factor_blocks:
state_names += [f'{name}' for name in block.factor_names[:]]
for s in range(1, block._factor_order):
state_names += [f'L{s}.{name}'
for name in block.factor_names]
# Monthly error
endog_names = self._get_endog_names()
if self.idiosyncratic_ar1:
endog_names_M = endog_names[self._o['M']]
state_names += [f'eps_M.{name}' for name in endog_names_M]
endog_names_Q = endog_names[self._o['Q']]
# Quarterly error
state_names += [f'eps_Q.{name}' for name in endog_names_Q]
for s in range(1, 5):
state_names += [f'L{s}.eps_Q.{name}' for name in endog_names_Q]
return state_names
@property
def param_names(self):
"""(list of str) List of human readable parameter names."""
param_names = []
# Loadings
# So that Lambda = params[ix].reshape(self.k_endog, self.k_factors)
# (where Lambda stacks Lambda_M and Lambda_Q)
endog_names = self._get_endog_names(as_string=False)
for endog_name in endog_names:
for block in self._s.factor_blocks:
for factor_name in block.factor_names:
if self.endog_factor_map.loc[endog_name, factor_name]:
param_names.append(
f'loading.{factor_name}->{endog_name}')
# Factor VAR
for block in self._s.factor_blocks:
for to_factor in block.factor_names:
param_names += [f'L{i}.{from_factor}->{to_factor}'
for i in range(1, block.factor_order + 1)
for from_factor in block.factor_names]
# Factor covariance
for i in range(len(self._s.factor_blocks)):
block = self._s.factor_blocks[i]
param_names += [f'fb({i}).cov.chol[{j + 1},{k + 1}]'
for j in range(block.k_factors)
for k in range(j + 1)]
# Error AR(1)
if self.idiosyncratic_ar1:
endog_names_M = endog_names[self._o['M']]
param_names += [f'L1.eps_M.{name}' for name in endog_names_M]
endog_names_Q = endog_names[self._o['Q']]
param_names += [f'L1.eps_Q.{name}' for name in endog_names_Q]
# Error innovation variances
param_names += [f'sigma2.{name}' for name in endog_names]
return param_names
@property
def start_params(self):
"""(array) Starting parameters for maximum likelihood estimation."""
params = np.zeros(self.k_params, dtype=np.float64)
# (1) estimate factors one at a time, where the first step uses
# PCA on all `endog` variables that load on the first factor, and
# subsequent steps use residuals from the previous steps.
# TODO: what about factors that only load on quarterly variables?
endog_factor_map_M = self.endog_factor_map.iloc[:self.k_endog_M]
factors = []
endog = (pd.DataFrame(self.endog).interpolate()
.fillna(method='backfill')
.values)
for name in self.factor_names:
# Try to retrieve this from monthly variables, which is most
# consistent
endog_ix = np.where(endog_factor_map_M.loc[:, name])[0]
# But fall back to quarterly if necessary
if len(endog_ix) == 0:
endog_ix = np.where(self.endog_factor_map.loc[:, name])[0]
factor_endog = endog[:, endog_ix]
res_pca = PCA(factor_endog, ncomp=1, method='eig', normalize=False)
factors.append(res_pca.factors)
endog[:, endog_ix] -= res_pca.projection
factors = np.concatenate(factors, axis=1)
# (2) Estimate coefficients for each endog, one at a time (OLS for
# monthly variables, restricted OLS for quarterly). Also, compute
# residuals.
loadings = []
resid = []
for i in range(self.k_endog_M):
factor_ix = self._s.endog_factor_iloc[i]
factor_exog = factors[:, factor_ix]
mod_ols = OLS(self.endog[:, i], exog=factor_exog, missing='drop')
res_ols = mod_ols.fit()
loadings += res_ols.params.tolist()
resid.append(res_ols.resid)
for i in range(self.k_endog_M, self.k_endog):
factor_ix = self._s.endog_factor_iloc[i]
factor_exog = lagmat(factors[:, factor_ix], 4, original='in')
mod_glm = GLM(self.endog[:, i], factor_exog, missing='drop')
res_glm = mod_glm.fit_constrained(self.loading_constraints(i))
loadings += res_glm.params[:len(factor_ix)].tolist()
resid.append(res_glm.resid_response)
params[self._p['loadings']] = loadings
# (3) For each factor block, use an AR or VAR model to get coefficients
# and covariance estimate
# Factor transitions
stationary = True
factor_ar = []
factor_cov = []
i = 0
for block in self._s.factor_blocks:
factors_endog = factors[:, i:i + block.k_factors]
i += block.k_factors
if block.factor_order == 0:
continue
if block.k_factors == 1:
mod_factors = SARIMAX(factors_endog,
order=(block.factor_order, 0, 0))
sp = mod_factors.start_params
block_factor_ar = sp[:-1]
block_factor_cov = sp[-1:]
coefficient_matrices = mod_factors.start_params[:-1]
elif block.k_factors > 1:
mod_factors = VAR(factors_endog)
res_factors = mod_factors.fit(
maxlags=block.factor_order, ic=None, trend='n')
block_factor_ar = res_factors.params.T.ravel()
L = np.linalg.cholesky(res_factors.sigma_u)
block_factor_cov = L[np.tril_indices_from(L)]
coefficient_matrices = np.transpose(
np.reshape(block_factor_ar,
(block.k_factors, block.k_factors,
block.factor_order)), (2, 0, 1))
# Test for stationarity
stationary = is_invertible([1] + list(-coefficient_matrices))
# Check for stationarity
if not stationary:
warn('Non-stationary starting factor autoregressive'
' parameters found for factor block'
f' {block.factor_names}. Using zeros as starting'
' parameters.')
block_factor_ar[:] = 0
cov_factor = np.diag(factors_endog.std(axis=0))
block_factor_cov = (
cov_factor[np.tril_indices(block.k_factors)])
factor_ar += block_factor_ar.tolist()
factor_cov += block_factor_cov.tolist()
params[self._p['factor_ar']] = factor_ar
params[self._p['factor_cov']] = factor_cov
# (4) Use residuals from step (2) to estimate the idiosyncratic
# component
# Idiosyncratic component
if self.idiosyncratic_ar1:
idio_ar1 = []
idio_var = []
for i in range(self.k_endog_M):
mod_idio = SARIMAX(resid[i], order=(1, 0, 0), trend='c')
sp = mod_idio.start_params
idio_ar1.append(np.clip(sp[1], -0.99, 0.99))
idio_var.append(np.clip(sp[-1], 1e-5, np.inf))
for i in range(self.k_endog_M, self.k_endog):
y = self.endog[:, i].copy()
y[~np.isnan(y)] = resid[i]
mod_idio = QuarterlyAR1(y)
res_idio = mod_idio.fit(maxiter=10, return_params=True,
disp=False)
res_idio = mod_idio.fit_em(res_idio, maxiter=5,
return_params=True)
idio_ar1.append(np.clip(res_idio[0], -0.99, 0.99))
idio_var.append(np.clip(res_idio[1], 1e-5, np.inf))
params[self._p['idiosyncratic_ar1']] = idio_ar1
params[self._p['idiosyncratic_var']] = idio_var
else:
idio_var = [np.var(resid[i]) for i in range(self.k_endog_M)]
for i in range(self.k_endog_M, self.k_endog):
y = self.endog[:, i].copy()
y[~np.isnan(y)] = resid[i]
mod_idio = QuarterlyAR1(y)
res_idio = mod_idio.fit(return_params=True, disp=False)
idio_var.append(np.clip(res_idio[1], 1e-5, np.inf))
params[self._p['idiosyncratic_var']] = idio_var
return params
def transform_params(self, unconstrained):
"""
Transform parameters from optimizer space to model space.
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
"""
constrained = unconstrained.copy()
# Stationary factor VAR
unconstrained_factor_ar = unconstrained[self._p['factor_ar']]
constrained_factor_ar = []
i = 0
for block in self._s.factor_blocks:
length = block.k_factors**2 * block.factor_order
tmp_coeff = np.reshape(
unconstrained_factor_ar[i:i + length],
(block.k_factors, block.k_factors * block.factor_order))
tmp_cov = np.eye(block.k_factors)
tmp_coeff, _ = constrain_stationary_multivariate(tmp_coeff,
tmp_cov)
constrained_factor_ar += tmp_coeff.ravel().tolist()
i += length
constrained[self._p['factor_ar']] = constrained_factor_ar
# Stationary idiosyncratic AR(1)
if self.idiosyncratic_ar1:
idio_ar1 = unconstrained[self._p['idiosyncratic_ar1']]
constrained[self._p['idiosyncratic_ar1']] = [
constrain_stationary_univariate(idio_ar1[i:i + 1])[0]
for i in range(self.k_endog)]
# Positive idiosyncratic variances
constrained[self._p['idiosyncratic_var']] = (
constrained[self._p['idiosyncratic_var']]**2)
return constrained
def untransform_params(self, constrained):
"""
Transform parameters from model space to optimizer space.
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer.
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
unconstrained = constrained.copy()
# Stationary factor VAR
constrained_factor_ar = constrained[self._p['factor_ar']]
unconstrained_factor_ar = []
i = 0
for block in self._s.factor_blocks:
length = block.k_factors**2 * block.factor_order
tmp_coeff = np.reshape(
constrained_factor_ar[i:i + length],
(block.k_factors, block.k_factors * block.factor_order))
tmp_cov = np.eye(block.k_factors)
tmp_coeff, _ = unconstrain_stationary_multivariate(tmp_coeff,
tmp_cov)
unconstrained_factor_ar += tmp_coeff.ravel().tolist()
i += length
unconstrained[self._p['factor_ar']] = unconstrained_factor_ar
# Stationary idiosyncratic AR(1)
if self.idiosyncratic_ar1:
idio_ar1 = constrained[self._p['idiosyncratic_ar1']]
unconstrained[self._p['idiosyncratic_ar1']] = [
unconstrain_stationary_univariate(idio_ar1[i:i + 1])[0]
for i in range(self.k_endog)]
# Positive idiosyncratic variances
unconstrained[self._p['idiosyncratic_var']] = (
unconstrained[self._p['idiosyncratic_var']]**0.5)
return unconstrained
    def update(self, params, **kwargs):
        """
        Update the parameters of the model.

        Writes the loading, factor VAR, factor covariance, and idiosyncratic
        parameters into the state space system matrices.

        Parameters
        ----------
        params : array_like
            Array of new parameters.
        transformed : bool, optional
            Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
        """
        params = super().update(params, **kwargs)

        # Local copies
        o = self._o
        s = self._s
        p = self._p

        # Loadings
        loadings = params[p['loadings']]
        start = 0
        # Monthly variables load only on the current (L1) factors.
        for i in range(self.k_endog_M):
            iloc = self._s.endog_factor_iloc[i]
            k_factors = len(iloc)
            factor_ix = s['factors_L1'][iloc]
            self['design', i, factor_ix] = loadings[start:start + k_factors]
            start += k_factors
        # Quarterly variables load on the current factors and four lags,
        # with the single estimated loading spread across lags via the
        # (1, 2, 3, 2, 1) weight pattern (see `loading_constraints`).
        multipliers = np.array([1, 2, 3, 2, 1])[:, None]
        for i in range(self.k_endog_M, self.k_endog):
            iloc = self._s.endog_factor_iloc[i]
            k_factors = len(iloc)
            factor_ix = s['factors_L1_5_ix'][:, iloc]
            self['design', i, factor_ix.ravel()] = np.ravel(
                loadings[start:start + k_factors] * multipliers)
            start += k_factors

        # Factor VAR
        factor_ar = params[p['factor_ar']]
        start = 0
        for block in s.factor_blocks:
            k_params = block.k_factors**2 * block.factor_order
            A = np.reshape(
                factor_ar[start:start + k_params],
                (block.k_factors, block.k_factors * block.factor_order))
            start += k_params
            self['transition', block['factors_L1'], block['factors_ar']] = A

        # Factor covariance: parameters are the elements of the
        # lower-triangular Cholesky factor L, so recover Q = L @ L'.
        factor_cov = params[p['factor_cov']]
        start = 0
        ix1 = 0
        for block in s.factor_blocks:
            k_params = block.k_factors * (block.k_factors + 1) // 2
            L = np.zeros((block.k_factors, block.k_factors),
                         dtype=params.dtype)
            L[np.tril_indices_from(L)] = factor_cov[start:start + k_params]
            start += k_params
            Q = L @ L.T
            ix2 = ix1 + block.k_factors
            self['state_cov', ix1:ix2, ix1:ix2] = Q
            ix1 = ix2

        # Error AR(1)
        if self.idiosyncratic_ar1:
            alpha = np.diag(params[p['idiosyncratic_ar1']])
            self['transition', s['idio_ar_L1'], s['idio_ar_L1']] = alpha

        # Error variances
        if self.idiosyncratic_ar1:
            # With AR(1) errors, all idiosyncratic variances are state
            # innovation variances.
            self['state_cov', self.k_factors:, self.k_factors:] = (
                np.diag(params[p['idiosyncratic_var']]))
        else:
            # With iid errors, monthly variances enter the observation
            # covariance, while quarterly variances remain in the state
            # covariance (quarterly errors are always states).
            idio_var = params[p['idiosyncratic_var']]
            self['obs_cov', o['M'], o['M']] = np.diag(idio_var[o['M']])
            self['state_cov', self.k_factors:, self.k_factors:] = (
                np.diag(idio_var[o['Q']]))
@property
def loglike_constant(self):
"""
Constant term in the joint log-likelihood function.
Useful in facilitating comparisons to other packages that exclude the
constant from the log-likelihood computation.
"""
return -0.5 * (1 - np.isnan(self.endog)).sum() * np.log(2 * np.pi)
def loading_constraints(self, i):
r"""
Matrix formulation of quarterly variables' factor loading constraints.
Parameters
----------
i : int
Index of the `endog` variable to compute constraints for.
Returns
-------
R : array (k_constraints, k_factors * 5)
q : array (k_constraints,)
Notes
-----
If the factors were known, then the factor loadings for the ith
quarterly variable would be computed by a linear regression of the form
y_i = A_i' f + B_i' L1.f + C_i' L2.f + D_i' L3.f + E_i' L4.f
where:
- f is (k_i x 1) and collects all of the factors that load on y_i
- L{j}.f is (k_i x 1) and collects the jth lag of each factor
- A_i, ..., E_i are (k_i x 1) and collect factor loadings
As the observed variable is quarterly while the factors are monthly, we
want to restrict the estimated regression coefficients to be:
y_i = A_i f + 2 A_i L1.f + 3 A_i L2.f + 2 A_i L3.f + A_i L4.f
Stack the unconstrained coefficients: \Lambda_i = [A_i' B_i' ... E_i']'
Then the constraints can be written as follows, for l = 1, ..., k_i
- 2 A_{i,l} - B_{i,l} = 0
- 3 A_{i,l} - C_{i,l} = 0
- 2 A_{i,l} - D_{i,l} = 0
- A_{i,l} - E_{i,l} = 0
So that k_constraints = 4 * k_i. In matrix form the constraints are:
.. math::
R \Lambda_i = q
where :math:`\Lambda_i` is shaped `(k_i * 5,)`, :math:`R` is shaped
`(k_constraints, k_i * 5)`, and :math:`q` is shaped `(k_constraints,)`.
For example, for the case that k_i = 2, we can write:
| 2 0 -1 0 0 0 0 0 0 0 | | A_{i,1} | | 0 |
| 0 2 0 -1 0 0 0 0 0 0 | | A_{i,2} | | 0 |
| 3 0 0 0 -1 0 0 0 0 0 | | B_{i,1} | | 0 |
| 0 3 0 0 0 -1 0 0 0 0 | | B_{i,2} | | 0 |
| 2 0 0 0 0 0 -1 0 0 0 | | C_{i,1} | = | 0 |
| 0 2 0 0 0 0 0 -1 0 0 | | C_{i,2} | | 0 |
| 1 0 0 0 0 0 0 0 -1 0 | | D_{i,1} | | 0 |
| 0 1 0 0 0 0 0 0 0 -1 | | D_{i,2} | | 0 |
| E_{i,1} | | 0 |
| E_{i,2} | | 0 |
"""
if i < self.k_endog_M:
raise ValueError('No constraints for monthly variables.')
if i not in self._loading_constraints:
k_factors = self.endog_factor_map.iloc[i].sum()
R = np.zeros((k_factors * 4, k_factors * 5))
q = np.zeros(R.shape[0])
# Let R = [R_1 R_2]
# Then R_1 is multiples of the identity matrix
multipliers = np.array([1, 2, 3, 2, 1])
R[:, :k_factors] = np.reshape(
(multipliers[1:] * np.eye(k_factors)[..., None]).T,
(k_factors * 4, k_factors))
# And R_2 is the identity
R[:, k_factors:] = np.diag([-1] * (k_factors * 4))
self._loading_constraints[i] = (R, q)
return self._loading_constraints[i]
    def fit(self, start_params=None, transformed=True, includes_fixed=False,
            cov_type='none', cov_kwds=None, method='em', maxiter=500,
            tolerance=1e-6, em_initialization=True, mstep_method=None,
            full_output=1, disp=False, callback=None, return_params=False,
            optim_score=None, optim_complex_step=None, optim_hessian=None,
            flags=None, low_memory=False, llf_decrease_action='revert',
            llf_decrease_tolerance=1e-4, **kwargs):
        """
        Fits the model by maximum likelihood via Kalman filter.

        Parameters
        ----------
        start_params : array_like, optional
            Initial guess of the solution for the loglikelihood maximization.
            If None, the default is given by Model.start_params.
        transformed : bool, optional
            Whether or not `start_params` is already transformed. Default is
            True.
        includes_fixed : bool, optional
            If parameters were previously fixed with the `fix_params` method,
            this argument describes whether or not `start_params` also includes
            the fixed parameters, in addition to the free parameters. Default
            is False.
        cov_type : str, optional
            The `cov_type` keyword governs the method for calculating the
            covariance matrix of parameter estimates. Can be one of:

            - 'opg' for the outer product of gradient estimator
            - 'oim' for the observed information matrix estimator, calculated
              using the method of Harvey (1989)
            - 'approx' for the observed information matrix estimator,
              calculated using a numerical approximation of the Hessian matrix.
            - 'robust' for an approximate (quasi-maximum likelihood) covariance
              matrix that may be valid even in the presence of some
              misspecifications. Intermediate calculations use the 'oim'
              method.
            - 'robust_approx' is the same as 'robust' except that the
              intermediate calculations use the 'approx' method.
            - 'none' for no covariance matrix calculation.

            Default is 'none', since computing this matrix can be very slow
            when there are a large number of parameters.
        cov_kwds : dict or None, optional
            A dictionary of arguments affecting covariance matrix computation.

            **opg, oim, approx, robust, robust_approx**

            - 'approx_complex_step' : bool, optional - If True, numerical
              approximations are computed using complex-step methods. If False,
              numerical approximations are computed using finite difference
              methods. Default is True.
            - 'approx_centered' : bool, optional - If True, numerical
              approximations computed using finite difference methods use a
              centered approximation. Default is False.
        method : str, optional
            The `method` determines which solver from `scipy.optimize`
            is used, and it can be chosen from among the following strings:

            - 'em' for the EM algorithm
            - 'newton' for Newton-Raphson
            - 'nm' for Nelder-Mead
            - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
            - 'lbfgs' for limited-memory BFGS with optional box constraints
            - 'powell' for modified Powell's method
            - 'cg' for conjugate gradient
            - 'ncg' for Newton-conjugate gradient
            - 'basinhopping' for global basin-hopping solver

            The explicit arguments in `fit` are passed to the solver,
            with the exception of the basin-hopping solver. Each
            solver has several optional arguments that are not the same across
            solvers. See the notes section below (or scipy.optimize) for the
            available arguments and for the list of explicit arguments that the
            basin-hopping solver supports.
        maxiter : int, optional
            The maximum number of iterations to perform.
        tolerance : float, optional
            Tolerance to use for convergence checking when using the EM
            algorithm. To set the tolerance for other methods, pass
            the optimizer-specific keyword argument(s).
        em_initialization : bool, optional
            Whether or not to also update the Kalman filter initialization
            using the EM algorithm. Default is True. This keyword is only
            relevant if `method='em'`.
        mstep_method : {None, 'missing', 'nonmissing'}, optional
            The EM algorithm maximization step. If there are no NaN values
            in the dataset, this can be set to "nonmissing" (which is slightly
            faster) or "missing", otherwise it must be "missing". Default is
            "nonmissing" if there are no NaN values or "missing" if there
            are. This keyword is only relevant if `method='em'`.
        full_output : bool, optional
            Set to True to have all available output in the Results object's
            mle_retvals attribute. The output is dependent on the solver.
            See LikelihoodModelResults notes section for more information.
        disp : bool, optional
            Set to True to print convergence messages.
        callback : callable callback(xk), optional
            Called after each iteration, as callback(xk), where xk is the
            current parameter vector.
        return_params : bool, optional
            Whether or not to return only the array of maximizing parameters.
            Default is False.
        optim_score : {'harvey', 'approx'} or None, optional
            The method by which the score vector is calculated. 'harvey' uses
            the method from Harvey (1989), 'approx' uses either finite
            difference or complex step differentiation depending upon the
            value of `optim_complex_step`, and None uses the built-in gradient
            approximation of the optimizer. Default is None. This keyword is
            only relevant if the optimization method uses the score.
        optim_complex_step : bool, optional
            Whether or not to use complex step differentiation when
            approximating the score; if False, finite difference approximation
            is used. Default is True. This keyword is only relevant if
            `optim_score` is set to 'harvey' or 'approx'.
        optim_hessian : {'opg','oim','approx'}, optional
            The method by which the Hessian is numerically approximated. 'opg'
            uses outer product of gradients, 'oim' uses the information
            matrix formula from Harvey (1989), and 'approx' uses numerical
            approximation. This keyword is only relevant if the
            optimization method uses the Hessian matrix.
        flags : dict, optional
            Internal keyword, passed through to the base model's `fit`
            method for non-EM solvers; not typically set directly by users.
        low_memory : bool, optional
            If set to True, techniques are applied to substantially reduce
            memory usage. If used, some features of the results object will
            not be available (including smoothed results and in-sample
            prediction), although out-of-sample forecasting is possible.
            Note that this option is not available when using the EM algorithm
            (which is the default for this model). Default is False.
        llf_decrease_action : {'ignore', 'warn', 'revert'}, optional
            Action to take if the log-likelihood decreases in an EM iteration.
            'ignore' continues the iterations, 'warn' issues a warning but
            continues the iterations, while 'revert' ends the iterations and
            returns the result from the last good iteration. Default is
            'revert'.
        llf_decrease_tolerance : float, optional
            Minimum size of the log-likelihood decrease required to trigger a
            warning or to end the EM iterations. Setting this value slightly
            larger than zero allows small decreases in the log-likelihood that
            may be caused by numerical issues. If set to zero, then any
            decrease will trigger the `llf_decrease_action`. Default is 1e-4.
        **kwargs
            Additional keyword arguments to pass to the optimizer.

        Returns
        -------
        MLEResults

        See Also
        --------
        statsmodels.base.model.LikelihoodModel.fit
        statsmodels.tsa.statespace.mlemodel.MLEResults
        """
        # Dispatch to the EM routine by default; otherwise defer to the base
        # class's scipy.optimize-based fitting.
        if method == 'em':
            return self.fit_em(
                start_params=start_params, transformed=transformed,
                cov_type=cov_type, cov_kwds=cov_kwds, maxiter=maxiter,
                tolerance=tolerance, em_initialization=em_initialization,
                mstep_method=mstep_method, full_output=full_output, disp=disp,
                return_params=return_params, low_memory=low_memory,
                llf_decrease_action=llf_decrease_action,
                llf_decrease_tolerance=llf_decrease_tolerance, **kwargs)
        else:
            return super().fit(
                start_params=start_params, transformed=transformed,
                includes_fixed=includes_fixed, cov_type=cov_type,
                cov_kwds=cov_kwds, method=method, maxiter=maxiter,
                full_output=full_output, disp=disp,
                callback=callback, return_params=return_params,
                optim_score=optim_score,
                optim_complex_step=optim_complex_step,
                optim_hessian=optim_hessian, flags=flags,
                low_memory=low_memory, **kwargs)
def fit_em(self, start_params=None, transformed=True, cov_type='none',
cov_kwds=None, maxiter=500, tolerance=1e-6, disp=False,
em_initialization=True, mstep_method=None, full_output=True,
return_params=False, low_memory=False,
llf_decrease_action='revert', llf_decrease_tolerance=1e-4):
"""
Fits the model by maximum likelihood via the EM algorithm.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is to use `DynamicFactorMQ.start_params`.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'none', since computing this matrix can be very slow
when there are a large number of parameters.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
maxiter : int, optional
The maximum number of EM iterations to perform.
tolerance : float, optional
Parameter governing convergence of the EM algorithm. The
`tolerance` is the minimum relative increase in the likelihood
for which convergence will be declared. A smaller value for the
`tolerance` will typically yield more precise parameter estimates,
but will typically require more EM iterations. Default is 1e-6.
disp : int or bool, optional
Controls printing of EM iteration progress. If an integer, progress
is printed at every `disp` iterations. A value of True is
interpreted as the value of 1. Default is False (nothing will be
printed).
em_initialization : bool, optional
Whether or not to also update the Kalman filter initialization
using the EM algorithm. Default is True.
mstep_method : {None, 'missing', 'nonmissing'}, optional
The EM algorithm maximization step. If there are no NaN values
in the dataset, this can be set to "nonmissing" (which is slightly
faster) or "missing", otherwise it must be "missing". Default is
"nonmissing" if there are no NaN values or "missing" if there are.
full_output : bool, optional
Set to True to have all available output from EM iterations in
the Results object's mle_retvals attribute.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
This option cannot be used with the EM algorithm and will raise an
error if set to True. Default is False.
llf_decrease_action : {'ignore', 'warn', 'revert'}, optional
Action to take if the log-likelihood decreases in an EM iteration.
'ignore' continues the iterations, 'warn' issues a warning but
continues the iterations, while 'revert' ends the iterations and
returns the result from the last good iteration. Default is 'warn'.
llf_decrease_tolerance : float, optional
Minimum size of the log-likelihood decrease required to trigger a
warning or to end the EM iterations. Setting this value slightly
larger than zero allows small decreases in the log-likelihood that
may be caused by numerical issues. If set to zero, then any
decrease will trigger the `llf_decrease_action`. Default is 1e-4.
Returns
-------
DynamicFactorMQResults
See Also
--------
statsmodels.tsa.statespace.mlemodel.MLEModel.fit
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
if self._has_fixed_params:
raise NotImplementedError('Cannot fit using the EM algorithm while'
' holding some parameters fixed.')
if low_memory:
raise ValueError('Cannot fit using the EM algorithm when using'
' low_memory option.')
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
if not transformed:
start_params = self.transform_params(start_params)
llf_decrease_action = string_like(
llf_decrease_action, 'llf_decrease_action',
options=['ignore', 'warn', 'revert'])
disp = int(disp)
# Perform expectation-maximization
s = self._s
llf = []
params = [start_params]
init = None
inits = [self.ssm.initialization]
i = 0
delta = 0
terminate = False
# init_stationary = None if em_initialization else True
while i < maxiter and not terminate and (i < 1 or (delta > tolerance)):
out = self._em_iteration(params[-1], init=init,
mstep_method=mstep_method)
new_llf = out[0].llf_obs.sum()
# If we are not using EM initialization, then we need to check for
# non-stationary parameters
if not em_initialization:
self.update(out[1])
switch_init = []
T = self['transition']
init = self.ssm.initialization
iloc = np.arange(self.k_states)
# We may only have global initialization if we have no
# quarterly variables and idiosyncratic_ar1=False
if self.k_endog_Q == 0 and not self.idiosyncratic_ar1:
block = s.factor_blocks[0]
if init.initialization_type == 'stationary':
Tb = T[block['factors'], block['factors']]
if not np.all(np.linalg.eigvals(Tb) < (1 - 1e-10)):
init.set(block['factors'], 'diffuse')
switch_init.append(
'factor block:'
f' {tuple(block.factor_names)}')
else:
# Factor blocks
for block in s.factor_blocks:
b = tuple(iloc[block['factors']])
init_type = init.blocks[b].initialization_type
if init_type == 'stationary':
Tb = T[block['factors'], block['factors']]
if not np.all(np.linalg.eigvals(Tb) < (1 - 1e-10)):
init.set(block['factors'], 'diffuse')
switch_init.append(
'factor block:'
f' {tuple(block.factor_names)}')
if self.idiosyncratic_ar1:
endog_names = self._get_endog_names(as_string=True)
# Monthly variables
for j in range(s['idio_ar_M'].start, s['idio_ar_M'].stop):
init_type = init.blocks[(j,)].initialization_type
if init_type == 'stationary':
if not np.abs(T[j, j]) < (1 - 1e-10):
init.set(j, 'diffuse')
name = endog_names[j - s['idio_ar_M'].start]
switch_init.append(
'idiosyncratic AR(1) for monthly'
f' variable: {name}')
# Quarterly variables
if self.k_endog_Q > 0:
b = tuple(iloc[s['idio_ar_Q']])
init_type = init.blocks[b].initialization_type
if init_type == 'stationary':
Tb = T[s['idio_ar_Q'], s['idio_ar_Q']]
if not np.all(np.linalg.eigvals(Tb) < (1 - 1e-10)):
init.set(s['idio_ar_Q'], 'diffuse')
switch_init.append(
'idiosyncratic AR(1) for the'
' block of quarterly variables')
if len(switch_init) > 0:
warn('Non-stationary parameters found at EM iteration'
f' {i + 1}, which is not compatible with'
' stationary initialization. Initialization was'
' switched to diffuse for the following: '
f' {switch_init}, and fitting was restarted.')
results = self.fit_em(
start_params=params[-1], transformed=transformed,
cov_type=cov_type, cov_kwds=cov_kwds,
maxiter=maxiter, tolerance=tolerance,
em_initialization=em_initialization,
mstep_method=mstep_method, full_output=full_output,
disp=disp, return_params=return_params,
low_memory=low_memory,
llf_decrease_action=llf_decrease_action,
llf_decrease_tolerance=llf_decrease_tolerance)
self.ssm.initialize(self._default_initialization())
return results
# Check for decrease in the log-likelihood
# Note: allow a little numerical error before declaring a decrease
llf_decrease = (
i > 0 and (new_llf - llf[-1]) < -llf_decrease_tolerance)
if llf_decrease_action == 'revert' and llf_decrease:
warn(f'Log-likelihood decreased at EM iteration {i + 1}.'
f' Reverting to the results from EM iteration {i}'
' (prior to the decrease) and returning the solution.')
# Terminated iteration
i -= 1
terminate = True
else:
if llf_decrease_action == 'warn' and llf_decrease:
warn(f'Log-likelihood decreased at EM iteration {i + 1},'
' which can indicate numerical issues.')
llf.append(new_llf)
params.append(out[1])
if em_initialization:
init = initialization.Initialization(
self.k_states, 'known',
constant=out[0].smoothed_state[..., 0],
stationary_cov=out[0].smoothed_state_cov[..., 0])
inits.append(init)
if i > 0:
delta = (2 * np.abs(llf[-1] - llf[-2]) /
(np.abs(llf[-1]) + np.abs(llf[-2])))
else:
delta = np.inf
# If `disp` is not False, display the first iteration
if disp and i == 0:
print(f'EM start iterations, llf={llf[-1]:.5g}')
# Print output every `disp` observations
elif disp and ((i + 1) % disp) == 0:
print(f'EM iteration {i + 1}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}')
# Advance the iteration counter
i += 1
# Check for convergence
not_converged = (i == maxiter and delta > tolerance)
# If no convergence without explicit termination, warn users
if not_converged:
warn(f'EM reached maximum number of iterations ({maxiter}),'
f' without achieving convergence: llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
# If `disp` is not False, display the final iteration
if disp:
if terminate:
print(f'EM terminated at iteration {i}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
elif not_converged:
print(f'EM reached maximum number of iterations ({maxiter}),'
f' without achieving convergence: llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
else:
print(f'EM converged at iteration {i}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' < tolerance={tolerance:.5g}')
# Just return the fitted parameters if requested
if return_params:
result = params[-1]
# Otherwise construct the results class if desired
else:
if em_initialization:
base_init = self.ssm.initialization
self.ssm.initialization = init
# Note that because we are using params[-1], we are actually using
# the results from one additional iteration compared to the
# iteration at which we declared convergence.
result = self.smooth(params[-1], transformed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
if em_initialization:
self.ssm.initialization = base_init
# Save the output
if full_output:
llf.append(result.llf)
em_retvals = Bunch(**{'params': np.array(params),
'llf': np.array(llf),
'iter': i,
'inits': inits})
em_settings = Bunch(**{'method': 'em',
'tolerance': tolerance,
'maxiter': maxiter})
else:
em_retvals = None
em_settings = None
result._results.mle_retvals = em_retvals
result._results.mle_settings = em_settings
return result
def _em_iteration(self, params0, init=None, mstep_method=None):
"""EM iteration."""
# (E)xpectation step
res = self._em_expectation_step(params0, init=init)
# (M)aximization step
params1 = self._em_maximization_step(res, params0,
mstep_method=mstep_method)
return res, params1
def _em_expectation_step(self, params0, init=None):
"""EM expectation step."""
# (E)xpectation step
self.update(params0)
# Re-initialize state, if new initialization is given
if init is not None:
base_init = self.ssm.initialization
self.ssm.initialization = init
# Perform smoothing, only saving what is required
res = self.ssm.smooth(
SMOOTHER_STATE | SMOOTHER_STATE_COV | SMOOTHER_STATE_AUTOCOV,
update_filter=False)
res.llf_obs = np.array(
self.ssm._kalman_filter.loglikelihood, copy=True)
# Reset initialization
if init is not None:
self.ssm.initialization = base_init
return res
    def _em_maximization_step(self, res, params0, mstep_method=None):
        """EM maximization step.

        Parameters
        ----------
        res : object
            Output of the expectation step; must provide `smoothed_state`,
            `smoothed_state_cov`, `smoothed_state_autocov`, and `nmissing`.
        params0 : ndarray
            Parameter vector from the previous iteration; used only to
            determine the shape and dtype of the updated vector.
        mstep_method : {None, 'missing', 'nonmissing'}, optional
            Method for the observation-equation update. If None, chosen
            automatically based on whether the dataset contains NaNs.

        Returns
        -------
        params1 : ndarray
            Updated parameter vector, laid out like `params0`.
        """
        s = self._s
        # Put time on the leading axis: `a` is (T, k_states, 1); `cov_a`
        # and `acov_a` are (T, k_states, k_states)
        a = res.smoothed_state.T[..., None]
        cov_a = res.smoothed_state_cov.transpose(2, 0, 1)
        acov_a = res.smoothed_state_autocov.transpose(2, 0, 1)
        # Second moments of the smoothed states:
        # E[a_t a_t'], t = 0, ..., T
        Eaa = cov_a.copy() + np.matmul(a, a.transpose(0, 2, 1))
        # E[a_t a_{t-1}'], t = 1, ..., T
        Eaa1 = acov_a[:-1] + np.matmul(a[1:], a[:-1].transpose(0, 2, 1))
        # Observation equation: select the M-step variant; "nonmissing" is
        # only valid when the dataset contains no NaNs
        has_missing = np.any(res.nmissing)
        if mstep_method is None:
            mstep_method = 'missing' if has_missing else 'nonmissing'
        mstep_method = mstep_method.lower()
        if mstep_method == 'nonmissing' and has_missing:
            raise ValueError('Cannot use EM algorithm option'
                             ' `mstep_method="nonmissing"` with missing data.')
        if mstep_method == 'nonmissing':
            func = self._em_maximization_obs_nonmissing
        elif mstep_method == 'missing':
            func = self._em_maximization_obs_missing
        else:
            raise ValueError('Invalid maximization step method: "%s".'
                             % mstep_method)
        # TODO: compute H is pretty slow
        # H (observation covariance) is only needed when there are no
        # idiosyncratic AR(1) terms; otherwise the idiosyncratic variances
        # are estimated below from the state moments
        Lambda, H = func(res, Eaa, a, compute_H=(not self.idiosyncratic_ar1))
        # Factor VAR and covariance: for each factor block, update the VAR
        # coefficients (f_A) and innovation covariance (f_Q) from the
        # accumulated second moments
        factor_ar = []
        factor_cov = []
        for b in s.factor_blocks:
            A = Eaa[:-1, b['factors_ar'], b['factors_ar']].sum(axis=0)
            B = Eaa1[:, b['factors_L1'], b['factors_ar']].sum(axis=0)
            C = Eaa[1:, b['factors_L1'], b['factors_L1']].sum(axis=0)
            nobs = Eaa.shape[0] - 1
            # want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
            try:
                f_A = cho_solve(cho_factor(A), B.T).T
            except LinAlgError:
                # Fall back to general solver if there are problems with
                # postive-definiteness
                f_A = np.linalg.solve(A, B.T).T
            f_Q = (C - f_A @ B.T) / nobs
            factor_ar += f_A.ravel().tolist()
            # Parameterize the innovation covariance through its Cholesky
            # factor (lower triangle, vectorized)
            factor_cov += (
                np.linalg.cholesky(f_Q)[np.tril_indices_from(f_Q)].tolist())
        # Idiosyncratic AR(1) and variances
        if self.idiosyncratic_ar1:
            # Each idiosyncratic disturbance is a scalar AR(1), so only the
            # diagonals of the summed moment matrices are needed
            ix = s['idio_ar_L1']
            Ad = Eaa[:-1, ix, ix].sum(axis=0).diagonal()
            Bd = Eaa1[:, ix, ix].sum(axis=0).diagonal()
            Cd = Eaa[1:, ix, ix].sum(axis=0).diagonal()
            nobs = Eaa.shape[0] - 1
            alpha = Bd / Ad
            sigma2 = (Cd - alpha * Bd) / nobs
        else:
            # Without AR(1) terms: monthly variances come from H, while
            # quarterly variances come from the state moments
            ix = s['idio_ar_L1']
            C = Eaa[:, ix, ix].sum(axis=0)
            sigma2 = np.r_[H.diagonal()[self._o['M']],
                           C.diagonal() / Eaa.shape[0]]
        # Save parameters: assemble the full updated vector in the same
        # layout as `params0`, using the parameter-slice map `self._p`
        params1 = np.zeros_like(params0)
        loadings = []
        for i in range(self.k_endog):
            iloc = self._s.endog_factor_iloc[i]
            factor_ix = s['factors_L1'][iloc]
            loadings += Lambda[i, factor_ix].tolist()
        params1[self._p['loadings']] = loadings
        params1[self._p['factor_ar']] = factor_ar
        params1[self._p['factor_cov']] = factor_cov
        if self.idiosyncratic_ar1:
            params1[self._p['idiosyncratic_ar1']] = alpha
        params1[self._p['idiosyncratic_var']] = sigma2
        return params1
    def _em_maximization_obs_nonmissing(self, res, Eaa, a, compute_H=False):
        """EM maximization step, observation equation without missing data.

        Parameters
        ----------
        res : object
            Output of the expectation step.
        Eaa : ndarray
            Second moments of the smoothed states, E[a_t a_t'], with time
            on the leading axis.
        a : ndarray
            Smoothed state vectors with time on the leading axis.
        compute_H : bool, optional
            Whether to compute the updated observation covariance matrix
            `H`. If False, `H` is returned filled with NaNs. Default is
            False.

        Returns
        -------
        Lambda : ndarray
            Updated factor loading matrix, (k_endog x k_states_factors).
        H : ndarray
            Updated observation covariance matrix (k_endog x k_endog), or
            NaNs when `compute_H=False`.
        """
        s = self._s
        dtype = Eaa.dtype
        # Observation equation (non-missing)
        # Note: we only compute loadings for monthly variables because
        # quarterly variables will always have missing entries, so we would
        # never choose this method in that case
        k = s.k_states_factors
        Lambda = np.zeros((self.k_endog, k), dtype=dtype)
        for i in range(self.k_endog):
            y = self.endog[:, i:i + 1]
            iloc = self._s.endog_factor_iloc[i]
            factor_ix = s['factors_L1'][iloc]
            ix = (np.s_[:],) + np.ix_(factor_ix, factor_ix)
            A = Eaa[ix].sum(axis=0)
            B = y.T @ a[:, factor_ix, 0]
            if self.idiosyncratic_ar1:
                # Subtract the cross moments between this variable's
                # idiosyncratic AR(1) state and the factors
                ix1 = s.k_states_factors + i
                ix2 = ix1 + 1
                B -= Eaa[:, ix1:ix2, factor_ix].sum(axis=0)
            # want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
            try:
                Lambda[i, factor_ix] = cho_solve(cho_factor(A), B.T).T
            except LinAlgError:
                # Fall back to general solver if there are problems with
                # postive-definiteness
                Lambda[i, factor_ix] = np.linalg.solve(A, B.T).T
        # Compute new obs cov
        # Note: this is unnecessary if `idiosyncratic_ar1=True`.
        # This is written in a slightly more general way than
        # Banbura and Modugno (2014), equation (7); see instead equation (13)
        # of Wu et al. (1996)
        # "An algorithm for estimating parameters of state-space models"
        if compute_H:
            Z = self['design'].copy()
            Z[:, :k] = Lambda
            BL = self.endog.T @ a[..., 0] @ Z.T
            C = self.endog.T @ self.endog
            H = (C + -BL - BL.T + Z @ Eaa.sum(axis=0) @ Z.T) / self.nobs
        else:
            # Placeholder: H is not needed when the idiosyncratic variances
            # are estimated from the states instead
            H = np.zeros((self.k_endog, self.k_endog), dtype=dtype) * np.nan
        return Lambda, H
    def _em_maximization_obs_missing(self, res, Eaa, a, compute_H=False):
        """EM maximization step, observation equation with missing data.

        Parameters
        ----------
        res : object
            Output of the expectation step; must provide `missing`.
        Eaa : ndarray
            Second moments of the smoothed states, E[a_t a_t'], with time
            on the leading axis.
        a : ndarray
            Smoothed state vectors with time on the leading axis.
        compute_H : bool, optional
            Whether to compute the updated observation covariance matrix
            `H`. If False, `H` is returned filled with NaNs. Default is
            False.

        Returns
        -------
        Lambda : ndarray
            Updated factor loading matrix, (k_endog x k_states_factors).
        H : ndarray
            Updated observation covariance matrix (k_endog x k_endog), or
            NaNs when `compute_H=False`.
        """
        s = self._s
        dtype = Eaa.dtype
        # Observation equation (missing)
        k = s.k_states_factors
        Lambda = np.zeros((self.k_endog, k), dtype=dtype)
        # W is 1 where an observation is available and 0 where it is missing
        W = (1 - res.missing.T)
        mask = W.astype(bool)
        # Compute design for monthly
        # Note: the relevant A changes for each i
        for i in range(self.k_endog_M):
            iloc = self._s.endog_factor_iloc[i]
            factor_ix = s['factors_L1'][iloc]
            # Restrict all moment sums to periods where variable i is
            # observed
            m = mask[:, i]
            yt = self.endog[m, i:i + 1]
            ix = np.ix_(m, factor_ix, factor_ix)
            Ai = Eaa[ix].sum(axis=0)
            Bi = yt.T @ a[np.ix_(m, factor_ix)][..., 0]
            if self.idiosyncratic_ar1:
                # Subtract cross moments between this variable's
                # idiosyncratic AR(1) state and the factors
                ix1 = s.k_states_factors + i
                ix2 = ix1 + 1
                Bi -= Eaa[m, ix1:ix2][..., factor_ix].sum(axis=0)
            # want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
            try:
                Lambda[i, factor_ix] = cho_solve(cho_factor(Ai), Bi.T).T
            except LinAlgError:
                # Fall back to general solver if there are problems with
                # postive-definiteness
                Lambda[i, factor_ix] = np.linalg.solve(Ai, Bi.T).T
        # Compute unrestricted design for quarterly
        # See Banbura at al. (2011), where this is described in Appendix C,
        # between equations (13) and (14).
        if self.k_endog_Q > 0:
            # Note: the relevant A changes for each i
            # Triangle-average weights applied to the five monthly lags of a
            # quarterly variable's idiosyncratic term
            multipliers = np.array([1, 2, 3, 2, 1])[:, None]
            for i in range(self.k_endog_M, self.k_endog):
                iloc = self._s.endog_factor_iloc[i]
                factor_ix = s['factors_L1_5_ix'][:, iloc].ravel().tolist()
                # R holds the linear restrictions tying the five lagged
                # loadings together (R @ loadings = 0)
                R, _ = self.loading_constraints(i)
                iQ = i - self.k_endog_M
                m = mask[:, i]
                yt = self.endog[m, i:i + 1]
                ix = np.ix_(m, factor_ix, factor_ix)
                Ai = Eaa[ix].sum(axis=0)
                BiQ = yt.T @ a[np.ix_(m, factor_ix)][..., 0]
                if self.idiosyncratic_ar1:
                    ix = (np.s_[:],) + np.ix_(s['idio_ar_Q_ix'][iQ], factor_ix)
                    Eepsf = Eaa[ix]
                    BiQ -= (multipliers * Eepsf[m].sum(axis=0)).sum(axis=0)
                # Note that there was a typo in Banbura et al. (2011) for
                # the formula applying the restrictions. In their notation,
                # they show (C D C')^{-1} while it should be (C D^{-1} C')^{-1}
                # Note: in reality, this is:
                # unrestricted - Aii @ R.T @ RARi @ (R @ unrestricted - q)
                # where the restrictions are defined as: R @ unrestricted = q
                # However, here q = 0, so we can simplify.
                try:
                    L_and_lower = cho_factor(Ai)
                    # x = BQ A^{-1}, or x A = BQ, so solve A' x' = (BQ)'
                    unrestricted = cho_solve(L_and_lower, BiQ.T).T[0]
                    AiiRT = cho_solve(L_and_lower, R.T)
                    L_and_lower = cho_factor(R @ AiiRT)
                    RAiiRTiR = cho_solve(L_and_lower, R)
                    restricted = unrestricted - AiiRT @ RAiiRTiR @ unrestricted
                except LinAlgError:
                    # Fall back to slower method if there are problems with
                    # postive-definiteness
                    Aii = np.linalg.inv(Ai)
                    unrestricted = (BiQ @ Aii)[0]
                    RARi = np.linalg.inv(R @ Aii @ R.T)
                    restricted = (unrestricted -
                                  Aii @ R.T @ RARi @ R @ unrestricted)
                Lambda[i, factor_ix] = restricted
        # Compute new obs cov
        # Note: this is unnecessary if `idiosyncratic_ar1=True`.
        # See Banbura and Modugno (2014), equation (12)
        # This does not literally follow their formula, e.g. multiplying by the
        # W_t selection matrices, because those formulas require loops that are
        # relatively slow. The formulation here is vectorized.
        if compute_H:
            Z = self['design'].copy()
            Z[:, :Lambda.shape[1]] = Lambda
            # Zero-fill missing entries; they are excluded via the W masks
            y = np.nan_to_num(self.endog)
            C = y.T @ y
            W = W[..., None]
            IW = 1 - W
            WL = W * Z
            WLT = WL.transpose(0, 2, 1)
            BL = y[..., None] @ a.transpose(0, 2, 1) @ WLT
            A = Eaa
            BLT = BL.transpose(0, 2, 1)
            IWT = IW.transpose(0, 2, 1)
            H = (C + (-BL - BLT + WL @ A @ WLT +
                      IW * self['obs_cov'] * IWT).sum(axis=0)) / self.nobs
        else:
            # Placeholder: H is not needed when the idiosyncratic variances
            # are estimated from the states instead
            H = np.zeros((self.k_endog, self.k_endog), dtype=dtype) * np.nan
        return Lambda, H
    def smooth(self, params, transformed=True, includes_fixed=False,
               complex_step=False, cov_type='none', cov_kwds=None,
               return_ssm=False, results_class=None,
               results_wrapper_class=None, **kwargs):
        """
        Kalman smoothing.

        Parameters
        ----------
        params : array_like
            Array of parameters at which to evaluate the loglikelihood
            function.
        transformed : bool, optional
            Whether or not `params` is already transformed. Default is True.
        includes_fixed : bool, optional
            If parameters were previously fixed with the `fix_params` method,
            whether or not `params` also includes the fixed parameters.
            Default is False.
        return_ssm : bool, optional
            Whether or not to return only the state space output or a full
            results object. Default is to return a full results object.
        cov_type : str, optional
            See `MLEResults.fit` for a description of covariance matrix types
            for results object. Default is 'none'.
        cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description required
            keywords for alternative covariance estimators
        **kwargs
            Additional keyword arguments to pass to the Kalman filter. See
            `KalmanFilter.filter` for more details.
        """
        # Delegate to the base class implementation
        return super().smooth(
            params, transformed=transformed, includes_fixed=includes_fixed,
            complex_step=complex_step, cov_type=cov_type, cov_kwds=cov_kwds,
            return_ssm=return_ssm, results_class=results_class,
            results_wrapper_class=results_wrapper_class, **kwargs)
def filter(self, params, transformed=True, includes_fixed=False,
complex_step=False, cov_type='none', cov_kwds=None,
return_ssm=False, results_class=None,
results_wrapper_class=None, low_memory=False, **kwargs):
"""
Kalman filtering.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
return_ssm : bool,optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
See `MLEResults.fit` for a description of covariance matrix types
for results object. Default is 'none'.
cov_kwds : dict or None, optional
See `MLEResults.get_robustcov_results` for a description required
keywords for alternative covariance estimators
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including in-sample prediction), although
out-of-sample forecasting is possible. Default is False.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
"""
return super().filter(
params, transformed=transformed, includes_fixed=includes_fixed,
complex_step=complex_step, cov_type=cov_type, cov_kwds=cov_kwds,
return_ssm=return_ssm, results_class=results_class,
results_wrapper_class=results_wrapper_class, **kwargs)
    def simulate(self, params, nsimulations, measurement_shocks=None,
                 state_shocks=None, initial_state=None, anchor=None,
                 repetitions=None, exog=None, extend_model=None,
                 extend_kwargs=None, transformed=True, includes_fixed=False,
                 original_scale=True, **kwargs):
        r"""
        Simulate a new time series following the state space model.

        Parameters
        ----------
        params : array_like
            Array of parameters to use in constructing the state space
            representation to use when simulating.
        nsimulations : int
            The number of observations to simulate. If the model is
            time-invariant this can be any number. If the model is
            time-varying, then this number must be less than or equal to the
            number of observations.
        measurement_shocks : array_like, optional
            If specified, these are the shocks to the measurement equation,
            :math:`\varepsilon_t`. If unspecified, these are automatically
            generated using a pseudo-random number generator. If specified,
            must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
            same as in the state space model.
        state_shocks : array_like, optional
            If specified, these are the shocks to the state equation,
            :math:`\eta_t`. If unspecified, these are automatically
            generated using a pseudo-random number generator. If specified,
            must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
            same as in the state space model.
        initial_state : array_like, optional
            If specified, this is the initial state vector to use in
            simulation, which should be shaped (`k_states` x 1), where
            `k_states` is the same as in the state space model. If unspecified,
            but the model has been initialized, then that initialization is
            used. This must be specified if `anchor` is anything other than
            "start" or 0 (or else you can use the `simulate` method on a
            results object rather than on the model object).
        anchor : int, str, or datetime, optional
            First period for simulation. The simulation will be conditional on
            all existing datapoints prior to the `anchor`. Type depends on the
            index of the given `endog` in the model. Two special cases are the
            strings 'start' and 'end'. `start` refers to beginning the
            simulation at the first period of the sample, and `end` refers to
            beginning the simulation at the first period after the sample.
            Integer values can run from 0 to `nobs`, or can be negative to
            apply negative indexing. Finally, if a date/time index was provided
            to the model, then this argument can be a date string to parse or a
            datetime type. Default is 'start'.
        repetitions : int, optional
            Number of simulated paths to generate. Default is 1 simulated path.
        exog : array_like, optional
            New observations of exogenous regressors, if applicable.
        extend_model : bool, optional
            Whether or not to construct an extended model for out-of-sample
            periods; passed through to the base class implementation.
        extend_kwargs : dict, optional
            Keyword arguments to use when constructing the extended model;
            passed through to the base class implementation.
        transformed : bool, optional
            Whether or not `params` is already transformed. Default is
            True.
        includes_fixed : bool, optional
            If parameters were previously fixed with the `fix_params` method,
            this argument describes whether or not `params` also includes
            the fixed parameters, in addition to the free parameters. Default
            is False.
        original_scale : bool, optional
            If the model specification standardized the data, whether or not
            to return simulations in the original scale of the data (i.e.
            before it was standardized by the model). Default is True.

        Returns
        -------
        simulated_obs : ndarray
            An array of simulated observations. If `repetitions=None`, then it
            will be shaped (nsimulations x k_endog) or (nsimulations,) if
            `k_endog=1`. Otherwise it will be shaped
            (nsimulations x k_endog x repetitions). If the model was given
            Pandas input then the output will be a Pandas object. If
            `k_endog > 1` and `repetitions` is not None, then the output will
            be a Pandas DataFrame that has a MultiIndex for the columns, with
            the first level containing the names of the `endog` variables and
            the second level containing the repetition number.
        """
        # Get usual simulations (in the possibly-standardized scale)
        sim = super().simulate(
            params, nsimulations, measurement_shocks=measurement_shocks,
            state_shocks=state_shocks, initial_state=initial_state,
            anchor=anchor, repetitions=repetitions, exog=exog,
            extend_model=extend_model, extend_kwargs=extend_kwargs,
            transformed=transformed, includes_fixed=includes_fixed, **kwargs)
        # If applicable, convert predictions back to original space
        # (invert the standardization: multiply by std, add back the mean)
        if self.standardize and original_scale:
            use_pandas = isinstance(self.data, PandasData)
            shape = sim.shape
            if use_pandas:
                # pd.Series (k_endog=1, replications=None)
                if len(shape) == 1:
                    sim = sim * self._endog_std[0] + self._endog_mean[0]
                # pd.DataFrame (k_endog > 1, replications=None)
                # [or]
                # pd.DataFrame with MultiIndex (replications > 0)
                elif len(shape) == 2:
                    # `level=0` aligns on the variable-name level of the
                    # MultiIndex columns (a no-op for a flat column index)
                    sim = (sim.multiply(self._endog_std, axis=1, level=0)
                              .add(self._endog_mean, axis=1, level=0))
            else:
                # 1-dim array (k_endog=1, replications=None)
                if len(shape) == 1:
                    sim = sim * self._endog_std + self._endog_mean
                # 2-dim array (k_endog > 1, replications=None)
                elif len(shape) == 2:
                    sim = sim * self._endog_std + self._endog_mean
                # 3-dim array with MultiIndex (replications > 0)
                else:
                    # Get arrays into the form that can be used for
                    # broadcasting
                    std = np.atleast_2d(self._endog_std)[..., None]
                    mean = np.atleast_2d(self._endog_mean)[..., None]
                    sim = sim * std + mean
        return sim
    def impulse_responses(self, params, steps=1, impulse=0,
                          orthogonalized=False, cumulative=False, anchor=None,
                          exog=None, extend_model=None, extend_kwargs=None,
                          transformed=True, includes_fixed=False,
                          original_scale=True, **kwargs):
        """
        Impulse response function.

        Parameters
        ----------
        params : array_like
            Array of model parameters.
        steps : int, optional
            The number of steps for which impulse responses are calculated.
            Default is 1. Note that for time-invariant models, the initial
            impulse is not counted as a step, so if `steps=1`, the output will
            have 2 entries.
        impulse : int or array_like
            If an integer, the state innovation to pulse; must be between 0
            and `k_posdef-1`. Alternatively, a custom impulse vector may be
            provided; must be shaped `k_posdef x 1`.
        orthogonalized : bool, optional
            Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
            is False.
        cumulative : bool, optional
            Whether or not to return cumulative impulse responses. Default is
            False.
        anchor : int, str, or datetime, optional
            Time point within the sample for the state innovation impulse. Type
            depends on the index of the given `endog` in the model. Two special
            cases are the strings 'start' and 'end', which refer to setting the
            impulse at the first and last points of the sample, respectively.
            Integer values can run from 0 to `nobs - 1`, or can be negative to
            apply negative indexing. Finally, if a date/time index was provided
            to the model, then this argument can be a date string to parse or a
            datetime type. Default is 'start'.
        exog : array_like, optional
            New observations of exogenous regressors for out-of-sample
            periods, if applicable.
        extend_model : bool, optional
            Whether or not to construct an extended model for out-of-sample
            periods; passed through to the base class implementation.
        extend_kwargs : dict, optional
            Keyword arguments to use when constructing the extended model;
            passed through to the base class implementation.
        transformed : bool, optional
            Whether or not `params` is already transformed. Default is
            True.
        includes_fixed : bool, optional
            If parameters were previously fixed with the `fix_params` method,
            this argument describes whether or not `params` also includes
            the fixed parameters, in addition to the free parameters. Default
            is False.
        original_scale : bool, optional
            If the model specification standardized the data, whether or not
            to return impulse responses in the original scale of the data (i.e.
            before it was standardized by the model). Default is True.
        **kwargs
            If the model has time-varying design or transition matrices and the
            combination of `anchor` and `steps` implies creating impulse
            responses for the out-of-sample period, then these matrices must
            have updated values provided for the out-of-sample steps. For
            example, if `design` is a time-varying component, `nobs` is 10,
            `anchor=1`, and `steps` is 15, a (`k_endog` x `k_states` x 7)
            matrix must be provided with the new design matrix values.

        Returns
        -------
        impulse_responses : ndarray
            Responses for each endogenous variable due to the impulse
            given by the `impulse` argument. For a time-invariant model, the
            impulse responses are given for `steps + 1` elements (this gives
            the "initial impulse" followed by `steps` responses for the
            important cases of VAR and SARIMAX models), while for time-varying
            models the impulse responses are only given for `steps` elements
            (to avoid having to unexpectedly provide updated time-varying
            matrices).
        """
        # Get usual simulations (in the possibly-standardized scale)
        # NOTE(review): `original_scale` is also forwarded to the parent call
        # here (unlike in `simulate`) — presumably the base implementation
        # accepts/ignores it; verify against the superclass signature.
        irfs = super().impulse_responses(
            params, steps=steps, impulse=impulse,
            orthogonalized=orthogonalized, cumulative=cumulative,
            anchor=anchor, exog=exog, extend_model=extend_model,
            extend_kwargs=extend_kwargs, transformed=transformed,
            includes_fixed=includes_fixed, original_scale=original_scale,
            **kwargs)
        # If applicable, convert predictions back to original space
        # (responses are deviations, so only the std scaling is inverted;
        # no mean is added back)
        if self.standardize and original_scale:
            use_pandas = isinstance(self.data, PandasData)
            shape = irfs.shape
            if use_pandas:
                # pd.Series (k_endog=1, replications=None)
                if len(shape) == 1:
                    irfs = irfs * self._endog_std[0]
                # pd.DataFrame (k_endog > 1)
                # [or]
                # pd.DataFrame with MultiIndex (replications > 0)
                elif len(shape) == 2:
                    irfs = irfs.multiply(self._endog_std, axis=1, level=0)
            else:
                # 1-dim array (k_endog=1)
                if len(shape) == 1:
                    irfs = irfs * self._endog_std
                # 2-dim array (k_endog > 1)
                elif len(shape) == 2:
                    irfs = irfs * self._endog_std
        return irfs
class DynamicFactorMQResults(mlemodel.MLEResults):
"""
Results from fitting a dynamic factor model
"""
def __init__(self, model, params, filter_results, cov_type=None, **kwargs):
super(DynamicFactorMQResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
@property
def factors(self):
"""
Estimates of unobserved factors.
Returns
-------
out : Bunch
Has the following attributes shown in Notes.
Notes
-----
The output is a bunch of the following format:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
"""
out = None
if self.model.k_factors > 0:
iloc = self.model._s.factors_L1
ix = np.array(self.model.state_names)[iloc].tolist()
out = Bunch(
filtered=self.states.filtered.loc[:, ix],
filtered_cov=self.states.filtered_cov.loc[np.s_[ix, :], ix],
smoothed=None, smoothed_cov=None)
if self.smoothed_state is not None:
out.smoothed = self.states.smoothed.loc[:, ix]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.states.smoothed_cov.loc[np.s_[ix, :], ix])
return out
    def get_coefficients_of_determination(self, method='individual',
                                          which=None):
        """
        Get coefficients of determination (R-squared) for variables / factors.

        Parameters
        ----------
        method : {'individual', 'joint', 'cumulative'}, optional
            The type of R-squared values to generate. "individual" plots
            the R-squared of each variable on each factor; "joint" plots the
            R-squared of each variable on each factor that it loads on;
            "cumulative" plots the successive R-squared values as each
            additional factor is added to the regression, for each variable.
            Default is 'individual'.
        which: {None, 'filtered', 'smoothed'}, optional
            Whether to compute R-squared values based on filtered or smoothed
            estimates of the factors. Default is 'smoothed' if smoothed results
            are available and 'filtered' otherwise.

        Returns
        -------
        rsquared : pd.DataFrame or pd.Series
            The R-squared values from regressions of observed variables on
            one or more of the factors. If method='individual' or
            method='cumulative', this will be a Pandas DataFrame with observed
            variables as the index and factors as the columns. If
            method='joint', will be a Pandas Series with observed variables as
            the index.

        See Also
        --------
        plot_coefficients_of_determination
        coefficients_of_determination
        """
        from statsmodels.tools import add_constant
        # Validate the method argument
        method = string_like(method, 'method', options=['individual', 'joint',
                                                        'cumulative'])
        if which is None:
            # Prefer smoothed factor estimates when available
            which = 'filtered' if self.smoothed_state is None else 'smoothed'
        k_endog = self.model.k_endog
        k_factors = self.model.k_factors
        # Boolean map: which variables load on which factors
        ef_map = self.model._s.endog_factor_map
        endog_names = self.model.endog_names
        factor_names = self.model.factor_names
        if method == 'individual':
            # One OLS regression per (variable, factor) pair; NaN when the
            # variable does not load on the factor
            coefficients = np.zeros((k_endog, k_factors))
            for i in range(k_factors):
                exog = add_constant(self.factors[which].iloc[:, i])
                for j in range(k_endog):
                    if ef_map.iloc[j, i]:
                        endog = self.filter_results.endog[j]
                        coefficients[j, i] = (
                            OLS(endog, exog, missing='drop').fit().rsquared)
                    else:
                        coefficients[j, i] = np.nan
            coefficients = pd.DataFrame(coefficients, index=endog_names,
                                        columns=factor_names)
        elif method == 'joint':
            # One OLS regression per variable, on all factors it loads on
            coefficients = np.zeros((k_endog,))
            exog = add_constant(self.factors[which])
            for j in range(k_endog):
                endog = self.filter_results.endog[j]
                # Leading True keeps the constant column in the selection
                ix = np.r_[True, ef_map.iloc[j]].tolist()
                X = exog.loc[:, ix]
                coefficients[j] = (
                    OLS(endog, X, missing='drop').fit().rsquared)
            coefficients = pd.Series(coefficients, index=endog_names)
        elif method == 'cumulative':
            # Successive regressions, adding one factor at a time
            coefficients = np.zeros((k_endog, k_factors))
            exog = add_constant(self.factors[which])
            for j in range(k_endog):
                endog = self.filter_results.endog[j]
                for i in range(k_factors):
                    if self.model._s.endog_factor_map.iloc[j, i]:
                        # Constant + factors up to and including i; later
                        # factors masked out
                        ix = np.r_[True, ef_map.iloc[j, :i + 1],
                                   [False] * (k_factors - i - 1)]
                        X = exog.loc[:, ix.astype(bool).tolist()]
                        coefficients[j, i] = (
                            OLS(endog, X, missing='drop').fit().rsquared)
                    else:
                        coefficients[j, i] = np.nan
            coefficients = pd.DataFrame(coefficients, index=endog_names,
                                        columns=factor_names)
        return coefficients
    @cache_readonly
    def coefficients_of_determination(self):
        """
        Individual coefficients of determination (:math:`R^2`).

        Coefficients of determination (:math:`R^2`) from regressions of
        endogenous variables on individual estimated factors.

        Returns
        -------
        coefficients_of_determination : pd.DataFrame
            A `k_endog` x `k_factors` DataFrame, where the entry at row `i`,
            column `j` represents the :math:`R^2` value from a regression of
            endogenous variable `i` on factor `j` and a constant.

        Notes
        -----
        Although it can be difficult to interpret the estimated factor loadings
        and factors, it is often helpful to use the coefficients of
        determination from univariate regressions to assess the importance of
        each factor in explaining the variation in each endogenous variable.

        In models with many variables and factors, this can sometimes lend
        interpretation to the factors (for example sometimes one factor will
        load primarily on real variables and another on nominal variables).

        See Also
        --------
        get_coefficients_of_determination
        plot_coefficients_of_determination
        """
        # Cached convenience wrapper around the more general method
        return self.get_coefficients_of_determination(method='individual')
def plot_coefficients_of_determination(self, method='individual',
which=None, endog_labels=None,
fig=None, figsize=None):
"""
Plot coefficients of determination (R-squared) for variables / factors.
Parameters
----------
method : {'individual', 'joint', 'cumulative'}, optional
The type of R-squared values to generate. "individual" plots
the R-squared of each variable on each factor; "joint" plots the
R-squared of each variable on each factor that it loads on;
"cumulative" plots the successive R-squared values as each
additional factor is added to the regression, for each variable.
Default is 'individual'.
which: {None, 'filtered', 'smoothed'}, optional
Whether to compute R-squared values based on filtered or smoothed
estimates of the factors. Default is 'smoothed' if smoothed results
are available and 'filtered' otherwise.
endog_labels : bool, optional
Whether or not to label the endogenous variables along the x-axis
of the plots. Default is to include labels if there are 5 or fewer
endogenous variables.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
The endogenous variables are arranged along the x-axis according to
their position in the model's `endog` array.
See Also
--------
get_coefficients_of_determination
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
method = string_like(method, 'method', options=['individual', 'joint',
'cumulative'])
# Should we label endogenous variables?
if endog_labels is None:
endog_labels = self.model.k_endog <= 5
# Plot the coefficients of determination
rsquared = self.get_coefficients_of_determination(method=method,
which=which)
if method in ['individual', 'cumulative']:
plot_idx = 1
for factor_name, coeffs in rsquared.T.iterrows():
# Create the new axis
ax = fig.add_subplot(self.model.k_factors, 1, plot_idx)
ax.set_ylim((0, 1))
ax.set(title=f'{factor_name}', ylabel=r'$R^2$')
coeffs.plot(ax=ax, kind='bar')
if plot_idx < len(rsquared.columns) or not endog_labels:
ax.xaxis.set_ticklabels([])
plot_idx += 1
elif method == 'joint':
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim((0, 1))
ax.set(title=r'$R^2$ - regression on all loaded factors',
ylabel=r'$R^2$')
rsquared.plot(ax=ax, kind='bar')
if not endog_labels:
ax.xaxis.set_ticklabels([])
return fig
    def get_prediction(self, start=None, end=None, dynamic=False,
                       information_set='predicted', signal_only=False,
                       original_scale=True, index=None, exog=None,
                       extend_model=None, extend_kwargs=None, **kwargs):
        r"""
        In-sample prediction and out-of-sample forecasting.

        Parameters
        ----------
        start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting,
            i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
            parse or a datetime type. However, if the dates index does not
            have a fixed frequency, end must be an integer index if you
            want out of sample prediction. Default is the last observation in
            the sample.
        dynamic : bool, int, str, or datetime, optional
            Integer offset relative to `start` at which to begin dynamic
            prediction. Can also be an absolute date string to parse or a
            datetime type (these are not interpreted as offsets).
            Prior to this observation, true endogenous values will be used for
            prediction; starting with this observation and continuing through
            the end of prediction, forecasted endogenous values will be used
            instead.
        information_set : str, optional
            The information set to condition each prediction on. Default is
            "predicted", which computes predictions of period t values
            conditional on observed data through period t-1; these are
            one-step-ahead predictions, and correspond with the typical
            `fittedvalues` results attribute. Alternatives are "filtered",
            which computes predictions of period t values conditional on
            observed data through period t, and "smoothed", which computes
            predictions of period t values conditional on the entire dataset
            (including also future observations t+1, t+2, ...).
        signal_only : bool, optional
            Whether to compute forecasts of only the "signal" component of
            the observation equation. Default is False. For example, the
            observation equation of a time-invariant model is
            :math:`y_t = d + Z \alpha_t + \varepsilon_t`, and the "signal"
            component is then :math:`Z \alpha_t`. If this argument is set to
            True, then forecasts of the "signal" :math:`Z \alpha_t` will be
            returned. Otherwise, the default is for forecasts of :math:`y_t`
            to be returned.
        original_scale : bool, optional
            If the model specification standardized the data, whether or not
            to return predictions in the original scale of the data (i.e.
            before it was standardized by the model). Default is True.
        **kwargs
            Additional arguments may be required for forecasting beyond the
            end of the sample. See `FilterResults.predict` for more details.

        Returns
        -------
        forecast : ndarray
            Array of in-sample predictions and / or out-of-sample
            forecasts. An (npredict x k_endog) array.
        """
        # Get usual predictions (in the possibly-standardized scale)
        res = super().get_prediction(start=start, end=end, dynamic=dynamic,
                                     information_set=information_set,
                                     signal_only=signal_only,
                                     index=index, exog=exog,
                                     extend_model=extend_model,
                                     extend_kwargs=extend_kwargs, **kwargs)
        # If applicable, convert predictions back to original space
        if self.model.standardize and original_scale:
            prediction_results = res.prediction_results
            k_endog, _ = prediction_results.endog.shape
            mean = np.array(self.model._endog_mean)
            std = np.array(self.model._endog_std)
            # For the multivariate case, add a leading axis so that the
            # mean / std broadcast across the time dimension of the
            # predicted mean array
            if self.model.k_endog > 1:
                mean = mean[None, :]
                std = std[None, :]
            # Undo the standardization of the mean: y = std * z + mean
            res._results._predicted_mean = (
                res._results._predicted_mean * std + mean)
            if k_endog == 1:
                # Univariate: Var(std * z) = std**2 * Var(z)
                res._results._var_pred_mean *= std**2
            else:
                # Multivariate: rescale the covariance as D V D', where D is
                # the diagonal matrix of standard deviations; here this is
                # done via broadcasting with the row vector `std` and the
                # column vector `std.T`
                res._results._var_pred_mean = (
                    std * res._results._var_pred_mean * std.T)
        return res
    def news(self, comparison, impact_date=None, impacted_variable=None,
             start=None, end=None, periods=None, exog=None,
             comparison_type=None, state_index=None, return_raw=False,
             tolerance=1e-10, endog_quarterly=None, original_scale=True,
             **kwargs):
        """
        Compute impacts from updated data (news and revisions).

        Parameters
        ----------
        comparison : array_like or MLEResults
            An updated dataset with updated and/or revised data from which the
            news can be computed, or an updated or previous results object
            to use in computing the news.
        impact_date : int, str, or datetime, optional
            A single specific period of impacts from news and revisions to
            compute. Can also be a date string to parse or a datetime type.
            This argument cannot be used in combination with `start`, `end`, or
            `periods`. Default is the first out-of-sample observation.
        impacted_variable : str, list, array, or slice, optional
            Observation variable label or slice of labels specifying that only
            specific impacted variables should be shown in the News output. The
            impacted variable(s) describe the variables that were *affected* by
            the news. If you do not know the labels for the variables, check
            the `endog_names` attribute of the model instance.
        start : int, str, or datetime, optional
            The first period of impacts from news and revisions to compute.
            Can also be a date string to parse or a datetime type. Default is
            the first out-of-sample observation.
        end : int, str, or datetime, optional
            The last period of impacts from news and revisions to compute.
            Can also be a date string to parse or a datetime type. Default is
            the first out-of-sample observation.
        periods : int, optional
            The number of periods of impacts from news and revisions to
            compute.
        exog : array_like, optional
            Array of exogenous regressors for the out-of-sample period, if
            applicable.
        comparison_type : {None, 'previous', 'updated'}
            This denotes whether the `comparison` argument represents a
            *previous* results object or dataset or an *updated* results object
            or dataset. If not specified, then an attempt is made to determine
            the comparison type.
        state_index : array_like or "common", optional
            An optional index specifying a subset of states to use when
            constructing the impacts of revisions and news. For example, if
            `state_index=[0, 1]` is passed, then only the impacts to the
            observed variables arising from the impacts to the first two
            states will be returned. If the string "common" is passed and the
            model includes idiosyncratic AR(1) components, news will only be
            computed based on the common states. Default is to use all states.
        return_raw : bool, optional
            Whether or not to return only the specific output or a full
            results object. Default is to return a full results object.
        tolerance : float, optional
            The numerical threshold for determining zero impact. Default is
            that any impact less than 1e-10 is assumed to be zero.
        endog_quarterly : array_like, optional
            New observations of quarterly variables, if `comparison` was
            provided as an updated monthly dataset. If this argument is
            provided, it must be a Pandas Series or DataFrame with a
            DatetimeIndex or PeriodIndex at the quarterly frequency.

        Returns
        -------
        news_results
            The object returned by the parent class's ``news`` method,
            possibly with its values rescaled to the original data scale
            (see `original_scale`).

        References
        ----------
        .. [1] Bańbura, Marta, and Michele Modugno.
               "Maximum likelihood estimation of factor models on datasets with
               arbitrary pattern of missing data."
               Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
        .. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
               "Nowcasting."
               The Oxford Handbook of Economic Forecasting. July 8, 2011.
        .. [3] Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia
               Reichlin.
               "Now-casting and the real-time data flow."
               In Handbook of economic forecasting, vol. 2, pp. 195-237.
               Elsevier, 2013.
        """
        # "common" selects the first (k_states - k_endog) states; this
        # assumes the idiosyncratic AR(1) states are ordered last in the
        # state vector -- NOTE(review): confirm against the model's state
        # ordering
        if state_index == 'common':
            state_index = (
                np.arange(self.model.k_states - self.model.k_endog))
        news_results = super().news(
            comparison, impact_date=impact_date,
            impacted_variable=impacted_variable, start=start, end=end,
            periods=periods, exog=exog, comparison_type=comparison_type,
            state_index=state_index, return_raw=return_raw,
            tolerance=tolerance, endog_quarterly=endog_quarterly, **kwargs)
        # If we have standardized the data, we may want to report the news in
        # the original scale. If so, we need to modify the data to "undo" the
        # standardization.
        if not return_raw and self.model.standardize and original_scale:
            endog_mean = self.model._endog_mean
            endog_std = self.model._endog_std
            # Don't need to add in the mean for the impacts, since they are
            # the difference of two forecasts
            news_results.total_impacts = (
                news_results.total_impacts * endog_std)
            news_results.update_impacts = (
                news_results.update_impacts * endog_std)
            # Revision impacts are only present when there were revisions
            if news_results.revision_impacts is not None:
                news_results.revision_impacts = (
                    news_results.revision_impacts * endog_std)
            # Update forecasts
            for name in ['prev_impacted_forecasts', 'news', 'revisions',
                         'update_realized', 'update_forecasts',
                         'revised', 'revised_prev', 'post_impacted_forecasts']:
                dta = getattr(news_results, name)
                # for pd.Series, dta.multiply(...) and (sometimes) dta.add(...)
                # remove the name attribute; save it now so that we can add it
                # back in
                orig_name = None
                if hasattr(dta, 'name'):
                    orig_name = dta.name
                # Rescale on the variable level of the (date, variable)
                # MultiIndex
                dta = dta.multiply(endog_std, level=1)
                # "news" and "revisions" are forecast differences, so the
                # mean cancels and is not added back
                if name not in ['news', 'revisions']:
                    dta = dta.add(endog_mean, level=1)
                # add back in the name attribute if it was removed
                if orig_name is not None:
                    dta.name = orig_name
                setattr(news_results, name, dta)
            # For the weights: rows correspond to update (date, variable) and
            # columns correspond to the impacted variable.
            # 1. Because we have modified the updates (realized, forecasts, and
            #    forecast errors) to be in the scale of the original updated
            #    variable, we need to essentially reverse that change for each
            #    row of the weights by dividing by the standard deviation of
            #    that row's updated variable
            # 2. Because we want the impacts to be in the scale of the original
            #    impacted variable, we need to multiply each column by the
            #    standard deviation of that column's impacted variable
            news_results.weights = (
                news_results.weights.divide(endog_std, axis=0, level=1)
                .multiply(endog_std, axis=1, level=1))
            news_results.revision_weights = (
                news_results.revision_weights
                .divide(endog_std, axis=0, level=1)
                .multiply(endog_std, axis=1, level=1))
        return news_results
def get_smoothed_decomposition(self, decomposition_of='smoothed_state',
state_index=None, original_scale=True):
r"""
Decompose smoothed output into contributions from observations
Parameters
----------
decomposition_of : {"smoothed_state", "smoothed_signal"}
The object to perform a decomposition of. If it is set to
"smoothed_state", then the elements of the smoothed state vector
are decomposed into the contributions of each observation. If it
is set to "smoothed_signal", then the predictions of the
observation vector based on the smoothed state vector are
decomposed. Default is "smoothed_state".
state_index : array_like, optional
An optional index specifying a subset of states to use when
constructing the decomposition of the "smoothed_signal". For
example, if `state_index=[0, 1]` is passed, then only the
contributions of observed variables to the smoothed signal arising
from the first two states will be returned. Note that if not all
states are used, the contributions will not sum to the smoothed
signal. Default is to use all states.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return simulations in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
Returns
-------
data_contributions : pd.DataFrame
Contributions of observations to the decomposed object. If the
smoothed state is being decomposed, then `data_contributions` is
shaped `(k_states x nobs, k_endog x nobs)` with a `pd.MultiIndex`
index corresponding to `state_to x date_to` and `pd.MultiIndex`
columns corresponding to `variable_from x date_from`. If the
smoothed signal is being decomposed, then `data_contributions` is
shaped `(k_endog x nobs, k_endog x nobs)` with `pd.MultiIndex`-es
corresponding to `variable_to x date_to` and
`variable_from x date_from`.
obs_intercept_contributions : pd.DataFrame
Contributions of the observation intercept to the decomposed
object. If the smoothed state is being decomposed, then
`obs_intercept_contributions` is
shaped `(k_states x nobs, k_endog x nobs)` with a `pd.MultiIndex`
index corresponding to `state_to x date_to` and `pd.MultiIndex`
columns corresponding to `obs_intercept_from x date_from`. If the
smoothed signal is being decomposed, then
`obs_intercept_contributions` is shaped
`(k_endog x nobs, k_endog x nobs)` with `pd.MultiIndex`-es
corresponding to `variable_to x date_to` and
`obs_intercept_from x date_from`.
state_intercept_contributions : pd.DataFrame
Contributions of the state intercept to the decomposed
object. If the smoothed state is being decomposed, then
`state_intercept_contributions` is
shaped `(k_states x nobs, k_states x nobs)` with a `pd.MultiIndex`
index corresponding to `state_to x date_to` and `pd.MultiIndex`
columns corresponding to `state_intercept_from x date_from`. If the
smoothed signal is being decomposed, then
`state_intercept_contributions` is shaped
`(k_endog x nobs, k_states x nobs)` with `pd.MultiIndex`-es
corresponding to `variable_to x date_to` and
`state_intercept_from x date_from`.
prior_contributions : pd.DataFrame
Contributions of the prior to the decomposed object. If the
smoothed state is being decomposed, then `prior_contributions` is
shaped `(nobs x k_states, k_states)`, with a `pd.MultiIndex`
index corresponding to `state_to x date_to` and columns
corresponding to elements of the prior mean (aka "initial state").
If the smoothed signal is being decomposed, then
`prior_contributions` is shaped `(nobs x k_endog, k_states)`,
with a `pd.MultiIndex` index corresponding to
`variable_to x date_to` and columns corresponding to elements of
the prior mean.
Notes
-----
Denote the smoothed state at time :math:`t` by :math:`\alpha_t`. Then
the smoothed signal is :math:`Z_t \alpha_t`, where :math:`Z_t` is the
design matrix operative at time :math:`t`.
"""
# De-meaning the data is like putting the mean into the observation
# intercept. To compute the decomposition correctly in the original
# scale, we need to account for this, so we fill in the observation
# intercept temporarily
if self.model.standardize and original_scale:
cache_obs_intercept = self.model['obs_intercept']
self.model['obs_intercept'] = self.model._endog_mean
# Compute the contributions
(data_contributions, obs_intercept_contributions,
state_intercept_contributions, prior_contributions) = (
super().get_smoothed_decomposition(
decomposition_of=decomposition_of, state_index=state_index))
# Replace the original observation intercept
if self.model.standardize and original_scale:
self.model['obs_intercept'] = cache_obs_intercept
# Reverse the effect of dividing by the standard deviation
if (decomposition_of == 'smoothed_signal'
and self.model.standardize and original_scale):
endog_std = self.model._endog_std
data_contributions = (
data_contributions.multiply(endog_std, axis=0, level=0))
obs_intercept_contributions = (
obs_intercept_contributions.multiply(
endog_std, axis=0, level=0))
state_intercept_contributions = (
state_intercept_contributions.multiply(
endog_std, axis=0, level=0))
prior_contributions = (
prior_contributions.multiply(endog_std, axis=0, level=0))
return (data_contributions, obs_intercept_contributions,
state_intercept_contributions, prior_contributions)
def append(self, endog, endog_quarterly=None, refit=False, fit_kwargs=None,
copy_initialization=True, retain_standardization=True,
**kwargs):
"""
Recreate the results object with new data appended to original data.
Creates a new result object applied to a dataset that is created by
appending new data to the end of the model's original data. The new
results can then be used for analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
refit : bool, optional
Whether to re-fit the parameters, based on the combined dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /
`smooth`.
copy_initialization : bool, optional
Whether or not to copy the initialization from the current results
set to the new model. Default is True.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
arguments when created the new model object.
Returns
-------
results
Updated Results object, that includes results from both the
original dataset and the new dataset.
Notes
-----
The `endog` and `exog` arguments to this method must be formatted in
the same way (e.g. Pandas Series versus Numpy array) as were the
`endog` and `exog` arrays passed to the original model.
The `endog` (and, if applicable, `endog_quarterly`) arguments to this
method should consist of new observations that occurred directly after
the last element of `endog`. For any other kind of dataset, see the
`apply` method.
This method will apply filtering to all of the original data as well
as to the new data. To apply filtering only to the new data (which
can be much faster if the original dataset is large), see the `extend`
method.
See Also
--------
extend
apply
"""
# Construct the combined dataset, if necessary
endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
endog, endog_quarterly)
# Check for compatible dimensions
k_endog = endog.shape[1] if len(endog.shape) == 2 else 1
if (k_endog_monthly != self.model.k_endog_M or
k_endog != self.model.k_endog):
raise ValueError('Cannot append data of a different dimension to'
' a model.')
kwargs['k_endog_monthly'] = k_endog_monthly
return super().append(
endog, refit=refit, fit_kwargs=fit_kwargs,
copy_initialization=copy_initialization,
retain_standardization=retain_standardization, **kwargs)
def extend(self, endog, endog_quarterly=None, fit_kwargs=None,
retain_standardization=True, **kwargs):
"""
Recreate the results object for new data that extends original data.
Creates a new result object applied to a new dataset that is assumed to
follow directly from the end of the model's original data. The new
results can then be used for analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
fit_kwargs : dict, optional
Keyword arguments to pass to `filter` or `smooth`.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
arguments when created the new model object.
Returns
-------
results
Updated Results object, that includes results only for the new
dataset.
See Also
--------
append
apply
Notes
-----
The `endog` argument to this method should consist of new observations
that occurred directly after the last element of the model's original
`endog` array. For any other kind of dataset, see the `apply` method.
This method will apply filtering only to the new data provided by the
`endog` argument, which can be much faster than re-filtering the entire
dataset. However, the returned results object will only have results
for the new data. To retrieve results for both the new data and the
original data, see the `append` method.
"""
# Construct the combined dataset, if necessary
endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
endog, endog_quarterly)
# Check for compatible dimensions
k_endog = endog.shape[1] if len(endog.shape) == 2 else 1
if (k_endog_monthly != self.model.k_endog_M or
k_endog != self.model.k_endog):
raise ValueError('Cannot append data of a different dimension to'
' a model.')
kwargs['k_endog_monthly'] = k_endog_monthly
return super().extend(
endog, fit_kwargs=fit_kwargs,
retain_standardization=retain_standardization, **kwargs)
def apply(self, endog, k_endog_monthly=None, endog_quarterly=None,
refit=False, fit_kwargs=None, copy_initialization=False,
retain_standardization=True, **kwargs):
"""
Apply the fitted parameters to new data unrelated to the original data.
Creates a new result object using the current fitted parameters,
applied to a completely new dataset that is assumed to be unrelated to
the model's original data. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which
the provided `endog` dataset contains both the monthly and
quarterly data, this variable should be used to indicate how many
of the variables are monthly.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /
`smooth`.
copy_initialization : bool, optional
Whether or not to copy the initialization from the current results
set to the new model. Default is False.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
arguments when created the new model object.
Returns
-------
results
Updated Results object, that includes results only for the new
dataset.
See Also
--------
statsmodels.tsa.statespace.mlemodel.MLEResults.append
statsmodels.tsa.statespace.mlemodel.MLEResults.apply
Notes
-----
The `endog` argument to this method should consist of new observations
that are not necessarily related to the original model's `endog`
dataset. For observations that continue that original dataset by follow
directly after its last element, see the `append` and `extend` methods.
"""
mod = self.model.clone(endog, k_endog_monthly=k_endog_monthly,
endog_quarterly=endog_quarterly,
retain_standardization=retain_standardization,
**kwargs)
if copy_initialization:
init = initialization.Initialization.from_results(
self.filter_results)
mod.ssm.initialization = init
res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)
return res
    def summary(self, alpha=.05, start=None, title=None, model_name=None,
                display_params=True, display_diagnostics=False,
                display_params_as_list=False, truncate_endog_names=None,
                display_max_endog=3):
        """
        Summarize the Model
        Parameters
        ----------
        alpha : float, optional
            Significance level for the confidence intervals. Default is 0.05.
        start : int, optional
            Integer of the start observation. Default is 0.
        title : str, optional
            The title used for the summary table.
        model_name : str, optional
            The name of the model used. Default is to use model class name.
        display_params : bool, optional
            Whether the base summary shows the raw parameter list; only has
            an effect together with `display_params_as_list` (see below).
            Default is True.
        display_diagnostics : bool, optional
            Whether to include residual diagnostics in the base summary.
            Default is False.
        display_params_as_list : bool, optional
            If True, parameters are shown as a plain list by the base
            summary. If False (default), formatted observation-equation and
            factor-transition tables are built here instead.
        truncate_endog_names : int, optional
            Number of characters after which endogenous variable names are
            truncated.
        display_max_endog : int, optional
            Maximum number of endogenous variables shown in the top summary
            table.
        Returns
        -------
        summary : Summary instance
            This holds the summary table and text, which can be printed or
            converted to various output formats.
        See Also
        --------
        statsmodels.iolib.summary.Summary
        """
        mod = self.model
        # Default title / model name
        if title is None:
            title = 'Dynamic Factor Results'
        if model_name is None:
            model_name = self.model._model_name
        # Get endog names
        endog_names = self.model._get_endog_names(
            truncate=truncate_endog_names)
        # Get extra elements for top summary table
        extra_top_left = None
        extra_top_right = []
        mle_retvals = getattr(self, 'mle_retvals', None)
        mle_settings = getattr(self, 'mle_settings', None)
        # When fit by the EM algorithm, report the iteration count at the
        # top of the summary.
        if mle_settings is not None and mle_settings.method == 'em':
            extra_top_right += [('EM Iterations', [f'{mle_retvals.iter}'])]
        # Get the basic summary tables
        summary = super().summary(
            alpha=alpha, start=start, title=title, model_name=model_name,
            display_params=(display_params and display_params_as_list),
            display_diagnostics=display_diagnostics,
            truncate_endog_names=truncate_endog_names,
            display_max_endog=display_max_endog,
            extra_top_left=extra_top_left, extra_top_right=extra_top_right)
        # Get tables of parameters
        table_ix = 1
        if not display_params_as_list:
            # Observation equation table: factor loadings (design matrix
            # columns for the first factor lags), one row per endog series.
            data = pd.DataFrame(
                self.filter_results.design[:, mod._s['factors_L1'], 0],
                index=endog_names, columns=mod.factor_names)
            data = data.applymap(lambda s: '%.2f' % s)
            # Idiosyncratic terms
            # data[' '] = ' '
            k_idio = 1
            if mod.idiosyncratic_ar1:
                data[' idiosyncratic: AR(1)'] = (
                    self.params[mod._p['idiosyncratic_ar1']])
                k_idio += 1
            data['var.'] = self.params[mod._p['idiosyncratic_var']]
            # Format the idiosyncratic columns just appended.
            data.iloc[:, -k_idio:] = data.iloc[:, -k_idio:].applymap(
                lambda s: '%.2f' % s)
            data.index.name = 'Factor loadings:'
            # Clear entries for non-loading factors
            base_iloc = np.arange(mod.k_factors)
            for i in range(mod.k_endog):
                iloc = [j for j in base_iloc
                        if j not in mod._s.endog_factor_iloc[i]]
                data.iloc[i, iloc] = '.'
            data = data.reset_index()
            # Build the table
            params_data = data.values
            params_header = data.columns.tolist()
            params_stubs = None
            title = 'Observation equation:'
            table = SimpleTable(
                params_data, params_header, params_stubs,
                txt_fmt=fmt_params, title=title)
            summary.tables.insert(table_ix, table)
            table_ix += 1
            # Factor transitions: one table per factor block, with the
            # block's VAR coefficients and error (co)variance.
            ix1 = 0
            ix2 = 0
            for i in range(len(mod._s.factor_blocks)):
                block = mod._s.factor_blocks[i]
                ix2 += block.k_factors
                T = self.filter_results.transition
                lag_names = []
                for j in range(block.factor_order):
                    lag_names += [f'L{j + 1}.{name}'
                                  for name in block.factor_names]
                data = pd.DataFrame(T[block.factors_L1, block.factors_ar, 0],
                                    index=block.factor_names,
                                    columns=lag_names)
                data.index.name = ''
                data = data.applymap(lambda s: '%.2f' % s)
                Q = self.filter_results.state_cov
                # data[' '] = ''
                # Scalar blocks get a single error-variance column; larger
                # blocks get one column per factor of the error covariance.
                if block.k_factors == 1:
                    data[' error variance'] = Q[ix1, ix1]
                else:
                    data[' error covariance'] = block.factor_names
                    for j in range(block.k_factors):
                        data[block.factor_names[j]] = Q[ix1:ix2, ix1 + j]
                data.iloc[:, -block.k_factors:] = (
                    data.iloc[:, -block.k_factors:].applymap(
                        lambda s: '%.2f' % s))
                data = data.reset_index()
                params_data = data.values
                params_header = data.columns.tolist()
                params_stubs = None
                title = f'Transition: Factor block {i}'
                table = SimpleTable(
                    params_data, params_header, params_stubs,
                    txt_fmt=fmt_params, title=title)
                summary.tables.insert(table_ix, table)
                table_ix += 1
                ix1 = ix2
        return summary
| bsd-3-clause | d5132c6a92012ac2da1f1a4e461a81ec | 44.392102 | 79 | 0.572889 | 4.234393 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/statespace/tests/results/results_varmax.py | 7 | 8709 | """
Results for VARMAX tests
Results from Stata using script `test_varmax_stata.do`.
See also Stata time series documentation, in particular `dfactor`.
Data from:
http://www.jmulti.de/download/datasets/e1.dat
Author: Chad Fulton
License: Simplified-BSD
"""
# See http://www.jmulti.de/download/datasets/e1.dat
# 1960:Q1 - 1982Q4
# 92 quarterly observations (23 rows of 4 triples below); each inner list
# holds one quarter's values for the three series of the example dataset.
lutkepohl_data = [
    [180, 451, 415], [179, 465, 421], [185, 485, 434], [192, 493, 448],
    [211, 509, 459], [202, 520, 458], [207, 521, 479], [214, 540, 487],
    [231, 548, 497], [229, 558, 510], [234, 574, 516], [237, 583, 525],
    [206, 591, 529], [250, 599, 538], [259, 610, 546], [263, 627, 555],
    [264, 642, 574], [280, 653, 574], [282, 660, 586], [292, 694, 602],
    [286, 709, 617], [302, 734, 639], [304, 751, 653], [307, 763, 668],
    [317, 766, 679], [314, 779, 686], [306, 808, 697], [304, 785, 688],
    [292, 794, 704], [275, 799, 699], [273, 799, 709], [301, 812, 715],
    [280, 837, 724], [289, 853, 746], [303, 876, 758], [322, 897, 779],
    [315, 922, 798], [339, 949, 816], [364, 979, 837], [371, 988, 858],
    [375, 1025, 881], [432, 1063, 905], [453, 1104, 934], [460, 1131, 968],
    [475, 1137, 983], [496, 1178, 1013], [494, 1211, 1034], [498, 1256, 1064],
    [526, 1290, 1101], [519, 1314, 1102], [516, 1346, 1145], [531, 1385, 1173],
    [573, 1416, 1216], [551, 1436, 1229], [538, 1462, 1242], [532, 1493, 1267],
    [558, 1516, 1295], [524, 1557, 1317], [525, 1613, 1355], [519, 1642, 1371],
    [526, 1690, 1402], [510, 1759, 1452], [519, 1756, 1485], [538, 1780, 1516],
    [549, 1807, 1549], [570, 1831, 1567], [559, 1873, 1588], [584, 1897, 1631],
    [611, 1910, 1650], [597, 1943, 1685], [603, 1976, 1722], [619, 2018, 1752],
    [635, 2040, 1774], [658, 2070, 1807], [675, 2121, 1831], [700, 2132, 1842],
    [692, 2199, 1890], [759, 2253, 1958], [782, 2276, 1948], [816, 2318, 1994],
    [844, 2369, 2061], [830, 2423, 2056], [853, 2457, 2102], [852, 2470, 2121],
    [833, 2521, 2145], [860, 2545, 2164], [870, 2580, 2206], [830, 2620, 2225],
    [801, 2639, 2235], [824, 2618, 2237], [831, 2628, 2250], [830, 2651, 2271],
]
# Reference output (from Stata, per the module docstring) for each model
# specification named below.  Each dict stores estimated parameters
# ('params'), parameter variances ('var_oim' -- presumably based on the
# observed information matrix, matching Stata's naming; confirm), and fit
# statistics.  `None` marks values that are not available / not checked for
# that model.
lutkepohl_var1 = {
    'params': [
        -0.25034303, 0.28759168, 0.81626475, # Phi, row 1
        0.023383, 0.19048278, 0.66502259, # Phi, row 2
        -0.01492992, 0.53796097, 0.28114733, # Phi, row 3
        # .00199294, # Covariance, lower triangle
        # .00006096, .00012986,
        # .00018523, .00011695, .00016188,
        # Note: the following are the Cholesky of the covariance
        # matrix defined just above
        0.04464236, # Cholesky, lower triangle
        0.00136552, 0.01354125,
        0.0029089, 0.00834324, 0.00915471
    ],
    'var_oim': [
        .01319669, .19413864, .2386643,
        .0012437, .01829378, .02234399,
        .00107749, .01584584, .01938099,
        1.061e-07,
        4.981e-09, 4.549e-09,
        9.211e-10, 5.825e-10, 7.016e-10],
    'loglike': 587.8481018831948,
    'aic': -1145.696,
    'bic': -1110.934,
}
# VAR(1) with diagonal error covariance (variances estimated directly).
lutkepohl_var1_diag = {
    'params': [
        -0.24817904, 0.29283012, 0.80794938, # Phi, row 1
        0.02282985, 0.19672157, 0.66329776, # Phi, row 2
        -0.01522531, 0.53500874, 0.28859213, # Phi, row 3
        0.00199106, 0.00018529, 0.00016179 # Variances, diagonal
    ],
    'var_oim': [
        .01314245, .1902972, .23400828,
        .00124336, .01840132, .02229946,
        .00107322, .01558391, .01909303,
        1.057e-07, 9.233e-10, 7.011e-10
    ],
    'loglike': 562.8168476509002,
    'aic': -1101.634,
    'bic': -1073.824
}
# Same as above but with (zero) measurement-error variances appended.
lutkepohl_var1_diag_meas = {
    'params': [
        -0.24817904, 0.29283012, 0.80794938, # Phi, row 1
        0.02282985, 0.19672157, 0.66329776, # Phi, row 2
        -0.01522531, 0.53500874, 0.28859213, # Phi, row 3
        0.00199106, 0.00018529, 0.00016179, # Variances, diagonal
        0, 0, 0 # Measurement error variances
    ],
    'var_oim': [
        .01314245, .1902972, .23400828,
        .00124336, .01840132, .02229946,
        .00107322, .01558391, .01909303,
        1.057e-07, 9.233e-10, 7.011e-10,
        None, None, None
    ],
    'loglike': 562.8168476509002,
    'aic': None,
    'bic': None
}
lutkepohl_var1_obs_intercept = {
    'params': [
        -.24762, .25961003, .75992623, # Phi, row 1
        .03186854, -.07271862, .23697765, # Phi, row 2
        -.0053055, .2362571, -.19438311, # Phi, row 3
        .00199116, .00013515, .00009937 # Variances, diagonal
    ],
    'obs_intercept': [.01799302, .02065458, .01987525], # Intercepts
    'var_oim': [
        .01317874, .2311403, .33481866,
        .00090084, .0157839, .0229119,
        .00065737, .01149729, .01661236,
        # .00001802, 1.818e-06, 1.086e-06, # Intercept parameters
        1.057e-07, 4.869e-10, 2.630e-10],
    'loglike': 593.5252693885262,
    'aic': -1101.634,
    'bic': -1073.824
}
# VAR(1) with a single exogenous regressor.
lutkepohl_var1_exog = {
    'params': [
        -.25549409, .31149462, .92674046, # Phi, row 1
        .02935715, .13699757, .5059042, # Phi, row 2
        -.00540021, .4598014, .06065565, # Phi, row 3
        -.00007533, .00012771, .00018224, # exog
        # .00200617, # Covariance, lower triangle
        # .00007053, .00017216,
        # .00013934, .00010021, .00013833
        # Note: the following are the Cholesky of the covariance
        # matrix defined just above
        .04479029, # Cholesky, lower triangle
        .00157467, .01302614,
        .00311094, .00731692, .00866687
    ],
    'var_oim': [
        .01350243, .20429977, .29684366, # Phi, row 1
        .00115871, .01753203, .02547371, # Phi, row 2
        .000931, .01408662, .02046759, # Phi, row 3
        3.720e-08, 3.192e-09, 2.565e-09 # exog
    ],
    'loglike': 587.4157014188437,
    'aic': None,
    'bic': None
}
# VAR(1) with two exogenous regressors per equation.
lutkepohl_var1_exog2 = {
    'params': [
        -.2552236, .21722691, .81525457, # Phi, row 1
        .02998355, -.08130972, .24772266, # Phi, row 2
        -.00476998, .24016112, -.19910237, # Phi, row 3
        .00811096, -.00015244, # exog, y1
        .01878355, -.00005086, # exog, y2
        .01889825, 2.577e-06, # exog, y3
        # .00199918, # Covariance, lower triangle
        # .00005435, .00013469,
        # .00012306, .00006251, .00010039
        # Note: the following are the Cholesky of the covariance
        # matrix defined just above
        .04471219, # Cholesky, lower triangle
        .00121555, .01102644,
        .00275227, .00536569, .00800152
    ],
    'var_oim': None,
    # 'loglike': 600.9801664685759, # From Stata
    'loglike': 600.65449034396283, # From VARMAX (regression test)
    'aic': None,
    'bic': None
}
# Bivariate VAR(2).
lutkepohl_var2 = {
    'params': [
        -.25244981, .62528114, # Phi_1, row 1
        -.13011679, .58173748, # Phi_1, row 2
        .05369178, .35716349, # Phi_2, row 1
        .03861472, .43812606, # Phi_2, row 2
        # .00197786, # Covariance, lower triangle
        # .00008091, .00018269
        0.04447314, # Covariance cholesky, lower triangle
        0.0018193, 0.01339329
    ],
    'var_oim': [
        .01315844, .11805816, # Phi_1, row 1
        .01321036, .11300702, # Phi_1, row 2
        .00122666, .01064478, # Phi_2, row 1
        .0012571, .0106738, # Phi_2, row 2
        1.048e-07, # Covariance, lower triangle
        4.994e-09, 8.940e-10
    ],
    'loglike': 343.3149718445623,
    'aic': -664.6299,
    'bic': -639.1376
}
# VARMA(1,1) on the FRED dataset (zeros mark restricted coefficients).
fred_varma11 = {
    'params': [
        .80580312, 0, # Phi_1, row 1
        .17348681, -.48093755, # Phi_1, row 2
        -.51890703, 0, # Theta_1, row 1
        0, 0, # Theta_1, row 2
        .0000582, .00003815, # Variances
    ],
    'var_oim': [
        .00272999, 0, # Phi_1, row 1
        .00164152, .00248576, # Phi_1, row 2
        .0049259, 0, # Theta_1, row 1
        0, 0, # Theta_1, row 2
        1.529e-11, 6.572e-12, # Variances
    ],
    'loglike': 3156.056423235071,
    'aic': -6300.113,
    'bic': -6275.551
}
# VMA(1) on the FRED dataset.
fred_vma1 = {
    'params': [
        .24803941, 0, # Theta_1, row 1
        0, 0, # Theta_1, row 2
        .00006514, .00004621, # Variances
    ],
    'var_oim': [
        .00154773, 0, # Theta_1, row 1
        0, 0, # Theta_1, row 2
        1.916e-11, 9.639e-12, # Variances
    ],
    'loglike': 3088.909619417645,
    'aic': -6171.819,
    'bic': -6159.539
}
| bsd-3-clause | d834e61af559afa3c1afe84d6ef83eb5 | 36.217949 | 79 | 0.523711 | 2.449789 | false | false | false | false |
statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | 3 | 76840 | # -*- coding: utf-8 -*-
"""
Vector Autoregression (VAR) processes
References
----------
Lütkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import annotations
from statsmodels.compat.python import lrange
from collections import defaultdict
from io import StringIO
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.base.wrapper as wrap
from statsmodels.iolib.table import SimpleTable
from statsmodels.tools.decorators import cache_readonly, deprecated_alias
from statsmodels.tools.linalg import logdet_symm
from statsmodels.tools.sm_exceptions import OutputWarning
from statsmodels.tools.validation import array_like
from statsmodels.tsa.base.tsa_model import (
TimeSeriesModel,
TimeSeriesResultsWrapper,
)
import statsmodels.tsa.tsatools as tsa
from statsmodels.tsa.tsatools import duplication_matrix, unvec, vec
from statsmodels.tsa.vector_ar import output, plotting, util
from statsmodels.tsa.vector_ar.hypothesis_test_results import (
CausalityTestResults,
NormalityTestResults,
WhitenessTestResults,
)
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
# -------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
    r"""
    MA(\infty) representation of VAR(p) process

    Parameters
    ----------
    coefs : ndarray (p x k x k)
        Lag coefficient matrices :math:`A_1, \ldots, A_p`.
    maxn : int
        Number of MA matrices to compute

    Returns
    -------
    phis : ndarray (maxn + 1 x k x k)

    Notes
    -----
    A VAR(p) process

    .. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t

    has the equivalent moving-average form

    .. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}

    where :math:`\Phi_0 = I_k` and the remaining :math:`\Phi_i` follow the
    recursion :math:`\Phi_i = \sum_{j=1}^{\min(i, p)} \Phi_{i-j} A_j`.
    """
    n_lags, neqs, _ = coefs.shape
    phis = np.zeros((maxn + 1, neqs, neqs))
    phis[0] = np.eye(neqs)
    for h in range(1, maxn + 1):
        # Only lags up to min(h, p) contribute at horizon h.
        for lag in range(1, min(h, n_lags) + 1):
            phis[h] += phis[h - lag] @ coefs[lag - 1]
    return phis
def is_stable(coefs, verbose=False):
    """
    Determine stability of VAR(p) system by examining the eigenvalues of the
    VAR(1) representation

    Parameters
    ----------
    coefs : ndarray (p x k x k)
        Lag coefficient matrices.
    verbose : bool
        If True, print the modulus of each companion-matrix eigenvalue.

    Returns
    -------
    is_stable : bool
    """
    companion = util.comp_matrix(coefs)
    moduli = np.abs(np.linalg.eigvals(companion))
    if verbose:
        print("Eigenvalues of VAR(1) rep")
        for modulus in moduli:
            print(modulus)
    # Stable iff every companion eigenvalue lies inside the unit circle.
    return (moduli <= 1).all()
def var_acf(coefs, sig_u, nlags=None):
    """
    Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
    process

    Parameters
    ----------
    coefs : ndarray (p x k x k)
        Coefficient matrices A_i
    sig_u : ndarray (k x k)
        Covariance of white noise process u_t
    nlags : int, optional
        Defaults to order p of system

    Returns
    -------
    acf : ndarray, (p, k, k)

    Notes
    -----
    Ref: Lütkepohl p.28-29
    """
    p, k, _ = coefs.shape
    nlags = p if nlags is None else nlags
    acf = np.zeros((nlags + 1, k, k))
    # Lags 0, ..., p-1 come from solving the VAR(1) representation.
    acf[:p] = _var_acf(coefs, sig_u)
    # Remaining lags follow the Yule-Walker recursion
    # G(h) = A_1 G(h-1) + ... + A_p G(h-p).
    for h in range(p, nlags + 1):
        for j in range(p):
            acf[h] += coefs[j] @ acf[h - j - 1]
    return acf
def _var_acf(coefs, sig_u):
    """
    Compute autocovariance function ACF_y(h) for h=1,...,p

    Notes
    -----
    Lütkepohl (2005) p.29
    """
    p, k, k_check = coefs.shape
    assert k == k_check
    companion = util.comp_matrix(coefs)
    # VAR(1) noise covariance: sig_u in the top-left block, zeros elsewhere.
    big_sig = np.zeros((k * p, k * p))
    big_sig[:k, :k] = sig_u
    # vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
    dim = (k * p) ** 2
    vec_acf = np.linalg.solve(
        np.eye(dim) - np.kron(companion, companion), vec(big_sig)
    )
    full_acf = unvec(vec_acf)
    # Slice the first block-row into the p individual k x k ACF matrices.
    return np.array(
        [full_acf[:k, lag * k : (lag + 1) * k] for lag in range(p)]
    )
def forecast_cov(ma_coefs, sigma_u, steps):
    r"""
    Compute theoretical forecast error variance matrices

    Parameters
    ----------
    ma_coefs : ndarray
        MA representation matrices :math:`\Phi_i` (at least `steps` of them).
    sigma_u : ndarray (neqs x neqs)
        White-noise covariance.
    steps : int
        Number of steps ahead

    Returns
    -------
    forc_covs : ndarray (steps x neqs x neqs)

    Notes
    -----
    .. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi \Sigma_u \Phi^T
    """
    neqs = len(sigma_u)
    forc_covs = np.zeros((steps, neqs, neqs))
    running = np.zeros((neqs, neqs))
    # Accumulate Sigma(h) = Sigma(h-1) + Phi_{h-1} Sigma_u Phi_{h-1}'.
    for h in range(steps):
        phi = ma_coefs[h]
        running = running + phi @ sigma_u @ phi.T
        forc_covs[h] = running
    return forc_covs


# Alias kept for callers that use the textbook name.
mse = forecast_cov
def forecast(y, coefs, trend_coefs, steps, exog=None):
    """
    Produce linear minimum MSE forecast

    Parameters
    ----------
    y : ndarray (k_ar x neqs)
        Initial values; must contain at least ``len(coefs)`` rows.
    coefs : ndarray (k_ar x neqs x neqs)
        Lag coefficient matrices, ``coefs[i]`` applying to lag ``i + 1``.
    trend_coefs : ndarray (1 x neqs) or (neqs)
        Coefficients of the deterministic terms.  When `exog` is None these
        are added directly to every forecast (intercept case).
    steps : int
        Number of steps ahead to forecast.
    exog : ndarray (trend_coefs.shape[1] x neqs), optional
        Deterministic/exogenous regressors for the forecast period; when
        provided, the deterministic part of the forecast is
        ``exog @ trend_coefs``.

    Returns
    -------
    forecasts : ndarray (steps x neqs)

    Raises
    ------
    ValueError
        If `y` has fewer rows than the lag order.

    Notes
    -----
    Lütkepohl p. 37
    """
    p = len(coefs)
    k = len(coefs[0])
    if y.shape[0] < p:
        # Fixed typo in the message ("must by have" -> "must have").
        raise ValueError(
            f"y must have at least order ({p}) observations. "
            f"Got {y.shape[0]}."
        )
    # Deterministic part of the forecast.
    forcs = np.zeros((steps, k))
    if exog is not None and trend_coefs is not None:
        forcs += np.dot(exog, trend_coefs)
    # to make existing code (with trend_coefs=intercept and without exog)
    # work:
    elif exog is None and trend_coefs is not None:
        forcs += trend_coefs

    # Recursion y_t(h) = det + sum_{i=1}^p A_i y_t(h - i), using observed
    # values of y when h - i <= 0 and previously computed forecasts after.
    for h in range(1, steps + 1):
        f = forcs[h - 1]
        for i in range(1, p + 1):
            if h - i <= 0:
                # e.g. when h=1, h-1 = 0, which is y[-1]
                prior_y = y[h - i - 1]
            else:
                # e.g. when h=2, h-1=1, which is forcs[0]
                prior_y = forcs[h - i - 1]
            # i=1 is coefs[0]
            f = f + np.dot(coefs[i - 1], prior_y)
        forcs[h - 1] = f

    return forcs
def _forecast_vars(steps, ma_coefs, sig_u):
    """Return the per-equation forecast error variances for each horizon.

    Helper used by VECMResults.  Note that the local ``covs`` here matches
    the definition in VARProcess and as such differs from the one in
    VARResults!

    Parameters
    ----------
    steps : int
        Number of horizons.
    ma_coefs : ndarray
        MA representation matrices.
    sig_u : ndarray
        White-noise covariance.

    Returns
    -------
    ndarray (steps x neqs)
        Diagonal of each forecast error covariance matrix.
    """
    covs = mse(ma_coefs, sig_u, steps)
    # Extract the diagonal of every (neqs x neqs) covariance at once.
    diag_idx = np.arange(len(sig_u))
    return covs[:, diag_idx, diag_idx]
def forecast_interval(
    y, coefs, trend_coefs, sig_u, steps=5, alpha=0.05, exog=1
):
    """Point forecast plus symmetric Gaussian (1 - alpha) bounds."""
    assert 0 < alpha < 1
    q = util.norm_signif_level(alpha)

    point = forecast(y, coefs, trend_coefs, steps, exog)
    # Per-equation forecast standard errors from the MA representation.
    sigma = np.sqrt(_forecast_vars(steps, ma_rep(coefs, steps), sig_u))

    return point, point - q * sigma, point + q * sigma
def var_loglike(resid, omega, nobs):
    r"""
    Returns the value of the VAR(p) log-likelihood.

    Parameters
    ----------
    resid : ndarray (T x K)
    omega : ndarray
        Sigma hat matrix. Each element i,j is the average product of the
        OLS residual for variable i and the OLS residual for variable j or
        np.dot(resid.T,resid)/nobs. There should be no correction for the
        degrees of freedom.
    nobs : int

    Returns
    -------
    llf : float
        The value of the loglikelihood function for a VAR(p) model

    Notes
    -----
    The loglikelihood function for the VAR(p) is

    .. math::

        -\left(\frac{T}{2}\right)
        \left(\ln\left|\Omega\right|-K\ln\left(2\pi\right)-K\right)
    """
    neqs = len(omega)
    log_det = logdet_symm(np.asarray(omega))
    # Constant term plus determinant term of the Gaussian log-likelihood.
    const_part = -(nobs * neqs / 2) * np.log(2 * np.pi)
    det_part = -(nobs / 2) * (log_det + neqs)
    return const_part + det_part
def _reordered(self, order):
    """
    Return a new `VARResults` in which the endogenous variables -- and the
    arrays that depend on their ordering (lagged endog, coefficient rows /
    columns and the residual covariance) -- are rearranged according to the
    integer positions in `order`.
    """
    # Create new arrays to hold rearranged results from .fit()
    endog = self.endog
    endog_lagged = self.endog_lagged
    params = self.params
    sigma_u = self.sigma_u
    names = self.names
    k_ar = self.k_ar
    endog_new = np.zeros_like(endog)
    endog_lagged_new = np.zeros_like(endog_lagged)
    params_new_inc = np.zeros_like(params)
    params_new = np.zeros_like(params)
    sigma_u_new_inc = np.zeros_like(sigma_u)
    sigma_u_new = np.zeros_like(sigma_u)
    num_end = len(self.params[0])
    names_new = []
    # Rearrange elements and fill in new arrays
    k = self.k_trend
    for i, c in enumerate(order):
        endog_new[:, i] = self.endog[:, c]
        if k > 0:
            # NOTE(review): copies params[0, i] rather than params[0, c];
            # looks suspicious for reordering the intercept row -- confirm.
            params_new_inc[0, i] = params[0, i]
            endog_lagged_new[:, 0] = endog_lagged[:, 0]
        for j in range(k_ar):
            # Move the rows belonging to variable c (for every lag j) into
            # position i; k offsets past the deterministic-term rows.
            params_new_inc[i + j * num_end + k, :] = self.params[
                c + j * num_end + k, :
            ]
            endog_lagged_new[:, i + j * num_end + k] = endog_lagged[
                :, c + j * num_end + k
            ]
        sigma_u_new_inc[i, :] = sigma_u[c, :]
        names_new.append(names[c])
    # Second pass: permute the columns to match the new row ordering.
    for i, c in enumerate(order):
        params_new[:, i] = params_new_inc[:, c]
        sigma_u_new[:, i] = sigma_u_new_inc[:, c]
    return VARResults(
        endog=endog_new,
        endog_lagged=endog_lagged_new,
        params=params_new,
        sigma_u=sigma_u_new,
        lag_order=self.k_ar,
        model=self.model,
        trend="c",
        names=names_new,
        dates=self.dates,
    )
def orth_ma_rep(results, maxn=10, P=None):
    r"""Compute Orthogonalized MA coefficient matrices using P matrix such
    that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
    decomposition of :math:`\Sigma_u`

    Parameters
    ----------
    results : VARResults or VECMResults
    maxn : int
        Number of coefficient matrices to compute
    P : ndarray (neqs x neqs), optional
        Matrix such that Sigma_u = PP', defaults to the Cholesky
        decomposition.

    Returns
    -------
    coefs : ndarray (maxn x neqs x neqs)
    """
    if P is None:
        P = results._chol_sigma_u
    # Right-multiply every MA matrix by P.
    return np.array([phi @ P for phi in results.ma_rep(maxn=maxn)])
def test_normality(results, signif=0.05):
    """
    Test assumption of normal-distributed errors using Jarque-Bera-style
    omnibus Chi^2 test

    Parameters
    ----------
    results : VARResults or statsmodels.tsa.vecm.vecm.VECMResults
    signif : float
        The test's significance level.

    Notes
    -----
    H0 (null) : data are generated by a Gaussian-distributed process

    Returns
    -------
    result : NormalityTestResults

    References
    ----------
    .. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
       *Analysis*. Springer.

    .. [2] Kilian, L. & Demiroglu, U. (2000). "Residual-Based Tests for
       Normality in Autoregressions: Asymptotic Theory and Simulation
       Evidence." Journal of Business & Economic Statistics
    """
    # Center the residuals and standardize them with the inverse Cholesky
    # factor of their covariance estimate.
    resid_c = results.resid - results.resid.mean(0)
    sig = np.dot(resid_c.T, resid_c) / results.nobs
    Pinv = np.linalg.inv(np.linalg.cholesky(sig))

    w = np.dot(Pinv, resid_c.T)
    # Third and fourth standardized moments (skewness / excess kurtosis),
    # one per equation.
    b1 = (w ** 3).sum(1)[:, None] / results.nobs
    b2 = (w ** 4).sum(1)[:, None] / results.nobs - 3

    # Both quadratic forms are 1x1 arrays.
    lam_skew = results.nobs * np.dot(b1.T, b1) / 6
    lam_kurt = results.nobs * np.dot(b2.T, b2) / 24

    # Use .item() rather than float() here: converting an ndarray with
    # ndim > 0 to a Python scalar is deprecated as of NumPy 1.25.
    lam_omni = (lam_skew + lam_kurt).item()
    omni_dist = stats.chi2(results.neqs * 2)
    omni_pvalue = float(omni_dist.sf(lam_omni))
    crit_omni = float(omni_dist.ppf(1 - signif))

    return NormalityTestResults(
        lam_omni, crit_omni, omni_pvalue, results.neqs * 2, signif
    )
class LagOrderResults:
    """
    Results class for choosing a model's lag order.

    Parameters
    ----------
    ics : dict
        The keys are the strings ``"aic"``, ``"bic"``, ``"hqic"``, and
        ``"fpe"``. A corresponding value is a list of information criteria for
        various numbers of lags.
    selected_orders : dict
        The keys are the strings ``"aic"``, ``"bic"``, ``"hqic"``, and
        ``"fpe"``. The corresponding value is an integer specifying the number
        of lags chosen according to a given criterion (key).
    vecm : bool, default: `False`
        `True` indicates that the model is a VECM. In case of a VAR model
        this argument must be `False`.

    Notes
    -----
    In case of a VECM the shown lags are lagged differences.
    """

    def __init__(self, ics, selected_orders, vecm=False):
        model_label = "VECM" if vecm else "VAR"
        self.title = (
            model_label + " Order Selection" + " (* highlights the minimums)"
        )
        self.ics = ics
        self.selected_orders = selected_orders
        self.vecm = vecm
        # Convenience attributes: selected order per criterion.
        self.aic = selected_orders["aic"]
        self.bic = selected_orders["bic"]
        self.hqic = selected_orders["hqic"]
        self.fpe = selected_orders["fpe"]

    def summary(self):
        """Build a SimpleTable of all criteria by lag order, marking each
        criterion's minimizing order with an asterisk."""
        crit_names = sorted(self.ics)  # ["aic", "bic", "hqic", "fpe"]
        cells = np.array(
            [["%#10.4g" % v for v in self.ics[c]] for c in crit_names],
            dtype=object,
        ).T
        for col, name in enumerate(crit_names):
            row = int(self.selected_orders[name])
            cells[row, col] += "*"
        return SimpleTable(
            cells,
            [name.upper() for name in crit_names],
            lrange(len(cells)),
            title=self.title,
        )

    def __str__(self):
        cls_path = f"{self.__module__}.{self.__class__.__name__}"
        return (
            f"<{cls_path} object. Selected "
            f"orders are: AIC -> {str(self.aic)}, BIC -> {str(self.bic)}, "
            f"FPE -> {str(self.fpe)}, HQIC -> {str(self.hqic)}>"
        )
# -------------------------------------------------------------------------------
# VARProcess class: for known or unknown VAR process
class VAR(TimeSeriesModel):
    r"""
    Fit VAR(p) process and do lag order selection
    .. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
    Parameters
    ----------
    endog : array_like
        2-d endogenous response variable. The independent variable.
    exog : array_like
        2-d exogenous variable.
    dates : array_like
        must match number of rows of endog
    References
    ----------
    Lütkepohl (2005) New Introduction to Multiple Time Series Analysis
    """
    # Old attribute name ``y`` kept as a deprecated alias of ``endog``.
    y = deprecated_alias("y", "endog", remove_version="0.11.0")
    def __init__(
        self, endog, exog=None, dates=None, freq=None, missing="none"
    ):
        super().__init__(endog, exog, dates, freq, missing=missing)
        # A VAR requires at least two series; a 1-d endog cannot be used.
        if self.endog.ndim == 1:
            raise ValueError("Only gave one variable to VAR")
        self.neqs = self.endog.shape[1]
        self.n_totobs = len(endog)
    def predict(self, params, start=None, end=None, lags=1, trend="c"):
        """
        Returns in-sample predictions or forecasts

        Parameters
        ----------
        params : array_like
            Stacked coefficient array as produced by `fit` (deterministic
            term rows first, then the lag coefficient rows).
        start, end : optional
            Prediction sample bounds, resolved by `_get_prediction_index`.
        lags : int
            Lag order used to build the regressor matrix.
        trend : str
            Deterministic trend specification ("c", "ct", "ctt", "n").
        """
        params = np.array(params)
        if start is None:
            start = lags
        # Handle start, end
        (
            start,
            end,
            out_of_sample,
            prediction_index,
        ) = self._get_prediction_index(start, end)
        if end < start:
            raise ValueError("end is before start")
        if end == start + out_of_sample:
            return np.array([])
        k_trend = util.get_trendorder(trend)
        k = self.neqs
        k_ar = lags
        predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
        if k_trend != 0:
            intercept = params[:k_trend]
            predictedvalues += intercept
        y = self.endog
        x = util.get_var_endog(y, lags, trend=trend, has_constant="raise")
        # In-sample fitted values, aligned into the requested window.
        fittedvalues = np.dot(x, params)
        fv_start = start - k_ar
        pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
        fv_end = min(len(fittedvalues), end - k_ar + 1)
        predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
        if not out_of_sample:
            return predictedvalues
        # fit out of sample
        y = y[-k_ar:]
        coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1, 2)
        # NOTE(review): ``intercept`` is only bound when k_trend != 0, so an
        # out-of-sample prediction with trend="n" would raise NameError
        # here -- confirm intended behavior.
        predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
        return predictedvalues
    def fit(
        self,
        maxlags: int | None = None,
        method="ols",
        ic=None,
        trend="c",
        verbose=False,
    ):
        # todo: this code is only supporting deterministic terms as exog.
        # This means that all exog-variables have lag 0. If dealing with
        # different exogs is necessary, a `lags_exog`-parameter might make
        # sense (e.g. a sequence of ints specifying lags).
        # Alternatively, leading zeros for exog-variables with smaller number
        # of lags than the maximum number of exog-lags might work.
        """
        Fit the VAR model
        Parameters
        ----------
        maxlags : {int, None}, default None
            Maximum number of lags to check for order selection, defaults to
            12 * (nobs/100.)**(1./4), see select_order function
        method : {'ols'}
            Estimation method to use
        ic : {'aic', 'fpe', 'hqic', 'bic', None}
            Information criterion to use for VAR order selection.
            aic : Akaike
            fpe : Final prediction error
            hqic : Hannan-Quinn
            bic : Bayesian a.k.a. Schwarz
        verbose : bool, default False
            Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "n"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "n" - no constant, no trend
            Note that these are prepended to the columns of the dataset.
        Returns
        -------
        VARResults
            Estimation results
        Notes
        -----
        See Lütkepohl pp. 146-153 for implementation details.
        """
        lags = maxlags
        if trend not in ["c", "ct", "ctt", "n"]:
            raise ValueError("trend '{}' not supported for VAR".format(trend))
        # When an information criterion is given, pick the lag order that
        # minimizes it; otherwise use maxlags directly (default 1).
        if ic is not None:
            selections = self.select_order(maxlags=maxlags)
            if not hasattr(selections, ic):
                raise ValueError(
                    "%s not recognized, must be among %s"
                    % (ic, sorted(selections))
                )
            lags = getattr(selections, ic)
            if verbose:
                print(selections)
                print("Using %d based on %s criterion" % (lags, ic))
        else:
            if lags is None:
                lags = 1
        k_trend = util.get_trendorder(trend)
        orig_exog_names = self.exog_names
        self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
        self.nobs = self.n_totobs - lags
        # add exog to data.xnames (necessary because the length of xnames also
        # determines the allowed size of VARResults.params)
        if self.exog is not None:
            if orig_exog_names:
                x_names_to_add = orig_exog_names
            else:
                x_names_to_add = [
                    ("exog%d" % i) for i in range(self.exog.shape[1])
                ]
            self.data.xnames = (
                self.data.xnames[:k_trend]
                + x_names_to_add
                + self.data.xnames[k_trend:]
            )
        self.data.cov_names = pd.MultiIndex.from_product(
            (self.data.xnames, self.data.ynames)
        )
        return self._estimate_var(lags, trend=trend)
    def _estimate_var(self, lags, offset=0, trend="c"):
        """
        Estimate a VAR(`lags`) by OLS and wrap the result.
        lags : int
            Lags of the endogenous variable.
        offset : int
            Periods to drop from beginning-- for order selection so it's an
            apples-to-apples comparison
        trend : {str, None}
            As per above
        """
        # have to do this again because select_order does not call fit
        self.k_trend = k_trend = util.get_trendorder(trend)
        if offset < 0:  # pragma: no cover
            raise ValueError("offset must be >= 0")
        nobs = self.n_totobs - lags - offset
        endog = self.endog[offset:]
        exog = None if self.exog is None else self.exog[offset:]
        z = util.get_var_endog(endog, lags, trend=trend, has_constant="raise")
        if exog is not None:
            # TODO: currently only deterministic terms supported (exoglags==0)
            # and since exoglags==0, x will be an array of size 0.
            x = util.get_var_endog(
                exog[-nobs:], 0, trend="n", has_constant="raise"
            )
            x_inst = exog[-nobs:]
            x = np.column_stack((x, x_inst))
            del x_inst  # free memory
            temp_z = z
            # Splice exog columns between the trend columns and the lagged
            # endog columns of the regressor matrix.
            z = np.empty((x.shape[0], x.shape[1] + z.shape[1]))
            z[:, : self.k_trend] = temp_z[:, : self.k_trend]
            z[:, self.k_trend : self.k_trend + x.shape[1]] = x
            z[:, self.k_trend + x.shape[1] :] = temp_z[:, self.k_trend :]
            del temp_z, x  # free memory
        # the following modification of z is necessary to get the same results
        # as JMulTi for the constant-term-parameter...
        for i in range(self.k_trend):
            if (np.diff(z[:, i]) == 1).all():  # modify the trend-column
                z[:, i] += lags
            # make the same adjustment for the quadratic term
            if (np.diff(np.sqrt(z[:, i])) == 1).all():
                z[:, i] = (np.sqrt(z[:, i]) + lags) ** 2
        y_sample = endog[lags:]
        # Lütkepohl p75, about 5x faster than stated formula
        params = np.linalg.lstsq(z, y_sample, rcond=1e-15)[0]
        resid = y_sample - np.dot(z, params)
        # Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
        # process $u$
        # equivalent definition
        # .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
        # Z^\prime) Y
        # Ref: Lütkepohl p.75
        # df_resid right now is T - Kp - 1, which is a suggested correction
        avobs = len(y_sample)
        if exog is not None:
            k_trend += exog.shape[1]
        df_resid = avobs - (self.neqs * lags + k_trend)
        sse = np.dot(resid.T, resid)
        # If there are no residual degrees of freedom the covariance is
        # undefined; fill with NaN rather than dividing by zero.
        if df_resid:
            omega = sse / df_resid
        else:
            omega = np.full_like(sse, np.nan)
        varfit = VARResults(
            endog,
            z,
            params,
            omega,
            lags,
            names=self.endog_names,
            trend=trend,
            dates=self.data.dates,
            model=self,
            exog=self.exog,
        )
        return VARResultsWrapper(varfit)
    def select_order(self, maxlags=None, trend="c"):
        """
        Compute lag order selections based on each of the available information
        criteria
        Parameters
        ----------
        maxlags : int
            if None, defaults to 12 * (nobs/100.)**(1./4)
        trend : str {"n", "c", "ct", "ctt"}
            * "n" - no deterministic terms
            * "c" - constant term
            * "ct" - constant and linear term
            * "ctt" - constant, linear, and quadratic term
        Returns
        -------
        selections : LagOrderResults
        """
        # Largest lag order for which the OLS problem is still estimable.
        ntrend = len(trend) if trend.startswith("c") else 0
        max_estimable = (self.n_totobs - self.neqs - ntrend) // (1 + self.neqs)
        if maxlags is None:
            maxlags = int(round(12 * (len(self.endog) / 100.0) ** (1 / 4.0)))
            # TODO: This expression shows up in a bunch of places, but
            # in some it is `int` and in others `np.ceil`. Also in some
            # it multiplies by 4 instead of 12. Let's put these all in
            # one place and document when to use which variant.
            # Ensure enough obs to estimate model with maxlags
            maxlags = min(maxlags, max_estimable)
        else:
            if maxlags > max_estimable:
                raise ValueError(
                    "maxlags is too large for the number of observations and "
                    "the number of equations. The largest model cannot be "
                    "estimated."
                )
        ics = defaultdict(list)
        p_min = 0 if self.exog is not None or trend != "n" else 1
        for p in range(p_min, maxlags + 1):
            # exclude some periods to same amount of data used for each lag
            # order
            result = self._estimate_var(p, offset=maxlags - p, trend=trend)
            for k, v in result.info_criteria.items():
                ics[k].append(v)
        selected_orders = dict(
            (k, np.array(v).argmin() + p_min) for k, v in ics.items()
        )
        return LagOrderResults(ics, selected_orders, vecm=False)
    @classmethod
    def from_formula(
        cls, formula, data, subset=None, drop_cols=None, *args, **kwargs
    ):
        """
        Not implemented. Formulas are not supported for VAR models.
        """
        raise NotImplementedError("formulas are not supported for VAR models.")
class VARProcess:
    """
    Class represents a known VAR(p) process

    Parameters
    ----------
    coefs : ndarray (p x k x k)
        coefficients for lags of endog, part or params reshaped
    coefs_exog : ndarray
        parameters for trend and user provided exog
    sigma_u : ndarray (k x k)
        residual covariance
    names : sequence (length k)
    _params_info : dict
        internal dict to provide information about the composition of `params`,
        specifically `k_trend` (trend order) and `k_exog_user` (the number of
        exog variables provided by the user).
        If it is None, then coefs_exog are assumed to be for the intercept and
        trend.
    """
    def __init__(
        self, coefs, coefs_exog, sigma_u, names=None, _params_info=None
    ):
        # Number of lags p and number of equations K of the process.
        self.k_ar = len(coefs)
        self.neqs = coefs.shape[1]
        self.coefs = coefs
        self.coefs_exog = coefs_exog
        # Note reshaping 1-D coefs_exog to 2_D makes unit tests fail
        self.sigma_u = sigma_u
        self.names = names
        if _params_info is None:
            _params_info = {}
        self.k_exog_user = _params_info.get("k_exog_user", 0)
        # Infer the number of deterministic-term columns when not given:
        # rows of coefs_exog minus the user-provided exog count.
        if self.coefs_exog is not None:
            k_ex = self.coefs_exog.shape[0] if self.coefs_exog.ndim != 1 else 1
            k_c = k_ex - self.k_exog_user
        else:
            k_c = 0
        self.k_trend = _params_info.get("k_trend", k_c)
        # TODO: we need to distinguish exog including trend and exog_user
        self.k_exog = self.k_trend + self.k_exog_user
        # First deterministic column is taken to be the intercept.
        if self.k_trend > 0:
            if coefs_exog.ndim == 2:
                self.intercept = coefs_exog[:, 0]
            else:
                self.intercept = coefs_exog
        else:
            self.intercept = np.zeros(self.neqs)
    def get_eq_index(self, name):
        """Return integer position of requested equation name"""
        return util.get_index(self.names, name)
    def __str__(self):
        # Human-readable summary: order, dimension, stability and mean.
        output = "VAR(%d) process for %d-dimensional response y_t" % (
            self.k_ar,
            self.neqs,
        )
        output += "\nstable: %s" % self.is_stable()
        output += "\nmean: %s" % self.mean()
        return output
    def is_stable(self, verbose=False):
        """Determine stability based on model coefficients

        Parameters
        ----------
        verbose : bool
            Print eigenvalues of the VAR(1) companion

        Notes
        -----
        Checks if det(I - Az) = 0 for any mod(z) <= 1, so all the eigenvalues of
        the companion matrix must lie outside the unit circle
        """
        # Delegates to the module-level is_stable helper.
        return is_stable(self.coefs, verbose=verbose)
    def simulate_var(self, steps=None, offset=None, seed=None, initial_values=None, nsimulations=None):
        """
        simulate the VAR(p) process for the desired number of steps

        Parameters
        ----------
        steps : None or int
            number of observations to simulate, this includes the initial
            observations to start the autoregressive process.
            If offset is not None, then exog of the model are used if they were
            provided in the model
        offset : None or ndarray (steps, neqs)
            If not None, then offset is added as an observation specific
            intercept to the autoregression. If it is None and either trend
            (including intercept) or exog were used in the VAR model, then
            the linear predictor of those components will be used as offset.
            This should have the same number of rows as steps, and the same
            number of columns as endogenous variables (neqs).
        seed : {None, int}
            If seed is not None, then it will be used with for the random
            variables generated by numpy.random.
        initial_values : array_like, optional
            Initial values for use in the simulation. Shape should be
            (nlags, neqs) or (neqs,). Values should be ordered from less to
            most recent. Note that this values will be returned by the
            simulation as the first values of `endog_simulated` and they
            will count for the total number of steps.
        nsimulations : {None, int}
            Number of simulations to perform. If `nsimulations` is None it will
            perform one simulation and return value will have shape (steps, neqs).

        Returns
        -------
        endog_simulated : nd_array
            Endog of the simulated VAR process. Shape will be (nsimulations, steps, neqs)
            or (steps, neqs) if `nsimulations` is None.
        """
        steps_ = None
        if offset is None:
            if self.k_exog_user > 0 or self.k_trend > 1:
                # if more than intercept
                # endog_lagged contains all regressors, trend, exog_user
                # and lagged endog, trimmed initial observations
                # NOTE(review): `self.endog_lagged` only exists on fitted
                # results (VARResults); a bare VARProcess with exog or a
                # higher-order trend would fail here -- confirm intent.
                offset = self.endog_lagged[:, : self.k_exog].dot(
                    self.coefs_exog.T
                )
                steps_ = self.endog_lagged.shape[0]
            else:
                offset = self.intercept
        else:
            steps_ = offset.shape[0]
        # default, but over written if exog or offset are used
        if steps is None:
            if steps_ is None:
                steps = 1000
            else:
                steps = steps_
        else:
            if steps_ is not None and steps != steps_:
                # NOTE(review): implicit concatenation renders this message
                # as "...steps mustbe equal..." (missing space).
                raise ValueError(
                    "if exog or offset are used, then steps must"
                    "be equal to their length or None"
                )
        y = util.varsim(
            self.coefs,
            offset,
            self.sigma_u,
            steps=steps,
            seed=seed,
            initial_values=initial_values,
            nsimulations=nsimulations
        )
        return y
    def plotsim(self, steps=None, offset=None, seed=None):
        """
        Plot a simulation from the VAR(p) process for the desired number of
        steps
        """
        y = self.simulate_var(steps=steps, offset=offset, seed=seed)
        return plotting.plot_mts(y)
    def intercept_longrun(self):
        r"""
        Long run intercept of stable VAR process

        Lütkepohl eq. 2.1.23

        .. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha

        where \alpha is the intercept (parameter of the constant)
        """
        # Solve (I - A_1 - ... - A_p) mu = intercept for mu.
        return np.linalg.solve(self._char_mat, self.intercept)
    def mean(self):
        r"""
        Long run intercept of stable VAR process

        Warning: trend and exog except for intercept are ignored for this.
        This might change in future versions.

        Lütkepohl eq. 2.1.23

        .. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha

        where \alpha is the intercept (parameter of the constant)
        """
        return self.intercept_longrun()
    def ma_rep(self, maxn=10):
        r"""
        Compute MA(:math:`\infty`) coefficient matrices

        Parameters
        ----------
        maxn : int
            Number of coefficient matrices to compute

        Returns
        -------
        coefs : ndarray (maxn x k x k)
        """
        return ma_rep(self.coefs, maxn=maxn)
    def orth_ma_rep(self, maxn=10, P=None):
        r"""
        Compute orthogonalized MA coefficient matrices using P matrix such
        that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
        decomposition of :math:`\Sigma_u`

        Parameters
        ----------
        maxn : int
            Number of coefficient matrices to compute
        P : ndarray (k x k), optional
            Matrix such that Sigma_u = PP', defaults to Cholesky descomp

        Returns
        -------
        coefs : ndarray (maxn x k x k)
        """
        return orth_ma_rep(self, maxn, P)
    def long_run_effects(self):
        r"""Compute long-run effect of unit impulse

        .. math::

            \Psi_\infty = \sum_{i=0}^\infty \Phi_i
        """
        # Sum of all MA coefficient matrices equals (I - A_1 - ... - A_p)^-1.
        return np.linalg.inv(self._char_mat)
    @cache_readonly
    def _chol_sigma_u(self):
        # Lower-triangular Cholesky factor of the residual covariance.
        return np.linalg.cholesky(self.sigma_u)
    @cache_readonly
    def _char_mat(self):
        """Characteristic matrix of the VAR"""
        return np.eye(self.neqs) - self.coefs.sum(0)
    def acf(self, nlags=None):
        """Compute theoretical autocovariance function

        Returns
        -------
        acf : ndarray (p x k x k)
        """
        return var_acf(self.coefs, self.sigma_u, nlags=nlags)
    def acorr(self, nlags=None):
        """
        Autocorrelation function

        Parameters
        ----------
        nlags : int or None
            The number of lags to include in the autocovariance function. The
            default is the number of lags included in the model.

        Returns
        -------
        acorr : ndarray
            Autocorrelation and cross correlations (nlags, neqs, neqs)
        """
        return util.acf_to_acorr(self.acf(nlags=nlags))
    def plot_acorr(self, nlags=10, linewidth=8):
        """Plot theoretical autocorrelation function"""
        fig = plotting.plot_full_acorr(
            self.acorr(nlags=nlags), linewidth=linewidth
        )
        return fig
    def forecast(self, y, steps, exog_future=None):
        """Produce linear minimum MSE forecasts for desired number of steps
        ahead, using prior values y

        Parameters
        ----------
        y : ndarray (p x k)
        steps : int

        Returns
        -------
        forecasts : ndarray (steps x neqs)

        Notes
        -----
        Lütkepohl pp 37-38

        NOTE(review): this method reads `self.exog`, `self.trend` and
        `self.n_totobs`, which are set on VARResults but not on a bare
        VARProcess -- confirm it is only called on fitted results.
        """
        if self.exog is None and exog_future is not None:
            raise ValueError(
                "No exog in model, so no exog_future supported "
                "in forecast method."
            )
        if self.exog is not None and exog_future is None:
            raise ValueError(
                "Please provide an exog_future argument to "
                "the forecast method."
            )
        exog_future = array_like(
            exog_future, "exog_future", optional=True, ndim=2
        )
        if exog_future is not None:
            if exog_future.shape[0] != steps:
                err_msg = f"""\
exog_future only has {exog_future.shape[0]} observations. It must have \
steps ({steps}) observations.
"""
                raise ValueError(err_msg)
        trend_coefs = None if self.coefs_exog.size == 0 else self.coefs_exog.T
        # Rebuild the deterministic regressors (constant/trend/exog) for the
        # forecast horizon, extrapolating the time index out of sample.
        exogs = []
        if self.trend.startswith("c"):  # constant term
            exogs.append(np.ones(steps))
        exog_lin_trend = np.arange(
            self.n_totobs + 1, self.n_totobs + 1 + steps
        )
        if "t" in self.trend:
            exogs.append(exog_lin_trend)
        if "tt" in self.trend:
            exogs.append(exog_lin_trend ** 2)
        if exog_future is not None:
            exogs.append(exog_future)
        if not exogs:
            exog_future = None
        else:
            exog_future = np.column_stack(exogs)
        return forecast(y, self.coefs, trend_coefs, steps, exog_future)
    # TODO: use `mse` module-level function?
    def mse(self, steps):
        r"""
        Compute theoretical forecast error variance matrices

        Parameters
        ----------
        steps : int
            Number of steps ahead

        Notes
        -----
        .. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi \Sigma_u \Phi^T

        Returns
        -------
        forc_covs : ndarray (steps x neqs x neqs)
        """
        ma_coefs = self.ma_rep(steps)
        k = len(self.sigma_u)
        forc_covs = np.zeros((steps, k, k))
        prior = np.zeros((k, k))
        for h in range(steps):
            # Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
            phi = ma_coefs[h]
            var = phi @ self.sigma_u @ phi.T
            forc_covs[h] = prior = prior + var
        return forc_covs
    forecast_cov = mse
    def _forecast_vars(self, steps):
        # Per-step forecast variances: diagonal of each covariance matrix.
        covs = self.forecast_cov(steps)
        # Take diagonal for each cov
        inds = np.arange(self.neqs)
        return covs[:, inds, inds]
    def forecast_interval(self, y, steps, alpha=0.05, exog_future=None):
        """
        Construct forecast interval estimates assuming the y are Gaussian

        Parameters
        ----------
        y : {ndarray, None}
            The initial values to use for the forecasts. If None,
            the last k_ar values of the original endogenous variables are
            used.
        steps : int
            Number of steps ahead to forecast
        alpha : float, optional
            The significance level for the confidence intervals.
        exog_future : ndarray, optional
            Forecast values of the exogenous variables. Should include
            constant, trend, etc. as needed, including extrapolating out
            of sample.

        Returns
        -------
        point : ndarray
            Mean value of forecast
        lower : ndarray
            Lower bound of confidence interval
        upper : ndarray
            Upper bound of confidence interval

        Notes
        -----
        Lütkepohl pp. 39-40
        """
        if not 0 < alpha < 1:
            raise ValueError("alpha must be between 0 and 1")
        q = util.norm_signif_level(alpha)
        point_forecast = self.forecast(y, steps, exog_future=exog_future)
        sigma = np.sqrt(self._forecast_vars(steps))
        forc_lower = point_forecast - q * sigma
        forc_upper = point_forecast + q * sigma
        return point_forecast, forc_lower, forc_upper
    def to_vecm(self):
        """Return the VECM representation {"Gamma", "Pi"} of this VAR.

        Pi = -(I - A_1 - ... - A_p); Gamma_i = -(A_{i+1} + ... + A_p).
        """
        k = self.coefs.shape[1]
        p = self.coefs.shape[0]
        A = self.coefs
        pi = -(np.identity(k) - np.sum(A, 0))
        gamma = np.zeros((p - 1, k, k))
        for i in range(p - 1):
            gamma[i] = -(np.sum(A[i + 1 :], 0))
        # NOTE(review): for p == 1 this concatenates an empty sequence,
        # which numpy rejects -- confirm p >= 2 is assumed here.
        gamma = np.concatenate(gamma, 1)
        return {"Gamma": gamma, "Pi": pi}
# ----------------------------------------------------------------------------
# VARResults class
class VARResults(VARProcess):
    """Estimate VAR(p) process with fixed number of lags

    Parameters
    ----------
    endog : ndarray
    endog_lagged : ndarray
    params : ndarray
    sigma_u : ndarray
    lag_order : int
    model : VAR model instance
    trend : str {'n', 'c', 'ct'}
    names : array_like
        List of names of the endogenous variables in order of appearance in
        `endog`.
    dates
    exog : ndarray

    Attributes
    ----------
    params : ndarray (p x K x K)
        Estimated A_i matrices, A_i = coefs[i-1]
    dates
    endog
    endog_lagged
    k_ar : int
        Order of VAR process
    k_trend : int
    model
    names
    neqs : int
        Number of variables (equations)
    nobs : int
    n_totobs : int
    params : ndarray (Kp + 1) x K
        A_i matrices and intercept in stacked form [int A_1 ... A_p]
    names : list
        variables names
    sigma_u : ndarray (K x K)
        Estimate of white noise process variance Var[u_t]
    """
    # Label used by summary/result machinery to identify the model class.
    _model_type = "VAR"
    def __init__(
        self,
        endog,
        endog_lagged,
        params,
        sigma_u,
        lag_order,
        model=None,
        trend="c",
        names=None,
        dates=None,
        exog=None,
    ):
        """Store estimation output and initialize the VARProcess parent.

        Splits the stacked parameter matrix `params` into deterministic/exog
        coefficients and the lag coefficient matrices A_1, ..., A_p.
        """
        self.model = model
        self.endog = endog
        self.endog_lagged = endog_lagged
        self.dates = dates
        self.n_totobs, neqs = self.endog.shape
        # Effective sample size after losing lag_order initial observations.
        self.nobs = self.n_totobs - lag_order
        self.trend = trend
        k_trend = util.get_trendorder(trend)
        self.exog_names = util.make_lag_names(
            names, lag_order, k_trend, model.data.orig_exog
        )
        self.params = params
        self.exog = exog
        # Initialize VARProcess parent class
        # construct coefficient matrices
        # Each matrix needs to be transposed
        # Rows of `params` are ordered [trend terms, user exog, lags].
        endog_start = k_trend
        if exog is not None:
            k_exog_user = exog.shape[1]
            endog_start += k_exog_user
        else:
            k_exog_user = 0
        reshaped = self.params[endog_start:]
        reshaped = reshaped.reshape((lag_order, neqs, neqs))
        # Need to transpose each coefficient matrix
        coefs = reshaped.swapaxes(1, 2).copy()
        self.coefs_exog = params[:endog_start].T
        self.k_exog = self.coefs_exog.shape[1]
        self.k_exog_user = k_exog_user
        # maybe change to params class, distinguish exog_all versus exog_user
        # see issue #4535
        _params_info = {
            "k_trend": k_trend,
            "k_exog_user": k_exog_user,
            "k_ar": lag_order,
        }
        super().__init__(
            coefs,
            self.coefs_exog,
            sigma_u,
            names=names,
            _params_info=_params_info,
        )
def plot(self):
"""Plot input time series"""
return plotting.plot_mts(
self.endog, names=self.names, index=self.dates
)
@property
def df_model(self):
"""
Number of estimated parameters, including the intercept / trends
"""
return self.neqs * self.k_ar + self.k_exog
@property
def df_resid(self):
"""Number of observations minus number of estimated parameters"""
return self.nobs - self.df_model
@cache_readonly
def fittedvalues(self):
"""
The predicted insample values of the response variables of the model.
"""
return np.dot(self.endog_lagged, self.params)
@cache_readonly
def resid(self):
"""
Residuals of response variable resulting from estimated coefficients
"""
return self.endog[self.k_ar :] - self.fittedvalues
def sample_acov(self, nlags=1):
"""Sample acov"""
return _compute_acov(self.endog[self.k_ar :], nlags=nlags)
def sample_acorr(self, nlags=1):
"""Sample acorr"""
acovs = self.sample_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
def plot_sample_acorr(self, nlags=10, linewidth=8):
"""
Plot sample autocorrelation function
Parameters
----------
nlags : int
The number of lags to use in compute the autocorrelation. Does
not count the zero lag, which will be returned.
linewidth : int
The linewidth for the plots.
Returns
-------
Figure
The figure that contains the plot axes.
"""
fig = plotting.plot_full_acorr(
self.sample_acorr(nlags=nlags), linewidth=linewidth
)
return fig
def resid_acov(self, nlags=1):
"""
Compute centered sample autocovariance (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
return _compute_acov(self.resid, nlags=nlags)
def resid_acorr(self, nlags=1):
"""
Compute sample autocorrelation (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
acovs = self.resid_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
@cache_readonly
def resid_corr(self):
"""
Centered residual correlation matrix
"""
return self.resid_acorr(0)[0]
@cache_readonly
def sigma_u_mle(self):
"""(Biased) maximum likelihood estimate of noise process covariance"""
if not self.df_resid:
return np.zeros_like(self.sigma_u)
return self.sigma_u * self.df_resid / self.nobs
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[params_for_deterministic_terms, A_1, ..., A_p] with the shape
(K x (Kp + number_of_deterministic_terms))
Adjusted to be an unbiased estimator
Ref: Lütkepohl p.74-75
"""
z = self.endog_lagged
return np.kron(np.linalg.inv(z.T @ z), self.sigma_u)
def cov_ybar(self):
r"""Asymptotically consistent estimate of covariance of the sample mean
.. math::
\sqrt(T) (\bar{y} - \mu) \rightarrow
{\cal N}(0, \Sigma_{\bar{y}}) \\
\Sigma_{\bar{y}} = B \Sigma_u B^\prime, \text{where }
B = (I_K - A_1 - \cdots - A_p)^{-1}
Notes
-----
Lütkepohl Proposition 3.3
"""
Ainv = np.linalg.inv(np.eye(self.neqs) - self.coefs.sum(0))
return Ainv @ self.sigma_u @ Ainv.T
# ------------------------------------------------------------
# Estimation-related things
@cache_readonly
def _zz(self):
# Z'Z
return np.dot(self.endog_lagged.T, self.endog_lagged)
@property
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients w/o exog
"""
# drop exog
kn = self.k_exog * self.neqs
return self.cov_params()[kn:, kn:]
@cache_readonly
def _cov_sigma(self):
"""
Estimated covariance matrix of vech(sigma_u)
"""
D_K = tsa.duplication_matrix(self.neqs)
D_Kinv = np.linalg.pinv(D_K)
sigxsig = np.kron(self.sigma_u, self.sigma_u)
return 2 * D_Kinv @ sigxsig @ D_Kinv.T
@cache_readonly
def llf(self):
"Compute VAR(p) loglikelihood"
return var_loglike(self.resid, self.sigma_u_mle, self.nobs)
@cache_readonly
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size"""
stderr = np.sqrt(np.diag(self.cov_params()))
return stderr.reshape((self.df_model, self.neqs), order="C")
bse = stderr # statsmodels interface?
@cache_readonly
def stderr_endog_lagged(self):
"""Stderr_endog_lagged"""
start = self.k_exog
return self.stderr[start:]
@cache_readonly
def stderr_dt(self):
"""Stderr_dt"""
end = self.k_exog
return self.stderr[:end]
@cache_readonly
def tvalues(self):
"""
Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to
test significance.
"""
return self.params / self.stderr
@cache_readonly
def tvalues_endog_lagged(self):
"""tvalues_endog_lagged"""
start = self.k_exog
return self.tvalues[start:]
@cache_readonly
def tvalues_dt(self):
"""tvalues_dt"""
end = self.k_exog
return self.tvalues[:end]
@cache_readonly
def pvalues(self):
"""
Two-sided p-values for model coefficients from Student t-distribution
"""
# return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
return 2 * stats.norm.sf(np.abs(self.tvalues))
@cache_readonly
def pvalues_endog_lagged(self):
"""pvalues_endog_laggd"""
start = self.k_exog
return self.pvalues[start:]
@cache_readonly
def pvalues_dt(self):
"""pvalues_dt"""
end = self.k_exog
return self.pvalues[:end]
# todo: ------------------------------------------------------------------
def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
"""
Plot forecast
"""
mid, lower, upper = self.forecast_interval(
self.endog[-self.k_ar :], steps, alpha=alpha
)
fig = plotting.plot_var_forc(
self.endog,
mid,
lower,
upper,
names=self.names,
plot_stderr=plot_stderr,
)
return fig
# Forecast error covariance functions
def forecast_cov(self, steps=1, method="mse"):
r"""Compute forecast covariance matrices for desired number of steps
Parameters
----------
steps : int
Notes
-----
.. math:: \Sigma_{\hat y}(h) = \Sigma_y(h) + \Omega(h) / T
Ref: Lütkepohl pp. 96-97
Returns
-------
covs : ndarray (steps x k x k)
"""
fc_cov = self.mse(steps)
if method == "mse":
pass
elif method == "auto":
if self.k_exog == 1 and self.k_trend < 2:
# currently only supported if no exog and trend in ['n', 'c']
fc_cov += self._omega_forc_cov(steps) / self.nobs
import warnings
warnings.warn(
"forecast cov takes parameter uncertainty into" "account",
OutputWarning,
stacklevel = 2,
)
else:
raise ValueError("method has to be either 'mse' or 'auto'")
return fc_cov
# Monte Carlo irf standard errors
def irf_errband_mc(
self,
orth=False,
repl=1000,
steps=10,
signif=0.05,
seed=None,
burn=100,
cum=False,
):
"""
Compute Monte Carlo integrated error bands assuming normally
distributed for impulse response functions
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse response error bands
repl : int
number of Monte Carlo replications to perform
steps : int, default 10
number of impulse response periods
signif : float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed : int
np.random.seed for replications
burn : int
number of initial observations to discard for simulation
cum : bool, default False
produce cumulative irf error bands
Notes
-----
Lütkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
ma_coll = self.irf_resim(
orth=orth, repl=repl, steps=steps, seed=seed, burn=burn, cum=cum
)
ma_sort = np.sort(ma_coll, axis=0) # sort to get quantiles
# python 2: round returns float
low_idx = int(round(signif / 2 * repl) - 1)
upp_idx = int(round((1 - signif / 2) * repl) - 1)
lower = ma_sort[low_idx, :, :, :]
upper = ma_sort[upp_idx, :, :, :]
return lower, upper
    def irf_resim(
        self, orth=False, repl=1000, steps=10, seed=None, burn=100, cum=False
    ):
        """
        Simulates impulse response function, returning an array of simulations.
        Used for Sims-Zha error band calculation.

        Parameters
        ----------
        orth : bool, default False
            Compute orthogonalized impulse response error bands
        repl : int
            number of Monte Carlo replications to perform
        steps : int, default 10
            number of impulse response periods
        signif : float (0 < signif <1)
            Significance level for error bars, defaults to 95% CI
        seed : int
            np.random.seed for replications
        burn : int
            number of initial observations to discard for simulation
        cum : bool, default False
            produce cumulative irf error bands

        Notes
        -----
        .. [*] Sims, Christoper A., and Tao Zha. 1999. "Error Bands for Impulse
           Response." Econometrica 67: 1113-1155.

        Returns
        -------
        Array of simulated impulse response functions
        """
        neqs = self.neqs
        k_ar = self.k_ar
        coefs = self.coefs
        sigma_u = self.sigma_u
        intercept = self.intercept
        nobs = self.nobs
        nobs_original = nobs + k_ar
        # One (steps+1, neqs, neqs) IRF array per replication.
        ma_coll = np.zeros((repl, steps + 1, neqs, neqs))
        def fill_coll(sim):
            # Refit a VAR on the simulated sample and extract its MA rep.
            ret = VAR(sim, exog=self.exog).fit(maxlags=k_ar, trend=self.trend)
            ret = (
                ret.orth_ma_rep(maxn=steps) if orth else ret.ma_rep(maxn=steps)
            )
            return ret.cumsum(axis=0) if cum else ret
        for i in range(repl):
            # discard first burn to eliminate correct for starting bias
            # NOTE(review): the same `seed` is passed to every replication;
            # if seed is not None all replications are identical -- confirm
            # this is intended.
            sim = util.varsim(
                coefs,
                intercept,
                sigma_u,
                seed=seed,
                steps=nobs_original + burn,
            )
            sim = sim[burn:]
            ma_coll[i, :, :, :] = fill_coll(sim)
        return ma_coll
    def _omega_forc_cov(self, steps):
        # Approximate MSE matrix \Omega(h) as defined in Lut p97
        # Accounts for parameter-estimation uncertainty in the forecast
        # error covariance; used by forecast_cov(method="auto").
        G = self._zz
        Ginv = np.linalg.inv(G)
        # memoize powers of B for speedup
        # TODO: see if can memoize better
        # TODO: much lower-hanging fruit in caching `np.trace` below.
        B = self._bmat_forc_cov()
        _B = {}
        def bpow(i):
            # Cached companion-matrix power B**i.
            if i not in _B:
                _B[i] = np.linalg.matrix_power(B, i)
            return _B[i]
        phis = self.ma_rep(steps)
        sig_u = self.sigma_u
        omegas = np.zeros((steps, self.neqs, self.neqs))
        for h in range(1, steps + 1):
            # Base case h=1: Omega(1) = (number of params) * Sigma_u.
            if h == 1:
                omegas[h - 1] = self.df_model * self.sigma_u
                continue
            om = omegas[h - 1]
            # Double sum over MA coefficients, Lütkepohl p. 97.
            for i in range(h):
                for j in range(h):
                    Bi = bpow(h - 1 - i)
                    Bj = bpow(h - 1 - j)
                    mult = np.trace(Bi.T @ Ginv @ Bj @ G)
                    om += mult * phis[i] @ sig_u @ phis[j].T
            omegas[h - 1] = om
        return omegas
    def _bmat_forc_cov(self):
        # B as defined on p. 96 of Lut
        # Companion-form matrix augmented with the deterministic terms:
        # top rows keep the deterministic terms fixed, middle rows hold the
        # stacked parameter matrix, lower rows shift the lagged values.
        upper = np.zeros((self.k_exog, self.df_model))
        upper[:, : self.k_exog] = np.eye(self.k_exog)
        lower_dim = self.neqs * (self.k_ar - 1)
        eye = np.eye(lower_dim)
        lower = np.column_stack(
            (
                np.zeros((lower_dim, self.k_exog)),
                eye,
                np.zeros((lower_dim, self.neqs)),
            )
        )
        return np.vstack((upper, self.params.T, lower))
def summary(self):
"""Compute console output summary of estimates
Returns
-------
summary : VARSummary
"""
return VARSummary(self)
def irf(self, periods=10, var_decomp=None, var_order=None):
"""Analyze impulse responses to shocks in system
Parameters
----------
periods : int
var_decomp : ndarray (k x k), lower triangular
Must satisfy Omega = P P', where P is the passed matrix. Defaults
to Cholesky decomposition of Omega
var_order : sequence
Alternate variable order for Cholesky decomposition
Returns
-------
irf : IRAnalysis
"""
if var_order is not None:
raise NotImplementedError(
"alternate variable order not implemented" " (yet)"
)
return IRAnalysis(self, P=var_decomp, periods=periods)
def fevd(self, periods=10, var_decomp=None):
"""
Compute forecast error variance decomposition ("fevd")
Returns
-------
fevd : FEVD instance
"""
return FEVD(self, P=var_decomp, periods=periods)
def reorder(self, order):
"""Reorder variables for structural specification"""
if len(order) != len(self.params[0, :]):
raise ValueError(
"Reorder specification length should match "
"number of endogenous variables"
)
# This converts order to list of integers if given as strings
if isinstance(order[0], str):
order_new = []
for i, nam in enumerate(order):
order_new.append(self.names.index(order[i]))
order = order_new
return _reordered(self, order)
# -----------------------------------------------------------
# VAR Diagnostics: Granger-causality, whiteness of residuals,
# normality, etc
def test_causality(self, caused, causing=None, kind="f", signif=0.05):
"""
Test Granger causality
Parameters
----------
caused : int or str or sequence of int or str
If int or str, test whether the variable specified via this index
(int) or name (str) is Granger-caused by the variable(s) specified
by `causing`.
If a sequence of int or str, test whether the corresponding
variables are Granger-caused by the variable(s) specified
by `causing`.
causing : int or str or sequence of int or str or None, default: None
If int or str, test whether the variable specified via this index
(int) or name (str) is Granger-causing the variable(s) specified by
`caused`.
If a sequence of int or str, test whether the corresponding
variables are Granger-causing the variable(s) specified by
`caused`.
If None, `causing` is assumed to be the complement of `caused`.
kind : {'f', 'wald'}
Perform F-test or Wald (chi-sq) test
signif : float, default 5%
Significance level for computing critical values for test,
defaulting to standard 0.05 level
Notes
-----
Null hypothesis is that there is no Granger-causality for the indicated
variables. The degrees of freedom in the F-test are based on the
number of variables in the VAR system, that is, degrees of freedom
are equal to the number of equations in the VAR times degree of freedom
of a single equation.
Test for Granger-causality as described in chapter 7.6.3 of [1]_.
Test H0: "`causing` does not Granger-cause the remaining variables of
the system" against H1: "`causing` is Granger-causal for the
remaining variables".
Returns
-------
results : CausalityTestResults
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
"""
if not (0 < signif < 1):
raise ValueError("signif has to be between 0 and 1")
allowed_types = (str, int)
if isinstance(caused, allowed_types):
caused = [caused]
if not all(isinstance(c, allowed_types) for c in caused):
raise TypeError(
"caused has to be of type string or int (or a "
"sequence of these types)."
)
caused = [self.names[c] if type(c) == int else c for c in caused]
caused_ind = [util.get_index(self.names, c) for c in caused]
if causing is not None:
if isinstance(causing, allowed_types):
causing = [causing]
if not all(isinstance(c, allowed_types) for c in causing):
raise TypeError(
"causing has to be of type string or int (or "
"a sequence of these types) or None."
)
causing = [self.names[c] if type(c) == int else c for c in causing]
causing_ind = [util.get_index(self.names, c) for c in causing]
else:
causing_ind = [i for i in range(self.neqs) if i not in caused_ind]
causing = [self.names[c] for c in caused_ind]
k, p = self.neqs, self.k_ar
if p == 0:
err = "Cannot test Granger Causality in a model with 0 lags."
raise RuntimeError(err)
# number of restrictions
num_restr = len(causing) * len(caused) * p
num_det_terms = self.k_exog
# Make restriction matrix
C = np.zeros((num_restr, k * num_det_terms + k ** 2 * p), dtype=float)
cols_det = k * num_det_terms
row = 0
for j in range(p):
for ing_ind in causing_ind:
for ed_ind in caused_ind:
C[row, cols_det + ed_ind + k * ing_ind + k ** 2 * j] = 1
row += 1
# Lütkepohl 3.6.5
Cb = np.dot(C, vec(self.params.T))
middle = np.linalg.inv(C @ self.cov_params() @ C.T)
# wald statistic
lam_wald = statistic = Cb @ middle @ Cb
if kind.lower() == "wald":
df = num_restr
dist = stats.chi2(df)
elif kind.lower() == "f":
statistic = lam_wald / num_restr
df = (num_restr, k * self.df_resid)
dist = stats.f(*df)
else:
raise ValueError("kind %s not recognized" % kind)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
return CausalityTestResults(
causing,
caused,
statistic,
crit_value,
pvalue,
df,
signif,
test="granger",
method=kind,
)
def test_inst_causality(self, causing, signif=0.05):
"""
Test for instantaneous causality
Parameters
----------
causing :
If int or str, test whether the corresponding variable is causing
the variable(s) specified in caused.
If sequence of int or str, test whether the corresponding
variables are causing the variable(s) specified in caused.
signif : float between 0 and 1, default 5 %
Significance level for computing critical values for test,
defaulting to standard 0.05 level
verbose : bool
If True, print a table with the results.
Returns
-------
results : dict
A dict holding the test's results. The dict's keys are:
"statistic" : float
The calculated test statistic.
"crit_value" : float
The critical value of the Chi^2-distribution.
"pvalue" : float
The p-value corresponding to the test statistic.
"df" : float
The degrees of freedom of the Chi^2-distribution.
"conclusion" : str {"reject", "fail to reject"}
Whether H0 can be rejected or not.
"signif" : float
Significance level
Notes
-----
Test for instantaneous causality as described in chapters 3.6.3 and
7.6.4 of [1]_.
Test H0: "No instantaneous causality between caused and causing"
against H1: "Instantaneous causality between caused and causing
exists".
Instantaneous causality is a symmetric relation (i.e. if causing is
"instantaneously causing" caused, then also caused is "instantaneously
causing" causing), thus the naming of the parameters (which is chosen
to be in accordance with test_granger_causality()) may be misleading.
This method is not returning the same result as JMulTi. This is
because the test is based on a VAR(k_ar) model in statsmodels
(in accordance to pp. 104, 320-321 in [1]_) whereas JMulTi seems
to be using a VAR(k_ar+1) model.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
"""
if not (0 < signif < 1):
raise ValueError("signif has to be between 0 and 1")
allowed_types = (str, int)
if isinstance(causing, allowed_types):
causing = [causing]
if not all(isinstance(c, allowed_types) for c in causing):
raise TypeError(
"causing has to be of type string or int (or a "
+ "a sequence of these types)."
)
causing = [self.names[c] if type(c) == int else c for c in causing]
causing_ind = [util.get_index(self.names, c) for c in causing]
caused_ind = [i for i in range(self.neqs) if i not in causing_ind]
caused = [self.names[c] for c in caused_ind]
# Note: JMulTi seems to be using k_ar+1 instead of k_ar
k, t, p = self.neqs, self.nobs, self.k_ar
num_restr = len(causing) * len(caused) # called N in Lütkepohl
sigma_u = self.sigma_u
vech_sigma_u = util.vech(sigma_u)
sig_mask = np.zeros(sigma_u.shape)
# set =1 twice to ensure, that all the ones needed are below the main
# diagonal:
sig_mask[causing_ind, caused_ind] = 1
sig_mask[caused_ind, causing_ind] = 1
vech_sig_mask = util.vech(sig_mask)
inds = np.nonzero(vech_sig_mask)[0]
# Make restriction matrix
C = np.zeros((num_restr, len(vech_sigma_u)), dtype=float)
for row in range(num_restr):
C[row, inds[row]] = 1
Cs = np.dot(C, vech_sigma_u)
d = np.linalg.pinv(duplication_matrix(k))
Cd = np.dot(C, d)
middle = np.linalg.inv(Cd @ np.kron(sigma_u, sigma_u) @ Cd.T) / 2
wald_statistic = t * (Cs.T @ middle @ Cs)
df = num_restr
dist = stats.chi2(df)
pvalue = dist.sf(wald_statistic)
crit_value = dist.ppf(1 - signif)
return CausalityTestResults(
causing,
caused,
wald_statistic,
crit_value,
pvalue,
df,
signif,
test="inst",
method="wald",
)
    def test_whiteness(self, nlags=10, signif=0.05, adjusted=False):
        """
        Residual whiteness tests using the Portmanteau test.

        Parameters
        ----------
        nlags : int > 0
            Number of autocovariance lags included in the statistic; must be
            strictly larger than the VAR lag order ``k_ar``.
        signif : float, between 0 and 1
            The significance level of the test.
        adjusted : bool, default False
            Flag indicating to apply small-sample adjustments (each lag term
            is divided by ``nobs - t`` and the sum is scaled by ``nobs**2``
            instead of ``nobs``).

        Returns
        -------
        WhitenessTestResults
            The test results.

        Notes
        -----
        Tests H0: residual autocorrelations up to lag ``nlags`` are zero,
        using the multivariate Portmanteau statistic described in [1]_,
        chapter 4.4.3.

        References
        ----------
        .. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series
           Analysis*. Springer.
        """
        # The chi^2 reference distribution is only valid for nlags > k_ar.
        if nlags - self.k_ar <= 0:
            raise ValueError(
                "The whiteness test can only be used when nlags "
                "is larger than the number of lags included in "
                f"the model ({self.k_ar})."
            )
        statistic = 0
        u = np.asarray(self.resid)
        # acov_list[t] is the residual autocovariance matrix at lag t.
        acov_list = _compute_acov(u, nlags)
        cov0_inv = np.linalg.inv(acov_list[0])
        for t in range(1, nlags + 1):
            ct = acov_list[t]
            # trace(C_t' C_0^-1 C_t C_0^-1) -- one Portmanteau lag term
            to_add = np.trace(ct.T @ cov0_inv @ ct @ cov0_inv)
            if adjusted:
                # Ljung-Box style small-sample weighting
                to_add /= self.nobs - t
            statistic += to_add
        statistic *= self.nobs ** 2 if adjusted else self.nobs
        # neqs**2 restrictions for every lag beyond the model order
        df = self.neqs ** 2 * (nlags - self.k_ar)
        dist = stats.chi2(df)
        pvalue = dist.sf(statistic)
        crit_value = dist.ppf(1 - signif)
        return WhitenessTestResults(
            statistic, crit_value, pvalue, df, signif, nlags, adjusted
        )
    def plot_acorr(self, nlags=10, resid=True, linewidth=8):
        r"""
        Plot autocorrelation of sample (endog) or residuals

        Sample (Y) or Residual autocorrelations are plotted together with the
        standard :math:`2 / \sqrt{T}` bounds.

        Parameters
        ----------
        nlags : int
            number of lags to display (excluding 0)
        resid : bool
            If True, then the autocorrelation of the residuals is plotted
            If False, then the autocorrelation of endog is plotted.
        linewidth : int
            width of vertical bars

        Returns
        -------
        Figure
            Figure instance containing the plot.
        """
        if resid:
            acorrs = self.resid_acorr(nlags)
        else:
            acorrs = self.sample_acorr(nlags)
        # +/- 2/sqrt(T): approximate 95% bounds under the white-noise null
        bound = 2 / np.sqrt(self.nobs)
        # acorrs[0] is the lag-0 matrix; only lags 1..nlags are displayed
        fig = plotting.plot_full_acorr(
            acorrs[1:],
            xlabel=np.arange(1, nlags + 1),
            err_bound=bound,
            linewidth=linewidth,
        )
        # NOTE(review): the title always mentions "residuals" even when
        # resid=False plots the endog autocorrelations -- confirm intent.
        fig.suptitle(r"ACF plots for residuals with $2 / \sqrt{T}$ bounds ")
        return fig
    def test_normality(self, signif=0.05):
        """
        Test assumption of normal-distributed errors using Jarque-Bera-style
        omnibus Chi^2 test.

        Parameters
        ----------
        signif : float
            Test significance level.

        Returns
        -------
        result : NormalityTestResults

        Notes
        -----
        H0 (null) : data are generated by a Gaussian-distributed process
        """
        # Delegates to the module-level test_normality helper (same name).
        return test_normality(self, signif=signif)
    @cache_readonly
    def detomega(self):
        r"""
        Return determinant of white noise covariance with degrees of freedom
        correction:

        .. math::

            \hat \Omega = \frac{T}{T - Kp - 1} \hat \Omega_{\mathrm{MLE}}
        """
        # sigma_u is the df-adjusted residual covariance estimator.
        return np.linalg.det(self.sigma_u)
    @cache_readonly
    def info_criteria(self):
        """Information criteria (aic, bic, hqic, fpe) for lag-order selection.

        Returns a dict keyed by criterion name; see Lütkepohl pp. 146-150.
        """
        nobs = self.nobs
        neqs = self.neqs
        lag_order = self.k_ar
        # number of estimated coefficients: AR blocks plus exogenous terms
        free_params = lag_order * neqs ** 2 + neqs * self.k_exog
        if self.df_resid:
            ld = logdet_symm(self.sigma_u_mle)
        else:
            # saturated model: no residual degrees of freedom left
            ld = -np.inf
        # See Lütkepohl pp. 146-150
        aic = ld + (2.0 / nobs) * free_params
        bic = ld + (np.log(nobs) / nobs) * free_params
        hqic = ld + (2.0 * np.log(np.log(nobs)) / nobs) * free_params
        if self.df_resid:
            fpe = ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
        else:
            fpe = np.inf
        return {"aic": aic, "bic": bic, "hqic": hqic, "fpe": fpe}
    @property
    def aic(self):
        """Akaike information criterion (from ``info_criteria``)."""
        return self.info_criteria["aic"]
    @property
    def fpe(self):
        """Final Prediction Error (FPE)

        Lütkepohl p. 147, see info_criteria
        """
        return self.info_criteria["fpe"]
    @property
    def hqic(self):
        """Hannan-Quinn criterion (from ``info_criteria``)."""
        return self.info_criteria["hqic"]
    @property
    def bic(self):
        """Bayesian a.k.a. Schwarz info criterion (from ``info_criteria``)."""
        return self.info_criteria["bic"]
    @cache_readonly
    def roots(self):
        """
        The roots of the VAR process are the solution to
        (I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.

        Note that the inverse roots are returned, and stability requires that
        the roots lie outside the unit circle.
        """
        neqs = self.neqs
        k_ar = self.k_ar
        p = neqs * k_ar
        # Build the (neqs*k_ar) x (neqs*k_ar) companion-form matrix.
        arr = np.zeros((p, p))
        arr[:neqs, :] = np.column_stack(self.coefs)
        arr[neqs:, :-neqs] = np.eye(p - neqs)
        # Inverse eigenvalues of the companion matrix are the process roots.
        roots = np.linalg.eig(arr)[0] ** -1
        idx = np.argsort(np.abs(roots))[::-1]  # sort by reverse modulus
        return roots[idx]
class VARResultsWrapper(wrap.ResultsWrapper):
    """Wrapper attaching pandas metadata (row/column labels) to VARResults.

    Each entry maps an attribute/method name to the wrapping rule used for
    its output.
    """
    _attrs = {
        "bse": "columns_eq",
        "cov_params": "cov",
        "params": "columns_eq",
        "pvalues": "columns_eq",
        "tvalues": "columns_eq",
        "sigma_u": "cov_eq",
        "sigma_u_mle": "cov_eq",
        "stderr": "columns_eq",
    }
    _wrap_attrs = wrap.union_dicts(
        TimeSeriesResultsWrapper._wrap_attrs, _attrs
    )
    _methods = {"conf_int": "multivariate_confint"}
    _wrap_methods = wrap.union_dicts(
        TimeSeriesResultsWrapper._wrap_methods, _methods
    )
# Attach the wrapping machinery to the results class.
wrap.populate_wrapper(VARResultsWrapper, VARResults)  # noqa:E305
class FEVD:
    """
    Compute and plot Forecast error variance decomposition and asymptotic
    standard errors
    """
    def __init__(self, model, P=None, periods=None):
        # NOTE(review): periods=None propagates into slicing/range below and
        # into summary(); callers appear expected to pass an int -- confirm.
        self.periods = periods
        self.model = model
        self.neqs = model.neqs
        self.names = model.model.endog_names
        # Orthogonalized impulse responses with decomposition matrix P.
        self.irfobj = model.irf(var_decomp=P, periods=periods)
        self.orth_irfs = self.irfobj.orth_irfs
        # cumulative impulse responses
        irfs = (self.orth_irfs[:periods] ** 2).cumsum(axis=0)
        rng = lrange(self.neqs)
        # diagonal of the forecast MSE matrices per horizon
        mse = self.model.mse(periods)[:, rng, rng]
        # lag x equation x component
        fevd = np.empty_like(irfs)
        for i in range(periods):
            fevd[i] = (irfs[i].T / mse[i]).T
        # switch to equation x lag x component
        self.decomp = fevd.swapaxes(0, 1)
    def summary(self):
        """Print the decomposition table for every equation."""
        buf = StringIO()
        rng = lrange(self.periods)
        for i in range(self.neqs):
            ppm = output.pprint_matrix(self.decomp[i], rng, self.names)
            buf.write("FEVD for %s\n" % self.names[i])
            buf.write(ppm + "\n")
        print(buf.getvalue())
    def cov(self):
        """Compute asymptotic standard errors

        Returns
        -------
        """
        # Not implemented; kept for API symmetry with other result objects.
        raise NotImplementedError
    def plot(self, periods=None, figsize=(10, 10), **plot_kwds):
        """Plot graphical display of FEVD

        Parameters
        ----------
        periods : int, default None
            Defaults to number originally specified. Can be at most that number
        """
        import matplotlib.pyplot as plt
        k = self.neqs
        periods = periods or self.periods
        fig, axes = plt.subplots(nrows=k, figsize=figsize)
        fig.suptitle("Forecast error variance decomposition (FEVD)")
        # grayscale shades, one per component
        colors = [str(c) for c in np.arange(k, dtype=float) / k]
        ticks = np.arange(periods)
        # cumulative shares along the component axis -> stacked-bar limits
        limits = self.decomp.cumsum(2)
        ax = axes[0]
        for i in range(k):
            ax = axes[i]
            this_limits = limits[i].T
            handles = []
            for j in range(k):
                # each component's band is the slice between cumsum levels
                lower = this_limits[j - 1] if j > 0 else 0
                upper = this_limits[j]
                handle = ax.bar(
                    ticks,
                    upper - lower,
                    bottom=lower,
                    color=colors[j],
                    label=self.names[j],
                    **plot_kwds,
                )
                handles.append(handle)
            ax.set_title(self.names[i])
        # just use the last axis to get handles for plotting
        handles, labels = ax.get_legend_handles_labels()
        fig.legend(handles, labels, loc="upper right")
        plotting.adjust_subplots(right=0.85)
        return fig
# -------------------------------------------------------------------------------
def _compute_acov(x, nlags=1):
x = x - x.mean(0)
result = []
for lag in range(nlags + 1):
if lag > 0:
r = np.dot(x[lag:].T, x[:-lag])
else:
r = np.dot(x.T, x)
result.append(r)
return np.array(result) / len(x)
def _acovs_to_acorrs(acovs):
sd = np.sqrt(np.diag(acovs[0]))
return acovs / np.outer(sd, sd)
| bsd-3-clause | 6b435c8ff8392b5b7bfe12498034f52f | 29.800321 | 103 | 0.543454 | 3.720083 | false | false | false | false |
statsmodels/statsmodels | statsmodels/distributions/copula/depfunc_ev.py | 3 | 9244 | # -*- coding: utf-8 -*-
""" Pickand's dependence functions as generators for EV-copulas
Created on Wed Jan 27 14:33:40 2021
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar, approx_hess
class PickandDependence:
    """Base class for Pickands dependence functions A(t) of bivariate
    extreme-value copulas.

    Subclasses implement ``evaluate`` and may override the numerical
    derivatives with analytic expressions.
    """
    def __call__(self, *args, **kwargs):
        # Instances are callable as A(t, *params).
        return self.evaluate(*args, **kwargs)
    def evaluate(self, t, *args):
        # A(t) for t in [0, 1]; must be provided by subclasses.
        raise NotImplementedError
    def deriv(self, t, *args):
        """First derivative of the dependence function

        implemented through numerical differentiation
        """
        # NOTE(review): *args is not forwarded to evaluate here; subclasses
        # with shape parameters override deriv -- confirm intended.
        t = np.atleast_1d(t)
        return _approx_fprime_cs_scalar(t, self.evaluate)
    def deriv2(self, t, *args):
        """Second derivative of the dependence function

        implemented through numerical differentiation
        """
        if np.size(t) == 1:
            d2 = approx_hess([t], self.evaluate, args=args)[0]
        else:
            # elementwise scalar Hessians for vector-valued t
            d2 = np.array([approx_hess([ti], self.evaluate, args=args)[0, 0]
                           for ti in t])
        return d2
class AsymLogistic(PickandDependence):
    '''asymmetric logistic model of Tawn 1988

    special case: a1=a2=1 : Gumbel

    restrictions:
     - theta in (0,1]
     - a1, a2 in [0,1]
    '''
    # number of shape parameters (a1, a2, theta)
    k_args = 3
    def _check_args(self, a1, a2, theta):
        # scalar validity check; not called by evaluate (see comment there)
        condth = (theta > 0) and (theta <= 1)
        conda1 = (a1 >= 0) and (a1 <= 1)
        conda2 = (a2 >= 0) and (a2 <= 1)
        return condth and conda1 and conda2
    def evaluate(self, t, a1, a2, theta):
        # validation intentionally disabled to keep evaluate vectorized:
        # if not np.all(_check_args(a1, a2, theta)):
        #     raise ValueError('invalid args')
        transf = (1 - a2) * (1-t)
        transf += (1 - a1) * t
        transf += ((a1 * t)**(1./theta) + (a2 * (1-t))**(1./theta))**theta
        return transf
    def deriv(self, t, a1, a2, theta):
        """Analytic first derivative dA/dt."""
        b = theta
        d1 = ((a1 * (a1 * t)**(1/b - 1) - a2 * (a2 * (1 - t))**(1/b - 1)) *
              ((a1 * t)**(1/b) + (a2 * (1 - t))**(1/b))**(b - 1) - a1 + a2)
        return d1
    def deriv2(self, t, a1, a2, theta):
        """Analytic second derivative d2A/dt2."""
        b = theta
        d2 = ((1 - b) * (a1 * t)**(1/b) * (a2 * (1 - t))**(1/b) *
              ((a1 * t)**(1/b) + (a2 * (1 - t))**(1/b))**(b - 2)
              )/(b * (1 - t)**2 * t**2)
        return d2
# module-level instance used by the copula classes
transform_tawn = AsymLogistic()
class AsymNegLogistic(PickandDependence):
    '''asymmetric negative logistic model of Joe 1990

    special case: a1=a2=1 : symmetric negative logistic of Galambos 1978

    restrictions:
     - theta in (0,inf)
     - a1, a2 in (0,1]
    '''
    # number of shape parameters (a1, a2, theta)
    k_args = 3
    def _check_args(self, a1, a2, theta):
        condth = (theta > 0)
        conda1 = (a1 > 0) and (a1 <= 1)
        conda2 = (a2 > 0) and (a2 <= 1)
        return condth and conda1 and conda2
    def evaluate(self, t, a1, a2, theta):
        # validation intentionally disabled:
        # if not np.all(self._check_args(a1, a2, theta)):
        #     raise ValueError('invalid args')
        # NOTE(review): a1/a2 are swapped in all three methods, presumably
        # to match the t vs (1 - t) convention of the reference -- confirm.
        a1, a2 = a2, a1
        transf = 1 - ((a1 * (1-t))**(-1./theta) +
                      (a2 * t)**(-1./theta))**(-theta)
        return transf
    def deriv(self, t, a1, a2, theta):
        """Analytic first derivative dA/dt."""
        a1, a2 = a2, a1
        m1 = -1 / theta
        m2 = m1 - 1
        # (a1^(-1/θ) (1 - t)^(-1/θ - 1) - a2^(-1/θ) t^(-1/θ - 1))*
        # (a1^(-1/θ) (1 - t)^(-1/θ) + (a2 t)^(-1/θ))^(-θ - 1)
        d1 = (a1**m1 * (1 - t)**m2 - a2**m1 * t**m2) * (
            (a1 * (1 - t))**m1 + (a2 * t)**m1)**(-theta - 1)
        return d1
    def deriv2(self, t, a1, a2, theta):
        """Analytic second derivative d2A/dt2."""
        b = theta
        a1, a2 = a2, a1
        a1tp = (a1 * (1 - t))**(1/b)
        a2tp = (a2 * t)**(1/b)
        a1tn = (a1 * (1 - t))**(-1/b)
        a2tn = (a2 * t)**(-1/b)
        t1 = (b + 1) * a2tp * a1tp * (a1tn + a2tn)**(-b)
        t2 = b * (1 - t)**2 * t**2 * (a1tp + a2tp)**2
        d2 = t1 / t2
        return d2
# module-level instance used by the copula classes
transform_joe = AsymNegLogistic()
class AsymMixed(PickandDependence):
    '''asymmetric mixed model of Tawn 1988

    special case: k=0, theta in [0,1] : symmetric mixed model of
        Tiago de Oliveira 1980

    restrictions:
     - theta > 0
     - theta + 3*k > 0
     - theta + k <= 1
     - theta + 2*k <= 1
    '''
    # number of shape parameters (theta, k)
    k_args = 2
    def _check_args(self, theta, k):
        # NOTE(review): docstring says theta > 0 but the check allows
        # theta == 0; `&` on bools behaves like `and` here -- confirm.
        condth = (theta >= 0)
        cond1 = (theta + 3*k > 0) and (theta + k <= 1) and (theta + 2*k <= 1)
        return condth & cond1
    def evaluate(self, t, theta, k):
        # cubic polynomial dependence function
        transf = 1 - (theta + k) * t + theta * t*t + k * t**3
        return transf
    def deriv(self, t, theta, k):
        """Analytic first derivative dA/dt."""
        d_dt = - (theta + k) + 2 * theta * t + 3 * k * t**2
        return d_dt
    def deriv2(self, t, theta, k):
        """Analytic second derivative d2A/dt2."""
        d2_dt2 = 2 * theta + 6 * k * t
        return d2_dt2
# backwards compatibility for now
transform_tawn2 = AsymMixed()
class AsymBiLogistic(PickandDependence):
    '''bilogistic model of Coles and Tawn 1994, Joe, Smith and Weissman 1992

    restrictions:
     - (beta, delta) in (0,1)^2 or
     - (beta, delta) in (-inf,0)^2

    not vectorized because of numerical integration
    '''
    # number of shape parameters (beta, delta)
    k_args = 2
    def _check_args(self, beta, delta):
        # NOTE(review): cond1 allows beta == 1 / delta == 1 although the
        # docstring states the open square (0,1)^2 -- confirm.
        cond1 = (beta > 0) and (beta <= 1) and (delta > 0) and (delta <= 1)
        cond2 = (beta < 0) and (delta < 0)
        return cond1 | cond2
    def evaluate(self, t, beta, delta):
        # validation intentionally disabled:
        # if not np.all(_check_args(beta, delta)):
        #     raise ValueError('invalid args')
        # A(t) = integral_0^1 max(term1(w), term2(w)) dw, evaluated
        # numerically; this is why the method accepts only scalar t.
        def _integrant(w):
            term1 = (1 - beta) * np.power(w, -beta) * (1-t)
            term2 = (1 - delta) * np.power(1-w, -delta) * t
            return np.maximum(term1, term2)
        from scipy.integrate import quad
        transf = quad(_integrant, 0, 1)[0]
        return transf
# module-level instance used by the copula classes
transform_bilogistic = AsymBiLogistic()
class HR(PickandDependence):
    '''model of Huesler Reiss 1989

    special case: a1=a2=1 : symmetric negative logistic of Galambos 1978

    restrictions:
     - lambda in (0,inf)
    '''
    # number of shape parameters (lamda)
    k_args = 1
    def _check_args(self, lamda):
        cond = (lamda > 0)
        return cond
    def evaluate(self, t, lamda):
        # validation intentionally disabled:
        # if not np.all(self._check_args(lamda)):
        #     raise ValueError('invalid args')
        term = np.log((1. - t) / t) * 0.5 / lamda
        from scipy.stats import norm
        # use special if I want to avoid stats import
        # norm._cdf is the private ufunc version of the standard normal cdf
        transf = ((1 - t) * norm._cdf(lamda + term) +
                  t * norm._cdf(lamda - term))
        return transf
    def _derivs(self, t, lamda, order=(1, 2)):
        """Analytic first/second derivative(s) of A(t).

        ``order`` may be 1, 2 or a sequence containing both; internally -1
        encodes "both".
        """
        if not isinstance(order, (int, np.integer)):
            if (1 in order) and (2 in order):
                order = -1
            else:
                raise ValueError("order should be 1, 2, or (1,2)")
        # NOTE(review): an int order other than 1/2/-1 silently returns
        # None (falls through all branches) -- confirm acceptable.
        dn = 1 / np.sqrt(2 * np.pi)
        a = lamda
        # g(t) = log((1-t)/t) / (2a) and its first two derivatives
        g = np.log((1. - t) / t) * 0.5 / a
        gd1 = 1 / (2 * a * (t - 1) * t)
        gd2 = (0.5 - t) / (a * ((1 - t) * t)**2)
        # f = stats.norm.cdf(t)
        # fd1 = np.exp(-t**2 / 2) / sqrt(2 * np.pi)  # stats.norm.pdf(t)
        # fd2 = fd1 * t
        # cdf/pdf/pdf' evaluated at a + g(t) ...
        tp = a + g
        fp = stats.norm.cdf(tp)
        fd1p = np.exp(-tp**2 / 2) * dn  # stats.norm.pdf(t)
        fd2p = -fd1p * tp
        # ... and at a - g(t)
        tn = a - g
        fn = stats.norm.cdf(tn)
        fd1n = np.exp(-tn**2 / 2) * dn  # stats.norm.pdf(t)
        fd2n = -fd1n * tn
        if order in (1, -1):
            # d1 = g'(t) (-t f'(a - g(t)) - (t - 1) f'(a + g(t))) + f(a - g(t))
            #      - f(a + g(t))
            d1 = gd1 * (-t * fd1n - (t - 1) * fd1p) + fn - fp
        if order in (2, -1):
            # d2 = g'(t)^2 (t f''(a - g(t)) - (t - 1) f''(a + g(t))) +
            #      (-(t - 1) g''(t) - 2 g'(t)) f'(a + g(t)) -
            #      (t g''(t) + 2 g'(t)) f'(a - g(t))
            d2 = (gd1**2 * (t * fd2n - (t - 1) * fd2p) +
                  (-(t - 1) * gd2 - 2 * gd1) * fd1p -
                  (t * gd2 + 2 * gd1) * fd1n
                  )
        if order == 1:
            return d1
        elif order == 2:
            return d2
        elif order == -1:
            return (d1, d2)
    def deriv(self, t, lamda):
        """Analytic first derivative dA/dt."""
        return self._derivs(t, lamda, 1)
    def deriv2(self, t, lamda):
        """Analytic second derivative d2A/dt2."""
        return self._derivs(t, lamda, 2)
# module-level instance used by the copula classes
transform_hr = HR()
# def transform_tev(t, rho, df):  (old function form, replaced by class)
class TEV(PickandDependence):
    '''t-EV model of Demarta and McNeil 2005

    restrictions:
     - rho in (-1,1)
     - x > 0
    '''
    # number of shape parameters (rho, df)
    k_args = 2
    def _check_args(self, rho, df):
        x = df  # alias, Genest and Segers use chi, copual package uses df
        cond1 = (x > 0)
        # NOTE(review): the docstring allows rho in (-1,1) but this check
        # requires rho > 0 -- confirm which restriction is intended.
        cond2 = (rho > 0) and (rho < 1)
        return cond1 and cond2
    def evaluate(self, t, rho, df):
        x = df  # alias, Genest and Segers use chi, copual package uses df
        # validation intentionally disabled (note the stray `self,` typo):
        # if not np.all(self, _check_args(rho, x)):
        #     raise ValueError('invalid args')
        from scipy.stats import t as stats_t
        # use special if I want to avoid stats import
        term1 = (np.power(t/(1.-t), 1./x) - rho)  # for t
        term2 = (np.power((1.-t)/t, 1./x) - rho)  # for 1-t
        term0 = np.sqrt(1. + x) / np.sqrt(1 - rho*rho)
        z1 = term0 * term1
        z2 = term0 * term2
        # stats_t._cdf is the private ufunc version of the t cdf with df x+1
        transf = t * stats_t._cdf(z1, x+1) + (1 - t) * stats_t._cdf(z2, x+1)
        return transf
# module-level instance used by the copula classes
transform_tev = TEV()
| bsd-3-clause | f43df8d2b50f2b45f688608ed92ba8c3 | 27.072948 | 79 | 0.484517 | 2.862101 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/kombu/tests/utilities/test_encoding.py | 2 | 2306 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
import sys
from contextlib import contextmanager
from mock import patch
from nose import SkipTest
from kombu.utils.encoding import safe_str
from kombu.tests.utils import TestCase
@contextmanager
def clean_encoding():
    """Yield a freshly imported ``kombu.utils.encoding`` module.

    Any cached module is removed first so the import side effects run
    again, and the previously cached module is restored afterwards --
    even when the managed block raises -- so state does not leak into
    other tests.
    """
    old_encoding = sys.modules.pop('kombu.utils.encoding', None)
    import kombu.utils.encoding
    try:
        yield kombu.utils.encoding
    finally:
        # BUG FIX: restoration previously ran only on normal exit; a test
        # failure inside the `with` block left the fresh module cached.
        if old_encoding:
            sys.modules['kombu.utils.encoding'] = old_encoding
class test_default_encoding(TestCase):
    """Tests for kombu.utils.encoding.default_encoding."""
    @patch('sys.getfilesystemencoding')
    def test_default(self, getfilesystemencoding):
        # Force a known filesystem encoding and check what a fresh import
        # of the encoding module reports.
        getfilesystemencoding.return_value = 'ascii'
        with clean_encoding() as encoding:
            enc = encoding.default_encoding()
            if sys.platform.startswith('java'):
                # Jython always reports utf-8 regardless of the filesystem.
                self.assertEqual(enc, 'utf-8')
            else:
                self.assertEqual(enc, 'ascii')
                getfilesystemencoding.assert_called_with()
class test_encoding_utils(TestCase):
    """Python 2-only tests for str/bytes conversion helpers."""
    def setUp(self):
        if sys.version_info >= (3, 0):
            # these helpers behave differently (and are tested elsewhere)
            # on Python 3
            raise SkipTest('not relevant on py3k')
    def test_str_to_bytes(self):
        with clean_encoding() as e:
            self.assertIsInstance(e.str_to_bytes(u'foobar'), str)
            self.assertIsInstance(e.str_to_bytes('foobar'), str)
    def test_from_utf8(self):
        with clean_encoding() as e:
            self.assertIsInstance(e.from_utf8(u'foobar'), str)
    def test_default_encode(self):
        with clean_encoding() as e:
            self.assertTrue(e.default_encode('foo'))
class test_safe_str(TestCase):
    """Tests for kombu.utils.encoding.safe_str."""
    def test_when_str(self):
        self.assertEqual(safe_str('foo'), 'foo')
    def test_when_unicode(self):
        self.assertIsInstance(safe_str(u'foo'), str)
    def test_when_containing_high_chars(self):
        # non-ASCII input must still come back as a native str
        s = u'The quiæk fåx jømps øver the lazy dåg'
        res = safe_str(s)
        self.assertIsInstance(res, str)
    def test_when_not_string(self):
        # non-string objects fall back to repr()
        o = object()
        self.assertEqual(safe_str(o), repr(o))
    def test_when_unrepresentable(self):
        # even a raising __repr__ must not propagate out of safe_str
        class O(object):
            def __repr__(self):
                raise KeyError('foo')
        self.assertIn('<Unrepresentable', safe_str(O()))
| bsd-3-clause | e4072f5ae41d1351e334c923e3b06b91 | 26.722892 | 65 | 0.632768 | 3.73539 | false | true | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/security/certificate.py | 1 | 2549 | # -*- coding: utf-8 -*-
"""
celery.security.certificate
~~~~~~~~~~~~~~~~~~~~~~~~~~~
X.509 certificates.
"""
from __future__ import absolute_import
from __future__ import with_statement
import glob
import os
from celery.exceptions import SecurityError
from .utils import crypto, reraise_errors
class Certificate(object):
    """X.509 certificate parsed from PEM text via pyOpenSSL."""
    def __init__(self, cert):
        # crypto is None when pyOpenSSL is missing; fail loudly here.
        assert crypto is not None
        with reraise_errors('Invalid certificate: %r'):
            self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
    def has_expired(self):
        """Check if the certificate has expired."""
        return self._cert.has_expired()
    def get_serial_number(self):
        """Returns the certificates serial number."""
        return self._cert.get_serial_number()
    def get_issuer(self):
        """Returns issuer (CA) as a string"""
        # join the values of the issuer's DN components (CN, O, ...)
        return ' '.join(x[1] for x in
                        self._cert.get_issuer().get_components())
    def get_id(self):
        """Serial number/issuer pair uniquely identifies a certificate"""
        return '%s %s' % (self.get_issuer(), self.get_serial_number())
    def verify(self, data, signature, digest):
        """Verifies the signature for string containing data.

        Raises a SecurityError (via reraise_errors) on a bad signature.
        """
        with reraise_errors('Bad signature: %r'):
            crypto.verify(self._cert, signature, data, digest)
class CertStore(object):
    """Base class for certificate stores.

    Certificates are keyed by their unique id (issuer + serial number,
    see :meth:`Certificate.get_id`).
    """
    def __init__(self):
        self._certs = {}
    def itercerts(self):
        """an iterator over the certificates"""
        for c in self._certs.itervalues():
            yield c
    def __getitem__(self, id):
        """get certificate by id"""
        try:
            return self._certs[id]
        except KeyError:
            raise SecurityError('Unknown certificate: %r' % (id, ))
    def add_cert(self, cert):
        """Add a certificate to the store.

        :raises SecurityError: if a certificate with the same id already
            exists in the store.
        """
        cert_id = cert.get_id()
        if cert_id in self._certs:
            # BUG FIX: the message previously interpolated the *builtin*
            # ``id`` function instead of the certificate's id.
            raise SecurityError('Duplicate certificate: %r' % (cert_id, ))
        self._certs[cert_id] = cert
class FSCertStore(CertStore):
    """File system certificate store.

    Loads every PEM file matching ``path`` (or all files inside it when
    ``path`` is a directory) and rejects expired certificates.
    """
    def __init__(self, path):
        CertStore.__init__(self)
        if os.path.isdir(path):
            # expand a directory into a glob over its contents
            path = os.path.join(path, '*')
        for p in glob.glob(path):
            with open(p) as f:
                cert = Certificate(f.read())
                if cert.has_expired():
                    raise SecurityError(
                        'Expired certificate: %r' % (cert.get_id(), ))
                self.add_cert(cert)
| bsd-3-clause | f2815940c0c71685ac66912e1cb9e9a1 | 27.965909 | 75 | 0.570027 | 4.151466 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/djcelery/managers.py | 3 | 7647 | from __future__ import absolute_import
import warnings
from functools import wraps
from itertools import count
from django.db import transaction, connection
try:
from django.db import connections, router
except ImportError: # pre-Django 1.2
connections = router = None # noqa
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from celery.utils.timeutils import maybe_timedelta
from .utils import now
class TxIsolationWarning(UserWarning):
    """Warning emitted when the database transaction isolation level
    (e.g. MySQL REPEATABLE-READ) may yield stale task-result polls."""
    pass
def transaction_retry(max_retries=1):
    """Decorator for methods doing database operations.

    If the database operation fails, it will retry the operation
    at most ``max_retries`` times.

    Callers may override the retry count per call with the
    ``exception_retry_count`` keyword argument, which is consumed here
    and not forwarded to the wrapped function.
    """
    def _outer(fun):
        @wraps(fun)
        def _inner(*args, **kwargs):
            _max_retries = kwargs.pop("exception_retry_count", max_retries)
            for retries in count(0):
                try:
                    return fun(*args, **kwargs)
                except Exception:   # pragma: no cover
                    # Depending on the database backend used we can experience
                    # various exceptions. E.g. psycopg2 raises an exception
                    # if some operation breaks the transaction, so saving
                    # the task result won't be possible until we rollback
                    # the transaction.
                    if retries >= _max_retries:
                        raise
                    transaction.rollback_unless_managed()
        return _inner
    return _outer
def update_model_with_dict(obj, fields):
    """Set each key/value in ``fields`` as an attribute on ``obj``,
    save the instance and return it."""
    for name, value in fields.items():
        setattr(obj, name, value)
    obj.save()
    return obj
class ExtendedQuerySet(QuerySet):
    """QuerySet with an ``update_or_create`` that updates existing rows."""
    def update_or_create(self, **kwargs):
        # get_or_create with the lookup kwargs; on a hit, apply `defaults`
        # (plus the lookups themselves) to the existing row.
        obj, created = self.get_or_create(**kwargs)
        if not created:
            fields = dict(kwargs.pop("defaults", {}))
            fields.update(kwargs)
            update_model_with_dict(obj, fields)
        return obj
class ExtendedManager(models.Manager):
    """Manager providing update_or_create plus multi-db helpers."""
    def get_query_set(self):
        return ExtendedQuerySet(self.model)
    def update_or_create(self, **kwargs):
        return self.get_query_set().update_or_create(**kwargs)
    def connection_for_write(self):
        # `connections`/`router` are None on pre-1.2 Django (see import
        # fallback at top of module) -- use the single default connection.
        if connections:
            return connections[router.db_for_write(self.model)]
        return connection
    def connection_for_read(self):
        if connections:
            return connections[self.db]
        return connection
    def current_engine(self):
        # DATABASES dict (Django >= 1.2) vs legacy DATABASE_ENGINE setting
        try:
            return settings.DATABASES[self.db]["ENGINE"]
        except AttributeError:
            return settings.DATABASE_ENGINE
class ResultManager(ExtendedManager):
    """Shared expiry logic for task/taskset result models."""
    def get_all_expired(self, expires):
        """Get all expired task results."""
        return self.filter(date_done__lt=now() - maybe_timedelta(expires))
    def delete_expired(self, expires):
        """Delete all expired taskset results."""
        # Mark rows hidden via the ORM first, then bulk-delete with raw SQL
        # (table name comes from model metadata, the value is parameterized).
        self.get_all_expired(expires).update(hidden=True)
        cursor = self.connection_for_write().cursor()
        cursor.execute("DELETE FROM %s WHERE hidden=%%s" % (
                        self.model._meta.db_table, ), (True, ))
        transaction.commit_unless_managed()
class PeriodicTaskManager(ExtendedManager):
    """Manager for periodic-task schedule entries."""
    def enabled(self):
        # only schedule entries that are currently switched on
        return self.filter(enabled=True)
class TaskManager(ResultManager):
    """Manager for :class:`celery.models.Task` models."""
    # last task id that missed a lookup; used to warn once per repeated miss
    _last_id = None
    def get_task(self, task_id):
        """Get task meta for task by ``task_id``.

        Returns an unsaved placeholder instance when no row exists yet.

        :keyword exception_retry_count: How many times to retry by
            transaction rollback on exception. This could theoretically
            happen in a race condition if another worker is trying to
            create the same task. The default is to retry once.
        """
        try:
            return self.get(task_id=task_id)
        except self.model.DoesNotExist:
            if self._last_id == task_id:
                # second consecutive miss for the same id: likely a stale
                # repeatable-read snapshot
                self.warn_if_repeatable_read()
            self._last_id = task_id
            return self.model(task_id=task_id)
    @transaction_retry(max_retries=2)
    def store_result(self, task_id, result, status, traceback=None,
                     children=None):
        """Store the result and status of a task.

        :param task_id: task id
        :param result: The return value of the task, or an exception
            instance raised by the task.
        :param status: Task status. See
            :meth:`celery.result.AsyncResult.get_status` for a list of
            possible status values.
        :keyword traceback: The traceback at the point of exception (if the
            task failed).
        :keyword children: List of serialized results of subtasks
            of this task.
        :keyword exception_retry_count: How many times to retry by
            transaction rollback on exception. This could theoretically
            happen in a race condition if another worker is trying to
            create the same task. The default is to retry twice.
        """
        return self.update_or_create(task_id=task_id,
                                     defaults={"status": status,
                                               "result": result,
                                               "traceback": traceback,
                                               "meta": {"children": children}})
    def warn_if_repeatable_read(self):
        # MySQL REPEATABLE-READ pins a snapshot for the whole transaction,
        # so polling in a loop without committing returns stale results.
        if "mysql" in self.current_engine().lower():
            cursor = self.connection_for_read().cursor()
            if cursor.execute("SELECT @@tx_isolation"):
                isolation = cursor.fetchone()[0]
                if isolation == 'REPEATABLE-READ':
                    warnings.warn(TxIsolationWarning(
                        "Polling results with transaction isolation level "
                        "repeatable-read within the same transaction "
                        "may give outdated results. Be sure to commit the "
                        "transaction for each poll iteration."))
class TaskSetManager(ResultManager):
    """Manager for :class:`celery.models.TaskSet` models."""
    def restore_taskset(self, taskset_id):
        """Get the async result instance by taskset id.

        Returns None when no saved result exists.
        """
        try:
            return self.get(taskset_id=taskset_id)
        except self.model.DoesNotExist:
            pass
    def delete_taskset(self, taskset_id):
        """Delete a saved taskset result."""
        s = self.restore_taskset(taskset_id)
        if s:
            s.delete()
    @transaction_retry(max_retries=2)
    def store_result(self, taskset_id, result):
        """Store the async result instance of a taskset.

        :param taskset_id: task set id
        :param result: The return value of the taskset
        """
        return self.update_or_create(taskset_id=taskset_id,
                                     defaults={"result": result})
class TaskStateManager(ExtendedManager):
    """Manager for monitor task-state snapshot rows."""
    def active(self):
        # rows not yet soft-deleted
        return self.filter(hidden=False)
    def expired(self, states, expires, nowfun=now):
        """Rows in any of ``states`` older than ``expires``."""
        return self.filter(state__in=states,
                           tstamp__lte=nowfun() - maybe_timedelta(expires))
    def expire_by_states(self, states, expires):
        """Soft-delete (hide) expired rows; no-op when expires is None."""
        if expires is not None:
            return self.expired(states, expires).update(hidden=True)
    def purge(self):
        """Hard-delete all soft-deleted (hidden) rows via raw SQL."""
        cursor = self.connection_for_write().cursor()
        cursor.execute("DELETE FROM %s WHERE hidden=%%s" % (
            self.model._meta.db_table, ), (True, ))
        transaction.commit_unless_managed()
| bsd-3-clause | 5c03cadbbc56791fe1b72e039b923f0c | 31.540426 | 79 | 0.596705 | 4.464098 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/billiard/heap.py | 1 | 8005 | #
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import bisect
import mmap
import os
import sys
import threading
import itertools
from ._ext import _billiard, win32
from .util import Finalize, info, get_temp_dir
from .forking import assert_spawning, ForkingPickler
__all__ = ['BufferWrapper']
# sys.maxsize exists on Python 2.6+/3.x; fall back to sys.maxint on
# older Python 2.
try:
    maxsize = sys.maxsize
except AttributeError:
    maxsize = sys.maxint
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
    class Arena(object):
        # Windows arenas are tagged anonymous mmaps; the tag name lets a
        # child process re-open the same mapping after (un)pickling.
        _counter = itertools.count()
        def __init__(self, size):
            self.size = size
            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # GetLastError() != 0 would mean the tag already existed
            assert win32.GetLastError() == 0, 'tagname already in use'
            self._state = (self.size, self.name)
        def __getstate__(self):
            # only picklable while spawning a child process
            assert_spawning(self)
            return self._state
        def __setstate__(self, state):
            self.size, self.name = self._state = state
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # the child must attach to the parent's existing mapping
            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
    class Arena(object):
        # POSIX arenas: anonymous mmap when forking (inherited by children),
        # otherwise an unlinked temp file so the fd can be pickled/sent.
        _counter = itertools.count()
        def __init__(self, size, fileno=-1):
            from .forking import _forking_is_enabled
            self.size = size
            self.fileno = fileno
            if fileno == -1 and not _forking_is_enabled:
                name = os.path.join(
                    get_temp_dir(),
                    'pym-%d-%d' % (os.getpid(), self._counter.next()))
                self.fileno = os.open(
                    name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0600)
                # unlink immediately: the mapping keeps the file alive
                os.unlink(name)
                os.ftruncate(self.fileno, size)
            self.buffer = mmap.mmap(self.fileno, self.size)
def reduce_arena(a):
    """Pickle helper reducing an Arena to ``(Arena, (size, fileno))``.

    Only arenas backed by a real file descriptor can be pickled; anonymous
    arenas (``fileno == -1``) exist only when forking is enabled and are
    shared with children by inheritance instead.
    """
    if a.fileno == -1:
        # BUG FIX: the adjacent string literals previously concatenated to
        # "...becauseforking..." -- add the missing space.
        raise ValueError('Arena is unpicklable because '
                         'forking was enabled when it was created')
    return Arena, (a.size, a.fileno)
ForkingPickler.register(Arena, reduce_arena)
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
    """Best-fit block allocator on top of one or more mmap Arenas.

    Free blocks are indexed three ways: by length (``_lengths`` +
    ``_len_to_seq`` for best-fit lookup) and by start/stop offsets
    (``_start_to_block``/``_stop_to_block``) so adjacent free blocks can
    be merged on free.
    """
    _alignment = 8
    def __init__(self, size=mmap.PAGESIZE):
        # pid recorded so a forked child reinitializes its own heap
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        self._size = size
        self._lengths = []
        self._len_to_seq = {}
        self._start_to_block = {}
        self._stop_to_block = {}
        self._allocated_blocks = set()
        self._arenas = []
        # list of pending blocks to free - see free() comment below
        self._pending_free_blocks = []
    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask
    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # no free block fits: grow by allocating a new arena
            # (arena sizes double each time to amortize mmap calls)
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            # best fit: smallest free block with length >= size
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
    def _free(self, block):
        # free location and try to merge with neighbours
        (arena, start, stop) = block
        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            # merge with the free block ending where this one starts
            start, _ = self._absorb(prev_block)
        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            # merge with the free block starting where this one ends
            _, stop = self._absorb(next_block)
        block = (arena, start, stop)
        length = stop - start
        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)
        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block
    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)
        return start, stop
    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held
        while 1:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._allocated_blocks.remove(block)
            self._free(block)
    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously sometimes later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under cPython it's atomic thanks
        # to the GIL).
        assert os.getpid() == self._lastpid
        if not self._lock.acquire(False):
            # can't aquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._free_pending_blocks()
                self._allocated_blocks.remove(block)
                self._free(block)
            finally:
                self._lock.release()
    def malloc(self, size):
        # return a block of right size (possibly rounded up)
        assert 0 <= size < maxsize
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        self._lock.acquire()
        self._free_pending_blocks()
        try:
            size = self._roundup(max(size, 1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                # return the unused tail of the block to the free lists
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block
        finally:
            self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited
#
class BufferWrapper(object):
    """Handle to a block allocated from the process-wide Heap.

    The block is returned to the heap automatically when the wrapper is
    garbage collected (via util.Finalize).
    """
    # single heap shared by all wrappers in this process
    _heap = Heap()
    def __init__(self, size):
        assert 0 <= size < maxsize
        block = BufferWrapper._heap.malloc(size)
        self._state = (block, size)
        Finalize(self, BufferWrapper._heap.free, args=(block,))
    def get_address(self):
        """Absolute memory address of the start of this block."""
        (arena, start, stop), size = self._state
        address, length = _billiard.address_of_buffer(arena.buffer)
        assert size <= length
        return address + start
    def get_size(self):
        """Requested (un-rounded) size of the block in bytes."""
        return self._state[1]
| bsd-3-clause | 776b83da6ee746794233c7e2fe6f6df9 | 30.515748 | 79 | 0.5604 | 3.904878 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/djcelery/models.py | 3 | 13390 | from __future__ import absolute_import
from datetime import datetime, timedelta
from time import time, mktime
from django.core.exceptions import MultipleObjectsReturned, ValidationError
from django.db import models
from django.db.models import signals
from django.utils.translation import ugettext_lazy as _
from celery import schedules
from celery import states
from celery.events.state import heartbeat_expires
from celery.utils.timeutils import timedelta_seconds
from . import managers
from .picklefield import PickledObjectField
from .utils import now
# (value, label) choices for task-state fields; label equals the raw state
TASK_STATE_CHOICES = zip(states.ALL_STATES, states.ALL_STATES)
class TaskMeta(models.Model):
    """Task result/status stored in the ``celery_taskmeta`` table."""
    task_id = models.CharField(_(u"task id"), max_length=255, unique=True)
    status = models.CharField(_(u"state"), max_length=50,
                              default=states.PENDING, choices=TASK_STATE_CHOICES)
    result = PickledObjectField(null=True, default=None, editable=False)
    date_done = models.DateTimeField(_(u"done at"), auto_now=True)
    traceback = models.TextField(_(u"traceback"), blank=True, null=True)
    # NOTE(review): presumably used to soft-hide rows from listings -- confirm
    hidden = models.BooleanField(editable=False, default=False, db_index=True)
    meta = PickledObjectField(_(u"meta"), null=True, default=None,
                              editable=False)
    objects = managers.TaskManager()
    class Meta:
        verbose_name = _(u"task state")
        verbose_name_plural = _(u"task states")
        db_table = "celery_taskmeta"
    def to_dict(self):
        """Return the task state as a plain dictionary."""
        return {"task_id": self.task_id,
                "status": self.status,
                "result": self.result,
                "date_done": self.date_done,
                "traceback": self.traceback,
                # ``meta`` may be None; treat it as an empty mapping
                "children": (self.meta or {}).get("children")}
    def __unicode__(self):
        return u"<Task: %s state=%s>" % (self.task_id, self.status)
class TaskSetMeta(models.Model):
    """TaskSet (group) result stored in the ``celery_tasksetmeta`` table."""
    taskset_id = models.CharField(_(u"group id"), max_length=255, unique=True)
    result = PickledObjectField()
    date_done = models.DateTimeField(_(u"created at"), auto_now=True)
    # NOTE(review): presumably used to soft-hide rows from listings -- confirm
    hidden = models.BooleanField(editable=False, default=False, db_index=True)
    objects = managers.TaskSetManager()
    class Meta:
        """Model meta-data."""
        verbose_name = _(u"saved group result")
        verbose_name_plural = _(u"saved group results")
        db_table = "celery_tasksetmeta"
    def to_dict(self):
        """Return the group result as a plain dictionary."""
        return {"taskset_id": self.taskset_id,
                "result": self.result,
                "date_done": self.date_done}
    def __unicode__(self):
        return u"<TaskSet: %s>" % (self.taskset_id)
# Choices for IntervalSchedule.period; each value is a valid keyword
# argument of datetime.timedelta (see IntervalSchedule.schedule).
PERIOD_CHOICES = (("days", _(u"Days")),
                  ("hours", _(u"Hours")),
                  ("minutes", _(u"Minutes")),
                  ("seconds", _(u"Seconds")),
                  ("microseconds", _(u"Microseconds")))
class IntervalSchedule(models.Model):
    """Schedule that fires every *every* *period* (e.g. every 10 seconds)."""
    every = models.IntegerField(_(u"every"), null=False)
    period = models.CharField(_(u"period"), max_length=24,
                              choices=PERIOD_CHOICES)
    class Meta:
        verbose_name = _(u"interval")
        verbose_name_plural = _(u"intervals")
    @property
    def schedule(self):
        """Return the equivalent :class:`celery.schedules.schedule`."""
        return schedules.schedule(timedelta(**{self.period: self.every}))
    @classmethod
    def from_schedule(cls, schedule, period="seconds"):
        """Get or build (unsaved) the row matching a celery schedule."""
        every = timedelta_seconds(schedule.run_every)
        try:
            return cls.objects.get(every=every, period=period)
        except cls.DoesNotExist:
            return cls(every=every, period=period)
        except MultipleObjectsReturned:
            # duplicate rows: drop them all and start from a fresh instance
            cls.objects.filter(every=every, period=period).delete()
            return cls(every=every, period=period)
    def __unicode__(self):
        if self.every == 1:
            # singular form, e.g. "every second"
            return _(u"every %(period)s") % {"period": self.period[:-1]}
        return _(u"every %(every)s %(period)s") % {"every": self.every,
                                                   "period": self.period}
class CrontabSchedule(models.Model):
    """Crontab-style schedule for :class:`PeriodicTask`.

    Each field holds one crontab expression fragment; ``"*"`` matches
    every value.
    """
    minute = models.CharField(_(u"minute"),
                              max_length=64,
                              default="*")
    hour = models.CharField(_(u"hour"),
                            max_length=64,
                            default="*")
    day_of_week = models.CharField(_(u"day of week"),
                                   max_length=64,
                                   default="*")
    day_of_month = models.CharField(_(u"day of month"),
                                    max_length=64,
                                    default="*")
    month_of_year = models.CharField(_(u"month of year"),
                                     max_length=64,
                                     default="*")
    class Meta:
        verbose_name = _(u"crontab")
        verbose_name_plural = _(u"crontabs")
    def __unicode__(self):
        # PEP 8 (E731): use a local ``def`` instead of assigning a lambda.
        # The historical ``f and X or Y`` semantics are kept on purpose:
        # both a falsy field and a value that collapses to "" after the
        # space-strip render as "*".
        def rfield(f):
            return f and str(f).replace(" ", "") or "*"
        return u"%s %s %s %s %s (m/h/d/dM/MY)" % (rfield(self.minute),
                                                  rfield(self.hour),
                                                  rfield(self.day_of_week),
                                                  rfield(self.day_of_month),
                                                  rfield(self.month_of_year))
    @property
    def schedule(self):
        """Return the equivalent :class:`celery.schedules.crontab`."""
        return schedules.crontab(minute=self.minute,
                                 hour=self.hour,
                                 day_of_week=self.day_of_week,
                                 day_of_month=self.day_of_month,
                                 month_of_year=self.month_of_year)
    @classmethod
    def from_schedule(cls, schedule):
        """Get or build (unsaved) the row matching a celery crontab."""
        spec = {'minute': schedule._orig_minute,
                'hour': schedule._orig_hour,
                'day_of_week': schedule._orig_day_of_week,
                'day_of_month': schedule._orig_day_of_month,
                'month_of_year': schedule._orig_month_of_year}
        try:
            return cls.objects.get(**spec)
        except cls.DoesNotExist:
            return cls(**spec)
        except MultipleObjectsReturned:
            # duplicate rows: drop them all and start from a fresh instance
            cls.objects.filter(**spec).delete()
            return cls(**spec)
class PeriodicTasks(models.Model):
    """Single-row helper table (``ident=1``) recording when the set of
    periodic tasks last changed, so schedulers can detect updates."""
    ident = models.SmallIntegerField(default=1, primary_key=True, unique=True)
    last_update = models.DateTimeField(null=False)
    objects = managers.ExtendedManager()
    @classmethod
    def changed(cls, instance, **kwargs):
        """Signal receiver: bump the change marker for *instance*."""
        if not instance.no_changes:
            cls.objects.update_or_create(ident=1,
                                         defaults={"last_update": now()})
    @classmethod
    def last_change(cls):
        """Return the time of the last change, or None if never recorded."""
        try:
            return cls.objects.get(ident=1).last_update
        except cls.DoesNotExist:
            pass
class PeriodicTask(models.Model):
    """A task to run periodically, on either an interval or a crontab
    schedule (exactly one of the two must be set)."""
    name = models.CharField(_(u"name"), max_length=200, unique=True,
                            help_text=_(u"Useful description"))
    task = models.CharField(_(u"task name"), max_length=200)
    interval = models.ForeignKey(IntervalSchedule, null=True, blank=True,
                                 verbose_name=_(u"interval"))
    crontab = models.ForeignKey(CrontabSchedule, null=True, blank=True,
                                verbose_name=_(u"crontab"),
                                help_text=_(u"Use one of interval/crontab"))
    args = models.TextField(_(u"Arguments"),
                            blank=True, default="[]",
                            help_text=_(u"JSON encoded positional arguments"))
    kwargs = models.TextField(_(u"Keyword arguments"),
                              blank=True, default="{}",
                              help_text=_("JSON encoded keyword arguments"))
    queue = models.CharField(_("queue"),
                             max_length=200, blank=True,
                             null=True, default=None,
                             help_text=_(u"Queue defined in CELERY_QUEUES"))
    exchange = models.CharField(_(u"exchange"),
                                max_length=200, blank=True,
                                null=True, default=None)
    routing_key = models.CharField(_(u"routing key"),
                                   max_length=200, blank=True,
                                   null=True, default=None)
    expires = models.DateTimeField(_(u"expires"),
                                   blank=True, null=True)
    enabled = models.BooleanField(_(u"enabled"), default=True)
    last_run_at = models.DateTimeField(
        auto_now=False, auto_now_add=False,
        editable=False, blank=True, null=True)
    total_run_count = models.PositiveIntegerField(default=0, editable=False)
    date_changed = models.DateTimeField(auto_now=True)
    description = models.TextField(_("description"), blank=True)
    objects = managers.PeriodicTaskManager()
    # when True, PeriodicTasks.changed() skips bumping the change marker
    no_changes = False
    class Meta:
        verbose_name = _(u"periodic task")
        verbose_name_plural = _(u"periodic tasks")
    def validate_unique(self, *args, **kwargs):
        """Enforce that exactly one of interval/crontab is set."""
        super(PeriodicTask, self).validate_unique(*args, **kwargs)
        if not self.interval and not self.crontab:
            raise ValidationError(
                {"interval": ["One of interval or crontab must be set."]})
        if self.interval and self.crontab:
            raise ValidationError(
                {"crontab": ["Only one of interval or crontab must be set"]})
    def save(self, *args, **kwargs):
        # normalize empty strings to None for routing fields
        self.exchange = self.exchange or None
        self.routing_key = self.routing_key or None
        self.queue = self.queue or None
        if not self.enabled:
            # disabled tasks forget their last run time
            self.last_run_at = None
        super(PeriodicTask, self).save(*args, **kwargs)
    def __unicode__(self):
        if self.interval:
            return u"%s: %s" % (self.name, unicode(self.interval))
        if self.crontab:
            return u"%s: %s" % (self.name, unicode(self.crontab))
        return u"%s: {no schedule}" % (self.name, )
    @property
    def schedule(self):
        """Return the celery schedule object for whichever field is set."""
        if self.interval:
            return self.interval.schedule
        if self.crontab:
            return self.crontab.schedule
# Keep PeriodicTasks.last_update in sync: saving or deleting a
# PeriodicTask bumps the change marker (see PeriodicTasks.changed).
signals.pre_delete.connect(PeriodicTasks.changed, sender=PeriodicTask)
signals.pre_save.connect(PeriodicTasks.changed, sender=PeriodicTask)
class WorkerState(models.Model):
    """Monitor snapshot of a worker node, keyed by hostname."""
    hostname = models.CharField(_(u"hostname"), max_length=255, unique=True)
    last_heartbeat = models.DateTimeField(_(u"last heartbeat"), null=True,
                                          db_index=True)
    objects = managers.ExtendedManager()
    class Meta:
        """Model meta-data."""
        verbose_name = _(u"worker")
        verbose_name_plural = _(u"workers")
        get_latest_by = "last_heartbeat"
        ordering = ["-last_heartbeat"]
    def __unicode__(self):
        return self.hostname
    def __repr__(self):
        return "<WorkerState: %s>" % (self.hostname, )
    def is_alive(self):
        """Return True if the last heartbeat has not yet expired."""
        if self.last_heartbeat:
            return time() < heartbeat_expires(self.heartbeat_timestamp)
        return False
    @property
    def heartbeat_timestamp(self):
        """Last heartbeat as a Unix timestamp (local-time epoch)."""
        return mktime(self.last_heartbeat.timetuple())
class TaskState(models.Model):
    """Monitor snapshot of a single task event, keyed by task UUID."""
    state = models.CharField(_(u"state"),
                             max_length=64,
                             choices=TASK_STATE_CHOICES, db_index=True)
    task_id = models.CharField(_(u"UUID"),
                               max_length=36, unique=True)
    name = models.CharField(_(u"name"),
                            max_length=200, null=True, db_index=True)
    tstamp = models.DateTimeField(_(u"event received at"), db_index=True)
    args = models.TextField(_(u"Arguments"), null=True)
    kwargs = models.TextField(_(u"Keyword arguments"), null=True)
    eta = models.DateTimeField(_(u"ETA"), null=True,
                               help_text=u"date to execute")
    expires = models.DateTimeField(_(u"expires"), null=True)
    result = models.TextField(_(u"result"), null=True)
    traceback = models.TextField(_(u"traceback"), null=True)
    runtime = models.FloatField(_(u"execution time"), null=True,
                                help_text=_(u"in seconds if task successful"))
    retries = models.IntegerField(_(u"number of retries"), default=0)
    worker = models.ForeignKey(WorkerState, null=True,
                               verbose_name=_("worker"))
    # NOTE(review): presumably used to soft-hide rows from listings -- confirm
    hidden = models.BooleanField(editable=False, default=False, db_index=True)
    objects = managers.TaskStateManager()
    class Meta:
        """Model meta-data."""
        verbose_name = _(u"task")
        verbose_name_plural = _(u"tasks")
        get_latest_by = "tstamp"
        ordering = ["-tstamp"]
    def save(self, *args, **kwargs):
        if self.eta is not None:
            # round the ETA to whole seconds; NOTE(review): mktime interprets
            # the timetuple in local time while utcfromtimestamp yields a
            # naive UTC datetime -- confirm the intended timezone handling
            self.eta = datetime.utcfromtimestamp(mktime(self.eta.timetuple()))
        super(TaskState, self).save(*args, **kwargs)
    def __unicode__(self):
        name = self.name or "UNKNOWN"
        s = u"%s %s %s" % (self.state.ljust(10),
                           self.task_id.ljust(36),
                           name)
        if self.eta:
            s += u" eta:%s" % (self.eta, )
        return s
    def __repr__(self):
        return "<TaskState: %s %s(%s) ts:%s>" % (self.state,
                                                 self.name or "UNKNOWN",
                                                 self.task_id,
                                                 self.tstamp)
| bsd-3-clause | 9ac436b05e425346b2b455f7c480b91a | 37.699422 | 78 | 0.558252 | 4.185683 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/backends/database/a805d4bd.py | 1 | 2292 | # -*- coding: utf-8 -*-
"""
celery.backends.database.a805d4bd
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module fixes a bug with pickling and relative imports in Python < 2.6.
The problem is with pickling an e.g. `exceptions.KeyError` instance.
As SQLAlchemy has its own `exceptions` module, pickle will try to
lookup :exc:`KeyError` in the wrong module, resulting in this exception::
cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
attribute lookup exceptions.KeyError failed
doing `import exceptions` just before the dump in `sqlalchemy.types`
reveals the source of the bug::
EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/
workspace/buildenv/lib/python2.5/site-packages/sqlalchemy/exc.pyc'>
Hence the random module name 'a805d4bd' is taken to decrease the chances of
a collision.
"""
from __future__ import absolute_import
from sqlalchemy.types import PickleType as _PickleType
class PickleType(_PickleType):  # pragma: no cover
    """SQLAlchemy PickleType subclass whose processors pickle/unpickle
    with an absolute module reference, working around the relative-import
    pickling bug described in the module docstring."""
    def bind_processor(self, dialect):
        """Return a function that pickles values before the impl's own
        bind processing (if any)."""
        impl_processor = self.impl.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return impl_processor(value)
        else:
            def process(value):  # noqa
                if value is not None:
                    value = dumps(value, protocol)
                return value
        return process
    def result_processor(self, dialect, coltype):
        """Return a function that unpickles values after the impl's own
        result processing (if any); None stays None."""
        impl_processor = self.impl.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is not None:
                    return loads(value)
        else:
            def process(value):  # noqa
                if value is not None:
                    return loads(value)
        return process
    def copy_value(self, value):
        """Deep-copy mutable values via a pickle round-trip."""
        if self.mutable:
            return self.pickler.loads(self.pickler.dumps(value, self.protocol))
        else:
            return value
| bsd-3-clause | 57d248392a70cff8da0fa761f06317e1 | 31.28169 | 79 | 0.602531 | 4.459144 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/utils/imports.py | 1 | 2531 | # -*- coding: utf-8 -*-
"""
celery.utils.imports
~~~~~~~~~~~~~~~~~~~~
Utilities related to importing modules and symbols by name.
"""
from __future__ import absolute_import
from __future__ import with_statement
import imp as _imp
import importlib
import os
import sys
from contextlib import contextmanager
from kombu.utils import symbol_by_name
from .compat import reload
class NotAPackage(Exception):
    """Raised when a dotted module path traverses a non-package module."""
if sys.version_info >= (3, 3):  # pragma: no cover
    def qualname(obj):
        """Return the qualified name of *obj* (native on Python 3.3+)."""
        return obj.__qualname__
else:
    def qualname(obj):  # noqa
        """Approximate ``__qualname__`` on older Pythons as module.name,
        falling back to the object's class for instances."""
        if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
            return qualname(obj.__class__)
        return '%s.%s' % (obj.__module__, obj.__name__)
def instantiate(name, *args, **kwargs):
    """Look up a class by (dotted) name and instantiate it, forwarding
    any extra positional and keyword arguments to the constructor.

    See :func:`symbol_by_name`.
    """
    cls = symbol_by_name(name)
    return cls(*args, **kwargs)
@contextmanager
def cwd_in_path():
    """Context manager temporarily prepending the current working
    directory to :data:`sys.path`.

    Yields the directory that was added, or ``None`` when it was
    already present (in which case nothing is changed or cleaned up).
    """
    current = os.getcwd()
    if current in sys.path:
        yield
        return
    sys.path.insert(0, current)
    try:
        yield current
    finally:
        # it may have been removed by user code in the meantime
        if current in sys.path:
            sys.path.remove(current)
def find_module(module, path=None, imp=None):
    """Version of :func:`imp.find_module` supporting dots.

    Walks each package in a dotted *module* name by importing it with
    *imp* (default :func:`importlib.import_module`) and following its
    ``__path__``; raises :exc:`NotAPackage` if an intermediate name is
    not a package.  Returns the :func:`imp.find_module` result for the
    final component.
    """
    if imp is None:
        imp = importlib.import_module
    # search relative to the current directory as well
    with cwd_in_path():
        if '.' in module:
            last = None
            parts = module.split('.')
            for i, part in enumerate(parts[:-1]):
                mpart = imp('.'.join(parts[:i + 1]))
                try:
                    path = mpart.__path__
                except AttributeError:
                    # parent imported fine but is a plain module
                    raise NotAPackage(module)
                last = _imp.find_module(parts[i + 1], path)
            return last
        return _imp.find_module(module)
def import_from_cwd(module, imp=None, package=None):
    """Import *module*, letting modules in the current working directory
    take precedence over those found elsewhere on :data:`sys.path`.
    """
    importer = imp if imp is not None else importlib.import_module
    with cwd_in_path():
        return importer(module, package=package)
def reload_from_cwd(module, reloader=None):
    """Reload *module* with current-directory modules taking precedence
    over the rest of :data:`sys.path`."""
    do_reload = reloader if reloader is not None else reload
    with cwd_in_path():
        return do_reload(module)
def module_file(module):
    """Return the source filename for *module*, mapping a compiled
    ``.pyc`` filename back to its ``.py`` source."""
    filename = module.__file__
    if filename.endswith('.pyc'):
        return filename[:-1]
    return filename
| bsd-3-clause | 3973729b528b7f4c82a5c8695b55598a | 22.435185 | 70 | 0.576057 | 3.930124 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/concurrency/base.py | 1 | 3738 | # -*- coding: utf-8 -*-
"""
celery.concurrency.base
~~~~~~~~~~~~~~~~~~~~~~~
TaskPool interface.
"""
from __future__ import absolute_import
import logging
import os
import time
from kombu.utils.encoding import safe_repr
from celery.utils import timer2
from celery.utils.log import get_logger
logger = get_logger('celery.concurrency')
def apply_target(target, args=(), kwargs={}, callback=None,
                 accept_callback=None, pid=None, **_):
    """Apply *target* and funnel the result through the pool callbacks.

    :param target: the callable to apply.
    :param args: positional arguments for *target*.
    :param kwargs: keyword arguments for *target* (never mutated here,
        so the shared ``{}`` default is safe).
    :keyword callback: if provided, called with the return value of
        *target*.
    :keyword accept_callback: if provided, called with
        ``(pid, time.time())`` just before *target* is applied.
    :keyword pid: process id reported to *accept_callback*;
        defaults to :func:`os.getpid`.
    """
    if accept_callback:
        accept_callback(pid or os.getpid(), time.time())
    ret = target(*args, **kwargs)
    # Bug fix: previously the callback was invoked unconditionally,
    # raising TypeError when no callback was supplied.
    if callback is not None:
        callback(ret)
class BasePool(object):
    """Abstract interface for worker execution pools; concrete pools
    override the ``on_*`` hooks and capability flags."""
    # pool lifecycle states
    RUN = 0x1
    CLOSE = 0x2
    TERMINATE = 0x3
    Timer = timer2.Timer
    #: set to true if the pool can be shutdown from within
    #: a signal handler.
    signal_safe = True
    #: set to true if pool supports rate limits.
    #: (this is here for gevent, which currently does not implement
    #: the necessary timers).
    rlimit_safe = True
    #: set to true if pool requires the use of a mediator
    #: thread (e.g. if applying new items can block the current thread).
    requires_mediator = False
    #: set to true if pool uses greenlets.
    is_green = False
    _state = None
    _pool = None
    #: only used by multiprocessing pool
    uses_semaphore = False
    def __init__(self, limit=None, putlocks=True,
                 forking_enable=True, **options):
        self.limit = limit
        self.putlocks = putlocks
        self.options = options
        self.forking_enable = forking_enable
        # cache the debug-enabled check so apply_async can skip formatting
        self._does_debug = logger.isEnabledFor(logging.DEBUG)
    def on_start(self):
        pass
    def did_start_ok(self):
        return True
    def on_stop(self):
        pass
    def on_apply(self, *args, **kwargs):
        pass
    def on_terminate(self):
        pass
    def on_soft_timeout(self, job):
        pass
    def on_hard_timeout(self, job):
        pass
    def maybe_handle_result(self, *args):
        pass
    def maintain_pool(self, *args, **kwargs):
        pass
    def terminate_job(self, pid):
        raise NotImplementedError(
            '%s does not implement kill_job' % (self.__class__, ))
    def restart(self):
        raise NotImplementedError(
            '%s does not implement restart' % (self.__class__, ))
    def stop(self):
        """Graceful shutdown: run the hook, then mark as terminated."""
        self.on_stop()
        self._state = self.TERMINATE
    def terminate(self):
        """Forceful shutdown: mark as terminated, then run the hook."""
        self._state = self.TERMINATE
        self.on_terminate()
    def start(self):
        self.on_start()
        self._state = self.RUN
    def close(self):
        self._state = self.CLOSE
        self.on_close()
    def on_close(self):
        pass
    def init_callbacks(self, **kwargs):
        pass
    def apply_async(self, target, args=[], kwargs={}, **options):
        """Equivalent of the :func:`apply` built-in function.

        Callbacks should optimally return as soon as possible since
        otherwise the thread which handles the result will get blocked.
        """
        if self._does_debug:
            logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                         target, safe_repr(args), safe_repr(kwargs))
        return self.on_apply(target, args, kwargs,
                             waitforslot=self.putlocks,
                             **options)
    def _get_info(self):
        # subclasses extend this with pool statistics
        return {}
    @property
    def info(self):
        return self._get_info()
    @property
    def active(self):
        return self._state == self.RUN
    @property
    def num_processes(self):
        return self.limit
    @property
    def readers(self):
        return {}
    @property
    def writers(self):
        return {}
    @property
    def timers(self):
        return {}
| bsd-3-clause | 8050cc4b7c84b7972fe34a4c1e889408 | 21.792683 | 72 | 0.582665 | 4.036717 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/tests/tasks/test_canvas.py | 1 | 5372 | from __future__ import absolute_import
from __future__ import with_statement
from mock import Mock
from celery import current_app, task
from celery.canvas import Signature, chain, group, chord, subtask
from celery.result import EagerResult
from celery.tests.utils import Case
# Prebuilt raw Signature dict used as a fixture throughout these tests.
SIG = Signature({'task': 'TASK',
                 'args': ('A1', ),
                 'kwargs': {'K1': 'V1'},
                 'options': {'task_id': 'TASK_ID'},
                 'subtask_type': ''})
# Trivial arithmetic tasks used to build chains/groups/chords in tests.
@task()
def add(x, y):
    return x + y
@task()
def mul(x, y):
    return x * y
@task()
def div(x, y):
    return x / y
class test_Signature(Case):
    """Tests for :class:`celery.canvas.Signature` dict-property access,
    replace/set, link bookkeeping and operator overloads."""
    def test_getitem_property_class(self):
        self.assertTrue(Signature.task)
        self.assertTrue(Signature.args)
        self.assertTrue(Signature.kwargs)
        self.assertTrue(Signature.options)
        self.assertTrue(Signature.subtask_type)
    def test_getitem_property(self):
        self.assertEqual(SIG.task, 'TASK')
        self.assertEqual(SIG.args, ('A1', ))
        self.assertEqual(SIG.kwargs, {'K1': 'V1'})
        self.assertEqual(SIG.options, {'task_id': 'TASK_ID'})
        self.assertEqual(SIG.subtask_type, '')
    def test_replace(self):
        x = Signature('TASK', ('A'), {})
        self.assertTupleEqual(x.replace(args=('B', )).args, ('B', ))
        self.assertDictEqual(
            x.replace(kwargs={'FOO': 'BAR'}).kwargs,
            {'FOO': 'BAR'},
        )
        self.assertDictEqual(
            x.replace(options={'task_id': '123'}).options,
            {'task_id': '123'},
        )
    def test_set(self):
        self.assertDictEqual(
            Signature('TASK', x=1).set(task_id='2').options,
            {'x': 1, 'task_id': '2'},
        )
    def test_link(self):
        # linking the same signature twice must not duplicate it
        x = subtask(SIG)
        x.link(SIG)
        x.link(SIG)
        self.assertIn(SIG, x.options['link'])
        self.assertEqual(len(x.options['link']), 1)
    def test_link_error(self):
        x = subtask(SIG)
        x.link_error(SIG)
        x.link_error(SIG)
        self.assertIn(SIG, x.options['link_error'])
        self.assertEqual(len(x.options['link_error']), 1)
    def test_flatten_links(self):
        tasks = [add.s(2, 2), mul.s(4), div.s(2)]
        tasks[0].link(tasks[1])
        tasks[1].link(tasks[2])
        self.assertEqual(tasks[0].flatten_links(), tasks)
    def test_OR(self):
        # ``|`` builds (and flattens into) chains
        x = add.s(2, 2) | mul.s(4)
        self.assertIsInstance(x, chain)
        y = add.s(4, 4) | div.s(2)
        z = x | y
        self.assertIsInstance(y, chain)
        self.assertIsInstance(z, chain)
        self.assertEqual(len(z.tasks), 4)
        with self.assertRaises(TypeError):
            x | 10
    def test_INVERT(self):
        # ``~sig`` is shorthand for apply_async().get()
        x = add.s(2, 2)
        x.apply_async = Mock()
        x.apply_async.return_value = Mock()
        x.apply_async.return_value.get = Mock()
        x.apply_async.return_value.get.return_value = 4
        self.assertEqual(~x, 4)
        self.assertTrue(x.apply_async.called)
class test_chain(Case):
    """Tests for :class:`celery.canvas.chain` repr, round-trip and apply."""
    def test_repr(self):
        x = add.s(2, 2) | add.s(2)
        self.assertEqual(repr(x), '%s(2, 2) | %s(2)' % (add.name, add.name))
    def test_reverse(self):
        x = add.s(2, 2) | add.s(2)
        self.assertIsInstance(subtask(x), chain)
        self.assertIsInstance(subtask(dict(x)), chain)
    def test_always_eager(self):
        current_app.conf.CELERY_ALWAYS_EAGER = True
        try:
            self.assertEqual(~(add.s(4, 4) | add.s(8)), 16)
        finally:
            # restore global config for the other tests
            current_app.conf.CELERY_ALWAYS_EAGER = False
    def test_apply(self):
        x = chain(add.s(4, 4), add.s(8), add.s(10))
        res = x.apply()
        self.assertIsInstance(res, EagerResult)
        # results are chained via the ``parent`` attribute
        self.assertEqual(res.get(), 26)
        self.assertEqual(res.parent.get(), 16)
        self.assertEqual(res.parent.parent.get(), 8)
        self.assertIsNone(res.parent.parent.parent)
class test_group(Case):
    """Tests for :class:`celery.canvas.group` repr and round-trip."""
    def test_repr(self):
        x = group([add.s(2, 2), add.s(4, 4)])
        self.assertEqual(repr(x), repr(x.tasks))
    def test_reverse(self):
        x = group([add.s(2, 2), add.s(4, 4)])
        self.assertIsInstance(subtask(x), group)
        self.assertIsInstance(subtask(dict(x)), group)
class test_chord(Case):
    """Tests for :class:`celery.canvas.chord` round-trip, cloning and
    callback-link propagation to the chord body."""
    def test_reverse(self):
        x = chord([add.s(2, 2), add.s(4, 4)], body=mul.s(4))
        self.assertIsInstance(subtask(x), chord)
        self.assertIsInstance(subtask(dict(x)), chord)
    def test_clone_clones_body(self):
        x = chord([add.s(2, 2), add.s(4, 4)], body=mul.s(4))
        y = x.clone()
        self.assertIsNot(x.kwargs['body'], y.kwargs['body'])
        y.kwargs.pop('body')
        z = y.clone()
        self.assertIsNone(z.kwargs.get('body'))
    def test_links_to_body(self):
        # link()/link_error() on a chord attach to the body, not the chord
        x = chord([add.s(2, 2), add.s(4, 4)], body=mul.s(4))
        x.link(div.s(2))
        self.assertFalse(x.options.get('link'))
        self.assertTrue(x.kwargs['body'].options['link'])
        x.link_error(div.s(2))
        self.assertFalse(x.options.get('link_error'))
        self.assertTrue(x.kwargs['body'].options['link_error'])
        self.assertTrue(x.tasks)
        self.assertTrue(x.body)
    def test_repr(self):
        x = chord([add.s(2, 2), add.s(4, 4)], body=mul.s(4))
        self.assertTrue(repr(x))
        x.kwargs['body'] = None
        self.assertIn('without body', repr(x))
| bsd-3-clause | 008cc8c1759b7030b6fdce08bdaf0252 | 28.516484 | 76 | 0.561057 | 3.247884 | false | true | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/worker/bootsteps.py | 1 | 6204 | # -*- coding: utf-8 -*-
"""
celery.worker.bootsteps
~~~~~~~~~~~~~~~~~~~~~~~
The boot-step components.
"""
from __future__ import absolute_import
from collections import defaultdict
from importlib import import_module
from celery.datastructures import DependencyGraph
from celery.utils.imports import instantiate
from celery.utils.log import get_logger
logger = get_logger(__name__)
class Namespace(object):
    """A namespace containing components.

    Every component must belong to a namespace.

    When component classes are created they are added to the
    mapping of unclaimed components.  The components will be
    claimed when the namespace they belong to is created.

    :keyword name: Set the name of this namespace.
    :keyword app: Set the Celery app for this namespace.

    """
    name = None
    # class-level registry: namespace name -> {component name: class},
    # populated by ComponentType at class-creation time
    _unclaimed = defaultdict(dict)
    _started_count = 0
    def __init__(self, name=None, app=None):
        self.app = app
        self.name = name or self.name
        self.services = []
    def modules(self):
        """Subclasses can override this to return a
        list of modules to import before components are claimed."""
        return []
    def load_modules(self):
        """Will load the component modules this namespace depends on."""
        for m in self.modules():
            self.import_module(m)
    def apply(self, parent, **kwargs):
        """Apply the components in this namespace to an object.

        This will apply the ``__init__`` and ``include`` methods
        of each components with the object as argument.

        For ``StartStopComponents`` the services created
        will also be added the the objects ``components`` attribute.

        """
        self._debug('Loading modules.')
        self.load_modules()
        self._debug('Claiming components.')
        self.components = self._claim()
        self._debug('Building boot step graph.')
        # topologically sort components by their declared requirements
        self.boot_steps = [self.bind_component(name, parent, **kwargs)
                           for name in self._finalize_boot_steps()]
        self._debug(
            'New boot order: {%s}', ', '.join(c.name for c in self.boot_steps),
        )
        for component in self.boot_steps:
            component.include(parent)
        return self
    def bind_component(self, name, parent, **kwargs):
        """Bind component to parent object and this namespace."""
        comp = self[name](parent, **kwargs)
        comp.namespace = self
        return comp
    def import_module(self, module):
        return import_module(module)
    def __getitem__(self, name):
        return self.components[name]
    def _find_last(self):
        # at most one component per namespace may set ``last = True``
        for C in self.components.itervalues():
            if C.last:
                return C
    def _finalize_boot_steps(self):
        G = self.graph = DependencyGraph(
            (C.name, C.requires) for C in self.components.itervalues())
        last = self._find_last()
        if last:
            # make the 'last' component depend on everything else so the
            # topological sort places it at the end
            for obj in G:
                if obj != last.name:
                    G.add_edge(last.name, obj)
        return G.topsort()
    def _claim(self):
        return self._unclaimed[self.name]
    def _debug(self, msg, *args):
        return logger.debug('[%s] ' + msg,
                            *(self.name.capitalize(), ) + args)
class ComponentType(type):
    """Metaclass for components.

    Registers every concrete (non-abstract) component class in
    ``Namespace._unclaimed`` under its namespace, deriving the namespace
    from a dotted ``name`` when no explicit ``namespace`` is given.
    """
    def __new__(cls, name, bases, attrs):
        abstract = attrs.pop('abstract', False)
        if not abstract:
            try:
                cname = attrs['name']
            except KeyError:
                raise NotImplementedError('Components must be named')
            namespace = attrs.get('namespace', None)
            if not namespace:
                # split "namespace.name" into its two parts
                attrs['namespace'], _, attrs['name'] = cname.partition('.')
        cls = super(ComponentType, cls).__new__(cls, name, bases, attrs)
        if not abstract:
            Namespace._unclaimed[cls.namespace][cls.name] = cls
        return cls
class Component(object):
    """A component.

    The :meth:`__init__` method is called when the component
    is bound to a parent object, and can as such be used
    to initialize attributes in the parent object at
    parent instantiation-time.

    """
    __metaclass__ = ComponentType
    #: The name of the component, or the namespace
    #: and the name of the component separated by dot.
    name = None
    #: List of component names this component depends on.
    #: Note that the dependencies must be in the same namespace.
    requires = ()
    #: can be used to specify the namespace,
    #: if the name does not include it.
    namespace = None
    #: if set the component will not be registered,
    #: but can be used as a component base class.
    abstract = True
    #: Optional obj created by the :meth:`create` method.
    #: This is used by StartStopComponents to keep the
    #: original service object.
    obj = None
    #: This flag is reserved for the workers Consumer,
    #: since it is required to always be started last.
    #: There can only be one object marked with last
    #: in every namespace.
    last = False
    #: This provides the default for :meth:`include_if`.
    enabled = True
    def __init__(self, parent, **kwargs):
        pass
    def create(self, parent):
        """Create the component."""
        pass
    def include_if(self, parent):
        """An optional predicate that decided whether this
        component should be created."""
        return self.enabled
    def instantiate(self, qualname, *args, **kwargs):
        return instantiate(qualname, *args, **kwargs)
    def include(self, parent):
        """Create the component if enabled; return True when included."""
        if self.include_if(parent):
            self.obj = self.create(parent)
            return True
class StartStopComponent(Component):
    """Component whose created object is a service with
    ``start``/``stop`` (and optionally ``terminate``) methods; the
    service is appended to the parent's ``components`` list."""
    abstract = True
    # when True, terminate() delegates to obj.terminate() instead of stop()
    terminable = False
    def start(self):
        return self.obj.start()
    def stop(self):
        return self.obj.stop()
    def terminate(self):
        if self.terminable:
            return self.obj.terminate()
        return self.obj.stop()
    def include(self, parent):
        if super(StartStopComponent, self).include(parent):
            parent.components.append(self.obj)
| bsd-3-clause | 215ca4275a590918a339cd35af9b8d8b | 28.402844 | 79 | 0.607672 | 4.431429 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/requests/packages/charade/sjisprober.py | 169 | 3825 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Charset prober for Shift-JIS, combining a coding state machine
    with distribution and context analyses."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "SHIFT_JIS"
    def feed(self, aBuf):
        """Feed a chunk of bytes to the detector; return current state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # byte sequence invalid for Shift-JIS: rule it out
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # a full character was completed; feed it to the analyzers
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # character straddles the previous chunk boundary
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # remember the trailing byte for the next chunk
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # confident enough: stop analyzing further input
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| bsd-3-clause | bcdedddca44563fe6584b24b86c778ee | 40.032967 | 78 | 0.588235 | 4.602888 | false | false | false | false |
conda-forge/conda-forge.github.io | scripts/update_teams.py | 1 | 4482 | #!/usr/bin/env conda-execute
# conda execute
# env:
# - python 2.7.*
# - conda-smithy
# - pygithub 1.*
# - six
# - conda-build
# channels:
# - conda-forge
# run_with: python
from __future__ import print_function
import argparse
import collections
import os
import six
from github import Github
import github
import yaml
from conda_build.metadata import MetaData
# Command-line interface: the only argument is the feedstocks clone path.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('feedstocks_clone', help="The location of the feedstocks directory within the conda-forge/feedstocks clone.")
args = parser.parse_args()
from conda_smithy.github import gh_token
# Authenticate against the GitHub API and fetch all conda-forge teams up front.
token = gh_token()
gh = Github(token)
conda_forge = gh.get_organization('conda-forge')
teams = {team.name: team for team in conda_forge.get_teams()}
feedstocks_path = args.feedstocks_clone
packages_visited = set()
all_members = set()
from random import choice
# Adjectives available for randomly generated messages.
superlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',
               'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',
               'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',
               'exalted', 'standout', 'smashing']
# Go through each of the feedstocks and ensure that the team is up to date and that
# there is nobody in the team which doesn't belong (i.e. isn't in the maintainers list).
for package_name in os.listdir(feedstocks_path):
    print("Checking {}".format(package_name))
    packages_visited.add(package_name)
    feedstock = os.path.join(feedstocks_path, package_name)
    recipe = os.path.join(feedstock, 'recipe', 'meta.yaml')
    if not os.path.exists(recipe):
        print("The {} feedstock is recipe less".format(package_name))
        continue
    # Parse the recipe metadata to extract its maintainer list.
    meta = MetaData(os.path.dirname(recipe))
    contributors = meta.meta.get('extra', {}).get('recipe-maintainers', [])
    if not isinstance(contributors, list):
        # Deal with a contribution list which has dashes but no spaces
        # (e.g. https://github.com/conda-forge/pandoc-feedstock/issues/1)
        contributors = [contributors.lstrip('-')]
    # GitHub handles are case-insensitive; normalize for set comparisons.
    contributors = set(handle.lower() for handle in contributors)
    all_members.update(contributors)
    # If the team already exists, get hold of it.
    team = teams.get(package_name)
    if not team:
        print("Team {} does not exist in conda-forge organization".format(package_name))
        continue
    current_members = team.get_members()
    member_handles = set([member.login.lower() for member in current_members])
    # Add maintainers not yet on the team (PUT on the memberships endpoint
    # adds or invites the user).
    for new_member in contributors - member_handles:
        headers, data = team._requester.requestJsonAndCheck(
            "PUT",
            team.url + "/memberships/" + new_member)
    # Members who are no longer maintainers are only reported, not removed.
    for old_member in member_handles - contributors:
        print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(old_member, package_name))
        # The following works, it is just a bit scary!
        # headers, data = team._requester.requestJsonAndCheck(
        #     "DELETE",
        #     team.url + "/memberships/" + old_member)
# Create and administer the all-members team.
team = teams.get('all-members')
if not team:
    raise RuntimeError("Team all-members does not exist in conda-forge organization")
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
# Ensure every maintainer collected above is in the all-members team.
for new_member in all_members - member_handles:
    headers, data = team._requester.requestJsonAndCheck(
        "PUT",
        team.url + "/memberships/" + new_member)
# Stale members are reported only; removal stays a manual step.
for old_member in member_handles - all_members:
    print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM all-members".format(old_member))

# Remove any teams which don't belong any more (because there is no longer a feedstock).
for team_to_remove in set(teams.keys()) - set(packages_visited):
    # Organization-level teams not tied to a feedstock are exempt.
    if team_to_remove in ['Core',
                          'conda-forge.github.io',
                          'all-members',
                          'conda-forge-anvil',
                          'conda-forge-webservices',
                          'staged-recipes']:
        print('Keeping ', team_to_remove)
        continue
    print("THE {} TEAM NEEDS TO BE REMOVED.".format(team_to_remove))
    # The following works, it is just a bit scary!
    # teams[team_to_remove].delete()
| bsd-3-clause | dadb19cb338404b9d9c1a297f5cb0c33 | 35.737705 | 129 | 0.640116 | 3.691928 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/kombu/tests/test_serialization.py | 2 | 9679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
import sys
from kombu.serialization import (registry, register, SerializerNotInstalled,
raw_encode, register_yaml, register_msgpack,
decode, bytes_t, pickle, pickle_protocol,
unregister, register_pickle)
from .utils import TestCase
from .utils import mask_modules, skip_if_not_module
# For content_encoding tests
unicode_string = u'abcdé\u8463'
unicode_string_as_utf8 = unicode_string.encode('utf-8')
latin_string = u'abcdé'
latin_string_as_latin1 = latin_string.encode('latin-1')
latin_string_as_utf8 = latin_string.encode('utf-8')
# For serialization tests
py_data = {'string': 'The quick brown fox jumps over the lazy dog',
'int': 10,
'float': 3.14159265,
'unicode': u'Thé quick brown fox jumps over thé lazy dog',
'list': ['george', 'jerry', 'elaine', 'cosmo'],
}
# JSON serialization tests
json_data = ('{"int": 10, "float": 3.1415926500000002, '
'"list": ["george", "jerry", "elaine", "cosmo"], '
'"string": "The quick brown fox jumps over the lazy '
'dog", "unicode": "Th\\u00e9 quick brown fox jumps over '
'th\\u00e9 lazy dog"}')
# Pickle serialization tests
pickle_data = pickle.dumps(py_data, protocol=pickle_protocol)
# YAML serialization tests
yaml_data = ('float: 3.1415926500000002\nint: 10\n'
'list: [george, jerry, elaine, cosmo]\n'
'string: The quick brown fox jumps over the lazy dog\n'
'unicode: "Th\\xE9 quick brown fox '
'jumps over th\\xE9 lazy dog"\n')
msgpack_py_data = dict(py_data)
# msgpack only supports tuples
msgpack_py_data['list'] = tuple(msgpack_py_data['list'])
# Unicode chars are lost in transmit :(
msgpack_py_data['unicode'] = 'Th quick brown fox jumps over th lazy dog'
msgpack_data = ('\x85\xa3int\n\xa5float\xcb@\t!\xfbS\xc8\xd4\xf1\xa4list'
'\x94\xa6george\xa5jerry\xa6elaine\xa5cosmo\xa6string\xda'
'\x00+The quick brown fox jumps over the lazy dog\xa7unicode'
'\xda\x00)Th quick brown fox jumps over th lazy dog')
def say(m):
    """Write *m* to stderr, followed by a newline (simple debug helper)."""
    line = '%s\n' % (m, )
    sys.stderr.write(line)
registry.register('testS', lambda s: s, lambda s: 'decoded',
'application/testS', 'utf-8')
class test_Serialization(TestCase):
def test_disable(self):
disabled = registry._disabled_content_types
try:
registry.disable('testS')
self.assertIn('application/testS', disabled)
disabled.clear()
registry.disable('application/testS')
self.assertIn('application/testS', disabled)
finally:
disabled.clear()
def test_decode_when_disabled(self):
disabled = registry._disabled_content_types
try:
registry.disable('testS')
with self.assertRaises(SerializerNotInstalled):
registry.decode('xxd', 'application/testS', 'utf-8',
force=False)
ret = registry.decode('xxd', 'application/testS', 'utf-8',
force=True)
self.assertEqual(ret, 'decoded')
finally:
disabled.clear()
def test_decode_when_data_is_None(self):
registry.decode(None, 'application/testS', 'utf-8')
def test_content_type_decoding(self):
self.assertEqual(unicode_string,
registry.decode(
unicode_string_as_utf8,
content_type='plain/text',
content_encoding='utf-8'))
self.assertEqual(latin_string,
registry.decode(
latin_string_as_latin1,
content_type='application/data',
content_encoding='latin-1'))
def test_content_type_binary(self):
self.assertIsInstance(registry.decode(unicode_string_as_utf8,
content_type='application/data',
content_encoding='binary'),
bytes_t)
self.assertEqual(unicode_string_as_utf8,
registry.decode(
unicode_string_as_utf8,
content_type='application/data',
content_encoding='binary'))
def test_content_type_encoding(self):
# Using the 'raw' serializer
self.assertEqual(unicode_string_as_utf8,
registry.encode(
unicode_string, serializer='raw')[-1])
self.assertEqual(latin_string_as_utf8,
registry.encode(
latin_string, serializer='raw')[-1])
# And again w/o a specific serializer to check the
# code where we force unicode objects into a string.
self.assertEqual(unicode_string_as_utf8,
registry.encode(unicode_string)[-1])
self.assertEqual(latin_string_as_utf8,
registry.encode(latin_string)[-1])
def test_json_decode(self):
self.assertEqual(py_data,
registry.decode(
json_data,
content_type='application/json',
content_encoding='utf-8'))
def test_json_encode(self):
self.assertEqual(registry.decode(
registry.encode(py_data, serializer='json')[-1],
content_type='application/json',
content_encoding='utf-8'),
registry.decode(
json_data,
content_type='application/json',
content_encoding='utf-8'))
@skip_if_not_module('msgpack')
def test_msgpack_decode(self):
register_msgpack()
self.assertEqual(msgpack_py_data,
registry.decode(
msgpack_data,
content_type='application/x-msgpack',
content_encoding='binary'))
@skip_if_not_module('msgpack')
def test_msgpack_encode(self):
register_msgpack()
self.assertEqual(registry.decode(
registry.encode(msgpack_py_data, serializer='msgpack')[-1],
content_type='application/x-msgpack',
content_encoding='binary'),
registry.decode(
msgpack_data,
content_type='application/x-msgpack',
content_encoding='binary'))
@skip_if_not_module('yaml')
def test_yaml_decode(self):
register_yaml()
self.assertEqual(py_data,
registry.decode(
yaml_data,
content_type='application/x-yaml',
content_encoding='utf-8'))
@skip_if_not_module('yaml')
def test_yaml_encode(self):
register_yaml()
self.assertEqual(registry.decode(
registry.encode(py_data, serializer='yaml')[-1],
content_type='application/x-yaml',
content_encoding='utf-8'),
registry.decode(
yaml_data,
content_type='application/x-yaml',
content_encoding='utf-8'))
def test_pickle_decode(self):
self.assertEqual(py_data,
registry.decode(
pickle_data,
content_type='application/x-python-serialize',
content_encoding='binary'))
def test_pickle_encode(self):
self.assertEqual(pickle.loads(pickle_data),
pickle.loads(registry.encode(py_data,
serializer='pickle')[-1]))
def test_register(self):
register(None, None, None, None)
def test_unregister(self):
with self.assertRaises(SerializerNotInstalled):
unregister('nonexisting')
registry.encode('foo', serializer='pickle')
unregister('pickle')
with self.assertRaises(SerializerNotInstalled):
registry.encode('foo', serializer='pickle')
register_pickle()
def test_set_default_serializer_missing(self):
with self.assertRaises(SerializerNotInstalled):
registry._set_default_serializer('nonexisting')
def test_encode_missing(self):
with self.assertRaises(SerializerNotInstalled):
registry.encode('foo', serializer='nonexisting')
def test_raw_encode(self):
self.assertTupleEqual(raw_encode('foo'.encode('utf-8')),
('application/data', 'binary',
'foo'.encode('utf-8')))
@mask_modules('yaml')
def test_register_yaml__no_yaml(self):
register_yaml()
with self.assertRaises(SerializerNotInstalled):
decode('foo', 'application/x-yaml', 'utf-8')
@mask_modules('msgpack')
def test_register_msgpack__no_msgpack(self):
register_msgpack()
with self.assertRaises(SerializerNotInstalled):
decode('foo', 'application/x-msgpack', 'utf-8')
| bsd-3-clause | 637aa429d2fa47c12028d291dec385d5 | 38.329268 | 78 | 0.534987 | 4.334677 | false | true | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/states.py | 1 | 2809 | # -*- coding: utf-8 -*-
"""
celery.states
=============
Built-in task states.
.. _states:
States
------
See :ref:`task-states`.
.. _statesets:
Sets
----
.. state:: READY_STATES
READY_STATES
~~~~~~~~~~~~
Set of states meaning the task result is ready (has been executed).
.. state:: UNREADY_STATES
UNREADY_STATES
~~~~~~~~~~~~~~
Set of states meaning the task result is not ready (has not been executed).
.. state:: EXCEPTION_STATES
EXCEPTION_STATES
~~~~~~~~~~~~~~~~
Set of states meaning the task returned an exception.
.. state:: PROPAGATE_STATES
PROPAGATE_STATES
~~~~~~~~~~~~~~~~
Set of exception states that should propagate exceptions to the user.
.. state:: ALL_STATES
ALL_STATES
~~~~~~~~~~
Set of all possible states.
Misc.
-----
"""
from __future__ import absolute_import
#: State precedence.
#: None represents the precedence of an unknown state.
#: Lower index means higher precedence.
PRECEDENCE = ['SUCCESS',
'FAILURE',
None,
'REVOKED',
'STARTED',
'RECEIVED',
'RETRY',
'PENDING']
def precedence(state):
    """Get the precedence index for state.

    Lower index means higher precedence; any unknown state shares the
    precedence of the ``None`` placeholder entry.
    """
    if state in PRECEDENCE:
        return PRECEDENCE.index(state)
    return PRECEDENCE.index(None)
class state(str):
    """State is a subclass of :class:`str`, implementing comparison
    methods adhering to state precedence rules::

        >>> from celery.states import state, PENDING, SUCCESS
        >>> state(PENDING) < state(SUCCESS)
        True

    Any custom state is considered to be lower than :state:`FAILURE` and
    :state:`SUCCESS`, but higher than any of the other built-in states::

        >>> state('PROGRESS') > state(STARTED)
        True
        >>> state('PROGRESS') > state('SUCCESS')
        False

    """

    def compare(self, other, fun):
        # Compare the precedence *indices* of the two states.  A lower
        # index means a higher-precedence state, which is why the
        # operators in the dunder methods below are inverted.
        return fun(precedence(self), precedence(other))

    def __gt__(self, other):
        # "greater" state == smaller precedence index.
        return self.compare(other, lambda a, b: a < b)

    def __ge__(self, other):
        return self.compare(other, lambda a, b: a <= b)

    def __lt__(self, other):
        return self.compare(other, lambda a, b: a > b)

    def __le__(self, other):
        return self.compare(other, lambda a, b: a >= b)
# Individual task state names (plain strings, safe to store and compare).
PENDING = 'PENDING'
RECEIVED = 'RECEIVED'
STARTED = 'STARTED'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
REVOKED = 'REVOKED'
RETRY = 'RETRY'
IGNORED = 'IGNORED'

READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED])
UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY])
EXCEPTION_STATES = frozenset([RETRY, FAILURE, REVOKED])
PROPAGATE_STATES = frozenset([FAILURE, REVOKED])

# NOTE(review): IGNORED is absent from ALL_STATES — confirm against
# upstream before relying on ALL_STATES being exhaustive.
ALL_STATES = frozenset([PENDING, RECEIVED, STARTED,
                        SUCCESS, FAILURE, RETRY, REVOKED])
| bsd-3-clause | 70716e6476b63792c988609dc08385de | 19.50365 | 75 | 0.61303 | 3.735372 | false | false | false | false |
mozilla/firefox-flicks | flicks/users/migrations/0001_initial.py | 1 | 4435 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the ``users_userprofile`` table (South schema migration)."""
        # Adding model 'UserProfile'
        db.create_table('users_userprofile', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('country', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('bio', self.gf('django.db.models.fields.TextField')()),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
        ))
        db.send_create_signal('users', ['UserProfile'])
    def backwards(self, orm):
        """Reverse of :meth:`forwards`: drop the ``users_userprofile`` table."""
        # Deleting model 'UserProfile'
        db.delete_table('users_userprofile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'users.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'bio': ('django.db.models.fields.TextField', [], {}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['users'] | bsd-3-clause | 6f4a1da6ea9162f821e93f938a0712a6 | 62.371429 | 182 | 0.55761 | 3.764856 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/basket/base.py | 1 | 3662 | import json
import os
try:
from django.conf import settings
except (ImportError, AttributeError):
settings = None
import requests
BASKET_URL = getattr(settings, 'BASKET_URL',
os.getenv('BASKET_URL', 'http://localhost:8000'))
class BasketException(Exception):
pass
def basket_url(method, token=None):
    """Form a basket API url.

    If the request requires a user-specific token, it is appended as the
    final path component of the URL.
    """
    suffix = '%s/' % token if token else ''
    return '%s/news/%s/%s' % (BASKET_URL, method, suffix)
def parse_response(res):
    """Parse the result of a basket API call.

    Raises :class:`BasketException` when the HTTP status is not 200, or
    when the decoded payload reports an ``error`` status.
    """
    status = res.status_code
    if status != 200:
        raise BasketException('%s request returned from basket: %s'
                              % (status, res.content))
    # Decode the JSON body and surface API-level errors.
    payload = json.loads(res.content)
    if payload.get('status') == 'error':
        raise BasketException(payload['desc'])
    return payload
def request(method, action, data=None, token=None, params=None):
    """Call the basket API with the supplied http method and data.

    :param method: HTTP verb passed straight to ``requests.request``.
    :param action: basket API endpoint name, used to build the URL.
    :param data: optional form payload; a ``newsletters`` entry given as a
        list/tuple is flattened to the comma-delimited string basket expects.
    :param token: optional user-specific token appended to the URL.
    :param params: optional query-string parameters.
    :raises BasketException: if basket cannot be reached.
    """
    # newsletters should be comma-delimited.
    # BUG FIX: this previously tested ``'__iter__' in data['newsletters']``,
    # which checks for the literal string '__iter__' *inside* the value
    # rather than for iterability, so list values were never joined.
    if data and 'newsletters' in data:
        if isinstance(data['newsletters'], (list, tuple)):
            data['newsletters'] = ','.join(data['newsletters'])
    try:
        res = requests.request(method,
                               basket_url(action, token),
                               data=data,
                               params=params,
                               timeout=10)
    except requests.exceptions.ConnectionError:
        raise BasketException("Error connecting to basket")
    return parse_response(res)
# Public API methods
def subscribe(email, newsletters, **kwargs):
    """Subscribe an email through basket to `newsletters`, which can
    be string or an array of newsletter names. Additional parameters
    should be passed as keyword arguments."""
    # Fold the positional arguments into the keyword dict so everything
    # travels as a single form payload.
    kwargs.update(email=email, newsletters=newsletters)
    return request('post', 'subscribe', data=kwargs)
def send_sms(mobile_number, msg_name, optin=False):
    """
    Send SMS message `msg_name` to `mobile_number` and optionally add the
    number to a list for future messages.
    """
    payload = {
        'mobile_number': mobile_number,
        'msg_name': msg_name,
        'optin': 'Y' if optin else 'N',
    }
    return request('post', 'subscribe_sms', data=payload)
def unsubscribe(token, email, newsletters=None, optout=False):
    """Unsubscribe an email from certain newsletters, or all of them
    if `optout` is passed. Requires a token.

    :param token: user-specific basket token.
    :param email: address to unsubscribe.
    :param newsletters: one newsletter name or a list of names; required
        unless ``optout`` is true.
    :param optout: when true, opt the user out of everything.
    :raises BasketException: if neither ``newsletters`` nor ``optout``
        was supplied.
    """
    data = {'email': email}
    if optout:
        data['optout'] = 'Y'
    elif newsletters:
        data['newsletters'] = newsletters
    else:
        # Typo fix: the message previously read "ether".
        raise BasketException('unsubscribe requires either a newsletters '
                              'or optout parameter')
    return request('post', 'unsubscribe', data=data, token=token)
def user(token):
    """Get all the information about a user.

    :param token: the user-specific basket token identifying the account.
    """
    return request('get', 'user', token=token)
def update_user(token, **kwargs):
    """Update any fields for a user. Requires a token.

    If ``newsletters`` is passed, the user is only subscribed to those
    specific newsletters.
    """
    return request('post', 'user', data=kwargs, token=token)
def debug_user(email, supertoken):
    """Get a user's information using a supertoken only known by devs,
    useful for ensuring that data is being posted correctly."""
    # Sent as query-string parameters rather than form data (GET request).
    return request('get', 'debug-user',
                   params={'email': email,
                           'supertoken': supertoken})
| bsd-3-clause | 410cb6408e9a5a13563744a4dfe48a09 | 29.016393 | 73 | 0.626707 | 4.119235 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/djcelery/admin.py | 2 | 10765 | from __future__ import absolute_import
from __future__ import with_statement
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.views import main as main_views
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from celery import current_app
from celery import states
from celery.task.control import broadcast, revoke, rate_limit
from celery.utils.text import abbrtask
from .admin_utils import action, display_field, fixedwidth
from .models import (TaskState, WorkerState,
PeriodicTask, IntervalSchedule, CrontabSchedule)
from .humanize import naturaldate
TASK_STATE_COLORS = {states.SUCCESS: "green",
states.FAILURE: "red",
states.REVOKED: "magenta",
states.STARTED: "yellow",
states.RETRY: "orange",
"RECEIVED": "blue"}
NODE_STATE_COLORS = {"ONLINE": "green",
"OFFLINE": "gray"}
class MonitorList(main_views.ChangeList):
    """ChangeList that titles the page from the admin's ``list_page_title``."""

    def __init__(self, *args, **kwargs):
        super(MonitorList, self).__init__(*args, **kwargs)
        # Replace Django's default changelist title with the monitor's own.
        self.title = self.model_admin.list_page_title
@display_field(_("state"), "state")
def colored_state(task):
state = escape(task.state)
color = TASK_STATE_COLORS.get(task.state, "black")
return """<b><span style="color: %s;">%s</span></b>""" % (color, state)
@display_field(_("state"), "last_heartbeat")
def node_state(node):
state = node.is_alive() and "ONLINE" or "OFFLINE"
color = NODE_STATE_COLORS[state]
return """<b><span style="color: %s;">%s</span></b>""" % (color, state)
@display_field(_("ETA"), "eta")
def eta(task):
if not task.eta:
return """<span style="color: gray;">none</span>"""
return escape(task.eta)
@display_field(_("when"), "tstamp")
def tstamp(task):
return """<div title="%s">%s</div>""" % (escape(str(task.tstamp)),
escape(naturaldate(task.tstamp)))
@display_field(_("name"), "name")
def name(task):
short_name = abbrtask(task.name, 16)
return """<div title="%s"><b>%s</b></div>""" % (escape(task.name),
escape(short_name))
class ModelMonitor(admin.ModelAdmin):
    """Base admin for monitor models: read-only by default.

    Subclasses flip ``can_add``/``can_delete`` to re-enable the
    corresponding admin permissions, and set ``detail_title`` and
    ``list_page_title`` for the page headings.
    """
    can_add = False
    can_delete = False

    def get_changelist(self, request, **kwargs):
        # Use the title-aware changelist defined above.
        return MonitorList

    def change_view(self, request, object_id, extra_context=None):
        extra_context = extra_context or {}
        extra_context.setdefault("title", self.detail_title)
        return super(ModelMonitor, self).change_view(request, object_id,
                                                     extra_context)

    def has_delete_permission(self, request, obj=None):
        if not self.can_delete:
            return False
        return super(ModelMonitor, self).has_delete_permission(request, obj)

    def has_add_permission(self, request):
        if not self.can_add:
            return False
        return super(ModelMonitor, self).has_add_permission(request)
class TaskMonitor(ModelMonitor):
detail_title = _("Task detail")
list_page_title = _("Tasks")
rate_limit_confirmation_template = "djcelery/confirm_rate_limit.html"
date_hierarchy = "tstamp"
fieldsets = (
(None, {
"fields": ("state", "task_id", "name", "args", "kwargs",
"eta", "runtime", "worker", "tstamp"),
"classes": ("extrapretty", ),
}),
("Details", {
"classes": ("collapse", "extrapretty"),
"fields": ("result", "traceback", "expires"),
}),
)
list_display = (fixedwidth("task_id", name=_("UUID"), pt=8),
colored_state,
name,
fixedwidth("args", pretty=True),
fixedwidth("kwargs", pretty=True),
eta,
tstamp,
"worker")
readonly_fields = ("state", "task_id", "name", "args", "kwargs",
"eta", "runtime", "worker", "result", "traceback",
"expires", "tstamp")
list_filter = ("state", "name", "tstamp", "eta", "worker")
search_fields = ("name", "task_id", "args", "kwargs", "worker__hostname")
actions = ["revoke_tasks",
"terminate_tasks",
"kill_tasks",
"rate_limit_tasks"]
@action(_("Revoke selected tasks"))
def revoke_tasks(self, request, queryset):
with current_app.default_connection() as connection:
for state in queryset:
revoke(state.task_id, connection=connection)
@action(_("Terminate selected tasks"))
def terminate_tasks(self, request, queryset):
with current_app.default_connection() as connection:
for state in queryset:
revoke(state.task_id, connection=connection, terminate=True)
@action(_("Kill selected tasks"))
def kill_tasks(self, request, queryset):
with current_app.default_connection() as connection:
for state in queryset:
revoke(state.task_id, connection=connection,
terminate=True, signal="KILL")
@action(_("Rate limit selected tasks"))
def rate_limit_tasks(self, request, queryset):
tasks = set([task.name for task in queryset])
opts = self.model._meta
app_label = opts.app_label
if request.POST.get("post"):
rate = request.POST["rate_limit"]
with current_app.default_connection() as connection:
for task_name in tasks:
rate_limit(task_name, rate, connection=connection)
return None
context = {
"title": _("Rate limit selection"),
"queryset": queryset,
"object_name": force_unicode(opts.verbose_name),
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
"opts": opts,
"app_label": app_label,
}
return render_to_response(self.rate_limit_confirmation_template,
context, context_instance=RequestContext(request))
def get_actions(self, request):
actions = super(TaskMonitor, self).get_actions(request)
actions.pop("delete_selected", None)
return actions
def queryset(self, request):
qs = super(TaskMonitor, self).queryset(request)
return qs.select_related('worker')
class WorkerMonitor(ModelMonitor):
    """Admin monitor for worker nodes, with remote-control actions."""
    can_add = True
    detail_title = _("Node detail")
    list_page_title = _("Worker Nodes")
    list_display = ("hostname", node_state)
    readonly_fields = ("last_heartbeat", )
    actions = ["shutdown_nodes",
               "enable_events",
               "disable_events"]

    @action(_("Shutdown selected worker nodes"))
    def shutdown_nodes(self, request, queryset):
        # Broadcast the shutdown control command to the selected hosts.
        broadcast("shutdown", destination=[n.hostname for n in queryset])

    @action(_("Enable event mode for selected nodes."))
    def enable_events(self, request, queryset):
        broadcast("enable_events",
                  destination=[n.hostname for n in queryset])

    @action(_("Disable event mode for selected nodes."))
    def disable_events(self, request, queryset):
        broadcast("disable_events",
                  destination=[n.hostname for n in queryset])

    def get_actions(self, request):
        # Remove Django's bulk-delete action; nodes are managed remotely.
        actions = super(WorkerMonitor, self).get_actions(request)
        actions.pop("delete_selected", None)
        return actions
admin.site.register(TaskState, TaskMonitor)
admin.site.register(WorkerState, WorkerMonitor)
# ### Periodic Tasks
class LaxChoiceField(forms.ChoiceField):
    """ChoiceField that skips validation against its ``choices``.

    NOTE(review): accepts *any* submitted value — presumably so that task
    names registered after the form was rendered still validate; confirm.
    """
    def valid_value(self, value):
        return True
def periodic_task_form():
    """Build a ``PeriodicTask`` ModelForm class.

    The registered-task choices are computed at call time, so the form
    reflects the tasks currently known to the Celery app.
    """
    current_app.loader.import_default_modules()
    # All registered task names except Celery's own internal tasks.
    tasks = list(sorted(name for name in current_app.tasks
                        if not name.startswith('celery.')))
    # Leading blank choice + one entry per registered task.
    choices = (("", ""), ) + tuple(zip(tasks, tasks))

    class PeriodicTaskForm(forms.ModelForm):
        regtask = LaxChoiceField(label=_(u"Task (registered)"),
                                 choices=choices, required=False)
        task = forms.CharField(label=_("Task (custom)"), required=False,
                               max_length=200)

        class Meta:
            model = PeriodicTask

        def clean(self):
            data = super(PeriodicTaskForm, self).clean()
            regtask = data.get("regtask")
            if regtask:
                # A registered-task selection wins over the free-text field.
                data["task"] = regtask
            if not data["task"]:
                exc = forms.ValidationError(_(u"Need name of task"))
                self._errors["task"] = self.error_class(exc.messages)
                raise exc
            return data

    return PeriodicTaskForm
class PeriodicTaskAdmin(admin.ModelAdmin):
model = PeriodicTask
form = periodic_task_form()
list_display = ('__unicode__', 'enabled')
fieldsets = (
(None, {
"fields": ("name", "regtask", "task", "enabled"),
"classes": ("extrapretty", "wide"),
}),
("Schedule", {
"fields": ("interval", "crontab"),
"classes": ("extrapretty", "wide", ),
}),
("Arguments", {
"fields": ("args", "kwargs"),
"classes": ("extrapretty", "wide", "collapse"),
}),
("Execution Options", {
"fields": ("expires", "queue", "exchange", "routing_key"),
"classes": ("extrapretty", "wide", "collapse"),
}),
)
def __init__(self, *args, **kwargs):
super(PeriodicTaskAdmin, self).__init__(*args, **kwargs)
self.form = periodic_task_form()
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
scheduler = getattr(settings, "CELERYBEAT_SCHEDULER", None)
if scheduler != 'djcelery.schedulers.DatabaseScheduler':
extra_context['wrong_scheduler'] = True
return super(PeriodicTaskAdmin, self).changelist_view(request,
extra_context)
def queryset(self, request):
qs = super(PeriodicTaskAdmin, self).queryset(request)
return qs.select_related('interval', 'crontab')
admin.site.register(IntervalSchedule)
admin.site.register(CrontabSchedule)
admin.site.register(PeriodicTask, PeriodicTaskAdmin)
| bsd-3-clause | 69699ee026e547ef3d0d6bfcb14e01d5 | 34.76412 | 78 | 0.579006 | 4.226541 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/signals.py | 1 | 1818 | # -*- coding: utf-8 -*-
"""
celery.signals
~~~~~~~~~~~~~~
This module defines the signals (Observer pattern) sent by
both workers and clients.
Functions can be connected to these signals, and connected
functions are called whenever a signal is called.
See :ref:`signals` for more information.
"""
from __future__ import absolute_import
from .utils.dispatch import Signal
# - Task lifecycle signals (sent by clients and workers around task
#   dispatch and execution).
task_sent = Signal(providing_args=[
    'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset'])
task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs'])
task_postrun = Signal(providing_args=[
    'task_id', 'task', 'args', 'kwargs', 'retval'])
task_success = Signal(providing_args=['result'])
task_failure = Signal(providing_args=[
    'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo'])
task_revoked = Signal(providing_args=['terminated', 'signum', 'expired'])

# - Worker lifecycle signals.
celeryd_init = Signal(providing_args=['instance', 'conf'])
celeryd_after_setup = Signal(providing_args=['instance', 'conf'])
worker_init = Signal(providing_args=[])
worker_process_init = Signal(providing_args=[])
worker_ready = Signal(providing_args=[])
worker_shutdown = Signal(providing_args=[])

# - Logging setup signals.
setup_logging = Signal(providing_args=[
    'loglevel', 'logfile', 'format', 'colorize'])
after_setup_logger = Signal(providing_args=[
    'logger', 'loglevel', 'logfile', 'format', 'colorize'])
after_setup_task_logger = Signal(providing_args=[
    'logger', 'loglevel', 'logfile', 'format', 'colorize'])

# - celerybeat (periodic-task scheduler) signals.
beat_init = Signal(providing_args=[])
beat_embedded_init = Signal(providing_args=[])

# - Eventlet pool signals.
eventlet_pool_started = Signal(providing_args=[])
eventlet_pool_preshutdown = Signal(providing_args=[])
eventlet_pool_postshutdown = Signal(providing_args=[])
eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs'])
mozilla/firefox-flicks | vendor-local/lib/python/kombu/transport/mongodb.py | 2 | 7541 | """
kombu.transport.mongodb
=======================
MongoDB transport.
:copyright: (c) 2010 - 2012 by Flavio Percoco Premoli.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from Queue import Empty
import pymongo
from pymongo import errors
from anyjson import loads, dumps
from pymongo.connection import Connection
from kombu.exceptions import StdConnectionError, StdChannelError
from . import virtual
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 27017
__author__ = """\
Flavio [FlaPer87] Percoco Premoli <flaper87@flaper87.org>;\
Scott Lyons <scottalyons@gmail.com>;\
"""
class Channel(virtual.Channel):
_client = None
supports_fanout = True
_fanout_queues = {}
    def __init__(self, *vargs, **kwargs):
        super_ = super(Channel, self)
        super_.__init__(*vargs, **kwargs)
        # Per-queue cursor and read-count bookkeeping; used only for
        # fanout (broadcast) queues — see _get/_size/_purge.
        self._queue_cursors = {}
        self._queue_readcounts = {}
    def _new_queue(self, queue, **kwargs):
        # Nothing to do: queues are implicit — each message document simply
        # carries a 'queue' field, so no per-queue structure is created.
        pass
def _get(self, queue):
try:
if queue in self._fanout_queues:
msg = self._queue_cursors[queue].next()
self._queue_readcounts[queue] += 1
return loads(msg['payload'])
else:
msg = self.client.command('findandmodify', 'messages',
query={'queue': queue},
sort={'_id': pymongo.ASCENDING}, remove=True)
except errors.OperationFailure, exc:
if 'No matching object found' in exc.args[0]:
raise Empty()
raise
except StopIteration:
raise Empty()
# as of mongo 2.0 empty results won't raise an error
if msg['value'] is None:
raise Empty()
return loads(msg['value']['payload'])
def _size(self, queue):
if queue in self._fanout_queues:
return (self._queue_cursors[queue].count() -
self._queue_readcounts[queue])
return self.client.messages.find({'queue': queue}).count()
def _put(self, queue, message, **kwargs):
self.client.messages.insert({'payload': dumps(message),
'queue': queue})
def _purge(self, queue):
size = self._size(queue)
if queue in self._fanout_queues:
cursor = self._queue_cursors[queue]
cursor.rewind()
self._queue_cursors[queue] = cursor.skip(cursor.count())
else:
self.client.messages.remove({'queue': queue})
return size
def close(self):
super(Channel, self).close()
if self._client:
self._client.connection.end_request()
def _open(self):
"""
See mongodb uri documentation:
http://www.mongodb.org/display/DOCS/Connections
"""
conninfo = self.connection.client
dbname = None
hostname = None
if not conninfo.hostname:
conninfo.hostname = DEFAULT_HOST
for part in conninfo.hostname.split('/'):
if not hostname:
hostname = 'mongodb://' + part
continue
dbname = part
if '?' in part:
# In case someone is passing options
# to the mongodb connection. Right now
# it is not permitted by kombu
dbname, options = part.split('?')
hostname += '/?' + options
hostname = "%s/%s" % (hostname, dbname in [None, "/"] and "admin" \
or dbname)
if not dbname or dbname == "/":
dbname = "kombu_default"
# At this point we expect the hostname to be something like
# (considering replica set form too):
#
# mongodb://[username:password@]host1[:port1][,host2[:port2],
# ...[,hostN[:portN]]][/[?options]]
mongoconn = Connection(host=hostname)
version = mongoconn.server_info()['version']
if tuple(map(int, version.split('.')[:2])) < (1, 3):
raise NotImplementedError(
'Kombu requires MongoDB version 1.3+, but connected to %s' % (
version, ))
database = getattr(mongoconn, dbname)
# This is done by the connection uri
# if conninfo.userid:
# database.authenticate(conninfo.userid, conninfo.password)
self.db = database
col = database.messages
col.ensure_index([('queue', 1), ('_id', 1)], background=True)
if 'messages.broadcast' not in database.collection_names():
capsize = conninfo.transport_options.get(
'capped_queue_size') or 100000
database.create_collection('messages.broadcast', size=capsize,
capped=True)
self.bcast = getattr(database, 'messages.broadcast')
self.bcast.ensure_index([('queue', 1)])
self.routing = getattr(database, 'messages.routing')
self.routing.ensure_index([('queue', 1), ('exchange', 1)])
return database
#TODO: Store a more complete exchange metatable in the routing collection
def get_table(self, exchange):
"""Get table of bindings for ``exchange``."""
localRoutes = frozenset(self.state.exchanges[exchange]['table'])
brokerRoutes = self.client.messages.routing.find({
'exchange': exchange})
return localRoutes | frozenset((r['routing_key'],
r['pattern'],
r['queue']) for r in brokerRoutes)
def _put_fanout(self, exchange, message, **kwargs):
"""Deliver fanout message."""
self.client.messages.broadcast.insert({'payload': dumps(message),
'queue': exchange})
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
cursor = self.bcast.find(query={'queue': exchange},
sort=[('$natural', 1)], tailable=True)
# Fast forward the cursor past old events
self._queue_cursors[queue] = cursor.skip(cursor.count())
self._queue_readcounts[queue] = cursor.count()
self._fanout_queues[queue] = exchange
meta = {'exchange': exchange,
'queue': queue,
'routing_key': routing_key,
'pattern': pattern}
self.client.messages.routing.update(meta, meta, upsert=True)
def queue_delete(self, queue, **kwargs):
self.routing.remove({'queue': queue})
super(Channel, self).queue_delete(queue, **kwargs)
if queue in self._fanout_queues:
self._queue_cursors[queue].close()
self._queue_cursors.pop(queue, None)
self._fanout_queues.pop(queue, None)
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
    """MongoDB transport.

    Ties the MongoDB :class:`Channel` into the virtual-transport
    machinery and declares which pymongo errors map to connection
    vs. channel failures.
    """
    Channel = Channel

    # Poll interval used by the base virtual transport when draining.
    polling_interval = 1
    default_port = DEFAULT_PORT
    connection_errors = (StdConnectionError, errors.ConnectionFailure)
    channel_errors = (StdChannelError,
                      errors.ConnectionFailure,
                      errors.OperationFailure)
    driver_type = 'mongodb'
    driver_name = 'pymongo'

    def driver_version(self):
        # Version of the underlying pymongo library.
        return pymongo.version
| bsd-3-clause | 831c35b8402e0b8a52caedc50ce32cfe | 33.122172 | 78 | 0.554701 | 4.306682 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/kombu/transport/beanstalk.py | 2 | 3772 | """
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2012 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import beanstalkc
import socket
from anyjson import loads, dumps
from Queue import Empty
from kombu.exceptions import StdConnectionError, StdChannelError
from . import virtual
DEFAULT_PORT = 11300
__author__ = 'David Ziegler <david.ziegler@gmail.com>'
class Channel(virtual.Channel):
    """Virtual-transport channel that maps queues onto beanstalkd tubes."""

    # Lazily created beanstalkc connection (see the ``client`` property).
    _client = None

    def _parse_job(self, job):
        # Decode a reserved beanstalk job into ``(message, tube_name)``.
        # Jobs whose body cannot be decoded are buried for later
        # inspection; successfully decoded jobs are deleted.  Raises
        # Queue.Empty when no job was reserved.
        item, dest = None, None
        if job:
            try:
                item = loads(job.body)
                dest = job.stats()['tube']
            except Exception:
                job.bury()
            else:
                job.delete()
        else:
            raise Empty()
        return item, dest

    def _put(self, queue, message, **kwargs):
        extra = {}
        priority = message['properties']['delivery_info']['priority']
        # Optional beanstalk time-to-run for the job.
        ttr = message['properties'].get('ttr')
        if ttr is not None:
            extra['ttr'] = ttr

        self.client.use(queue)
        self.client.put(dumps(message), priority=priority, **extra)

    def _get(self, queue):
        # Watch only ``queue``: ignore every other currently watched
        # tube, then reserve the next job.
        if queue not in self.client.watching():
            self.client.watch(queue)

        [self.client.ignore(active)
            for active in self.client.watching()
                if active != queue]

        job = self.client.reserve(timeout=1)
        item, dest = self._parse_job(job)
        return item

    def _get_many(self, queues, timeout=1):
        # timeout of None will cause beanstalk to timeout waiting
        # for a new request
        if timeout is None:
            timeout = 1

        # Adjust the watch list to exactly the requested queues.
        watching = self.client.watching()

        [self.client.watch(active)
            for active in queues
                if active not in watching]

        [self.client.ignore(active)
            for active in watching
                if active not in queues]

        job = self.client.reserve(timeout=timeout)
        return self._parse_job(job)

    def _purge(self, queue):
        # Drain and delete every ready job on ``queue``, returning the
        # number of jobs removed.
        if queue not in self.client.watching():
            self.client.watch(queue)

        [self.client.ignore(active)
            for active in self.client.watching()
                if active != queue]
        count = 0
        while 1:
            job = self.client.reserve(timeout=1)
            if job:
                job.delete()
                count += 1
            else:
                break
        return count

    def _size(self, queue):
        # NOTE(review): queue depth is not implemented for beanstalk;
        # this always reports zero.
        return 0

    def _open(self):
        # Create and connect a beanstalkc connection from the
        # connection parameters.
        conninfo = self.connection.client
        host = conninfo.hostname or 'localhost'
        port = conninfo.port or DEFAULT_PORT
        conn = beanstalkc.Connection(host=host, port=port)
        conn.connect()
        return conn

    def close(self):
        # NOTE(review): when a client exists this returns early and
        # super().close() is never called -- confirm the base-class
        # cleanup is intentionally skipped here.
        if self._client is not None:
            return self._client.close()
        super(Channel, self).close()

    @property
    def client(self):
        # Connect lazily on first use.
        if self._client is None:
            self._client = self._open()
        return self._client
class Transport(virtual.Transport):
    """Beanstalk transport.

    Ties the beanstalk :class:`Channel` into the virtual-transport
    machinery and declares which beanstalkc/socket errors map to
    connection vs. channel failures.
    """
    Channel = Channel

    # Poll interval used by the base virtual transport when draining.
    polling_interval = 1
    default_port = DEFAULT_PORT
    connection_errors = (StdConnectionError,
                         socket.error,
                         beanstalkc.SocketError,
                         IOError)
    channel_errors = (StdChannelError,
                      socket.error,
                      IOError,
                      beanstalkc.SocketError,
                      beanstalkc.BeanstalkcException)
    driver_type = 'beanstalk'
    driver_name = 'beanstalkc'

    def driver_version(self):
        # Version of the underlying beanstalkc library.
        return beanstalkc.__version__
| bsd-3-clause | 180954b286483fa9dafe9c8edd1f3152 | 25.194444 | 69 | 0.552492 | 4.163355 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/utils/mail.py | 1 | 5402 | # -*- coding: utf-8 -*-
"""
celery.utils.mail
~~~~~~~~~~~~~~~~~
How task error emails are formatted and sent.
"""
from __future__ import absolute_import
import sys
import smtplib
import socket
import traceback
import warnings
from email.mime.text import MIMEText
from .functional import maybe_list
from .imports import symbol_by_name
supports_timeout = sys.version_info >= (2, 6)
# Cache for the (potentially slow) fqdn lookup below.
_local_hostname = None


def get_local_hostname():
    """Return the fully qualified domain name of this host.

    ``socket.getfqdn`` may involve a DNS lookup, so the result is
    computed once and cached in a module-level variable.
    """
    global _local_hostname
    cached = _local_hostname
    if cached is None:
        cached = _local_hostname = socket.getfqdn()
    return cached
class SendmailWarning(UserWarning):
    """Problem happened while sending the email message.

    Emitted by :meth:`Mailer.send` instead of raising, when
    ``fail_silently`` is enabled.
    """
class Message(object):
    """A plain-text email message.

    :keyword to: recipient address, or list of addresses.
    :keyword sender: the ``From`` address.
    :keyword subject: the subject line.
    :keyword body: plain-text message body.
    :keyword charset: character set of the body (default ``us-ascii``).
    """

    def __init__(self, to=None, sender=None, subject=None,
            body=None, charset='us-ascii'):
        # ``to`` is normalized to a list so __str__ can join it.
        self.charset = charset
        self.body = body
        self.subject = subject
        self.sender = sender
        self.to = maybe_list(to)

    def __repr__(self):
        return '<Email: To:%r Subject:%r>' % (self.to, self.subject)

    def __str__(self):
        # Render as an RFC 2822 message string.
        msg = MIMEText(self.body, 'plain', self.charset)
        for header, value in (('Subject', self.subject),
                              ('From', self.sender),
                              ('To', ', '.join(self.to))):
            msg[header] = value
        return msg.as_string()
class Mailer(object):
    """Simple SMTP mailer used to deliver :class:`Message` objects."""

    # True when smtplib accepts a ``timeout`` constructor argument
    # (Python 2.6+); otherwise the global socket timeout is patched
    # around the send instead (see ``send``).
    supports_timeout = supports_timeout

    def __init__(self, host='localhost', port=0, user=None, password=None,
            timeout=2, use_ssl=False, use_tls=False):
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.timeout = timeout
        self.use_ssl = use_ssl
        self.use_tls = use_tls

    def send(self, message, fail_silently=False):
        """Send `message`.

        :keyword fail_silently: if enabled, delivery errors are reported
            as a :class:`SendmailWarning` instead of being raised.
        """
        try:
            if self.supports_timeout:
                self._send(message, timeout=self.timeout)
            else:
                # No timeout support: temporarily lower the global
                # default socket timeout for the duration of the send.
                import socket
                old_timeout = socket.getdefaulttimeout()
                socket.setdefaulttimeout(self.timeout)
                try:
                    self._send(message)
                finally:
                    socket.setdefaulttimeout(old_timeout)
        except Exception, exc:
            if not fail_silently:
                raise
            warnings.warn(SendmailWarning(
                'Mail could not be sent: %r %r\n%r' % (
                    exc, {'To': ', '.join(message.to),
                          'Subject': message.subject},
                    traceback.format_stack())))

    def _send(self, message, **kwargs):
        # Choose plain or SSL client depending on configuration.
        Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
        client = Client(self.host, self.port,
                        local_hostname=get_local_hostname(), **kwargs)

        if self.use_tls:
            # EHLO is required both before and after STARTTLS.
            client.ehlo()
            client.starttls()
            client.ehlo()

        if self.user and self.password:
            client.login(self.user, self.password)

        client.sendmail(message.sender, message.to, str(message))
        try:
            client.quit()
        except socket.sslerror:
            # Some SSL servers fail on QUIT; just drop the connection.
            client.close()
class ErrorMail(object):
    """Defines how and when task error e-mails should be sent.

    :param task: The task instance that raised the error.

    :keyword subject: custom subject format string (defaults to
        :attr:`subject`).
    :keyword body: custom body format string (defaults to :attr:`body`).

    :attr:`subject` and :attr:`body` are format strings which
    are passed a context containing the following keys:

    * name

        Name of the task.

    * id

        UUID of the task.

    * exc

        String representation of the exception.

    * args

        Positional arguments.

    * kwargs

        Keyword arguments.

    * traceback

        String representation of the traceback.

    * hostname

        Worker hostname.

    """

    # pep8.py borks on a inline signature separator and
    # says "trailing whitespace" ;)
    EMAIL_SIGNATURE_SEP = '-- '

    #: Format string used to generate error email subjects.
    subject = """\
[celery@%(hostname)s] Error: Task %(name)s (%(id)s): %(exc)s
"""

    #: Format string used to generate error email content.
    body = """
Task %%(name)s with id %%(id)s raised exception:\n%%(exc)r
Task was called with args: %%(args)s kwargs: %%(kwargs)s.
The contents of the full traceback was:
%%(traceback)s
%(EMAIL_SIGNATURE_SEP)s
Just to let you know,
py-celery at %%(hostname)s.
""" % {'EMAIL_SIGNATURE_SEP': EMAIL_SIGNATURE_SEP}

    error_whitelist = None

    def __init__(self, task, **kwargs):
        self.task = task
        # Per-instance overrides of the class-level format strings.
        self.email_subject = kwargs.get('subject', self.subject)
        self.email_body = kwargs.get('body', self.body)
        self.error_whitelist = getattr(task, 'error_whitelist', None) or ()

    def should_send(self, context, exc):
        """Returns true or false depending on if a task error mail
        should be sent for this type of error."""
        allow_classes = tuple(map(symbol_by_name, self.error_whitelist))
        return not self.error_whitelist or isinstance(exc, allow_classes)

    def format_subject(self, context):
        # BUG FIX: previously read the class attribute ``self.subject``,
        # silently ignoring a ``subject=`` override passed to __init__.
        return self.email_subject.strip() % context

    def format_body(self, context):
        # BUG FIX: previously read the class attribute ``self.body``,
        # silently ignoring a ``body=`` override passed to __init__.
        return self.email_body.strip() % context

    def send(self, context, exc, fail_silently=True):
        """Send the error email for `exc` if :meth:`should_send` agrees."""
        if self.should_send(context, exc):
            self.task.app.mail_admins(self.format_subject(context),
                                      self.format_body(context),
                                      fail_silently=fail_silently)
| bsd-3-clause | 64a7e4166c80bbfdd7af3157984165ef | 26.01 | 75 | 0.585894 | 4.076981 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/kombu/entity.py | 2 | 25059 | """
kombu.entity
================
Exchange and Queue declarations.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
# AMQP delivery modes: transient messages are held in memory only,
# persistent messages are also stored on disk (see the
# ``delivery_mode`` attribute documentation on :class:`Exchange`).
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
# Accepted string aliases for the numeric delivery modes.
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
                  'persistent': PERSISTENT_DELIVERY_MODE}

__all__ = ['Exchange', 'Queue']
def pretty_bindings(bindings):
    """Render an iterable of bindings as a bracketed, comma-separated
    list, e.g. ``[exchange->key, ...]``."""
    rendered = [str(item) for item in bindings]
    return '[%s]' % ', '.join(rendered)
class Exchange(MaybeChannelBound):
    """An Exchange declaration.

    :keyword name: See :attr:`name`.
    :keyword type: See :attr:`type`.
    :keyword channel: See :attr:`channel`.
    :keyword durable: See :attr:`durable`.
    :keyword auto_delete: See :attr:`auto_delete`.
    :keyword delivery_mode: See :attr:`delivery_mode`.
    :keyword arguments: See :attr:`arguments`.

    .. attribute:: name

        Name of the exchange. Default is no name (the default exchange).

    .. attribute:: type

        AMQP defines four default exchange types (routing algorithms) that
        covers most of the common messaging use cases. An AMQP broker can
        also define additional exchange types, so see your broker
        manual for more information about available exchange types.

        * `direct` (*default*)

            Direct match between the routing key in the message, and the
            routing criteria used when a queue is bound to this exchange.

        * `topic`

            Wildcard match between the routing key and the routing pattern
            specified in the exchange/queue binding. The routing key is
            treated as zero or more words delimited by `"."` and
            supports special wildcard characters. `"*"` matches a
            single word and `"#"` matches zero or more words.

        * `fanout`

            Queues are bound to this exchange with no arguments. Hence any
            message sent to this exchange will be forwarded to all queues
            bound to this exchange.

        * `headers`

            Queues are bound to this exchange with a table of arguments
            containing headers and values (optional). A special argument
            named "x-match" determines the matching algorithm, where
            `"all"` implies an `AND` (all pairs must match) and
            `"any"` implies `OR` (at least one pair must match).

            :attr:`arguments` is used to specify the arguments.

        This description of AMQP exchange types was shamelessly stolen
        from the blog post `AMQP in 10 minutes: Part 4`_ by
        Rajith Attapattu. This article is recommended reading.

        .. _`AMQP in 10 minutes: Part 4`:
            http://bit.ly/amqp-exchange-types

    .. attribute:: channel

        The channel the exchange is bound to (if bound).

    .. attribute:: durable

        Durable exchanges remain active when a server restarts. Non-durable
        exchanges (transient exchanges) are purged when a server restarts.
        Default is :const:`True`.

    .. attribute:: auto_delete

        If set, the exchange is deleted when all queues have finished
        using it. Default is :const:`False`.

    .. attribute:: delivery_mode

        The default delivery mode used for messages. The value is an
        integer, or alias string.

        * 1 or `"transient"`

            The message is transient. Which means it is stored in
            memory only, and is lost if the server dies or restarts.

        * 2 or "persistent" (*default*)

            The message is persistent. Which means the message is
            stored both in-memory, and on disk, and therefore
            preserved if the server dies or restarts.

        The default value is 2 (persistent).

    .. attribute:: arguments

        Additional arguments to specify when the exchange is declared.

    """
    TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
    PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE

    name = ''
    type = 'direct'
    durable = True
    auto_delete = False
    delivery_mode = PERSISTENT_DELIVERY_MODE

    # (attribute, coercion) pairs used by the base class for
    # serialization/deserialization of the declaration.
    attrs = (('name', None),
             ('type', None),
             ('arguments', None),
             ('durable', bool),
             ('auto_delete', bool),
             ('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m))

    def __init__(self, name='', type='', channel=None, **kwargs):
        super(Exchange, self).__init__(**kwargs)
        self.name = name or self.name
        self.type = type or self.type
        self.maybe_bind(channel)

    def __hash__(self):
        # Hash on the name only; __eq__ compares more fields, which is
        # fine (unequal objects may share a hash bucket).
        return hash('E|%s' % (self.name, ))

    def declare(self, nowait=False, passive=False):
        """Declare the exchange.

        Creates the exchange on the broker.

        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.

        """
        return self.channel.exchange_declare(exchange=self.name,
                                             type=self.type,
                                             durable=self.durable,
                                             auto_delete=self.auto_delete,
                                             arguments=self.arguments,
                                             nowait=nowait,
                                             passive=passive)

    def bind_to(self, exchange='', routing_key='',
            arguments=None, nowait=False, **kwargs):
        """Binds the exchange to another exchange.

        :keyword nowait: If set the server will not respond, and the call
            will not block waiting for a response. Default is :const:`False`.

        """
        # Accept both Exchange instances and plain exchange names.
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return self.channel.exchange_bind(destination=self.name,
                                          source=exchange,
                                          routing_key=routing_key,
                                          nowait=nowait,
                                          arguments=arguments)

    def unbind_from(self, source='', routing_key='', nowait=False,
            arguments=None):
        """Delete previously created exchange binding from the server."""
        if isinstance(source, Exchange):
            source = source.name
        return self.channel.exchange_unbind(destination=self.name,
                                            source=source,
                                            routing_key=routing_key,
                                            nowait=nowait,
                                            arguments=arguments)

    def Message(self, body, delivery_mode=None, priority=None,
            content_type=None, content_encoding=None, properties=None,
            headers=None):
        """Create message instance to be sent with :meth:`publish`.

        :param body: Message body.

        :keyword delivery_mode: Set custom delivery mode. Defaults
            to :attr:`delivery_mode`.

        :keyword priority: Message priority, 0 to 9. (currently not
            supported by RabbitMQ).

        :keyword content_type: The messages content_type. If content_type
            is set, no serialization occurs as it is assumed this is either
            a binary object, or you've done your own serialization.
            Leave blank if using built-in serialization as our library
            properly sets content_type.

        :keyword content_encoding: The character set in which this object
            is encoded. Use "binary" if sending in raw binary objects.
            Leave blank if using built-in serialization as our library
            properly sets content_encoding.

        :keyword properties: Message properties.

        :keyword headers: Message headers.

        """
        properties = {} if properties is None else properties
        dm = delivery_mode or self.delivery_mode
        # Normalize string aliases ('transient'/'persistent') to their
        # numeric values; 1 and 2 pass through unchanged.
        properties['delivery_mode'] = \
            DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm
        return self.channel.prepare_message(body,
                                            properties=properties,
                                            priority=priority,
                                            content_type=content_type,
                                            content_encoding=content_encoding,
                                            headers=headers)

    def publish(self, message, routing_key=None, mandatory=False,
            immediate=False, exchange=None):
        """Publish message.

        :param message: :meth:`Message` instance to publish.
        :param routing_key: Routing key.
        :param mandatory: Currently not supported.
        :param immediate: Currently not supported.

        """
        exchange = exchange or self.name
        return self.channel.basic_publish(message,
                                          exchange=exchange,
                                          routing_key=routing_key,
                                          mandatory=mandatory,
                                          immediate=immediate)

    def delete(self, if_unused=False, nowait=False):
        """Delete the exchange declaration on server.

        :keyword if_unused: Delete only if the exchange has no bindings.
            Default is :const:`False`.

        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.

        """
        return self.channel.exchange_delete(exchange=self.name,
                                            if_unused=if_unused,
                                            nowait=nowait)

    def __eq__(self, other):
        if isinstance(other, Exchange):
            return (self.name == other.name and
                    self.type == other.type and
                    self.arguments == other.arguments and
                    self.durable == other.durable and
                    self.auto_delete == other.auto_delete and
                    self.delivery_mode == other.delivery_mode)
        return False

    def __ne__(self, other):
        # BUG FIX: under Python 2 defining __eq__ alone leaves ``!=``
        # comparing by object identity; make it the negation of __eq__.
        return not self.__eq__(other)

    def __repr__(self):
        return super(Exchange, self).__repr__(str(self))

    def __str__(self):
        return 'Exchange %s(%s)' % (self.name or repr(''), self.type)

    @property
    def can_cache_declaration(self):
        # Only durable exchanges survive a broker restart, so only
        # their declaration may safely be cached.
        return self.durable
class binding(object):
    """Represents a queue or exchange binding.

    :keyword exchange: Exchange to bind to.
    :keyword routing_key: Routing key used as binding key.
    :keyword arguments: Arguments for bind operation.
    :keyword unbind_arguments: Arguments for unbind operation.

    """

    def __init__(self, exchange=None, routing_key='', arguments=None,
            unbind_arguments=None):
        # Plain attribute storage; the binding is only applied later
        # via :meth:`bind`/:meth:`unbind`.
        self.unbind_arguments = unbind_arguments
        self.arguments = arguments
        self.routing_key = routing_key
        self.exchange = exchange

    def declare(self, channel, nowait=False):
        """Declare destination exchange."""
        exchange = self.exchange
        if exchange and exchange.name:
            bound = exchange(channel)
            bound.declare(nowait=nowait)

    def bind(self, entity, nowait=False):
        """Bind entity to this binding."""
        entity.bind_to(exchange=self.exchange,
                       routing_key=self.routing_key,
                       arguments=self.arguments,
                       nowait=nowait)

    def unbind(self, entity, nowait=False):
        """Unbind entity from this binding."""
        entity.unbind_from(self.exchange,
                           routing_key=self.routing_key,
                           arguments=self.unbind_arguments,
                           nowait=nowait)

    def __repr__(self):
        return '<binding: %s>' % (self, )

    def __str__(self):
        return '%s->%s' % (self.exchange.name, self.routing_key)
class Queue(MaybeChannelBound):
    """A Queue declaration.

    :keyword name: See :attr:`name`.
    :keyword exchange: See :attr:`exchange`.
    :keyword routing_key: See :attr:`routing_key`.
    :keyword channel: See :attr:`channel`.
    :keyword durable: See :attr:`durable`.
    :keyword exclusive: See :attr:`exclusive`.
    :keyword auto_delete: See :attr:`auto_delete`.
    :keyword queue_arguments: See :attr:`queue_arguments`.
    :keyword binding_arguments: See :attr:`binding_arguments`.

    .. attribute:: name

        Name of the queue. Default is no name (default queue destination).

    .. attribute:: exchange

        The :class:`Exchange` the queue binds to.

    .. attribute:: routing_key

        The routing key (if any), also called *binding key*.

        The interpretation of the routing key depends on
        the :attr:`Exchange.type`.

        * direct exchange

            Matches if the routing key property of the message and
            the :attr:`routing_key` attribute are identical.

        * fanout exchange

            Always matches, even if the binding does not have a key.

        * topic exchange

            Matches the routing key property of the message by a primitive
            pattern matching scheme. The message routing key then consists
            of words separated by dots (`"."`, like domain names), and
            two special characters are available; star (`"*"`) and hash
            (`"#"`). The star matches any word, and the hash matches
            zero or more words. For example `"*.stock.#"` matches the
            routing keys `"usd.stock"` and `"eur.stock.db"` but not
            `"stock.nasdaq"`.

    .. attribute:: channel

        The channel the Queue is bound to (if bound).

    .. attribute:: durable

        Durable queues remain active when a server restarts.
        Non-durable queues (transient queues) are purged if/when
        a server restarts.
        Note that durable queues do not necessarily hold persistent
        messages, although it does not make sense to send
        persistent messages to a transient queue.

        Default is :const:`True`.

    .. attribute:: exclusive

        Exclusive queues may only be consumed from by the
        current connection. Setting the 'exclusive' flag
        always implies 'auto-delete'.

        Default is :const:`False`.

    .. attribute:: auto_delete

        If set, the queue is deleted when all consumers have
        finished using it. Last consumer can be cancelled
        either explicitly or because its channel is closed. If
        there was no consumer ever on the queue, it won't be
        deleted.

    .. attribute:: queue_arguments

        Additional arguments used when declaring the queue.

    .. attribute:: binding_arguments

        Additional arguments used when binding the queue.

    .. attribute:: alias

        Unused in Kombu, but applications can take advantage of this.
        For example to give alternate names to queues with automatically
        generated queue names.

    """
    name = ''
    exchange = Exchange('')
    routing_key = ''

    durable = True
    exclusive = False
    auto_delete = False
    no_ack = False

    # (attribute, coercion) pairs used by the base class for
    # serialization/deserialization of the declaration.
    attrs = (('name', None),
             ('exchange', None),
             ('routing_key', None),
             ('queue_arguments', None),
             ('binding_arguments', None),
             ('durable', bool),
             ('exclusive', bool),
             ('auto_delete', bool),
             ('no_ack', None),
             ('alias', None),
             ('bindings', list))

    def __init__(self, name='', exchange=None, routing_key='', channel=None,
            bindings=None, **kwargs):
        super(Queue, self).__init__(**kwargs)
        self.name = name or self.name
        self.exchange = exchange or self.exchange
        self.routing_key = routing_key or self.routing_key
        self.bindings = set(bindings or [])

        # allows Queue('name', [binding(...), binding(...), ...])
        if isinstance(exchange, (list, tuple, set)):
            self.bindings |= set(exchange)
        if self.bindings:
            self.exchange = None

        # exclusive implies auto-delete.
        if self.exclusive:
            self.auto_delete = True
        self.maybe_bind(channel)

    def __hash__(self):
        # Hash on the name only; __eq__ compares more fields, which is
        # fine (unequal objects may share a hash bucket).
        return hash('Q|%s' % (self.name, ))

    def when_bound(self):
        # Bind our exchange to the same channel as soon as we are bound.
        if self.exchange:
            self.exchange = self.exchange(self.channel)

    def declare(self, nowait=False):
        """Declares the queue, the exchange and binds the queue to
        the exchange."""
        # - declare main binding.
        if self.exchange:
            self.exchange.declare(nowait)
        self.queue_declare(nowait, passive=False)

        if self.exchange is not None:
            self.queue_bind(nowait)

        # - declare extra/multi-bindings.
        for B in self.bindings:
            B.declare(self.channel)
            B.bind(self, nowait=nowait)
        return self.name

    def queue_declare(self, nowait=False, passive=False):
        """Declare queue on the server.

        :keyword nowait: Do not wait for a reply.
        :keyword passive: If set, the server will not create the queue.
            The client can use this to check whether a queue exists
            without modifying the server state.

        """
        ret = self.channel.queue_declare(queue=self.name,
                                         passive=passive,
                                         durable=self.durable,
                                         exclusive=self.exclusive,
                                         auto_delete=self.auto_delete,
                                         arguments=self.queue_arguments,
                                         nowait=nowait)
        # Adopt the server-generated name for anonymous queues.
        if not self.name:
            self.name = ret[0]
        return ret

    def queue_bind(self, nowait=False):
        """Create the queue binding on the server."""
        return self.bind_to(self.exchange, self.routing_key,
                            self.binding_arguments, nowait=nowait)

    def bind_to(self, exchange='', routing_key='', arguments=None,
            nowait=False):
        # Accept both Exchange instances and plain exchange names.
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return self.channel.queue_bind(queue=self.name,
                                       exchange=exchange,
                                       routing_key=routing_key,
                                       arguments=arguments,
                                       nowait=nowait)

    def get(self, no_ack=None):
        """Poll the server for a new message.

        Returns the message instance if a message was available,
        or :const:`None` otherwise.

        :keyword no_ack: If set messages received does not have to
            be acknowledged.

        This method provides direct access to the messages in a
        queue using a synchronous dialogue, designed for
        specific types of applications where synchronous functionality
        is more important than performance.

        """
        no_ack = self.no_ack if no_ack is None else no_ack
        message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
        if message is not None:
            # Let transports that support it convert the raw message to
            # their Python message type.
            m2p = getattr(self.channel, 'message_to_python', None)
            if m2p:
                message = m2p(message)
            return message

    def purge(self, nowait=False):
        """Remove all ready messages from the queue."""
        return self.channel.queue_purge(queue=self.name,
                                        nowait=nowait) or 0

    def consume(self, consumer_tag='', callback=None, no_ack=None,
            nowait=False):
        """Start a queue consumer.

        Consumers last as long as the channel they were created on, or
        until the client cancels them.

        :keyword consumer_tag: Unique identifier for the consumer. The
          consumer tag is local to a connection, so two clients
          can use the same consumer tags. If this field is empty
          the server will generate a unique tag.

        :keyword no_ack: If set messages received does not have to
            be acknowledged.

        :keyword nowait: Do not wait for a reply.

        :keyword callback: callback called for each delivered message

        """
        if no_ack is None:
            no_ack = self.no_ack
        return self.channel.basic_consume(queue=self.name,
                                          no_ack=no_ack,
                                          consumer_tag=consumer_tag or '',
                                          callback=callback,
                                          nowait=nowait)

    def cancel(self, consumer_tag):
        """Cancel a consumer by consumer tag."""
        return self.channel.basic_cancel(consumer_tag)

    def delete(self, if_unused=False, if_empty=False, nowait=False):
        """Delete the queue.

        :keyword if_unused: If set, the server will only delete the queue
            if it has no consumers. A channel error will be raised
            if the queue has consumers.

        :keyword if_empty: If set, the server will only delete the queue
            if it is empty. If it is not empty a channel error will be raised.

        :keyword nowait: Do not wait for a reply.

        """
        return self.channel.queue_delete(queue=self.name,
                                         if_unused=if_unused,
                                         if_empty=if_empty,
                                         nowait=nowait)

    def queue_unbind(self, arguments=None, nowait=False):
        return self.unbind_from(self.exchange, self.routing_key,
                                arguments, nowait)

    def unbind_from(self, exchange='', routing_key='', arguments=None,
            nowait=False):
        """Unbind queue by deleting the binding from the server."""
        # BUG FIX: previously this accessed ``exchange.name``
        # unconditionally, which crashed for the documented string (or
        # default '') argument; accept both, mirroring :meth:`bind_to`
        # and :meth:`Exchange.unbind_from`.
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return self.channel.queue_unbind(queue=self.name,
                                         exchange=exchange,
                                         routing_key=routing_key,
                                         arguments=arguments,
                                         nowait=nowait)

    def __eq__(self, other):
        if isinstance(other, Queue):
            return (self.name == other.name and
                    self.exchange == other.exchange and
                    self.routing_key == other.routing_key and
                    self.queue_arguments == other.queue_arguments and
                    self.binding_arguments == other.binding_arguments and
                    self.durable == other.durable and
                    self.exclusive == other.exclusive and
                    self.auto_delete == other.auto_delete)
        return False

    def __ne__(self, other):
        # BUG FIX: under Python 2 defining __eq__ alone leaves ``!=``
        # comparing by object identity; make it the negation of __eq__.
        return not self.__eq__(other)

    def __repr__(self):
        s = super(Queue, self).__repr__
        if self.bindings:
            return s('Queue %r -> %s' % (
                self.name,
                pretty_bindings(self.bindings),
            ))
        return s('Queue %r -> %s -> %r' % (
            self.name,
            self.exchange,
            self.routing_key or '',
        ))

    @property
    def can_cache_declaration(self):
        # Only durable queues survive a broker restart, so only their
        # declaration may safely be cached.
        return self.durable

    @classmethod
    def from_dict(cls, queue, **options):
        """Alternate constructor: build a Queue (and its Exchange) from
        a flat options dictionary, as used in configuration."""
        binding_key = options.get('binding_key') or options.get('routing_key')

        # Exchange/queue specific flags fall back to the shared
        # 'durable'/'auto_delete' options when not given explicitly.
        e_durable = options.get('exchange_durable')
        if e_durable is None:
            e_durable = options.get('durable')

        e_auto_delete = options.get('exchange_auto_delete')
        if e_auto_delete is None:
            e_auto_delete = options.get('auto_delete')

        q_durable = options.get('queue_durable')
        if q_durable is None:
            q_durable = options.get('durable')

        q_auto_delete = options.get('queue_auto_delete')
        if q_auto_delete is None:
            q_auto_delete = options.get('auto_delete')

        e_arguments = options.get('exchange_arguments')
        q_arguments = options.get('queue_arguments')
        b_arguments = options.get('binding_arguments')
        bindings = options.get('bindings')

        exchange = Exchange(options.get('exchange'),
                            type=options.get('exchange_type'),
                            delivery_mode=options.get('delivery_mode'),
                            routing_key=options.get('routing_key'),
                            durable=e_durable,
                            auto_delete=e_auto_delete,
                            arguments=e_arguments)
        return Queue(queue,
                     exchange=exchange,
                     routing_key=binding_key,
                     durable=q_durable,
                     exclusive=options.get('exclusive'),
                     auto_delete=q_auto_delete,
                     no_ack=options.get('no_ack'),
                     queue_arguments=q_arguments,
                     binding_arguments=b_arguments,
                     bindings=bindings)
| bsd-3-clause | 0dadd86888c72ed20b7601400f9e45f2 | 35.960177 | 78 | 0.556726 | 4.755028 | false | false | false | false |
mozilla/firefox-flicks | flicks/base/helpers.py | 1 | 1424 | import json as json_mod
from django.utils.translation import get_language
from babel.core import Locale, UnknownLocaleError
from babel.dates import format_date
from jingo import register
from product_details import product_details
from flicks.base import regions
from flicks.base.util import absolutify as real_absolutify
def _babel_locale():
    """Return the current locale in Babel's format."""
    try:
        # Django reports locales like 'en-us'; Babel wants 'en_US' and
        # can parse the dash form when told the separator.
        return Locale.parse(get_language(), sep='-')
    except UnknownLocaleError:
        # Default to en-US
        return Locale('en', 'US')
@register.filter
def babel_date(date, format='long'):
    """Format a date properly for the current locale. Format can be one of
    'short', 'medium', 'long', or 'full'.
    """
    locale = _babel_locale()
    return format_date(date, format, locale)
@register.filter
def json(data):
    """Template filter: serialize `data` to a JSON string."""
    return json_mod.dumps(data)
@register.function
def country_name(country_code):
    """Return a localized version of a country's name."""
    locale = get_language()
    # product_details has no `es` regional information, so we use es-ES instead.
    if locale == 'es':
        locale = 'es-ES'
    try:
        return product_details.get_regions(locale)[country_code]
    except KeyError:
        # Unknown country code (or no region data for this locale).
        return ''
@register.function
def absolutify(url):
    """Template helper exposing :func:`flicks.base.util.absolutify`."""
    return real_absolutify(url)
@register.function
def region_name(region):
    """Template helper exposing :func:`flicks.base.regions.get_region_name`."""
    return regions.get_region_name(region)
| bsd-3-clause | ccaff30fcd0f82ca34ce02b7acad3c57 | 23.135593 | 79 | 0.692416 | 3.660668 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/datastructures.py | 1 | 12131 | # -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~
Custom types and data structures.
"""
from __future__ import absolute_import
from __future__ import with_statement
import sys
import time
from collections import defaultdict
from itertools import chain
from billiard.einfo import ExceptionInfo # noqa
from kombu.utils.limits import TokenBucket # noqa
from .utils.functional import LRUCache, first, uniq # noqa
class CycleError(Exception):
    """A cycle was detected in an acyclic graph."""
    # NOTE(review): not raised anywhere in this module's visible code;
    # presumably intended for callers that validate graphs -- confirm.
class DependencyGraph(object):
    """A directed acyclic graph of objects and their dependencies.
    Supports a robust topological sort
    to detect the order in which they must be handled.
    Takes an optional iterator of ``(obj, dependencies)``
    tuples to build the graph from.
    .. warning::
        Does not support cycle detection.
    """
    def __init__(self, it=None):
        # Adjacency mapping: obj -> list of objects obj depends on.
        self.adjacent = {}
        if it is not None:
            self.update(it)
    def add_arc(self, obj):
        """Add an object to the graph."""
        self.adjacent.setdefault(obj, [])
    def add_edge(self, A, B):
        """Add an edge from object ``A`` to object ``B``
        (``A`` depends on ``B``)."""
        self[A].append(B)
    def topsort(self):
        """Sort the graph topologically.
        :returns: a list of objects in the order
            in which they must be handled.
        """
        graph = DependencyGraph()
        components = self._tarjan72()
        # Map each node to the strongly connected component containing it.
        NC = dict((node, component)
                  for component in components
                  for node in component)
        for component in components:
            graph.add_arc(component)
        # Build the condensation graph: only edges between distinct
        # components are kept.
        for node in self:
            node_c = NC[node]
            for successor in self[node]:
                successor_c = NC[successor]
                if node_c != successor_c:
                    graph.add_edge(node_c, successor_c)
        # Each component is a tuple of nodes; report its first member.
        return [t[0] for t in graph._khan62()]
    def valency_of(self, obj):
        """Return the valency (degree) of a vertex in the graph."""
        try:
            l = [len(self[obj])]
        except KeyError:
            # Unknown vertex: treat as degree zero.
            return 0
        for node in self[obj]:
            l.append(self.valency_of(node))
        return sum(l)
    def update(self, it):
        """Update the graph with data from a list
        of ``(obj, dependencies)`` tuples."""
        tups = list(it)
        # Register every vertex first so edges never point at missing arcs.
        for obj, _ in tups:
            self.add_arc(obj)
        for obj, deps in tups:
            for dep in deps:
                self.add_edge(obj, dep)
    def edges(self):
        """Returns generator that yields for all edges in the graph."""
        # NOTE(review): this yields the *vertices* that have outgoing
        # edges, not (from, to) pairs -- the docstring is misleading.
        return (obj for obj, adj in self.iteritems() if adj)
    def _khan62(self):
        """Khans simple topological sort algorithm from '62
        See http://en.wikipedia.org/wiki/Topological_sorting
        """
        # Count incoming edges for every node.
        count = defaultdict(lambda: 0)
        result = []
        for node in self:
            for successor in self[node]:
                count[successor] += 1
        ready = [node for node in self if not count[node]]
        # Repeatedly emit a node whose incoming edges are all consumed.
        while ready:
            node = ready.pop()
            result.append(node)
            for successor in self[node]:
                count[successor] -= 1
                if count[successor] == 0:
                    ready.append(successor)
        result.reverse()
        return result
    def _tarjan72(self):
        """Tarjan's algorithm to find strongly connected components.
        See http://bit.ly/vIMv3h.
        """
        result, stack, low = [], [], {}
        def visit(node):
            if node in low:
                return
            num = len(low)
            low[node] = num
            stack_pos = len(stack)
            stack.append(node)
            for successor in self[node]:
                visit(successor)
                low[node] = min(low[node], low[successor])
            if num == low[node]:
                # node is the root of a strongly connected component:
                # pop its members off the stack as one tuple.
                component = tuple(stack[stack_pos:])
                stack[stack_pos:] = []
                result.append(component)
                for item in component:
                    low[item] = len(self)
        for node in self:
            visit(node)
        return result
    def to_dot(self, fh, ws=' ' * 4):
        """Convert the graph to DOT format.
        :param fh: A file, or a file-like object to write the graph to.
        """
        fh.write('digraph dependencies {\n')
        for obj, adjacent in self.iteritems():
            if not adjacent:
                # Isolated vertex: emit it so it still appears in the output.
                fh.write(ws + '"%s"\n' % (obj, ))
            for req in adjacent:
                fh.write(ws + '"%s" -> "%s"\n' % (obj, req))
        fh.write('}\n')
    def __iter__(self):
        return iter(self.adjacent)
    def __getitem__(self, node):
        return self.adjacent[node]
    def __len__(self):
        return len(self.adjacent)
    def __contains__(self, obj):
        return obj in self.adjacent
    def _iterate_items(self):
        return self.adjacent.iteritems()
    items = iteritems = _iterate_items
    def __repr__(self):
        return '\n'.join(self.repr_node(N) for N in self)
    def repr_node(self, obj, level=1):
        # Render obj and, recursively indented, everything it depends on.
        output = ['%s(%s)' % (obj, self.valency_of(obj))]
        if obj in self:
            for other in self[obj]:
                d = '%s(%s)' % (other, self.valency_of(other))
                output.append(' ' * level + d)
                output.extend(self.repr_node(other, level + 1).split('\n')[1:])
        return '\n'.join(output)
class AttributeDictMixin(object):
    """Mixin that lets mapping keys be read and written as attributes.

    `d.key -> d[key]`
    """

    def __getattr__(self, key):
        """`d.key -> d[key]`"""
        try:
            return self[key]
        except KeyError:
            error = "'%s' object has no attribute '%s'" % (
                type(self).__name__, key)
            raise AttributeError(error)

    def __setattr__(self, key, value):
        """`d[key] = value -> d.key = value`"""
        self[key] = value
class AttributeDict(dict, AttributeDictMixin):
    """Dict subclass whose keys are also reachable as attributes."""
class DictAttribute(object):
    """Dict interface to attributes.
    `obj[k] -> obj.k`
    """
    obj = None
    def __init__(self, obj):
        # Use object.__setattr__ so our own __setattr__ (which writes to
        # the wrapped object) is bypassed when storing the wrapper state.
        object.__setattr__(self, 'obj', obj)
    def __getattr__(self, key):
        return getattr(self.obj, key)
    def __setattr__(self, key, value):
        return setattr(self.obj, key, value)
    def get(self, key, default=None):
        """Return ``self[key]``, or ``default`` when missing."""
        try:
            return self[key]
        except KeyError:
            return default
    def setdefault(self, key, default):
        """Return ``self[key]``, setting it to ``default`` when missing."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
    def __getitem__(self, key):
        try:
            return getattr(self.obj, key)
        except AttributeError:
            # Present missing attributes as missing mapping keys.
            raise KeyError(key)
    def __setitem__(self, key, value):
        setattr(self.obj, key, value)
    def __contains__(self, key):
        return hasattr(self.obj, key)
    def _iterate_keys(self):
        # Keys are whatever dir() reports for the wrapped object.
        return iter(dir(self.obj))
    iterkeys = _iterate_keys
    def __iter__(self):
        return self._iterate_keys()
    def _iterate_items(self):
        for key in self._iterate_keys():
            yield key, getattr(self.obj, key)
    iteritems = _iterate_items
    # On Python 3, keys()/items() are the lazy iterators themselves;
    # on Python 2 they return lists, matching dict's 2.x behaviour.
    if sys.version_info[0] == 3:  # pragma: no cover
        items = _iterate_items
        keys = _iterate_keys
    else:
        def keys(self):
            return list(self)
        def items(self):
            return list(self._iterate_items())
class ConfigurationView(AttributeDictMixin):
    """A view over an applications configuration dicts.
    If the key does not exist in ``changes``, the ``defaults`` dict
    is consulted.
    :param changes: Dict containing changes to the configuration.
    :param defaults: Dict containing the default configuration.
    """
    changes = None
    defaults = None
    _order = None
    def __init__(self, changes, defaults):
        # Write through __dict__ directly: AttributeDictMixin.__setattr__
        # would otherwise store these as configuration keys.
        self.__dict__.update(changes=changes, defaults=defaults,
                             _order=[changes] + defaults)
    def add_defaults(self, d):
        # Insert after `changes` (index 0) so changes still win lookups.
        self.defaults.insert(0, d)
        self._order.insert(1, d)
    def __getitem__(self, key):
        # First dict in _order that has the key wins.
        for d in self._order:
            try:
                return d[key]
            except KeyError:
                pass
        raise KeyError(key)
    def __setitem__(self, key, value):
        # Writes always go to the changes dict, never to defaults.
        self.changes[key] = value
    def first(self, *keys):
        """Return the value of the first key in ``keys`` that is set."""
        return first(None, (self.get(key) for key in keys))
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
    def update(self, *args, **kwargs):
        return self.changes.update(*args, **kwargs)
    def __contains__(self, key):
        for d in self._order:
            if key in d:
                return True
        return False
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __iter__(self):
        return self._iterate_keys()
    def _iter(self, op):
        # defaults must be first in the stream, so values in
        # changes takes precedence.
        return chain(*[op(d) for d in reversed(self._order)])
    def _iterate_keys(self):
        # uniq() removes keys that appear in several dicts.
        return uniq(self._iter(lambda d: d))
    iterkeys = _iterate_keys
    def _iterate_items(self):
        return ((key, self[key]) for key in self)
    iteritems = _iterate_items
    def _iterate_values(self):
        return (self[key] for key in self)
    itervalues = _iterate_values
    def keys(self):
        return list(self._iterate_keys())
    def items(self):
        return list(self._iterate_items())
    def values(self):
        return list(self._iterate_values())
class LimitedSet(object):
    """Kind-of Set with limitations.
    Good for when you need to test for membership (`a in set`),
    but the list might become too big, so you want to limit it so it doesn't
    consume too much resources.
    :keyword maxlen: Maximum number of members before we start
                     evicting expired members.
    :keyword expires: Time in seconds, before a membership expires.
    """
    # __len__ is listed in __slots__ because it is bound per-instance below.
    __slots__ = ('maxlen', 'expires', '_data', '__len__')
    def __init__(self, maxlen=None, expires=None):
        self.maxlen = maxlen
        self.expires = expires
        # value -> insertion timestamp (time.time()).
        self._data = {}
        # Bind __len__ straight to the dict's for speed.
        self.__len__ = self._data.__len__
    def add(self, value):
        """Add a new member."""
        # Evict one expired member first if we are at capacity.
        self._expire_item()
        self._data[value] = time.time()
    def clear(self):
        """Remove all members"""
        self._data.clear()
    def pop_value(self, value):
        """Remove membership by finding value."""
        self._data.pop(value, None)
    def _expire_item(self):
        """Hunt down and remove an expired item."""
        # NOTE(review): at most one item is removed per call (the loop only
        # repeats on the TypeError retry path) -- confirm this is intended.
        while 1:
            if self.maxlen and len(self) >= self.maxlen:
                value, when = self.first
                if not self.expires or time.time() > when + self.expires:
                    try:
                        self.pop_value(value)
                    except TypeError:  # pragma: no cover
                        continue
            break
    def __contains__(self, value):
        return value in self._data
    def update(self, other):
        if isinstance(other, self.__class__):
            # Fast path: merge the raw timestamp dicts.
            self._data.update(other._data)
        else:
            for obj in other:
                self.add(obj)
    def as_dict(self):
        return self._data
    def __iter__(self):
        return iter(self._data)
    def __repr__(self):
        return 'LimitedSet(%r)' % (list(self._data), )
    @property
    def chronologically(self):
        # Members sorted oldest-first by insertion time.
        # NOTE: tuple-parameter lambda below is Python 2-only syntax.
        return sorted(self._data.items(), key=lambda (value, when): when)
    @property
    def first(self):
        """Get the oldest member."""
        return self.chronologically[0]
| bsd-3-clause | 0072ccf3dd756a175db7cd451ae1f0c0 | 25.544858 | 79 | 0.542247 | 4.098311 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/app/amqp.py | 1 | 14390 | # -*- coding: utf-8 -*-
"""
celery.app.amqp
~~~~~~~~~~~~~~~
Sending and receiving messages using Kombu.
"""
from __future__ import absolute_import
from datetime import timedelta
from weakref import WeakValueDictionary
from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.common import entry_to_queue
from kombu.pools import ProducerPool
from kombu.utils import cached_property, uuid
from kombu.utils.encoding import safe_repr
from celery import signals
from celery.utils.text import indent as textindent
from . import app_or_default
from . import routes as _routes
#: Human readable queue declaration.
QUEUE_FORMAT = """
. %(name)s exchange:%(exchange)s(%(exchange_type)s) binding:%(routing_key)s
"""
class Queues(dict):
    """Queue name⇒ declaration mapping.
    :param queues: Initial list/tuple or dict of queues.
    :keyword create_missing: By default any unknown queues will be
                             added automatically, but if disabled
                             the occurrence of unknown queues
                             in `wanted` will raise :exc:`KeyError`.
    :keyword ha_policy: Default HA policy for queues with none set.
    """
    #: If set, this is a subset of queues to consume from.
    #: The rest of the queues are then used for routing only.
    _consume_from = None
    def __init__(self, queues=None, default_exchange=None,
                 create_missing=True, ha_policy=None):
        dict.__init__(self)
        # Weak refs so aliased queues disappear with their Queue objects.
        self.aliases = WeakValueDictionary()
        self.default_exchange = default_exchange
        self.create_missing = create_missing
        self.ha_policy = ha_policy
        if isinstance(queues, (tuple, list)):
            queues = dict((q.name, q) for q in queues)
        for name, q in (queues or {}).iteritems():
            # Dict values may be Queue instances or option dicts (old style).
            self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)
    def __getitem__(self, name):
        # Aliases take precedence over real queue names.
        try:
            return self.aliases[name]
        except KeyError:
            return dict.__getitem__(self, name)
    def __setitem__(self, name, queue):
        if self.default_exchange and (not queue.exchange or
                                      not queue.exchange.name):
            # Fill in the app default exchange when the queue has none.
            queue.exchange = self.default_exchange
        dict.__setitem__(self, name, queue)
        if queue.alias:
            self.aliases[queue.alias] = queue
    def __missing__(self, name):
        if self.create_missing:
            return self.add(self.new_missing(name))
        raise KeyError(name)
    def add(self, queue, **kwargs):
        """Add new queue.
        :param queue: Name of the queue.
        :keyword exchange: Name of the exchange.
        :keyword routing_key: Binding key.
        :keyword exchange_type: Type of exchange.
        :keyword \*\*options: Additional declaration options.
        """
        if not isinstance(queue, Queue):
            return self.add_compat(queue, **kwargs)
        if self.ha_policy:
            if queue.queue_arguments is None:
                queue.queue_arguments = {}
            self._set_ha_policy(queue.queue_arguments)
        self[queue.name] = queue
        return queue
    def add_compat(self, name, **options):
        # docs used to use binding_key as routing key
        options.setdefault('routing_key', options.get('binding_key'))
        if options['routing_key'] is None:
            options['routing_key'] = name
        if self.ha_policy is not None:
            self._set_ha_policy(options.setdefault('queue_arguments', {}))
        q = self[name] = entry_to_queue(name, **options)
        return q
    def _set_ha_policy(self, args):
        # Translate the configured HA policy into RabbitMQ queue arguments.
        policy = self.ha_policy
        if isinstance(policy, (list, tuple)):
            return args.update({'x-ha-policy': 'nodes',
                                'x-ha-policy-params': list(policy)})
        args['x-ha-policy'] = policy
    def format(self, indent=0, indent_first=True):
        """Format routing table into string for log dumps."""
        active = self.consume_from
        if not active:
            return ''
        info = [
            QUEUE_FORMAT.strip() % {
                'name': (name + ':').ljust(12),
                'exchange': q.exchange.name,
                'exchange_type': q.exchange.type,
                'routing_key': q.routing_key}
            for name, q in sorted(active.iteritems())]
        if indent_first:
            return textindent('\n'.join(info), indent)
        return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
    def select_add(self, queue, **kwargs):
        """Add new task queue that will be consumed from even when
        a subset has been selected using the :option:`-Q` option."""
        q = self.add(queue, **kwargs)
        if self._consume_from is not None:
            self._consume_from[q.name] = q
        return q
    def select_subset(self, wanted):
        """Sets :attr:`consume_from` by selecting a subset of the
        currently defined queues.
        :param wanted: List of wanted queue names.
        """
        if wanted:
            self._consume_from = dict((name, self[name]) for name in wanted)
    def select_remove(self, queue):
        # First removal materializes the subset from all known queues.
        if self._consume_from is None:
            self.select_subset(k for k in self if k != queue)
        else:
            self._consume_from.pop(queue, None)
    def new_missing(self, name):
        return Queue(name, Exchange(name), name)
    @property
    def consume_from(self):
        # Without a selected subset we consume from every queue.
        if self._consume_from is not None:
            return self._consume_from
        return self
class TaskProducer(Producer):
    """Producer that serializes and publishes task messages."""
    # Bound to the Celery app by app.subclass_with_self (see AMQP below).
    app = None
    auto_declare = False
    retry = False
    retry_policy = None
    def __init__(self, channel=None, exchange=None, *args, **kwargs):
        self.retry = kwargs.pop('retry', self.retry)
        self.retry_policy = kwargs.pop('retry_policy',
                                       self.retry_policy or {})
        exchange = exchange or self.exchange
        self.queues = self.app.amqp.queues  # shortcut
        self.default_queue = self.app.amqp.default_queue
        super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
    def publish_task(self, task_name, task_args=None, task_kwargs=None,
                     countdown=None, eta=None, task_id=None, group_id=None,
                     taskset_id=None,  # compat alias to group_id
                     expires=None, exchange=None, exchange_type=None,
                     event_dispatcher=None, retry=None, retry_policy=None,
                     queue=None, now=None, retries=0, chord=None,
                     callbacks=None, errbacks=None, routing_key=None,
                     serializer=None, delivery_mode=None, compression=None,
                     declare=None, **kwargs):
        """Send task message."""
        qname = queue
        # Fall back to the configured default queue when neither a queue
        # nor an explicit exchange was requested.
        if queue is None and exchange is None:
            queue = self.default_queue
        if queue is not None:
            if isinstance(queue, basestring):
                qname, queue = queue, self.queues[queue]
            else:
                qname = queue.name
            # Queue settings only fill in values not explicitly given.
            exchange = exchange or queue.exchange.name
            routing_key = routing_key or queue.routing_key
        declare = declare or ([queue] if queue else [])
        # merge default and custom policy
        retry = self.retry if retry is None else retry
        _rp = (dict(self.retry_policy, **retry_policy) if retry_policy
               else self.retry_policy)
        task_id = task_id or uuid()
        task_args = task_args or []
        task_kwargs = task_kwargs or {}
        if not isinstance(task_args, (list, tuple)):
            raise ValueError('task args must be a list or tuple')
        if not isinstance(task_kwargs, dict):
            raise ValueError('task kwargs must be a dictionary')
        if countdown:  # Convert countdown to ETA.
            now = now or self.app.now()
            eta = now + timedelta(seconds=countdown)
        if isinstance(expires, (int, float)):
            now = now or self.app.now()
            expires = now + timedelta(seconds=expires)
        # Datetimes are serialized as ISO 8601 strings (None stays None).
        eta = eta and eta.isoformat()
        expires = expires and expires.isoformat()
        body = {
            'task': task_name,
            'id': task_id,
            'args': task_args,
            'kwargs': task_kwargs,
            'retries': retries or 0,
            'eta': eta,
            'expires': expires,
            'utc': self.utc,
            'callbacks': callbacks,
            'errbacks': errbacks,
            'taskset': group_id or taskset_id,
            'chord': chord,
        }
        self.publish(
            body,
            exchange=exchange, routing_key=routing_key,
            serializer=serializer or self.serializer,
            compression=compression or self.compression,
            retry=retry, retry_policy=_rp,
            delivery_mode=delivery_mode, declare=declare,
            **kwargs
        )
        signals.task_sent.send(sender=task_name, **body)
        # Emit a task-sent monitoring event when a dispatcher was given.
        if event_dispatcher:
            exname = exchange or self.exchange
            if isinstance(exname, Exchange):
                exname = exname.name
            event_dispatcher.send(
                'task-sent', uuid=task_id,
                name=task_name,
                args=safe_repr(task_args),
                kwargs=safe_repr(task_kwargs),
                retries=retries,
                eta=eta,
                expires=expires,
                queue=qname,
                exchange=exname,
                routing_key=routing_key,
            )
        return task_id
    delay_task = publish_task  # XXX Compat
class TaskPublisher(TaskProducer):
    """Deprecated version of :class:`TaskProducer`."""
    def __init__(self, channel=None, exchange=None, *args, **kwargs):
        # Unlike TaskProducer, the app may be passed as a keyword argument.
        self.app = app_or_default(kwargs.pop('app', self.app))
        self.retry = kwargs.pop('retry', self.retry)
        self.retry_policy = kwargs.pop('retry_policy',
                                       self.retry_policy or {})
        exchange = exchange or self.exchange
        if not isinstance(exchange, Exchange):
            # Accept plain exchange names (old API).
            exchange = Exchange(exchange,
                                kwargs.pop('exchange_type', 'direct'))
        self.queues = self.app.amqp.queues  # shortcut
        super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)
class TaskConsumer(Consumer):
    """Consumer defaulting to the queues the app consumes from."""
    # Bound to the Celery app by app.subclass_with_self (see AMQP below).
    app = None
    def __init__(self, channel, queues=None, app=None, **kw):
        self.app = app or self.app
        super(TaskConsumer, self).__init__(
            channel,
            queues or self.app.amqp.queues.consume_from.values(), **kw
        )
class AMQP(object):
    """App namespace for all AMQP related functionality."""
    Connection = Connection
    Consumer = Consumer
    #: compat alias to Connection
    BrokerConnection = Connection
    #: Cached and prepared routing table.
    _rtable = None
    #: Underlying producer pool instance automatically
    #: set by the :attr:`producer_pool`.
    _producer_pool = None
    def __init__(self, app):
        self.app = app
    def flush_routes(self):
        # Rebuild the routing table from the current configuration.
        self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
    def Queues(self, queues, create_missing=None, ha_policy=None):
        """Create new :class:`Queues` instance, using queue defaults
        from the current configuration."""
        conf = self.app.conf
        if create_missing is None:
            create_missing = conf.CELERY_CREATE_MISSING_QUEUES
        if ha_policy is None:
            ha_policy = conf.CELERY_QUEUE_HA_POLICY
        if not queues and conf.CELERY_DEFAULT_QUEUE:
            # No queues configured: fall back to the single default queue.
            queues = (Queue(conf.CELERY_DEFAULT_QUEUE,
                            exchange=self.default_exchange,
                            routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), )
        return Queues(queues, self.default_exchange, create_missing, ha_policy)
    def Router(self, queues=None, create_missing=None):
        """Returns the current task router."""
        return _routes.Router(self.routes, queues or self.queues,
                              self.app.either('CELERY_CREATE_MISSING_QUEUES',
                                              create_missing), app=self.app)
    @cached_property
    def TaskConsumer(self):
        """Return consumer configured to consume from the queues
        we are configured for (``app.amqp.queues.consume_from``)."""
        return self.app.subclass_with_self(TaskConsumer,
                                           reverse='amqp.TaskConsumer')
    get_task_consumer = TaskConsumer  # XXX compat
    @cached_property
    def TaskProducer(self):
        """Returns publisher used to send tasks.
        You should use `app.send_task` instead.
        """
        conf = self.app.conf
        return self.app.subclass_with_self(
            TaskProducer,
            reverse='amqp.TaskProducer',
            exchange=self.default_exchange,
            routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
            serializer=conf.CELERY_TASK_SERIALIZER,
            compression=conf.CELERY_MESSAGE_COMPRESSION,
            retry=conf.CELERY_TASK_PUBLISH_RETRY,
            retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
            utc=conf.CELERY_ENABLE_UTC,
        )
    TaskPublisher = TaskProducer  # compat
    @cached_property
    def default_queue(self):
        return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE]
    @cached_property
    def queues(self):
        """Queue name⇒ declaration mapping."""
        return self.Queues(self.app.conf.CELERY_QUEUES)
    @queues.setter  # noqa
    def queues(self, queues):
        # NOTE(review): relies on kombu's cached_property setter caching
        # the *returned* value -- confirm against the kombu version in use.
        return self.Queues(queues)
    @property
    def routes(self):
        # Lazily build the routing table on first access.
        if self._rtable is None:
            self.flush_routes()
        return self._rtable
    @cached_property
    def router(self):
        return self.Router()
    @property
    def producer_pool(self):
        if self._producer_pool is None:
            self._producer_pool = ProducerPool(
                self.app.pool,
                limit=self.app.pool.limit,
                Producer=self.TaskProducer,
            )
        return self._producer_pool
    publisher_pool = producer_pool  # compat alias
    @cached_property
    def default_exchange(self):
        return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
                        self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
| bsd-3-clause | bcff9659f1ff53bea640519964d809ff | 34.875312 | 79 | 0.579174 | 4.207663 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/bin/celeryd.py | 1 | 6626 | # -*- coding: utf-8 -*-
"""
The :program:`celery worker` command (previously known as ``celeryd``)
.. program:: celery worker
.. seealso::
See :ref:`preload-options`.
.. cmdoption:: -c, --concurrency
Number of child processes processing the queue. The default
is the number of CPUs available on your system.
.. cmdoption:: -P, --pool
Pool implementation:
processes (default), eventlet, gevent, solo or threads.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: -l, --loglevel
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.
.. cmdoption:: -n, --hostname
Set custom hostname, e.g. 'foo.example.com'.
.. cmdoption:: -B, --beat
Also run the `celerybeat` periodic task scheduler. Please note that
there must only be one instance of this service.
.. cmdoption:: -Q, --queues
List of queues to enable for this worker, separated by comma.
By default all configured queues are enabled.
Example: `-Q video,image`
.. cmdoption:: -I, --include
Comma separated list of additional modules to import.
Example: -I foo.tasks,bar.tasks
.. cmdoption:: -s, --schedule
Path to the schedule database if running with the `-B` option.
Defaults to `celerybeat-schedule`. The extension ".db" may be
appended to the filename.
.. cmdoption:: --scheduler
Scheduler class to use. Default is celery.beat.PersistentScheduler
.. cmdoption:: -S, --statedb
Path to the state database. The extension '.db' may
be appended to the filename. Default: %(default)s
.. cmdoption:: -E, --events
Send events that can be captured by monitors like :program:`celeryev`,
`celerymon`, and others.
.. cmdoption:: --purge
Purges all waiting tasks before the daemon is started.
**WARNING**: This is unrecoverable, and the tasks will be
deleted from the messaging server.
.. cmdoption:: --time-limit
Enables a hard time limit (in seconds int/float) for tasks.
.. cmdoption:: --soft-time-limit
Enables a soft time limit (in seconds int/float) for tasks.
.. cmdoption:: --maxtasksperchild
Maximum number of tasks a pool worker can execute before it's
terminated and replaced by a new worker.
.. cmdoption:: --pidfile
Optional file used to store the workers pid.
The worker will not start if this file already exists
and the pid is still alive.
.. cmdoption:: --autoscale
Enable autoscaling by providing
max_concurrency, min_concurrency. Example::
--autoscale=10,3
(always keep 3 processes, but grow to 10 if necessary)
.. cmdoption:: --autoreload
Enable autoreloading.
.. cmdoption:: --no-execv
Don't do execv after multiprocessing child fork.
"""
from __future__ import absolute_import
import sys
from celery import concurrency
from celery.bin.base import Command, Option
from celery.utils.log import LOG_LEVELS, mlevel
class WorkerCommand(Command):
    """Implementation of the :program:`celery worker` command."""
    doc = __doc__  # parse help from this.
    namespace = 'celeryd'
    enable_config_from_cmdline = True
    supports_args = False
    def execute_from_commandline(self, argv=None):
        # Default to the process command line when none is given.
        if argv is None:
            argv = list(sys.argv)
        return super(WorkerCommand, self).execute_from_commandline(argv)
    def run(self, *args, **kwargs):
        kwargs.pop('app', None)
        # Pools like eventlet/gevent needs to patch libs as early
        # as possible.
        kwargs['pool_cls'] = concurrency.get_implementation(
            kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL)
        if self.app.IS_WINDOWS and kwargs.get('beat'):
            self.die('-B option does not work on Windows. '
                     'Please run celerybeat as a separate service.')
        loglevel = kwargs.get('loglevel')
        if loglevel:
            # Translate a symbolic level name into its numeric value.
            try:
                kwargs['loglevel'] = mlevel(loglevel)
            except KeyError:  # pragma: no cover
                self.die('Unknown level %r. Please use one of %s.' % (
                    loglevel, '|'.join(l for l in LOG_LEVELS
                                       if isinstance(l, basestring))))
        return self.app.Worker(**kwargs).run()
    def with_pool_option(self, argv):
        # this command support custom pools
        # that may have to be loaded as early as possible.
        return (['-P'], ['--pool'])
    def get_options(self):
        """Return the optparse options for the worker command."""
        conf = self.app.conf
        return (
            Option('-c', '--concurrency',
                   default=conf.CELERYD_CONCURRENCY, type='int'),
            Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
            Option('--purge', '--discard', default=False, action='store_true'),
            Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE),
            Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
            Option('-n', '--hostname'),
            Option('-B', '--beat', action='store_true'),
            Option('-s', '--schedule', dest='schedule_filename',
                   default=conf.CELERYBEAT_SCHEDULE_FILENAME),
            Option('--scheduler', dest='scheduler_cls'),
            Option('-S', '--statedb',
                   default=conf.CELERYD_STATE_DB, dest='state_db'),
            Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
                   action='store_true', dest='send_events'),
            Option('--time-limit', type='float', dest='task_time_limit',
                   default=conf.CELERYD_TASK_TIME_LIMIT),
            Option('--soft-time-limit', dest='task_soft_time_limit',
                   default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
            Option('--maxtasksperchild', dest='max_tasks_per_child',
                   default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
            Option('--queues', '-Q', default=[]),
            Option('--include', '-I', default=[]),
            Option('--pidfile'),
            Option('--autoscale'),
            Option('--autoreload', action='store_true'),
            Option('--no-execv', action='store_true', default=False),
        )
def main():
    """Entry point for the ``celeryd`` console script."""
    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != '__main__':  # pragma: no cover
        sys.modules['__main__'] = sys.modules[__name__]
    from billiard import freeze_support
    freeze_support()
    worker = WorkerCommand()
    worker.execute_from_commandline()
worker.execute_from_commandline()
if __name__ == '__main__': # pragma: no cover
main()
| bsd-3-clause | 2bb02216db75d520b08d81ca31d73ff3 | 31.165049 | 79 | 0.613945 | 4.010896 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/djcelery/backends/database.py | 3 | 1934 | from __future__ import absolute_import
from celery import current_app
from celery.backends.base import BaseDictBackend
from celery.utils.timeutils import maybe_timedelta
from ..models import TaskMeta, TaskSetMeta
class DatabaseBackend(BaseDictBackend):
    """The database backend.
    Using Django models to store task state.
    """
    TaskModel = TaskMeta
    TaskSetModel = TaskSetMeta
    expires = current_app.conf.CELERY_TASK_RESULT_EXPIRES
    create_django_tables = True
    subpolling_interval = 0.5
    def _store_result(self, task_id, result, status, traceback=None):
        """Store return value and status of an executed task."""
        self.TaskModel._default_manager.store_result(task_id, result, status,
                traceback=traceback, children=self.current_task_children())
        return result
    def _save_group(self, group_id, result):
        """Store the result of an executed group."""
        self.TaskSetModel._default_manager.store_result(group_id, result)
        return result
    def _get_task_meta_for(self, task_id):
        """Get task metadata for a task by id."""
        return self.TaskModel._default_manager.get_task(task_id).to_dict()
    def _restore_group(self, group_id):
        """Get group metadata for a group by id."""
        meta = self.TaskSetModel._default_manager.restore_taskset(group_id)
        if meta:
            return meta.to_dict()
    def _delete_group(self, group_id):
        self.TaskSetModel._default_manager.delete_taskset(group_id)
    def _forget(self, task_id):
        try:
            self.TaskModel._default_manager.get(task_id=task_id).delete()
        except self.TaskModel.DoesNotExist:
            # Already gone -- forgetting is idempotent.
            pass
    def cleanup(self):
        """Delete expired metadata."""
        expires = maybe_timedelta(self.expires)
        for model in self.TaskModel, self.TaskSetModel:
            model._default_manager.delete_expired(expires)
| bsd-3-clause | 93663a4dd2dc4043207274310b48429d | 32.344828 | 79 | 0.664943 | 3.95501 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/requests/auth.py | 15 | 5258 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import time
import hashlib
import logging
from base64 import b64encode
from .compat import urlparse, str
from .utils import parse_dict_header
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
    """Base class that all auth implementations derive from"""
    def __call__(self, r):
        # Subclasses attach credentials to the request ``r`` and return it.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Build the header per request so credential changes are honored.
        auth_header = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = auth_header
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same credentials as basic auth, but on the proxy header.
        header_value = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = header_value
        return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm', 'MD5')
opaque = self.chal.get('opaque', None)
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
# XXX MD5-sess
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2))
respdig = KD(hash_utf8(A1), noncebit)
elif qop is None:
respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2)))
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
    def handle_401(self, r):
        """Takes the given response and tries digest-auth, if needed."""
        # Count how many times this hook already fired for the request so
        # the 401 is retried at most once and cannot loop forever.
        num_401_calls = r.request.hooks['response'].count(self.handle_401)
        s_auth = r.headers.get('www-authenticate', '')
        if 'digest' in s_auth.lower() and num_401_calls < 2:
            # Parse the challenge parameters from the WWW-Authenticate header.
            self.chal = parse_dict_header(s_auth.replace('Digest ', ''))
            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            # Re-send the original request with an Authorization header
            # computed from the fresh challenge.
            r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url)
            _r = r.connection.send(r.request)
            _r.history.append(r)
            return _r
        return r
    def __call__(self, r):
        """Attach digest auth to an outgoing request *r*."""
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        # Always install the 401 handler so a fresh challenge can be answered.
        r.register_hook('response', self.handle_401)
        return r
| bsd-3-clause | 242dd681fde61f57715253f7f85a9e8b | 30.297619 | 107 | 0.559338 | 3.726435 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/billiard/queues.py | 1 | 10800 | #
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
from __future__ import with_statement
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import weakref
import errno
from Queue import Empty, Full
from . import Pipe
from ._ext import _billiard
from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from .util import debug, error, info, Finalize, register_after_fork
from .forking import assert_spawning
class Queue(object):
    '''
    Queue type using a pipe, buffer and thread.

    put() appends to an in-process deque; a daemon "feeder" thread
    serialises items from the deque into the pipe, from which any
    process sharing the queue may get() them.
    '''
    def __init__(self, maxsize=0):
        # A non-positive maxsize means "unbounded", i.e. up to the
        # platform's maximum semaphore value.
        if maxsize <= 0:
            maxsize = _billiard.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        # Remember the creating process id; used by _start_thread() to
        # decide whether flushing on exit is worthwhile.
        self._opid = os.getpid()
        if sys.platform == 'win32':
            # Writes to a message-oriented win32 pipe are atomic, so no
            # separate write lock is needed there.
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False
        self._after_fork()
        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)
    def __getstate__(self):
        # Only picklable while being handed to a child process at spawn time.
        assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)
    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()
    def _after_fork(self):
        # (Re)create all per-process, unpicklable state: the buffer, its
        # condition variable, feeder-thread bookkeeping and the bound
        # pipe methods.
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll
    def put(self, obj, block=True, timeout=None):
        """Append *obj* to the buffer; the feeder thread pushes it to the pipe."""
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full
        with self._notempty:
            # The feeder thread is started lazily on first put().
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
    def get(self, block=True, timeout=None):
        """Read one item from the pipe, honouring block/timeout semantics."""
        if block and timeout is None:
            with self._rlock:
                res = self._recv()
            self._sem.release()
            return res
        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    # Remaining budget after (possibly) waiting for the lock.
                    timeout = deadline - time.time()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()
    def qsize(self):
        # Raises NotImplementedError on Mac OSX because
        # of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()
    def empty(self):
        return not self._poll()
    def full(self):
        return self._sem._semlock._is_zero()
    def get_nowait(self):
        return self.get(False)
    def put_nowait(self, obj):
        return self.put(obj, False)
    def close(self):
        # Close the reading end; the feeder thread closes the writer once
        # it sees the sentinel.
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()
    def join_thread(self):
        """Wait for the feeder thread to flush; only valid after close()."""
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()
    def cancel_join_thread(self):
        """Prevent process exit from blocking on the feeder thread."""
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            # Feeder thread (and thus its finalizer) not started yet.
            pass
    def _start_thread(self):
        debug('Queue._start_thread()')
        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close, self._ignore_epipe),
            name='QueueFeederThread'
        )
        self._thread.daemon = True
        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')
        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )
        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )
    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        # Dereference the weakref; the thread may already be gone.
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')
    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        with notempty:
            buffer.append(_sentinel)
            notempty.notify()
    @staticmethod
    def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
        """Feeder thread body: drain *buffer* into the pipe until sentinel."""
        debug('starting thread to feed data to pipe')
        from .util import is_exiting
        # Cache frequently used bound methods/objects as locals for the
        # hot loop below.
        ncond = notempty
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wlock = writelock
        else:
            wlock = None
        try:
            while 1:
                with ncond:
                    if not buffer:
                        nwait()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        if wlock is None:
                            send(obj)
                        else:
                            with wlock:
                                send(obj)
                except IndexError:
                    # Buffer drained; go back to waiting for more items.
                    pass
        except Exception, e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %r', e, exc_info=True)
                else:
                    if not error('error in queue thread: %r', e,
                                 exc_info=True):
                        import traceback
                        traceback.print_exc()
            except Exception:
                pass
_sentinel = object()
class JoinableQueue(Queue):
    '''
    Queue subclass that additionally supports join() and task_done().

    Every item put on the queue increments an "unfinished tasks"
    counter; each finished task must be acknowledged with task_done()
    or the counter's semaphore may eventually overflow, causing Bad
    Things to happen.  join() blocks until the counter reaches zero.
    '''
    def __init__(self, maxsize=0):
        super(JoinableQueue, self).__init__(maxsize)
        # Semaphore counting items put but not yet acknowledged, plus a
        # condition that join() callers wait on.
        self._unfinished_tasks = Semaphore(0)
        self._cond = Condition()
    def __getstate__(self):
        # Extend the parent's pickled state with our two extra primitives.
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
    def __setstate__(self, state):
        self._cond, self._unfinished_tasks = state[-2:]
        Queue.__setstate__(self, state[:-2])
    def put(self, obj, block=True, timeout=None):
        """Put *obj* on the queue and bump the unfinished-task count."""
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full
        # Lock order matters: _notempty first, then _cond, matching the
        # ordering the base class and task_done() rely on.
        with self._notempty:
            with self._cond:
                if self._thread is None:
                    self._start_thread()
                self._buffer.append(obj)
                self._unfinished_tasks.release()
                self._notempty.notify()
    def task_done(self):
        """Acknowledge completion of one previously fetched item."""
        with self._cond:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                # Last outstanding task finished -- wake all join()ers.
                self._cond.notify_all()
    def join(self):
        """Block until every item ever put has been task_done()'d."""
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
class SimpleQueue(object):
    '''
    Simplified Queue type -- really just a locked pipe
    '''
    def __init__(self):
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        if sys.platform == 'win32':
            # writes to a message oriented win32 pipe are atomic
            self._wlock = None
        else:
            self._wlock = Lock()
        self._make_methods()
    def empty(self):
        """Return True if no data is waiting on the pipe."""
        return not self._poll()
    def __getstate__(self):
        # Only picklable while being handed to a child process at spawn time.
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)
    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state
        self._make_methods()
    def _make_methods(self):
        """(Re)build get/put/_poll bound to the current pipe ends.

        Called from both __init__ and __setstate__ so that an unpickled
        instance (e.g. in a child process) is fully functional.
        """
        # Bug fix: _poll was previously assigned only in __init__, so
        # empty() raised AttributeError on an unpickled instance.
        self._poll = self._reader.poll
        recv = self._reader.recv
        rlock = self._rlock
        def get():
            with rlock:
                return recv()
        self.get = get
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self.put = self._writer.send
        else:
            send = self._writer.send
            wlock = self._wlock
            def put(obj):
                with wlock:
                    return send(obj)
            self.put = put
| bsd-3-clause | 896f6290a5ef033f5ff00629f5f3d785 | 29.594901 | 78 | 0.5325 | 4.390244 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/app/utils.py | 1 | 4998 | # -*- coding: utf-8 -*-
"""
celery.app.utils
~~~~~~~~~~~~~~~~
App utilities: Compat settings, bugreport tool, pickling apps.
"""
from __future__ import absolute_import
import os
import platform as _platform
from celery import datastructures
from celery import platforms
from celery.utils.text import pretty
from celery.utils.imports import qualname
from .defaults import find
#: Format used to generate bugreport information.
BUGREPORT_INFO = """
software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
billiard:%(billiard_v)s %(driver_v)s
platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s
loader -> %(loader)s
settings -> transport:%(transport)s results:%(results)s
%(human_settings)s
"""
class Settings(datastructures.ConfigurationView):
    """Celery settings object.

    A layered configuration view that also resolves deprecated alias
    names for several settings.
    """
    @property
    def CELERY_RESULT_BACKEND(self):
        # Fall back to the historical alias.
        return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND')
    @property
    def BROKER_TRANSPORT(self):
        # Accept the historical aliases as well.
        return self.first('BROKER_TRANSPORT',
                          'BROKER_BACKEND', 'CARROT_BACKEND')
    @property
    def BROKER_BACKEND(self):
        """Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
        return self.BROKER_TRANSPORT
    @property
    def BROKER_HOST(self):
        # The environment variable wins over any configured URL/host.
        return (os.environ.get('CELERY_BROKER_URL') or
                self.first('BROKER_URL', 'BROKER_HOST'))
    @property
    def CELERY_TIMEZONE(self):
        # this way we also support django's time zone.
        return self.first('CELERY_TIMEZONE', 'TIME_ZONE')
    def without_defaults(self):
        """Return the configuration with the default layer stripped."""
        # the last stash is the default settings, so just skip that
        return Settings({}, self._order[:-1])
    def find_option(self, name, namespace='celery'):
        """Search for an option by (non-partial) *name*.

        Returns a ``(namespace, option_name, Option)`` tuple.

        :keyword namespace: Preferred namespace (``CELERY`` by default).
        """
        return find(name, namespace)
    def find_value_for_key(self, name, namespace='celery'):
        """Shortcut to ``get_by_parts(*find_option(name)[:-1])``"""
        return self.get_by_parts(*self.find_option(name, namespace)[:-1])
    def get_by_parts(self, *parts):
        """Return the current value for a setting given as path parts,
        e.g. ``get_by_parts('CELERY', 'DISABLE_RATE_LIMITS')``."""
        key = '_'.join(part for part in parts if part)
        return self[key]
    def humanize(self):
        """Return a human readable string of the changed configuration."""
        return '\n'.join(
            '%s %s' % (key + ':', pretty(value, width=50))
            for key, value in self.without_defaults().iteritems())
class AppPickler(object):
    """Default application pickler/unpickler.

    Rebuilds an app instance from the state that was captured when it
    was pickled, then re-applies any recorded configuration changes.
    """
    def __call__(self, cls, *args):
        # Build the constructor kwargs, create the app, then finish setup.
        state = self.build_kwargs(*args)
        app = self.construct(cls, **state)
        self.prepare(app, **state)
        return app
    def prepare(self, app, **kwargs):
        # Re-apply the configuration changes recorded at pickling time.
        app.conf.update(kwargs['changes'])
    def build_kwargs(self, *args):
        return self.build_standard_kwargs(*args)
    def build_standard_kwargs(self, main, changes, loader, backend, amqp,
                              events, log, control, accept_magic_kwargs):
        # An unpickled app must never steal the "current app" slot,
        # hence set_as_current is always False.
        return {'main': main, 'loader': loader, 'backend': backend,
                'amqp': amqp, 'changes': changes, 'events': events,
                'log': log, 'control': control, 'set_as_current': False,
                'accept_magic_kwargs': accept_magic_kwargs}
    def construct(self, cls, **kwargs):
        return cls(**kwargs)
def _unpickle_app(cls, pickler, *args):
return pickler()(cls, *args)
def bugreport(app):
    """Returns a string containing information useful in bug reports."""
    # Imported lazily so merely importing this module stays cheap.
    import billiard
    import celery
    import kombu
    try:
        conn = app.connection()
        driver_v = '%s:%s' % (conn.transport.driver_name,
                              conn.transport.driver_version())
        transport = conn.transport_cls
    except Exception:
        # The broker may be unreachable or misconfigured; report blank
        # fields instead of failing -- this runs precisely when things
        # are already broken.
        transport = driver_v = ''
    return BUGREPORT_INFO % {
        'system': _platform.system(),
        'arch': ', '.join(filter(None, _platform.architecture())),
        'py_i': platforms.pyimplementation(),
        'celery_v': celery.VERSION_BANNER,
        'kombu_v': kombu.__version__,
        'billiard_v': billiard.__version__,
        'py_v': _platform.python_version(),
        'driver_v': driver_v,
        'transport': transport,
        'results': app.conf.CELERY_RESULT_BACKEND or 'disabled',
        'human_settings': app.conf.humanize(),
        'loader': qualname(app.loader.__class__),
    }
| bsd-3-clause | 5f99ee4b8fc65e04565b029d91c626d0 | 30.2375 | 77 | 0.605842 | 3.90774 | false | false | false | false |
mozilla/firefox-flicks | vendor-local/lib/python/oauthlib/oauth2/draft25/__init__.py | 1 | 30698 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
"""
oauthlib.oauth2.draft_25
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 2.0 draft 25 requests.
"""
from oauthlib.common import Request
from oauthlib.oauth2.draft25 import tokens, grant_types
from .parameters import prepare_grant_uri, prepare_token_request
from .parameters import parse_authorization_code_response
from .parameters import parse_implicit_response, parse_token_response
# Token placement choices accepted by Client.add_token().
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
class Client(object):
    """Base OAuth2 client responsible for access tokens.

    While this class can be used to simply append tokens onto requests
    it is often more useful to use a client targeted at a specific
    workflow (see the subclasses in this module).
    """

    def __init__(self, client_id,
                 default_token_placement=AUTH_HEADER,
                 token_type='Bearer',
                 access_token=None,
                 refresh_token=None,
                 mac_key=None,
                 mac_algorithm=None,
                 **kwargs):
        """Initialize a client with commonly used attributes.

        :param client_id: Client identifier issued during registration.
        :param default_token_placement: Where to put the token when no
            placement is supplied per call: AUTH_HEADER, URI_QUERY or BODY.
        :param token_type: ``'Bearer'`` (stable) or ``'MAC'`` (experimental).
        :param access_token: A previously obtained access token, if any.
        :param refresh_token: A previously obtained refresh token, if any.
        :param mac_key: MAC session key (MAC tokens only).
        :param mac_algorithm: MAC hash algorithm (MAC tokens only).
        """
        self.client_id = client_id
        self.default_token_placement = default_token_placement
        self.token_type = token_type
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.mac_key = mac_key
        self.mac_algorithm = mac_algorithm

    @property
    def token_types(self):
        """Supported token types mapped to their respective add-methods.

        Additional tokens can be supported by extending this dictionary.
        The Bearer token spec is stable and safe to use; MAC token
        support is experimental, currently matching version 00 of the
        spec.
        """
        return {
            'Bearer': self._add_bearer_token,
            'MAC': self._add_mac_token
        }

    def add_token(self, uri, http_method='GET', body=None, headers=None,
                  token_placement=None, **kwargs):
        """Add the stored access token to the uri, body or auth header.

        Dispatches on ``self.token_type`` to the handler registered in
        :attr:`token_types`; any extra keyword arguments are forwarded
        to that handler.

        :raises ValueError: if the token type is unsupported or no
            access token has been obtained yet.
        :returns: ``(uri, headers, body)`` with the token applied.
        """
        token_placement = token_placement or self.default_token_placement
        if self.token_type not in self.token_types:
            raise ValueError("Unsupported token type: %s" % self.token_type)
        if not self.access_token:
            raise ValueError("Missing access token.")
        return self.token_types[self.token_type](uri, http_method, body,
                headers, token_placement, **kwargs)

    def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
        """Prepare an access token request body using a refresh token.

        Encodes ``grant_type=refresh_token`` together with the refresh
        token (defaulting to ``self.refresh_token``) and the optional
        ``scope`` -- which must not exceed the originally granted scope
        -- in application/x-www-form-urlencoded format.
        """
        refresh_token = refresh_token or self.refresh_token
        return prepare_token_request('refresh_token', body=body, scope=scope,
                refresh_token=refresh_token, **kwargs)

    def _add_bearer_token(self, uri, http_method='GET', body=None,
                          headers=None, token_placement=None, **kwargs):
        """Add a bearer token to the request uri, body or auth header.

        Bug fix: accepts (and ignores) extra keyword arguments so that
        ``add_token`` can forward ``**kwargs`` uniformly to any handler
        in :attr:`token_types` without raising TypeError.
        """
        if token_placement == AUTH_HEADER:
            headers = tokens.prepare_bearer_headers(self.access_token, headers)
        elif token_placement == URI_QUERY:
            uri = tokens.prepare_bearer_uri(self.access_token, uri)
        elif token_placement == BODY:
            body = tokens.prepare_bearer_body(self.access_token, body)
        else:
            raise ValueError("Invalid token placement.")
        return uri, headers, body

    def _add_mac_token(self, uri, http_method='GET', body=None,
                       headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
        """Add a MAC token to the request authorization header.

        Warning: MAC token support is experimental as the spec is not
        yet stable.
        """
        headers = tokens.prepare_mac_header(self.access_token, uri,
                self.mac_key, http_method, headers=headers, body=body, ext=ext,
                hash_algorithm=self.mac_algorithm, **kwargs)
        return uri, headers, body

    def _populate_attributes(self, response):
        """Copy commonly used response values (access_token etc.) to self.

        Only keys present in *response* are assigned, matching the
        previous per-key ``if 'x' in response`` behaviour.
        """
        for key in ('access_token', 'refresh_token', 'token_type',
                    'expires_in', 'code', 'mac_key', 'mac_algorithm'):
            if key in response:
                setattr(self, key, response.get(key))

    def prepare_request_uri(self, *args, **kwargs):
        """Abstract method used to create request URIs."""
        raise NotImplementedError("Must be implemented by inheriting classes.")

    def prepare_request_body(self, *args, **kwargs):
        """Abstract method used to create request bodies."""
        raise NotImplementedError("Must be implemented by inheriting classes.")

    def parse_request_uri_response(self, *args, **kwargs):
        """Abstract method used to parse redirection responses.

        NOTE: intentionally a no-op (returns None) rather than raising,
        preserving the original behaviour; subclasses should override.
        """

    def parse_request_body_response(self, *args, **kwargs):
        """Abstract method used to parse JSON responses.

        NOTE: intentionally a no-op (returns None) rather than raising,
        preserving the original behaviour; subclasses should override.
        """
class WebApplicationClient(Client):
    """A client utilizing the authorization code grant workflow.

    A web application is a confidential client running on a web server;
    its credentials and any issued tokens stay server side, never
    exposed to the resource owner.  The authorization code grant is a
    redirection-based flow that yields both access and refresh tokens:
    the client must be able to drive the resource owner's user-agent
    and receive the redirected response from the authorization server.
    """

    def __init__(self, client_id, code=None, **kwargs):
        super(WebApplicationClient, self).__init__(client_id, **kwargs)
        # Only record the attribute when a code was actually supplied,
        # preserving the attribute-absent state otherwise.
        if code:
            self.code = code

    def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
            state=None, **kwargs):
        """Prepare the authorization code request URI.

        Appends ``response_type=code`` and the client id, plus the
        optional ``redirect_uri``, ``scope`` and ``state`` (recommended
        as CSRF protection) query parameters, to the authorization
        endpoint URI in application/x-www-form-urlencoded format.
        """
        return prepare_grant_uri(uri, self.client_id, 'code',
                redirect_uri=redirect_uri, scope=scope, state=state, **kwargs)

    def prepare_request_body(self, code=None, body='', redirect_uri=None, **kwargs):
        """Prepare the access token request body.

        Encodes ``grant_type=authorization_code`` with the authorization
        code (defaulting to ``self.code``) and, when one was included in
        the authorization request, the identical ``redirect_uri``.
        """
        grant_code = code or self.code
        return prepare_token_request('authorization_code', code=grant_code,
                body=body, redirect_uri=redirect_uri, **kwargs)

    def parse_request_uri_response(self, uri, state=None):
        """Parse the authorization code (and echoed state) from *uri*.

        The redirection URI query carries the single-use ``code`` and,
        when one was sent, the exact ``state`` value.  The parsed values
        are stored on self and the response dict is returned.
        """
        response = parse_authorization_code_response(uri, state=state)
        self._populate_attributes(response)
        return response

    def parse_request_body_response(self, body, scope=None):
        """Parse the JSON access token response *body*.

        The parsed token attributes are stored on self and the response
        dict is returned.
        """
        response = parse_token_response(body, scope=scope)
        self._populate_attributes(response)
        return response
class UserAgentClient(Client):
    """A public client utilizing the implicit code grant workflow.

    User-agent-based applications (e.g. JavaScript running in a
    browser) are public clients: protocol data and credentials are
    visible to the resource owner.  The implicit grant returns the
    access token directly in the redirection URI fragment, performs no
    client authentication, and does not issue refresh tokens.  Unlike
    the authorization code flow there is no separate token request --
    the token arrives with the authorization response itself.
    """

    def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
            state=None, **kwargs):
        """Prepare the implicit grant request URI.

        Appends ``response_type=token`` and the client id, plus the
        optional ``redirect_uri``, ``scope`` and ``state`` (recommended
        as CSRF protection) query parameters, to the authorization
        endpoint URI in application/x-www-form-urlencoded format.
        """
        return prepare_grant_uri(uri, self.client_id, 'token',
                redirect_uri=redirect_uri, state=state, scope=scope, **kwargs)

    def parse_request_uri_response(self, uri, state=None, scope=None):
        """Parse the access token from the redirect URI fragment.

        The fragment carries ``access_token`` and ``token_type``,
        optionally ``expires_in`` and ``scope``, and -- when one was
        sent -- the exact ``state`` value.  The parsed values are stored
        on self and the response dict is returned.
        """
        response = parse_implicit_response(uri, state=state, scope=scope)
        self._populate_attributes(response)
        return response
class ClientCredentialsClient(Client):
    """A client utilizing the client credentials grant workflow.

    Used when a confidential client requests access to protected
    resources under its own control (or otherwise pre-arranged with the
    authorization server).  The client's own authentication serves as
    the authorization grant, so no separate authorization request is
    needed; this grant type must only be used by confidential clients.
    """

    def prepare_request_body(self, body='', scope=None, **kwargs):
        """Prepare the access token request body.

        Encodes ``grant_type=client_credentials`` and the optional
        ``scope`` in application/x-www-form-urlencoded format.
        """
        return prepare_token_request('client_credentials', body=body,
                scope=scope, **kwargs)

    def parse_request_body_response(self, body, scope=None):
        """Parse the JSON access token response *body*.

        The parsed token attributes are stored on self and the response
        dict is returned.  A refresh token should not be included for
        this grant type.
        """
        response = parse_token_response(body, scope=scope)
        self._populate_attributes(response)
        return response
class PasswordCredentialsClient(Client):
    """A client using the resource owner's username and password directly.

    Suitable only where the resource owner trusts the client (such as
    the device operating system or a highly privileged application), or
    to migrate existing clients from HTTP Basic/Digest authentication
    to OAuth.  Authorization servers should enable it only when other
    flows are not viable, and the client must discard the credentials
    once an access token has been obtained.
    """

    def __init__(self, client_id, username, password, **kwargs):
        super(PasswordCredentialsClient, self).__init__(client_id, **kwargs)
        # Resource owner credentials used when requesting tokens.
        self.username = username
        self.password = password

    def prepare_request_body(self, body='', scope=None, **kwargs):
        """Prepare the access token request body.

        Encodes ``grant_type=password`` with the stored username and
        password plus the optional ``scope`` in
        application/x-www-form-urlencoded format.
        """
        return prepare_token_request('password', body=body,
                username=self.username, password=self.password,
                scope=scope, **kwargs)

    def parse_request_body_response(self, body, scope=None):
        """Parse the JSON access token response *body*.

        The parsed token attributes (access token and optional refresh
        token) are stored on self and the response dict is returned.
        """
        response = parse_token_response(body, scope=scope)
        self._populate_attributes(response)
        return response
class AuthorizationEndpoint(object):
    """Authorization endpoint - used by the client to obtain authorization
    from the resource owner via user-agent redirection.

    The authorization endpoint is where the resource owner is authenticated
    and an authorization grant obtained. How the server authenticates the
    resource owner (username/password login, session cookies, ...) is beyond
    the scope of the specification.

    Requests are dispatched to the handler registered for the request's
    ``response_type`` parameter, falling back to the handler configured for
    ``default_response_type`` when the type is not recognized. Note that the
    server MUST support HTTP "GET" for this endpoint, MUST require TLS, and
    MUST ignore unrecognized request parameters.
    """

    def __init__(self, default_response_type=None, default_token=None,
                 response_types=None):
        self._default_response_type = default_response_type
        self._default_token = default_token or tokens.BearerToken()
        self._response_types = response_types or {}

    @property
    def response_types(self):
        return self._response_types

    @property
    def default_response_type(self):
        return self._default_response_type

    @property
    def default_response_type_handler(self):
        return self.response_types.get(self.default_response_type)

    @property
    def default_token(self):
        return self._default_token

    def create_authorization_response(self, uri, http_method='GET', body=None,
                                      headers=None, scopes=None, credentials=None):
        """Extract response_type and route to the designated handler."""
        request = Request(uri, http_method=http_method, body=body, headers=headers)
        request.authorized_scopes = scopes
        # Expose any extra credentials as attributes on the request object.
        for attr_name, attr_value in (credentials or {}).items():
            setattr(request, attr_name, attr_value)
        handler = self.response_types.get(
            request.response_type, self.default_response_type_handler)
        return handler.create_authorization_response(request, self.default_token)

    def validate_authorization_request(self, uri, http_method='GET', body=None,
                                       headers=None):
        """Extract response_type and route to the designated handler."""
        request = Request(uri, http_method=http_method, body=body, headers=headers)
        handler = self.response_types.get(
            request.response_type, self.default_response_type_handler)
        return handler.validate_authorization_request(request)
class TokenEndpoint(object):
    """Token endpoint - issues tokens to clients.

    Each request is dispatched to the grant type handler registered for the
    request's ``grant_type`` parameter, falling back to the handler configured
    for ``default_grant_type`` when the grant type is not recognized.
    """

    def __init__(self, default_grant_type=None, default_token_type=None,
                 grant_types=None):
        self._default_grant_type = default_grant_type
        self._default_token_type = default_token_type or tokens.BearerToken()
        self._grant_types = grant_types or {}

    @property
    def grant_types(self):
        return self._grant_types

    @property
    def default_grant_type(self):
        return self._default_grant_type

    @property
    def default_grant_type_handler(self):
        return self.grant_types.get(self.default_grant_type)

    @property
    def default_token_type(self):
        return self._default_token_type

    def create_token_response(self, uri, http_method='GET', body=None, headers=None):
        """Extract grant_type and route to the designated handler."""
        request = Request(uri, http_method=http_method, body=body, headers=headers)
        handler = self.grant_types.get(request.grant_type,
                                       self.default_grant_type_handler)
        return handler.create_token_response(request, self.default_token_type)
class ResourceEndpoint(object):
    """Resource endpoint - verifies tokens presented with protected resource
    requests, dispatching to a token type handler (e.g. Bearer)."""

    def __init__(self, default_token=None, token_types=None):
        self._tokens = token_types or {'Bearer': tokens.BearerToken()}
        self._default_token = default_token

    @property
    def default_token(self):
        return self._default_token

    @property
    def default_token_type_handler(self):
        return self.tokens.get(self.default_token)

    @property
    def tokens(self):
        return self._tokens

    def verify_request(self, uri, http_method='GET', body=None, headers=None):
        """Validate client, code etc, return body + headers"""
        request = Request(uri, http_method, body, headers)
        request.token_type = self.find_token_type(request)
        token_type_handler = self.tokens.get(request.token_type,
                                             self.default_token_type_handler)
        return token_type_handler.validate_request(request), request

    def find_token_type(self, request):
        """Token type identification.

        RFC 6749 does not provide a method for easily differentiating between
        different token types during protected resource access. We estimate
        the most likely token type (if any) by asking each known token type
        to give an estimation based on the request.

        :return: Name of the token type with the highest estimate, or None
            when no token types are registered.
        """
        # BUG FIX: estimates must be sorted in descending order so the
        # *highest* estimate (the most likely token type, per the docstring)
        # wins; the previous ascending sort picked the least likely type.
        estimates = sorted(((t.estimate_type(request), n)
                            for n, t in self.tokens.items()), reverse=True)
        return estimates[0][1] if estimates else None
class Server(AuthorizationEndpoint, TokenEndpoint, ResourceEndpoint):
    """Combined authorization, token and resource endpoint.

    NOTE(review): no cooperative __init__ is defined here, so instantiating
    this class only runs AuthorizationEndpoint.__init__ (first in the MRO);
    TokenEndpoint and ResourceEndpoint attributes are never initialized --
    confirm whether this class is meant to be instantiated directly.
    """
    pass
class WebApplicationServer(AuthorizationEndpoint, TokenEndpoint, ResourceEndpoint):
    """An all-in-one endpoint featuring Authorization code grant and Bearer tokens."""

    def __init__(self, request_validator, *args, **kwargs):
        # A single AuthorizationCodeGrant handler is shared by both the
        # authorization and token endpoints; one BearerToken handler both
        # issues and validates tokens.
        # NOTE(review): *args/**kwargs are accepted but silently dropped --
        # confirm whether they should be forwarded anywhere.
        auth_grant = grant_types.AuthorizationCodeGrant(request_validator)
        bearer = tokens.BearerToken(request_validator)
        # Each endpoint base class is initialized explicitly (no super() chain).
        AuthorizationEndpoint.__init__(self, default_response_type='code',
                                       response_types={'code': auth_grant})
        TokenEndpoint.__init__(self, default_grant_type='authorization_code',
                               grant_types={'authorization_code': auth_grant},
                               default_token_type=bearer)
        ResourceEndpoint.__init__(self, default_token='Bearer',
                                  token_types={'Bearer': bearer})
| bsd-3-clause | 92966c379242121909d9514da3e30eff | 42.854286 | 122 | 0.663268 | 4.460622 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/fhirreference_tests.py | 1 | 5773 | # -*- coding: utf-8 -*-
import io
import json
import os.path
import logging
import unittest
import models.questionnaire as questionnaire
import models.medication as medication
import models.resource as resource
import models.valueset as valueset
import models.patient as patient
import models.bundle as bundle
import server
logging.basicConfig(level=logging.CRITICAL)
class TestResourceReference(unittest.TestCase):
    """Exercises FHIRReference.resolved() against contained resources,
    server-relative references and intra-bundle references, using local
    JSON fixture files."""

    def testContainedResourceDetection(self):
        with io.open('test_contained_resource.json', 'r', encoding='utf-8') as h:
            data = json.load(h)
        q = questionnaire.Questionnaire(data)
        self.assertIsNotNone(q, "Must instantiate Questionnaire")
        self.assertEqual('Questionnaire', q.resource_type)
        group = q.item[0].item[3]
        self.assertEqual('Observation.subject', group.linkId)
        question = group.item[0]
        self.assertEqual('Observation.subject._type', question.linkId)
        self.assertIsNotNone(question.answerOption)
        # resolved() without a resource class argument must raise.
        with self.assertRaises(Exception):
            question.answerOption[0].valueReference.resolved()
        reference = question.answerOption[0].valueReference
        # 1st resolve, extracting from contained resources
        contained = reference.resolved(medication.Medication)
        self.assertIsNone(contained, "Must not resolve on resource type mismatch")
        contained = reference.resolved(valueset.ValueSet)
        self.assertIsNotNone(contained, "Must resolve contained ValueSet")
        self.assertEqual('ValueSet', contained.resource_type)
        self.assertEqual('Type options for Observation.subject', contained.name)
        # 2nd resolve, should pull from cache
        contained = reference.resolved(medication.Medication)
        self.assertIsNone(contained, "Must not resolve on resource type mismatch")
        contained = reference.resolved(resource.Resource)
        self.assertIsNotNone(contained, "Must resolve contained ValueSet even if requesting `Resource`")
        contained = reference.resolved(valueset.ValueSet)
        self.assertIsNotNone(contained, "Must resolve contained ValueSet")
        self.assertEqual('ValueSet', contained.resource_type)

    def testRelativeReference(self):
        with io.open('test_relative_reference.json', 'r', encoding='utf-8') as h:
            data = json.load(h)
        q = questionnaire.Questionnaire(data)
        self.assertIsNotNone(q, "Must instantiate Questionnaire")
        self.assertEqual('Questionnaire', q.resource_type)
        # Attach a fake server so relative references can be fetched from disk.
        q._server = MockServer()
        group = q.item[0].item[0]
        self.assertEqual('Observation.subject', group.linkId)
        question = group.item[0]
        self.assertEqual('Observation.subject._type', question.linkId)
        self.assertIsNotNone(question.answerOption)
        with self.assertRaises(Exception):
            question.answerOption[0].valueReference.resolved()
        reference = question.answerOption[0].valueReference
        # resolve relative resource
        relative = reference.resolved(valueset.ValueSet)
        self.assertIsNotNone(relative, "Must resolve relative ValueSet")
        self.assertEqual('ValueSet', relative.resource_type)
        self.assertEqual('Type options for Observation.subject', relative.name)
        # 2nd resolve, should pull from cache
        relative = reference.resolved(medication.Medication)
        self.assertIsNone(relative, "Must not resolve on resource type mismatch")
        relative = reference.resolved(resource.Resource)
        self.assertIsNotNone(relative, "Must resolve relative ValueSet even if requesting `Resource`")

    def testBundleReferences(self):
        with io.open('test_bundle.json', 'r', encoding='utf-8') as h:
            data = json.load(h)
        b = bundle.Bundle(data)
        self.assertIsNotNone(b, "Must instantiate Bundle")
        self.assertEqual('Bundle', b.resource_type)
        #b._server = MockServer()
        # get resources
        pat23 = b.entry[0].resource
        self.assertEqual('Patient', pat23.resource_type)
        self.assertEqual('Darth', pat23.name[0].given[0])
        patURN = b.entry[1].resource
        self.assertEqual('Patient', patURN.resource_type)
        self.assertEqual('Ben', patURN.name[0].given[0])
        obs123 = b.entry[2].resource
        self.assertEqual('Observation', obs123.resource_type)
        obs56 = b.entry[3].resource
        self.assertEqual('Observation', obs56.resource_type)
        obs34 = b.entry[4].resource
        self.assertEqual('Observation', obs34.resource_type)
        # test resolving w/o server (won't work)
        res = obs123.subject.resolved(patient.Patient)
        self.assertIsNone(res)
        # test resolving with server
        b._server = MockServer()
        res = obs123.subject.resolved(patient.Patient)
        self.assertEqual(res, pat23)
        res = obs123.subject.resolved(medication.Medication)
        self.assertIsNone(res, "Must not resolve on type mismatch")
        res = obs56.subject.resolved(patient.Patient)
        self.assertEqual(res, patURN)
        res = obs34.subject.resolved(patient.Patient)
        self.assertIsNone(res, "Must not resolve Patient on same server but different endpoint")
class MockServer(server.FHIRServer):
    """Fake FHIR server that reads resources from local JSON files instead of
    performing HTTP requests."""

    def __init__(self):
        super().__init__(None, base_uri='https://fhir.smarthealthit.org')

    def request_json(self, path, nosign=False):
        """Load the fixture for ``path`` from the working directory.

        The path is mapped to a filename by joining its head/tail with an
        underscore (e.g. "Patient/p23" -> "Patient_p23.json").

        :param path: Relative resource path; must be non-empty.
        :param nosign: Ignored; accepted for interface compatibility.
        :return: The parsed JSON document.
        """
        assert path
        parts = os.path.split(path)
        filename = '_'.join(parts) + '.json'
        with io.open(filename, 'r', encoding='utf-8') as handle:
            return json.load(handle)
        # BUG FIX: removed an unreachable "return None" that followed the
        # return inside the with-block.
| bsd-3-clause | d89241f3dd285dab028c98cbdfe31b0d | 41.762963 | 104 | 0.671228 | 4.244853 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/a2424db07997_add_last_active_retention_time.py | 1 | 2278 | """add_last_active_retention_time
Revision ID: a2424db07997
Revises: b54f52bb5fc5, 56c49230d778
Create Date: 2021-09-14 16:26:40.798067
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'a2424db07997'
down_revision = ('b54f52bb5fc5', '56c49230d778')  # tuple: this is a merge revision with two parents
branch_labels = None
depends_on = None


def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (upgrade_rdr / upgrade_metrics)."""
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade."""
    globals()["downgrade_%s" % engine_name]()


def upgrade_rdr():
    """Add last_active_retention_activity_time to participant_summary and
    retention_eligible_metrics (both nullable, so no backfill is required)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('participant_summary', sa.Column('last_active_retention_activity_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('retention_eligible_metrics', sa.Column('last_active_retention_activity_time', rdr_service.model.utils.UTCDateTime6(fsp=6), nullable=True))
    # ### end Alembic commands ###


def downgrade_rdr():
    """Drop the columns added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('retention_eligible_metrics', 'last_active_retention_activity_time')
    op.drop_column('participant_summary', 'last_active_retention_activity_time')
    # ### end Alembic commands ###


def upgrade_metrics():
    # No changes to the metrics database in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade_metrics():
    # No changes to the metrics database in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | d226c2b6d5b75e5a94bf03e388e65a3d | 35.741935 | 157 | 0.756365 | 3.553822 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/16452fdb1334_summary_cope_survey_fields.py | 1 | 3793 | """summary cope survey fields
Revision ID: 16452fdb1334
Revises: c0394a487b8b
Create Date: 2020-05-12 14:02:39.592019
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '16452fdb1334'
down_revision = 'c0394a487b8b'
branch_labels = None
depends_on = None


def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (upgrade_rdr / upgrade_metrics)."""
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade."""
    globals()["downgrade_%s" % engine_name]()


def upgrade_rdr():
    """Add status/authored/time columns for the May, June and July COPE surveys
    to participant_summary (all nullable, so no backfill is required)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_july', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_july_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_july_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_june', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_june_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_june_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_may', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_may_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('participant_summary', sa.Column('questionnaire_on_cope_may_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    # ### end Alembic commands ###


def downgrade_rdr():
    """Drop the COPE survey columns added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('participant_summary', 'questionnaire_on_cope_may_time')
    op.drop_column('participant_summary', 'questionnaire_on_cope_may_authored')
    op.drop_column('participant_summary', 'questionnaire_on_cope_may')
    op.drop_column('participant_summary', 'questionnaire_on_cope_june_time')
    op.drop_column('participant_summary', 'questionnaire_on_cope_june_authored')
    op.drop_column('participant_summary', 'questionnaire_on_cope_june')
    op.drop_column('participant_summary', 'questionnaire_on_cope_july_time')
    op.drop_column('participant_summary', 'questionnaire_on_cope_july_authored')
    op.drop_column('participant_summary', 'questionnaire_on_cope_july')
    # ### end Alembic commands ###


def upgrade_metrics():
    # No changes to the metrics database in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade_metrics():
    # No changes to the metrics database in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | c8f1811d5a23a36f1a2ed465d31fac36 | 48.907895 | 147 | 0.757712 | 3.420198 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/tools/load_test_locustfile.py | 1 | 8228 | """User behavior definition for load-testing via Locust. Run using tools/load_test.sh.
Locust docs: http://docs.locust.io/en/latest/writing-a-locustfile.html
Instructions:
* In your venv, run easy_install install locustio
* Run "export sdk_dir" to export the path to your locally installed Google Cloud SDK.
* Run load_test.sh, which wraps this and starts a locust server, e.g.:
tools/load_test.sh --project all-of-us-rdr-staging --account dan.rodney@pmi-ops.org
* Once started, locust prints "Starting web monitor at *:8089". Open
http://localhost:8089 to view the control/status page.
* Set the number of users to 100 (and hatch/sec to an arbitrary number, using 100 will start all
workers immediately). With 100 locusts, weights can be thought of as "number of workers."
* Each worker will run a task and then pause (somewhere from `min_wait` to `max_wait`
milliseconds). It picks one of its class methods to run, which in turn are weighted by
the argument to `@task`.
* Click run, locusts hatch and run, gather stats, click stop.
We expect very low traffic (100-1K qpd for most endpoints). These load tests generate much more
traffic to stress test the system / simulate traffic spikes.
"""
import json
import os
import random
import re
import time
import urllib.parse
from urllib.parse import urlencode
from locust import Locust, TaskSet, events, task
from rdr_service.data_gen.fake_participant_generator import FakeParticipantGenerator
from rdr_service.rdr_client.client import Client, HttpException
class _ReportingClient(Client):
    """Wrapper around the API Client which reports request stats to Locust."""

    def request_json(self, path, **kwargs):
        """Perform the request and fire a Locust success/failure event.

        Returns the parsed JSON response on success; returns None (implicitly)
        when the request raises HttpException, after reporting the failure.
        """
        # "name" groups Locust stats across varying IDs / query params.
        event_data = {"request_type": "REST", "name": self._clean_up_url(path)}
        event = None
        try:
            start_seconds = time.time()
            resp = super(_ReportingClient, self).request_json(path, **kwargs)
            event = events.request_success
            # Approximate payload size by re-serializing the parsed response.
            event_data["response_length"] = len(json.dumps(resp))
            return resp
        except HttpException as e:
            event = events.request_failure
            event_data["exception"] = e
        finally:
            # Fire exactly one event; only exceptions other than HttpException
            # leave event as None (and propagate without being reported).
            if event is not None:
                event_data["response_time"] = int(1000 * (time.time() - start_seconds))
                event.fire(**event_data)

    def _clean_up_url(self, path):
        """Normalize a request path into a stable Locust stat name."""
        # Replace varying IDs with a placeholder so counts get aggregated.
        name = re.sub("P[0-9]+", ":participant_id", path)
        # Convert absolute URLs to relative.
        strip_prefix = "%s/%s/" % (self.instance, self.base_path)
        if name.startswith(strip_prefix):
            name = name[len(strip_prefix):]
        # Prefix relative URLs with the root path for readability.
        if not name.startswith("http"):
            name = "/" + name
        # Replace query parameters with non-varying placeholders.
        parsed = urllib.parse.urlparse(name)
        query = urllib.parse.parse_qs(parsed.query)
        for k in list(query.keys()):
            query[k] = "X"
        name = parsed._replace(query=urlencode(query)).geturl()
        return name
class _AuthenticatedLocust(Locust):
    """Base for authenticated RDR REST API callers."""

    def __init__(self, *args, **kwargs):
        super(_AuthenticatedLocust, self).__init__(*args, **kwargs)
        # Credentials and target instance are injected via environment
        # variables (set up by tools/load_test.sh per the module docstring).
        creds_file = os.environ["LOCUST_CREDS_FILE"]
        instance = os.environ["LOCUST_TARGET_INSTANCE"]
        # The "client" field gets copied to TaskSet instances.
        self.client = _ReportingClient(creds_file=creds_file, default_instance=instance, parse_cli=False)
        self.participant_generator = FakeParticipantGenerator(self.client, use_local_files=True)
class VersionCheckUser(_AuthenticatedLocust):
    """Periodically polls the API root (version) endpoint."""
    # 2 out of 100 users (Locust count of 100 recommended in load_test.sh).
    weight = 2
    # Hit the root/version endpoint every 10s.
    min_wait = 1000 * 10 * weight
    max_wait = min_wait

    class task_set(TaskSet):  # The "task_set" field name is what's used by the Locust superclass.
        @task(1)  # task weight: larger number == pick this task more often
        def index(self):
            # Empty path == request against the API root.
            self.client.request_json("")
class SyncPhysicalMeasurementsUser(_AuthenticatedLocust):
    """Walks the full PhysicalMeasurements history, following paging links."""
    weight = 2
    # In practice we expect 1 sync request/minute. Use the default 1s wait time here.

    class task_set(TaskSet):
        @task(1)
        def get_sync(self):
            # First page uses a relative path; subsequent "next" links are
            # absolute URLs, hence the absolute_path flip below.
            next_url = "PhysicalMeasurements/_history"
            absolute_path = False
            while next_url:
                history = self.client.request_json(next_url, absolute_path=absolute_path)
                link = history.get("link")
                # NOTE(review): only link[0] is inspected; assumes the "next"
                # relation, when present, is always first -- confirm against
                # the API's paging response format.
                if link and link[0]["relation"] == "next":
                    next_url = link[0]["url"]
                    absolute_path = True
                else:
                    next_url = None
class SignupUser(_AuthenticatedLocust):
    """Simulates new participant sign-ups via the fake participant generator."""
    weight = 2
    # We estimate 100-1000 signups/day or 80-800s between signups (across all users).
    # Simulate 2 signups/s across a number of users for the load test.
    min_wait = weight * 500
    max_wait = min_wait

    class task_set(TaskSet):
        @task(1)
        def register_participant(self):
            # Both positional flags enabled: include_physical_measurements
            # and include_biobank_orders.
            self.locust.participant_generator.generate_participant(
                True, True)
class HealthProUser(_AuthenticatedLocust):
    """Queries run by HealthPro: look up user by name + dob or ID, and get summaries."""
    weight = 94
    # As of 2017 August, in 24h we see about 1000 ParticipantSummary and a similar number of
    # individual summary requests. Simulate more load than that (about 1M/day) to force resource
    # contention.
    min_wait = 1000
    max_wait = 10000

    class task_set(TaskSet):
        def __init__(self, *args, **kwargs):
            super(HealthProUser.task_set, self).__init__(*args, **kwargs)
            # Populated by on_start; consumed by the lookup tasks below.
            self.participant_ids = []
            self.participant_name_dobs = []

        def on_start(self):
            """Fetches some participant data from the work queue API for subsequent tasks."""
            absolute_path = False
            summary_url = "ParticipantSummary?hpoId=PITT"
            for _ in range(3):  # Fetch a few pages of participants.
                resp = self.client.request_json(summary_url, absolute_path=absolute_path)
                for summary in resp["entry"]:
                    resource = summary["resource"]
                    self.participant_ids.append(resource["participantId"])
                    try:
                        self.participant_name_dobs.append(
                            [resource[k] for k in ("firstName", "lastName", "dateOfBirth")]
                        )
                    except KeyError:
                        pass  # Ignore some participants, missing DOB.
                if "link" in resp and resp["link"][0]["relation"] == "next":
                    summary_url = resp["link"][0]["url"]
                    absolute_path = True
                else:
                    break

        @task(10)  # ParticipantSummary is the most popular API endpoint.
        def query_summary(self):
            search_params = {
                # BUG FIX: the sort key was previously pre-encoded as
                # "_sort%3Adesc"; urlencode() then escaped the '%' again,
                # sending the double-encoded parameter "_sort%253Adesc".
                # Use the literal name and let urlencode() do the escaping.
                "_sort:desc": "consentForStudyEnrollmentTime",
                # HealthPro always requests 1000 for the work queue.
                "_count": "1000",
                "hpoId": random.choice(("PITT", "UNSET", "COLUMBIA")),
            }
            self.client.request_json("ParticipantSummary?%s" % urlencode(search_params))

        @task(1)
        def get_participant_by_id(self):
            self.client.request_json("Participant/%s" % random.choice(self.participant_ids))

        @task(1)
        def get_participant_summary_by_id(self):
            self.client.request_json("Participant/%s/Summary" % random.choice(self.participant_ids))

        @task(1)
        def look_up_participant_by_name_dob(self):
            _, last_name, dob = random.choice(self.participant_name_dobs)
            self.client.request_json("ParticipantSummary?dateOfBirth=%s&lastName=%s" % (dob, last_name))
| bsd-3-clause | c65187d6461d667db79f48abec7ba22e | 41.194872 | 105 | 0.620564 | 3.994175 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/978d12edb6c5_rename_isduplicate_column.py | 1 | 2454 | """rename isDuplicate column
Revision ID: 978d12edb6c5
Revises: e7cf7c2a25f2
Create Date: 2022-01-12 19:44:59.564063
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '978d12edb6c5'
down_revision = 'e7cf7c2a25f2'
branch_labels = None
depends_on = None


def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (upgrade_rdr / upgrade_metrics)."""
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade."""
    globals()["downgrade_%s" % engine_name]()


# Manually edited commands to force alter_column instead of add_column/drop_column sequence. Maintains nullable=True
# from original column creation (1dff4309d707_hash_and_duplicate_flag_on_.py) to minimize impact of migration
def upgrade_rdr():
    """Rename questionnaire_response.is_duplicate to classification_type and
    backfill any NULL values with 0."""
    op.alter_column('questionnaire_response', 'is_duplicate', new_column_name='classification_type',
                    existing_type=mysql.TINYINT, nullable=True)
    # Production data has no nulls but adding this UPDATE to ensure lower environments are clean after migration
    op.execute('UPDATE questionnaire_response SET classification_type = 0 where classification_type is NULL')
    # ### end Alembic commands ###


def downgrade_rdr():
    """Rename classification_type back to is_duplicate (NULL backfill is not reversed)."""
    op.alter_column('questionnaire_response', 'classification_type', new_column_name='is_duplicate',
                    existing_type=mysql.TINYINT, nullable=True)
    # ### end Alembic commands ###


def upgrade_metrics():
    # No changes to the metrics database in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade_metrics():
    # No changes to the metrics database in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 7f7ceb4efd4026859efaec71a9a623cd | 37.952381 | 125 | 0.762429 | 3.73516 | false | false | false | false |
all-of-us/raw-data-repository | tests/dao_tests/test_response_validator.py | 1 | 21957 | from dataclasses import dataclass, field
from datetime import date, datetime
import mock
from typing import Dict, List
from rdr_service.code_constants import PMI_SKIP_CODE
from rdr_service.dao.questionnaire_response_dao import ResponseValidator
from rdr_service.model.code import Code
from rdr_service.model.questionnaire import QuestionnaireConcept, QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer
from rdr_service.model.survey import SurveyQuestionType
from tests.helpers.unittest_base import BaseTestCase
@dataclass
class QuestionDefinition:
    """Lightweight description of a survey question, used by the tests to
    build survey rows via the data generator."""
    # Question type (tests use SurveyQuestionType.UNKNOWN for legacy surveys).
    question_type: SurveyQuestionType = None
    # Answer option codes; left empty for free-text questions.
    options: List[Code] = field(default_factory=list)
    # Validation settings forwarded to create_database_survey_question.
    validation: str = None
    validation_min: str = None
    validation_max: str = None
@mock.patch('rdr_service.dao.questionnaire_response_dao.logging')
class ResponseValidatorTest(BaseTestCase):
    def setUp(self, **kwargs) -> None:
        """Create the PMI_SKIP answer code that the validator tests rely on."""
        super(ResponseValidatorTest, self).setUp(**kwargs)
        self.skip_answer_code = self.data_generator.create_database_code(value=PMI_SKIP_CODE)
    def _build_questionnaire_and_response(self, questions: Dict[Code, QuestionDefinition],
                                          answers: Dict[Code, QuestionnaireResponseAnswer],
                                          survey_import_time=datetime(2020, 12, 4),
                                          questionnaire_created_time=datetime(2021, 4, 1)):
        """Build a survey + questionnaire history for the given question
        definitions and a response containing the given answers.

        :param questions: Map of question code to its QuestionDefinition.
        :param answers: Map of question code to the answer to give for it.
        :param survey_import_time: importTime recorded on the generated survey.
        :param questionnaire_created_time: created time of the questionnaire history.
        :return: (questionnaire_history, questionnaire_response) tuple.
        """
        module_code = self.data_generator.create_database_code(value='test_survey')

        # Build survey structure for defined questions
        survey_questions = []
        for question_code, definition in questions.items():
            survey_question_options = [
                self.data_generator.create_database_survey_question_option(codeId=option_code.codeId)
                for option_code in definition.options
            ]
            survey_questions.append(self.data_generator.create_database_survey_question(
                code=question_code,
                options=survey_question_options,
                questionType=definition.question_type,
                validation=definition.validation,
                validation_min=definition.validation_min,
                validation_max=definition.validation_max
            ))
        self.data_generator.create_database_survey(
            importTime=survey_import_time,
            code=module_code,
            questions=survey_questions
        )

        # Build related QuestionnaireHistory for the response
        questionnaire_questions = [
            self.data_generator._questionnaire_question(
                codeId=question_code.codeId,
                code=question_code
            )
            for question_code in questions.keys()
        ]
        questionnaire_history = self.data_generator.create_database_questionnaire_history(
            questions=questionnaire_questions,
            concepts=[QuestionnaireConcept(codeId=module_code.codeId)],
            created=questionnaire_created_time
        )

        # Build the response to the questionnaire
        question_code_map: Dict[int, QuestionnaireQuestion] = {
            question.codeId: question for question in questionnaire_questions
        }
        for code, answer in answers.items():
            # Link each answer to its questionnaire question via the code id.
            question = question_code_map[code.codeId]
            answer.questionId = question.questionnaireQuestionId
            answer.question = question
        questionnaire_response = QuestionnaireResponse(
            answers=list(answers.values())
        )
        return questionnaire_history, questionnaire_response
def test_simple_survey_response_validation(self, mock_logging):
"""
For surveys derived from the legacy code system, we can only verify that questions that
have options are responded to with valueCodeId
"""
multi_select_question_code = self.data_generator.create_database_code(value='multi_select')
free_text_question_code = self.data_generator.create_database_code(value='free_text')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
multi_select_question_code: QuestionDefinition(question_type=SurveyQuestionType.UNKNOWN, options=[
self.data_generator.create_database_code(value='option_a'),
self.data_generator.create_database_code(value='option_b')
]),
free_text_question_code: QuestionDefinition(question_type=SurveyQuestionType.UNKNOWN)
},
answers={
multi_select_question_code: QuestionnaireResponseAnswer(
valueString='answering with string rather than something selected from a list of options'
),
free_text_question_code: QuestionnaireResponseAnswer(
valueCodeId=self.data_generator.create_database_code(value='unknown_option')
)
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
mock_logging.warning.assert_has_calls([
mock.call(f'Answer for {multi_select_question_code.value} gives no value code id when the question '
f'has options defined'),
mock.call(f'Answer for {free_text_question_code.value} gives a value code id when no options are defined')
])
def test_option_select_data_type_validation(self, mock_logging):
"""
Survey questions that are defined with options should be answered with valueCodeId
"""
dropdown_question_code = self.data_generator.create_database_code(value='dropdown_select')
radio_question_code = self.data_generator.create_database_code(value='radio_select')
checkbox_question_code = self.data_generator.create_database_code(value='checkbox_select')
# The validator only checks to see if there are options and doesn't really mind what they are,
# using the same options for all the questions for simplicity
options = [
self.data_generator.create_database_code(value=option_value)
for option_value in ['option_a', 'option_b']
]
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
dropdown_question_code: QuestionDefinition(question_type=SurveyQuestionType.DROPDOWN, options=options),
radio_question_code: QuestionDefinition(question_type=SurveyQuestionType.RADIO, options=options),
checkbox_question_code: QuestionDefinition(question_type=SurveyQuestionType.CHECKBOX, options=options)
},
answers={
dropdown_question_code: QuestionnaireResponseAnswer(valueString='text answer'),
radio_question_code: QuestionnaireResponseAnswer(valueString='text answer'),
checkbox_question_code: QuestionnaireResponseAnswer(valueString='text answer')
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
no_value_code_id_used_message = 'Answer for {} gives no value code id when the question has options defined'
mock_logging.warning.assert_has_calls([
mock.call(no_value_code_id_used_message.format(dropdown_question_code.value)),
mock.call(no_value_code_id_used_message.format(radio_question_code.value)),
mock.call(no_value_code_id_used_message.format(checkbox_question_code.value))
])
def test_log_for_unimplemented_validation(self, mock_logging):
calc_question_code = self.data_generator.create_database_code(value='calc_question')
yesno_question_code = self.data_generator.create_database_code(value='yesno_question')
truefalse_question_code = self.data_generator.create_database_code(value='truefalse_question')
file_question_code = self.data_generator.create_database_code(value='file_question')
slider_question_code = self.data_generator.create_database_code(value='slider_question')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
calc_question_code: QuestionDefinition(question_type=SurveyQuestionType.CALC),
yesno_question_code: QuestionDefinition(question_type=SurveyQuestionType.YESNO),
truefalse_question_code: QuestionDefinition(question_type=SurveyQuestionType.TRUEFALSE),
file_question_code: QuestionDefinition(question_type=SurveyQuestionType.FILE),
slider_question_code: QuestionDefinition(question_type=SurveyQuestionType.SLIDER)
},
answers={
question_code: QuestionnaireResponseAnswer()
for question_code in [
calc_question_code,
yesno_question_code,
truefalse_question_code,
file_question_code,
slider_question_code
]
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
no_validation_check_message = 'No validation check implemented for answer to {} with question type {}'
mock_logging.warning.assert_has_calls([
mock.call(no_validation_check_message.format(calc_question_code.value, SurveyQuestionType.CALC)),
mock.call(no_validation_check_message.format(yesno_question_code.value, SurveyQuestionType.YESNO)),
mock.call(no_validation_check_message.format(truefalse_question_code.value, SurveyQuestionType.TRUEFALSE)),
mock.call(no_validation_check_message.format(file_question_code.value, SurveyQuestionType.FILE)),
mock.call(no_validation_check_message.format(slider_question_code.value, SurveyQuestionType.SLIDER))
])
def test_log_for_text_questions_not_answered_with_text(self, mock_logging):
text_question_code = self.data_generator.create_database_code(value='text_question')
note_question_code = self.data_generator.create_database_code(value='note_question')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
text_question_code: QuestionDefinition(question_type=SurveyQuestionType.TEXT),
note_question_code: QuestionDefinition(question_type=SurveyQuestionType.NOTES)
},
answers={
text_question_code: QuestionnaireResponseAnswer(valueInteger=1),
note_question_code: QuestionnaireResponseAnswer(valueInteger=1)
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
mock_logging.warning.assert_has_calls([
mock.call(f'No valueString answer given for text-based question {text_question_code.value}'),
mock.call(f'No valueString answer given for text-based question {note_question_code.value}')
])
def test_question_validation_data_type(self, mock_logging):
"""Validation strings give that a TEXT question should be another datatype"""
date_question_code = self.data_generator.create_database_code(value='date_question')
integer_question_code = self.data_generator.create_database_code(value='integer_question')
unknown_question_code = self.data_generator.create_database_code(value='unknown_question')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
date_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='date_mdy'
),
integer_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='integer'
),
unknown_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='abc'
),
},
answers={
date_question_code: QuestionnaireResponseAnswer(valueString='test'),
integer_question_code: QuestionnaireResponseAnswer(valueString='test'),
unknown_question_code: QuestionnaireResponseAnswer(valueString='test')
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
mock_logging.warning.assert_has_calls([
mock.call(f'No valueDate answer given for date-based question {date_question_code.value}'),
mock.call(f'No valueInteger answer given for integer-based question {integer_question_code.value}'),
mock.call(f'Unrecognized validation string "abc" for question {unknown_question_code.value}')
])
def test_question_validation_min_max(self, mock_logging):
date_question_code = self.data_generator.create_database_code(value='date_question')
integer_question_code = self.data_generator.create_database_code(value='integer_question')
broken_date_question_code = self.data_generator.create_database_code(value='broken_date_question')
broken_integer_question_code = self.data_generator.create_database_code(value='broken_integer_question')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
date_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='date_mdy', validation_min='2020-09-01'
),
integer_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='integer', validation_min='0', validation_max='10'
),
broken_date_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='date', validation_min='test_bad_date'
),
broken_integer_question_code: QuestionDefinition(
question_type=SurveyQuestionType.TEXT, validation='integer', validation_min='five'
)
},
answers={
date_question_code: QuestionnaireResponseAnswer(valueDate=date(2020, 7, 4)),
integer_question_code: QuestionnaireResponseAnswer(valueInteger=11),
broken_date_question_code: QuestionnaireResponseAnswer(valueDate=date(2020, 7, 4)),
broken_integer_question_code: QuestionnaireResponseAnswer(valueInteger=11),
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
mock_logging.warning.assert_has_calls([
mock.call(
f'Given answer "2020-07-04" is less than expected min '
f'"2020-09-01" for question {date_question_code.value}'
),
mock.call(
f'Given answer "11" is greater than expected max "10" for question {integer_question_code.value}'
)
])
mock_logging.error.assert_has_calls([
mock.call(
f'Unable to parse validation string for question {broken_date_question_code.value}', exc_info=True
),
mock.call(
f'Unable to parse validation string for question {broken_integer_question_code.value}', exc_info=True
)
])
def test_option_select_option_validation(self, mock_logging):
"""
Survey questions that are defined with options should be answered with one of those options
"""
dropdown_question_code = self.data_generator.create_database_code(value='dropdown_select')
unrecognized_answer_code = self.data_generator.create_database_code(value='completely_different_option')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
dropdown_question_code: QuestionDefinition(question_type=SurveyQuestionType.DROPDOWN, options=[
self.data_generator.create_database_code(value='option_a'),
self.data_generator.create_database_code(value='option_b')
])
},
answers={
dropdown_question_code: QuestionnaireResponseAnswer(
valueCodeId=unrecognized_answer_code.codeId
)
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
mock_logging.warning.assert_called_with(
f'Code ID {unrecognized_answer_code.codeId} is an invalid answer to {dropdown_question_code.value}'
)
def test_any_question_can_be_skipped(self, mock_logging):
"""
Any question should be able to be skipped, even if it doesn't take option codes
"""
select_code = self.data_generator.create_database_code(value='select')
text_question_code = self.data_generator.create_database_code(value='text')
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
select_code: QuestionDefinition(question_type=SurveyQuestionType.DROPDOWN, options=[
self.data_generator.create_database_code(value='option_a'),
self.data_generator.create_database_code(value='option_b')
]),
text_question_code: QuestionDefinition(question_type=SurveyQuestionType.TEXT)
},
answers={
select_code: QuestionnaireResponseAnswer(valueCodeId=self.skip_answer_code.codeId),
text_question_code: QuestionnaireResponseAnswer(valueCodeId=self.skip_answer_code.codeId)
}
)
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
# No logs should have been made because of the skip codes
mock_logging.warning.assert_not_called()
mock_logging.error.assert_not_called()
def test_questions_answered_multiple_times(self, mock_logging):
"""We should only get one answer for a question (except Checkbox questions)"""
dropdown_question_code = self.data_generator.create_database_code(value='dropdown_select')
checkbox_question_code = self.data_generator.create_database_code(value='checkbox_select')
# The validator only checks to see if there are options and doesn't really mind what they are,
# using the same options for all the questions for simplicity
option_a_code = self.data_generator.create_database_code(value='option_a')
option_b_code = self.data_generator.create_database_code(value='option_b')
options = [option_a_code, option_b_code]
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
dropdown_question_code: QuestionDefinition(question_type=SurveyQuestionType.DROPDOWN, options=options),
checkbox_question_code: QuestionDefinition(question_type=SurveyQuestionType.CHECKBOX, options=options)
},
answers={
dropdown_question_code: QuestionnaireResponseAnswer(valueCodeId=option_a_code.codeId),
checkbox_question_code: QuestionnaireResponseAnswer(valueCodeId=option_a_code.codeId)
}
)
# Add extra answers to the response for each question
for question in questionnaire_history.questions:
response.answers.append(QuestionnaireResponseAnswer(
questionId=question.questionnaireQuestionId,
valueCodeId=option_b_code.codeId
))
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
mock_logging.error.assert_called_once_with(f'Too many answers given for {dropdown_question_code.value}')
def test_unknown_types_can_be_checkboxes(self, mock_logging):
"""
We should assume the best and allow for unknown types to be answered multiple times (they could be checkboxes)
"""
multi_select = self.data_generator.create_database_code(value='dropdown_select')
# The validator only checks to see if there are options and doesn't really mind what they are,
# using the same options for all the questions for simplicity
option_a_code = self.data_generator.create_database_code(value='option_a')
option_b_code = self.data_generator.create_database_code(value='option_b')
options = [option_a_code, option_b_code]
questionnaire_history, response = self._build_questionnaire_and_response(
questions={
multi_select: QuestionDefinition(question_type=SurveyQuestionType.UNKNOWN, options=options)
},
answers={
multi_select: QuestionnaireResponseAnswer(valueCodeId=option_a_code.codeId)
}
)
# Add extra answers to the response for each question
for question in questionnaire_history.questions:
response.answers.append(QuestionnaireResponseAnswer(
questionId=question.questionnaireQuestionId,
valueCodeId=option_b_code.codeId
))
validator = ResponseValidator(questionnaire_history, self.session)
validator.check_response(response)
# No logs should have been made because of the additional answers
mock_logging.warning.assert_not_called()
mock_logging.error.assert_not_called()
| bsd-3-clause | ceb75ab63713cddae0443eb0b3cc616b | 50.907801 | 120 | 0.656738 | 4.390522 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/contactpoint.py | 1 | 2151 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ContactPoint) on 2017-03-22.
# 2017, SMART Health IT.
from . import element
class ContactPoint(element.Element):
    """ Details of a Technology mediated contact point (phone, fax, email, etc.).

    Details for all kinds of technology mediated contact points for a person or
    organization, including telephone, email, etc.
    """

    resource_type = "ContactPoint"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Time period when the contact point was/is in use (`Period` as `dict` in JSON)
        self.period = None
        # Preferred order of use, 1 = highest (`int`)
        self.rank = None
        # phone | fax | email | pager | url | sms | other (`str`)
        self.system = None
        # home | work | temp | old | mobile - purpose of this contact point (`str`)
        self.use = None
        # The actual contact point details (`str`)
        self.value = None

        super(ContactPoint, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super(ContactPoint, self).elementProperties()
        properties.extend([
            ("period", "period", period.Period, False, None, False),
            ("rank", "rank", int, False, None, False),
            ("system", "system", str, False, None, False),
            ("use", "use", str, False, None, False),
            ("value", "value", str, False, None, False),
        ])
        return properties
import sys
# `period` is imported at the bottom of the module to avoid a circular import
# between sibling FHIR model modules; if the relative import fails (e.g. the
# module was loaded outside the package), fall back to the already-loaded copy.
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
| bsd-3-clause | c491c37d07bf6b460591c1932ecc686e | 32.092308 | 104 | 0.578336 | 4.184825 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/genomic/genomic_job_components.py | 1 | 154004 | """
Component Classes for Genomic Jobs
Components are assembled by the JobController for a particular Genomic Job
"""
import csv
import json
import logging
import re
import pytz
from collections import deque, namedtuple
from copy import deepcopy
from dateutil.parser import parse
import sqlalchemy
from werkzeug.exceptions import NotFound
from rdr_service import clock, config
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.genomic_enums import ResultsModuleType, ResultsWorkflowState
from rdr_service.genomic.genomic_data import GenomicQueryClass
from rdr_service.genomic.genomic_state_handler import GenomicStateHandler
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.code import Code
from rdr_service.model.participant_summary import ParticipantRaceAnswers, ParticipantSummary
from rdr_service.model.config_utils import get_biobank_id_prefix
from rdr_service.resource.generators.genomics import genomic_user_event_metrics_batch_update
from rdr_service.api_util import (
open_cloud_file,
copy_cloud_file,
delete_cloud_file,
list_blobs,
get_blob)
from rdr_service.model.genomics import (
GenomicSet,
GenomicSetMember,
GenomicGCValidationMetrics,
GenomicSampleContamination)
from rdr_service.participant_enums import (
WithdrawalStatus,
QuestionnaireStatus,
SampleStatus,
Race,
SuspensionStatus,
ParticipantCohort)
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag, GenomicJob, \
GenomicWorkflowState, GenomicSubProcessStatus, GenomicSubProcessResult, GenomicManifestTypes, \
GenomicContaminationCategory, GenomicQcStatus, GenomicIncidentCode
from rdr_service.dao.genomics_dao import (
GenomicGCValidationMetricsDao,
GenomicSetMemberDao,
GenomicFileProcessedDao,
GenomicSetDao,
GenomicJobRunDao,
GenomicManifestFeedbackDao,
GenomicManifestFileDao,
GenomicAW1RawDao,
GenomicAW2RawDao,
GenomicGcDataFileDao,
GenomicGcDataFileMissingDao,
GenomicIncidentDao,
UserEventMetricsDao,
GenomicQueriesDao,
GenomicCVLAnalysisDao, GenomicResultWorkflowStateDao, GenomicCVLSecondSampleDao, GenomicAppointmentEventMetricsDao)
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.genomic.genomic_biobank_manifest_handler import (
create_and_upload_genomic_biobank_manifest_file,
)
from rdr_service.genomic.validation import (
GENOMIC_VALID_AGE,
)
from rdr_service.offline.sql_exporter import SqlExporter
from rdr_service.config import (
getSetting,
GENOMIC_CVL_RECONCILIATION_REPORT_SUBFOLDER,
GENOMIC_GEM_A1_MANIFEST_SUBFOLDER,
GENOMIC_GEM_A3_MANIFEST_SUBFOLDER,
GENOME_TYPE_ARRAY,
GENOME_TYPE_ARRAY_INVESTIGATION,
GENOME_TYPE_WGS,
GENOME_TYPE_WGS_INVESTIGATION,
GENOMIC_AW3_ARRAY_SUBFOLDER,
GENOMIC_AW3_WGS_SUBFOLDER,
BIOBANK_AW2F_SUBFOLDER,
GENOMIC_INVESTIGATION_GENOME_TYPES,
CVL_W1IL_HDR_MANIFEST_SUBFOLDER,
CVL_W1IL_PGX_MANIFEST_SUBFOLDER,
CVL_W2W_MANIFEST_SUBFOLDER,
CVL_W3SR_MANIFEST_SUBFOLDER
)
from rdr_service.code_constants import COHORT_1_REVIEW_CONSENT_YES_CODE
from rdr_service.genomic.genomic_mappings import wgs_file_types_attributes, array_file_types_attributes, \
genome_center_datafile_prefix_map
from sqlalchemy.orm import aliased
class GenomicFileIngester:
"""
This class ingests a file from a source GC bucket into the destination table
"""
    def __init__(self, job_id=None,
                 job_run_id=None,
                 bucket=None,
                 archive_folder=None,
                 sub_folder=None,
                 _controller=None,
                 target_file=None):
        """
        Set up ingestion state and wire up sub-components and DAOs.

        :param job_id: GenomicJob enum value identifying the ingestion job type
        :param job_run_id: id of the genomic job run record for this execution
        :param bucket: name of the source GC bucket to read manifests from
        :param archive_folder: bucket folder processed files are archived to
        :param sub_folder: bucket sub-folder searched for new files
        :param _controller: parent job controller driving this ingester
        :param target_file: explicit file path to ingest; set by manual tooling,
            left None by the cron job (which searches the bucket instead)
        """
        self.controller = _controller
        self.job_id = job_id
        self.job_run_id = job_run_id
        # Current file being ingested; set per-file by _ingest_genomic_file
        self.file_obj = None
        # FIFO of genomic_file_processed records awaiting ingestion
        self.file_queue = deque()
        self.target_file = target_file
        self.bucket_name = bucket
        self.archive_folder_name = archive_folder
        self.sub_folder_name = sub_folder
        # genomic_set id created lazily for investigation genome types (AW1 rows)
        self.investigation_set_id = None
        # Lazily initialized by create_investigation_member_record_from_aw1
        self.participant_dao = None
        # Sub Components
        self.file_validator = GenomicFileValidator(
            job_id=self.job_id,
            controller=self.controller
        )
        self.file_mover = GenomicFileMover(archive_folder=self.archive_folder_name)
        self.metrics_dao = GenomicGCValidationMetricsDao()
        self.file_processed_dao = GenomicFileProcessedDao()
        self.member_dao = GenomicSetMemberDao()
        self.job_run_dao = GenomicJobRunDao()
        self.sample_dao = BiobankStoredSampleDao()
        self.feedback_dao = GenomicManifestFeedbackDao()
        self.manifest_dao = GenomicManifestFileDao()
        self.incident_dao = GenomicIncidentDao()
        self.user_metrics_dao = UserEventMetricsDao()
        self.cvl_analysis_dao = GenomicCVLAnalysisDao()
        self.results_workflow_dao = GenomicResultWorkflowStateDao()
        self.analysis_cols = self.cvl_analysis_dao.model_type.__table__.columns.keys()
        # Initialized on demand by the methods that need them
        self.set_dao = None
        self.cvl_second_sample_dao = None
def generate_file_processing_queue(self):
"""
Creates the list of files to be ingested in this run.
Ordering is currently arbitrary;
"""
# Check Target file is set.
# It will not be set in cron job, but will be set by tool when run manually
_manifest_file_id = None
try:
_manifest_file_id = self.controller.task_data.manifest_file.id
except AttributeError:
pass
if self.target_file is not None:
if self.controller.storage_provider is not None:
_blob = self.controller.storage_provider.get_blob(self.bucket_name, self.target_file)
else:
_blob = get_blob(self.bucket_name, self.target_file)
files = [(self.target_file, _blob.updated)]
else:
files = self._get_new_file_names_and_upload_dates_from_bucket()
if files == GenomicSubProcessResult.NO_FILES:
return files
else:
for file_data in files:
new_file_record = self.file_processed_dao.insert_file_record(
self.job_run_id,
f'{self.bucket_name}/{file_data[0]}',
self.bucket_name,
file_data[0].split('/')[-1],
upload_date=file_data[1],
manifest_file_id=_manifest_file_id)
self.file_queue.append(new_file_record)
def _get_new_file_names_and_upload_dates_from_bucket(self):
"""
Searches the bucket for un-processed files.
:return: list of (filenames, upload_date) or NO_FILES result code
"""
# Setup date
timezone = pytz.timezone('Etc/Greenwich')
date_limit_obj = timezone.localize(self.controller.last_run_time)
# Look for new files with valid filenames
bucket = '/' + self.bucket_name
files = list_blobs(bucket, prefix=self.sub_folder_name)
files = [(s.name, s.updated) for s in files
if s.updated > date_limit_obj
and self.file_validator.validate_filename(s.name)]
if not files:
logging.info('No files in cloud bucket {}'.format(self.bucket_name))
return GenomicSubProcessResult.NO_FILES
return files
def generate_file_queue_and_do_ingestion(self):
"""
Main method of the ingestor component,
generates a queue and processes each file
:return: result code
"""
file_queue_result = self.generate_file_processing_queue()
if file_queue_result == GenomicSubProcessResult.NO_FILES:
logging.info('No files to process.')
return file_queue_result
else:
logging.info('Processing files in queue.')
results = []
current_file = None
while len(self.file_queue):
try:
current_file = self.file_queue[0]
ingestion_result = self._ingest_genomic_file(current_file)
file_ingested = self.file_queue.popleft()
results.append(ingestion_result == GenomicSubProcessResult.SUCCESS)
if ingestion_result:
ingestion_message = f'Ingestion attempt for {file_ingested.fileName}: {ingestion_result}'
if 'invalid' in ingestion_result.name.lower():
logging.warning(ingestion_message)
else:
logging.info(ingestion_message)
self.file_processed_dao.update_file_record(
file_ingested.id,
GenomicSubProcessStatus.COMPLETED,
ingestion_result
)
# pylint: disable=broad-except
except Exception as e:
logging.error(f'Exception occured when ingesting manifest {current_file.filePath}: {e}')
self.file_queue.popleft()
except IndexError:
logging.info('No files left in file queue.')
return GenomicSubProcessResult.SUCCESS if all(results) \
else GenomicSubProcessResult.ERROR
@staticmethod
def _clean_row_keys(val):
def str_clean(str_val):
return str_val.lower() \
.replace(' ', '') \
.replace('_', '')
if type(val) is str or 'quoted_name' in val.__class__.__name__.lower():
return str_clean(val)
elif 'dict' in val.__class__.__name__.lower():
return dict(zip([str_clean(key)
for key in val], val.values()))
@staticmethod
def _clean_alpha_values(value):
return value[1:] if value[0].isalpha() else value
    def _ingest_genomic_file(self, file_obj):
        """
        Reads a file object from bucket, validates it, and dispatches its rows
        to the job-specific ingestion handler.

        :param file_obj: a genomic_file_processed record for the file to ingest
        :return: A GenomicSubProcessResult code (SUCCESS, ERROR, NO_FILES, or
            a validation failure result)
        """
        self.file_obj = file_obj
        data_to_ingest = self._retrieve_data_from_path(self.file_obj.filePath)
        if data_to_ingest == GenomicSubProcessResult.ERROR:
            return GenomicSubProcessResult.ERROR
        elif data_to_ingest:
            logging.info(f'Ingesting data from {self.file_obj.fileName}')
            logging.info("Validating file.")
            # Maps each job type to the handler that ingests a batch of rows
            ingestion_map = {
                GenomicJob.AW1_MANIFEST: self._ingest_aw1_manifest,
                GenomicJob.AW1F_MANIFEST: self._ingest_aw1_manifest,
                GenomicJob.METRICS_INGESTION: self._process_gc_metrics_data_for_insert,
                GenomicJob.GEM_A2_MANIFEST: self._ingest_gem_a2_manifest,
                GenomicJob.GEM_METRICS_INGEST: self._ingest_gem_metrics_manifest,
                GenomicJob.AW4_ARRAY_WORKFLOW: self._ingest_aw4_manifest,
                GenomicJob.AW4_WGS_WORKFLOW: self._ingest_aw4_manifest,
                GenomicJob.AW1C_INGEST: self._ingest_aw1c_manifest,
                GenomicJob.AW1CF_INGEST: self._ingest_aw1c_manifest,
                GenomicJob.AW5_ARRAY_MANIFEST: self._ingest_aw5_manifest,
                GenomicJob.AW5_WGS_MANIFEST: self._ingest_aw5_manifest,
                GenomicJob.CVL_W2SC_WORKFLOW: self._ingest_cvl_w2sc_manifest,
                GenomicJob.CVL_W3NS_WORKFLOW: self._ingest_cvl_w3ns_manifest,
                GenomicJob.CVL_W3SS_WORKFLOW: self._ingest_cvl_w3ss_manifest,
                GenomicJob.CVL_W3SC_WORKFLOW: self._ingest_cvl_w3sc_manifest,
                GenomicJob.CVL_W4WR_WORKFLOW: self._ingest_cvl_w4wr_manifest,
                GenomicJob.CVL_W5NF_WORKFLOW: self._ingest_cvl_w5nf_manifest
            }
            # Reset cached schema so validation re-derives it for this file
            self.file_validator.valid_schema = None
            validation_result = self.file_validator.validate_ingestion_file(
                filename=self.file_obj.fileName,
                data_to_validate=data_to_ingest
            )
            if validation_result != GenomicSubProcessResult.SUCCESS:
                # delete raw records so a failed file can be re-ingested cleanly
                if self.job_id == GenomicJob.AW1_MANIFEST:
                    raw_dao = GenomicAW1RawDao()
                    raw_dao.delete_from_filepath(file_obj.filePath)

                if self.job_id == GenomicJob.METRICS_INGESTION:
                    raw_dao = GenomicAW2RawDao()
                    raw_dao.delete_from_filepath(file_obj.filePath)

                return validation_result

            try:
                ingestion_type = ingestion_map[self.job_id]
                # NOTE: each element of `ingestions` is a batch (list) of rows,
                # sized by the controller's max_num setting
                ingestions = self._set_data_ingest_iterations(data_to_ingest['rows'])

                for row in ingestions:
                    ingestion_type(row)

                self._set_manifest_file_resolved()

                return GenomicSubProcessResult.SUCCESS
            except RuntimeError:
                return GenomicSubProcessResult.ERROR
        else:
            logging.info("No data to ingest.")
            return GenomicSubProcessResult.NO_FILES
def _set_data_ingest_iterations(self, data_rows):
all_ingestions = []
if self.controller.max_num and len(data_rows) > self.controller.max_num:
current_rows = []
for row in data_rows:
current_rows.append(row)
if len(current_rows) == self.controller.max_num:
all_ingestions.append(current_rows.copy())
current_rows.clear()
if current_rows:
all_ingestions.append(current_rows)
else:
all_ingestions.append(data_rows)
return all_ingestions
def _set_manifest_file_resolved(self):
if not self.file_obj:
return
has_failed_validation = self.incident_dao.get_open_incident_by_file_name(self.file_obj.fileName)
if not has_failed_validation:
return
self.incident_dao.batch_update_incident_fields(
[obj.id for obj in has_failed_validation],
_type='resolved'
)
@staticmethod
def get_aw1_manifest_column_mappings():
return {
'packageId': 'packageid',
'sampleId': 'sampleid',
'gcManifestBoxStorageUnitId': 'boxstorageunitid',
'gcManifestBoxPlateId': 'boxid/plateid',
'gcManifestWellPosition': 'wellposition',
'gcManifestParentSampleId': 'parentsampleid',
'collectionTubeId': 'collectiontubeid',
'gcManifestMatrixId': 'matrixid',
'gcManifestTreatments': 'treatments',
'gcManifestQuantity_ul': 'quantity(ul)',
'gcManifestTotalConcentration_ng_per_ul': 'totalconcentration(ng/ul)',
'gcManifestTotalDNA_ng': 'totaldna(ng)',
'gcManifestVisitDescription': 'visitdescription',
'gcManifestSampleSource': 'samplesource',
'gcManifestStudy': 'study',
'gcManifestTrackingNumber': 'trackingnumber',
'gcManifestContact': 'contact',
'gcManifestEmail': 'email',
'gcManifestStudyPI': 'studypi',
'gcManifestTestName': 'genometype',
'gcManifestFailureMode': 'failuremode',
'gcManifestFailureDescription': 'failuremodedesc',
}
    def _ingest_aw1_manifest(self, rows):
        """
        AW1 ingestion method: Updates the GenomicSetMember with AW1 data
        If the row is determined to be a control sample,
        insert a new GenomicSetMember with AW1 data

        :param rows: batch of AW1 manifest rows (dicts); keys are cleaned to
            lower-case with spaces/underscores removed before use
        :return: result code (always GenomicSubProcessResult.SUCCESS; row-level
            problems are recorded as incidents instead of failing the batch)
        """
        # Workflow states a member may be in when matched by biobank id
        _states = [GenomicWorkflowState.AW0, GenomicWorkflowState.EXTRACT_REQUESTED]
        _site = self._get_site_from_aw1()

        for row in rows:
            row_copy = self._clean_row_keys(row)

            row_copy['site_id'] = _site
            # Skip rows if biobank_id is an empty string (row is empty well)
            if row_copy['biobankid'] == "":
                continue

            # Check if this sample has a control sample parent tube
            control_sample_parent = self.member_dao.get_control_sample_parent(
                row_copy['genometype'],
                int(row_copy['parentsampleid'])
            )

            # Create new set member record if the sample
            # has the investigation genome type
            if row_copy['genometype'] in GENOMIC_INVESTIGATION_GENOME_TYPES:
                self.create_investigation_member_record_from_aw1(row_copy)

                # Move to next row in file
                continue

            if control_sample_parent:
                logging.warning(f"Control sample found: {row_copy['parentsampleid']}")

                # Check if the control sample member exists for this GC, BID, collection tube, and sample ID
                # Since the Biobank is reusing the sample and collection tube IDs (which are supposed to be unique)
                cntrl_sample_member = self.member_dao.get_control_sample_for_gc_and_genome_type(
                    _site,
                    row_copy['genometype'],
                    row_copy['biobankid'],
                    row_copy['collectiontubeid'],
                    row_copy['sampleid']
                )

                if not cntrl_sample_member:
                    # Insert new GenomicSetMember record if none exists
                    # for this control sample, genome type, and gc site
                    self.create_new_member_from_aw1_control_sample(row_copy)

                # Skip rest of iteration and go to next row
                continue

            # Find the existing GenomicSetMember
            if self.job_id == GenomicJob.AW1F_MANIFEST:
                # Set the member based on collection tube ID will null sample
                member = self.member_dao.get_member_from_collection_tube(
                    row_copy['collectiontubeid'],
                    row_copy['genometype'],
                    state=GenomicWorkflowState.AW1
                )
            else:
                # Set the member based on collection tube ID will null sample
                member = self.member_dao.get_member_from_collection_tube_with_null_sample_id(
                    row_copy['collectiontubeid'],
                    row_copy['genometype'])

            # Since member not found, and not a control sample,
            # check if collection tube id was swapped by Biobank
            if not member:
                bid = row_copy['biobankid']

                # Strip biobank prefix if it's there
                if bid[0] in [get_biobank_id_prefix(), 'T']:
                    bid = bid[1:]
                member = self.member_dao.get_member_from_biobank_id_in_state(
                    bid,
                    row_copy['genometype'],
                    _states
                )
                # If member found, validate new collection tube ID, set collection tube ID
                if member:
                    if self._validate_collection_tube_id(row_copy['collectiontubeid'], bid):
                        if member.genomeType in [GENOME_TYPE_ARRAY, GENOME_TYPE_WGS]:
                            if member.collectionTubeId:
                                # The replaced tube is flagged as contaminated
                                with self.member_dao.session() as session:
                                    self._record_sample_as_contaminated(session, member.collectionTubeId)

                        member.collectionTubeId = row_copy['collectiontubeid']
                else:
                    # Couldn't find genomic set member based on either biobank ID or collection tube
                    _message = f"{self.job_id.name}: Cannot find genomic set member: " \
                               f"collection_tube_id: {row_copy['collectiontubeid']}, " \
                               f"biobank id: {bid}, " \
                               f"genome type: {row_copy['genometype']}"

                    self.controller.create_incident(source_job_run_id=self.job_run_id,
                                                    source_file_processed_id=self.file_obj.id,
                                                    code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
                                                    message=_message,
                                                    biobank_id=bid,
                                                    collection_tube_id=row_copy['collectiontubeid'],
                                                    sample_id=row_copy['sampleid'],
                                                    )

                    # Skip rest of iteration and continue processing file
                    continue

            # Check for diversion pouch site
            div_pouch_site_id = self.sample_dao.get_diversion_pouch_site_id(row_copy['collectiontubeid'])

            if div_pouch_site_id:
                member.diversionPouchSiteFlag = 1

            # Process the attribute data
            member_changed, member = self._process_aw1_attribute_data(row_copy, member)

            if member_changed:
                self.member_dao.update(member)

        return GenomicSubProcessResult.SUCCESS
def create_investigation_member_record_from_aw1(self, aw1_data):
    """
    Creates a GenomicSetMember for an AW1 row carrying an investigation
    genome type, blocking both research and results by default.
    :param aw1_data: cleaned AW1 row dict
    """
    # Lazily create one investigation genomic_set per ingestion run
    if not self.investigation_set_id:
        new_set = self.create_new_genomic_set()
        self.investigation_set_id = new_set.id
    self.participant_dao = ParticipantDao()
    # Get IDs
    biobank_id = aw1_data['biobankid']
    # Strip biobank prefix if it's there
    if biobank_id[0] in [get_biobank_id_prefix(), 'T']:
        biobank_id = biobank_id[1:]
    participant = self.participant_dao.get_by_biobank_id(biobank_id)
    # Create new genomic_set_member
    new_member = GenomicSetMember(
        genomicSetId=self.investigation_set_id,
        biobankId=biobank_id,
        participantId=participant.participantId,
        reconcileGCManifestJobRunId=self.job_run_id,
        genomeType=aw1_data['genometype'],
        sexAtBirth=aw1_data['sexatbirth'],
        blockResearch=1,
        blockResearchReason="Created from AW1 with investigation genome type.",
        blockResults=1,
        blockResultsReason="Created from AW1 with investigation genome type.",
        genomicWorkflowState=GenomicWorkflowState.AW1,
        genomicWorkflowStateStr=GenomicWorkflowState.AW1.name,
    )
    # Apply the remaining AW1 attributes, then persist the new member
    _, member = self._process_aw1_attribute_data(aw1_data, new_member)
    self.member_dao.insert(member)
def create_new_genomic_set(self):
    """
    Creates and persists a new investigation GenomicSet named after
    the current job run.
    :return: the inserted GenomicSet
    """
    new_set = GenomicSet(
        genomicSetName=f"investigation_{self.job_run_id}",
        genomicSetCriteria="investigation genome type",
        genomicSetVersion=1,
    )
    self.set_dao = GenomicSetDao()
    # NOTE(review): the caller reads new_set.id after this session context
    # exits — presumably the session commits on exit and leaves the instance
    # usable; confirm the DAO session semantics.
    with self.set_dao.session() as session:
        session.add(new_set)
    return new_set
def load_raw_awn_file(self, raw_dao, **kwargs):
    """
    Loads raw models with raw data from manifests file
    Ex: genomic_aw1_raw => aw1_manifest
    :param raw_dao: Model Dao Class
    :param kwargs: optional 'cvl_site_id' stamped onto every row
    :return: GenomicSubProcessResult code, or the error value returned
        by _retrieve_data_from_path
    """
    dao = raw_dao()
    # look up if any rows exist already for the file; skip re-ingestion
    records = dao.get_from_filepath(self.target_file)
    if records:
        logging.warning(f'File already exists in raw table: {self.target_file}')
        return GenomicSubProcessResult.SUCCESS
    file_data = self._retrieve_data_from_path(self.target_file)
    # Return the error status if there is an error in file_data
    if not isinstance(file_data, dict):
        return file_data
    model_columns = dao.model_type.__table__.columns.keys()
    # Processing raw data in batches of 100 rows per DB session
    batch_size = 100
    item_count = 0
    batch = list()
    for row in file_data['rows']:
        row_obj = self._set_raw_awn_attributes(row, model_columns)
        if kwargs.get('cvl_site_id'):
            row_obj['cvl_site_id'] = kwargs.get('cvl_site_id')
        row_obj = dao.get_model_obj_from_items(row_obj.items())
        batch.append(row_obj)
        item_count += 1
        if item_count == batch_size:
            # Insert batch into DB
            with dao.session() as session:
                session.bulk_save_objects(batch)
            # Reset batch
            item_count = 0
            batch = list()
    if item_count:
        # insert last (partial) batch if needed
        with dao.session() as session:
            session.bulk_save_objects(batch)
    return GenomicSubProcessResult.SUCCESS
def ingest_single_aw1_row_for_member(self, member):
    """
    Ingests the AW1 row for a single member by scanning self.target_file
    for the row whose BIOBANK_ID matches member.biobankId, then applying
    the mapped manifest columns to the member.
    :param member: GenomicSetMember
    :return: GenomicSubProcessResult.SUCCESS
    """
    # Open file and pull row based on member.biobankId
    with self.controller.storage_provider.open(self.target_file, 'r') as aw1_file:
        reader = csv.DictReader(aw1_file, delimiter=',')
        # [1:] strips the biobank id prefix character from the file value;
        # raises IndexError if no matching row exists
        row = [r for r in reader if r['BIOBANK_ID'][1:] == str(member.biobankId)][0]
    # Alter field names to remove spaces and change to lower case
    row = self._clean_row_keys(row)
    # A previously-set run id means this member's AW1 was ingested before
    ingested_before = member.reconcileGCManifestJobRunId is not None
    # Write AW1 data to genomic_set_member table
    gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
    # Set attributes from file; missing columns are nulled out
    for key in gc_manifest_column_mappings.keys():
        try:
            member.__setattr__(key, row[gc_manifest_column_mappings[key]])
        except KeyError:
            member.__setattr__(key, None)
    # Set other fields not in AW1 file
    member.reconcileGCManifestJobRunId = self.job_run_id
    member.aw1FileProcessedId = self.file_obj.id
    member.gcSite = self._get_site_from_aw1()
    # Only update the member's genomicWorkflowState if it was AW0
    if member.genomicWorkflowState == GenomicWorkflowState.AW0:
        member.genomicWorkflowState = GenomicWorkflowState.AW1
        member.genomicWorkflowStateStr = GenomicWorkflowState.AW1.name
        member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
    # Update member in DB
    self.member_dao.update(member)
    # Update AW1 manifest record count, but only for first-time ingestion
    if not ingested_before and not self.controller.bypass_record_count:
        self.increment_manifest_file_record_count_from_id()
    return GenomicSubProcessResult.SUCCESS
def ingest_single_aw2_row_for_member(self, member: GenomicSetMember) -> GenomicSubProcessResult:
    """
    Ingests the AW2 row for a single member by scanning self.target_file
    for the row whose 'Biobank ID' matches member.biobankId, upserting the
    GC validation metrics and updating member state.
    :param member: GenomicSetMember
    :return: GenomicSubProcessResult
    """
    # Open file and pull row based on member.biobankId
    # (the handle is named aw1_file, but this is the AW2 manifest file)
    with self.controller.storage_provider.open(self.target_file, 'r') as aw1_file:
        reader = csv.DictReader(aw1_file, delimiter=',')
        # Raises IndexError if no matching row exists
        row = [r for r in reader if r['Biobank ID'] == str(member.biobankId)][0]
    # Alter field names to remove spaces and change to lower case
    row = self._clean_row_keys(row)
    # Begin prepping the AW2 row: resolve genome-type-specific data file paths
    if row['genometype'] in (GENOME_TYPE_WGS, GENOME_TYPE_WGS_INVESTIGATION):
        row = self._set_metrics_wgs_data_file_paths(row)
    elif row['genometype'] in (GENOME_TYPE_ARRAY, GENOME_TYPE_ARRAY_INVESTIGATION):
        row = self._set_metrics_array_data_file_paths(row)
    row = self.prep_aw2_row_attributes(row, member)
    if row == GenomicSubProcessResult.ERROR:
        return GenomicSubProcessResult.ERROR
    # check whether metrics object exists for that member
    existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
    if existing_metrics_obj is not None:
        metric_id = existing_metrics_obj.id
    else:
        metric_id = None
    self.metrics_dao.upsert_gc_validation_metrics_from_dict(row, metric_id)
    self.update_member_for_aw2(member)
    # Update member in DB
    self.member_dao.update(member)
    self._update_member_state_after_aw2(member)
    # Update AW1 manifest feedback record count, only for a first-time metrics row
    if existing_metrics_obj is None and not self.controller.bypass_record_count:
        # For feedback manifest loop
        # Get the genomic_manifest_file
        manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
        if manifest_file is not None:
            self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId)
    return GenomicSubProcessResult.SUCCESS
def increment_manifest_file_record_count_from_id(self):
    """
    Adds one to the record count of the manifest tied to the
    currently processed file, merging the change back into the DB.
    """
    manifest = self.manifest_dao.get(self.file_obj.genomicManifestFileId)
    manifest.recordCount = manifest.recordCount + 1
    with self.manifest_dao.session() as session:
        session.merge(manifest)
def prep_aw2_row_attributes(self, row: dict, member: GenomicSetMember):
    """
    Set contamination, contamination category,
    call rate, member_id, and file_id on AW2 row dictionary
    :param member:
    :param row:
    :return: row dictionary or ERROR code
    """
    row['member_id'] = member.id
    row['file_id'] = self.file_obj.id
    # handle mapped reads in case they are longer than field length
    # (assumes the value is still the raw CSV string — TODO confirm)
    if 'mappedreadspct' in row.keys():
        if len(row['mappedreadspct']) > 10:
            row['mappedreadspct'] = row['mappedreadspct'][0:10]
    # Set default values in case they upload "" and processing status of "fail"
    row['contamination_category'] = GenomicContaminationCategory.UNSET
    row['contamination_category_str'] = "UNSET"
    # Truncate call rate (string slice; a missing column is tolerated)
    try:
        row['callrate'] = row['callrate'][:10]
    except KeyError:
        pass
    # Convert blank alignedq30bases to none
    try:
        if row['alignedq30bases'] == '':
            row['alignedq30bases'] = None
    except KeyError:
        pass
    # Validate and clean contamination data
    try:
        row['contamination'] = float(row['contamination'])
        # Percentages shouldn't be less than 0
        if row['contamination'] < 0:
            row['contamination'] = 0
    except ValueError:
        # Non-numeric contamination is tolerated for failed samples;
        # for passing samples raise a data-validation incident and bail
        if row['processingstatus'].lower() != 'pass':
            return row
        _message = f'{self.job_id.name}: Contamination must be a number for sample_id: {row["sampleid"]}'
        self.controller.create_incident(source_job_run_id=self.job_run_id,
                                        source_file_processed_id=self.file_obj.id,
                                        code=GenomicIncidentCode.DATA_VALIDATION_FAILED.name,
                                        message=_message,
                                        biobank_id=member.biobankId,
                                        sample_id=row['sampleid'],
                                        )
        return GenomicSubProcessResult.ERROR
    # Calculate contamination_category
    contamination_value = float(row['contamination'])
    category = self.calculate_contamination_category(
        member.collectionTubeId,
        contamination_value,
        member
    )
    row['contamination_category'] = category
    row['contamination_category_str'] = category.name
    return row
def update_member_for_aw2(self, member: GenomicSetMember):
    """
    Updates the aw2FileProcessedId and possibly the genomicWorkflowState
    of a GenomicSetMember after AW2 data has been ingested
    :param member:
    """
    member.aw2FileProcessedId = self.file_obj.id
    # Advance the workflow state only from AW1; never regress other states
    if member.genomicWorkflowState == GenomicWorkflowState.AW1:
        next_state = GenomicWorkflowState.AW2
        member.genomicWorkflowState = next_state
        member.genomicWorkflowStateStr = next_state.name
        member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
    self.member_dao.update(member)
def _ingest_gem_a2_manifest(self, rows):
    """
    Processes the GEM A2 manifest file data
    Updates GenomicSetMember object with gem_pass field.
    :param rows:
    :return: Result Code
    """
    try:
        for row in rows:
            sample_id = row['sample_id']
            member = self.member_dao.get_member_from_sample_id_with_state(sample_id,
                                                                          GENOME_TYPE_ARRAY,
                                                                          GenomicWorkflowState.A1)
            if member is None:
                logging.warning(f'Invalid sample ID: {sample_id}')
                continue
            member.gemPass = row['success']
            member.gemA2ManifestJobRunId = self.job_run_id
            member.gemDateOfImport = parse(row['date_of_import'])
            _signal = 'a2-gem-pass' if member.gemPass.lower() == 'y' else 'a2-gem-fail'
            # Compute the next state once (the original called get_new_state
            # twice per row); update state fields only when it changes
            new_state = GenomicStateHandler.get_new_state(
                member.genomicWorkflowState, signal=_signal)
            if member.genomicWorkflowState != new_state:
                member.genomicWorkflowState = new_state
                member.genomicWorkflowStateStr = new_state.name
                member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
            self.member_dao.update(member)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_gem_metrics_manifest(self, rows):
    """
    Processes the GEM Metrics manifest file data
    Updates GenomicSetMember object with metrics fields.
    :param rows:
    :return: Result Code
    """
    # member attribute -> manifest column
    field_map = {
        'gemMetricsAncestryLoopResponse': 'ancestry_loop_response',
        'gemMetricsAvailableResults': 'available_results',
        'gemMetricsResultsReleasedAt': 'results_released_at',
    }
    try:
        for row in rows:
            sample_id = row['sample_id']
            member = self.member_dao.get_member_from_sample_id_with_state(
                sample_id, GENOME_TYPE_ARRAY, GenomicWorkflowState.GEM_RPT_READY)
            if member is None:
                logging.warning(f'Invalid sample ID: {sample_id}')
                continue
            for attr, column in field_map.items():
                setattr(member, attr, row[column])
            member.colorMetricsJobRunID = self.job_run_id
            self.member_dao.update(member)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_aw4_manifest(self, rows):
    """
    Processes the AW4 manifest file data, recording QC status on the
    member and DRC metrics on the existing metrics record.
    :param rows:
    :return: Result Code
    """
    try:
        for row in rows:
            cleaned = self._clean_row_keys(row)
            sample_id = cleaned['sampleid']
            member = self.member_dao.get_member_from_aw3_sample(sample_id)
            if member is None:
                logging.warning(f'Invalid sample ID: {sample_id}')
                continue
            member.aw4ManifestJobRunID = self.job_run_id
            member.qcStatus = self._get_qc_status_from_value(cleaned['qcstatus'])
            member.qcStatusStr = member.qcStatus.name
            metrics = self.metrics_dao.get_metrics_by_member_id(member.id)
            if metrics:
                metrics.drcSexConcordance = cleaned['drcsexconcordance']
                # Array vs WGS manifests carry different DRC columns
                if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
                    metrics.drcCallRate = cleaned['drccallrate']
                elif self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
                    metrics.drcContamination = cleaned['drccontamination']
                    metrics.drcMeanCoverage = cleaned['drcmeancoverage']
                    metrics.drcFpConcordance = cleaned['drcfpconcordance']
                self.metrics_dao.upsert(metrics)
            self.member_dao.update(member)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def ingest_metrics_file_from_filepath(self, metric_type, file_path):
    """
    Ingests a metrics CSV from *file_path* into the DAO mapped to
    *metric_type*, inserting rows in batches of 100 and updating PDR
    resource records per batch.
    :param metric_type: key into the metric DAO map (currently 'user_events')
    :param file_path: cloud-storage path of the metrics file
    :return: GenomicSubProcessResult code, or the error value returned
        by _retrieve_data_from_path
    """
    metric_map = {
        'user_events': self.user_metrics_dao
    }
    file_data = self._retrieve_data_from_path(file_path)
    if not isinstance(file_data, dict):
        return file_data
    try:
        metric_dao = metric_map[metric_type]
    except KeyError:
        logging.warning(f'Metric type {metric_type} is invalid for this method')
        return GenomicSubProcessResult.ERROR

    def _flush_batch(pending):
        # Use session add_all() so we can get the newly created primary key id values back.
        with metric_dao.session() as session:
            session.add_all(pending)
            session.commit()
            # Batch update PDR resource records.
            genomic_user_event_metrics_batch_update([r.id for r in pending])

    batch_size, batch = 100, []
    for row in file_data['rows']:
        participant_id = row.get('participant_id')
        # Strip the 'P' prefix and store a numeric participant id
        if participant_id and 'P' in participant_id:
            row['participant_id'] = int(participant_id.split('P')[-1])
        row['file_path'] = file_path
        row['created'] = clock.CLOCK.now()
        row['modified'] = clock.CLOCK.now()
        row['run_id'] = self.controller.job_run.id
        batch.append(metric_dao.get_model_obj_from_items(row.items()))
        if len(batch) == batch_size:
            _flush_batch(batch)
            batch = []
    if batch:
        # insert the final partial batch
        _flush_batch(batch)
    return GenomicSubProcessResult.SUCCESS
@staticmethod
def ingest_appointment_metrics(file_path):
    """
    Ingests a JSON file of appointment events into
    genomic_appointment_event_metrics in batches of 100.
    :param file_path: cloud path of the JSON metrics file
    :return: GenomicSubProcessResult code
    """
    try:
        with open_cloud_file(file_path) as json_file:
            json_appointment_data = json.load(json_file)
        if not json_appointment_data:
            logging.warning(f'Appointment metric file {file_path} is empty')
            return GenomicSubProcessResult.NO_RESULTS
        batch_size, item_count, batch = 100, 0, []
        appointment_metric_dao = GenomicAppointmentEventMetricsDao()
        for event in json_appointment_data:
            event_obj = {}
            message_body = event.get('messageBody')
            # Participant ids may arrive with a leading 'P' prefix
            if event.get('participantId'):
                participant_id = event.get('participantId')
                if 'P' in participant_id:
                    participant_id = participant_id.split('P')[-1]
                event_obj['participant_id'] = int(participant_id)
            event_obj['event_authored_time'] = event.get('eventAuthoredTime')
            event_obj['event_type'] = event.get('event')
            # NOTE(review): message_body is None when 'messageBody' is absent,
            # which would raise AttributeError here — confirm payloads always
            # include messageBody.
            event_obj['module_type'] = message_body.get('module_type')
            # Keep the full original event payload for auditing
            event_obj['appointment_event'] = json.dumps(event)
            event_obj['file_path'] = file_path
            event_obj['created'] = clock.CLOCK.now()
            event_obj['modified'] = clock.CLOCK.now()
            batch.append(event_obj)
            item_count += 1
            if item_count == batch_size:
                appointment_metric_dao.insert_bulk(batch)
                item_count = 0
                batch.clear()
        if item_count:
            # insert the final partial batch
            appointment_metric_dao.insert_bulk(batch)
    except ValueError:
        logging.warning('Appointment metric file must be valid json')
        return GenomicSubProcessResult.ERROR
    return GenomicSubProcessResult.SUCCESS
def _retrieve_data_from_path(self, path):
    """
    Retrieves the last genomic data file from a bucket
    :param path: The source file to ingest
    :return: CSV data as a dictionary, or ERROR result code
    """
    try:
        filename = path.split('/')[1]
        logging.info(
            'Opening CSV file from queue {}: {}.'
            .format(filename, filename)
        )
        # Prefer the controller's storage provider when one is configured
        provider = self.controller.storage_provider
        opener = provider.open(path, 'r') if provider else open_cloud_file(path)
        with opener as csv_file:
            return self._read_data_to_ingest(csv_file)
    except FileNotFoundError:
        logging.error(f"File path '{path}' not found")
        return GenomicSubProcessResult.ERROR
@staticmethod
def _read_data_to_ingest(csv_file):
data_to_ingest = {'rows': []}
csv_reader = csv.DictReader(csv_file, delimiter=",")
data_to_ingest['fieldnames'] = csv_reader.fieldnames
for row in csv_reader:
for key in row.copy():
if not key:
del row[key]
data_to_ingest['rows'].append(row)
return data_to_ingest
def _process_aw1_attribute_data(self, aw1_data, member):
"""
Checks a GenomicSetMember object for changes provided by AW1 data
And mutates the GenomicSetMember object if necessary
:param aw1_data: dict
:param member: GenomicSetMember
:return: (boolean, GenomicSetMember)
"""
# Check if the member needs updating
if self._test_aw1_data_for_member_updates(aw1_data, member):
member = self._set_member_attributes_from_aw1(aw1_data, member)
member = self._set_rdr_member_attributes_for_aw1(aw1_data, member)
return True, member
return False, member
def _test_aw1_data_for_member_updates(self, aw1_data, member):
"""
Checks each attribute provided by Biobank
for changes to GenomicSetMember Object
:param aw1_data: dict
:param member: GenomicSetMember
:return: boolean (true if member requires updating)
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
member_needs_updating = False
# Iterate each value and test whether the strings for each field correspond
for key in gc_manifest_column_mappings.keys():
if str(member.__getattribute__(key)) != str(aw1_data.get(gc_manifest_column_mappings[key])):
member_needs_updating = True
return member_needs_updating
def _set_member_attributes_from_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember attributes provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
for key in gc_manifest_column_mappings.keys():
member.__setattr__(key, aw1_data.get(gc_manifest_column_mappings[key]))
return member
def _set_rdr_member_attributes_for_aw1(self, aw1_data, member):
    """
    Mutates the GenomicSetMember RDR attributes not provided by the Biobank
    :param aw1_data: dict
    :param member: GenomicSetMember
    :return: GenomicSetMember
    """
    # Set job run and file processed IDs
    member.reconcileGCManifestJobRunId = self.job_run_id
    # Don't overwrite aw1_file_processed_id when ingesting an AW1F
    if self.job_id == GenomicJob.AW1_MANIFEST:
        member.aw1FileProcessedId = self.file_obj.id
    # Set the GC site ID (sourced from file-name)
    member.gcSiteId = aw1_data['site_id']
    # Only update the state if it was AW0 or AW1 (if in failure manifest workflow)
    # We do not want to regress a state for reingested data
    states_to_update = [GenomicWorkflowState.AW0, GenomicWorkflowState.EXTRACT_REQUESTED]
    # NOTE(review): this branch reads self.controller.job_id while the AW1
    # check above reads self.job_id — confirm both always refer to the same job.
    if self.controller.job_id == GenomicJob.AW1F_MANIFEST:
        states_to_update = [GenomicWorkflowState.AW1]
    if member.genomicWorkflowState in states_to_update:
        _signal = "aw1-reconciled"
        # Set the signal for a failed sample
        if aw1_data['failuremode'] is not None and aw1_data['failuremode'] != '':
            _signal = 'aw1-failed'
        member.genomicWorkflowState = GenomicStateHandler.get_new_state(
            member.genomicWorkflowState,
            signal=_signal)
        member.genomicWorkflowStateStr = member.genomicWorkflowState.name
        member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
    return member
def _set_raw_awn_attributes(self, row_data, model_columns):
    """
    Builds dict from row_data and model_columns
    :param row_data: dict
    :param model_columns: Current obj model attribute keys
    :return: dict object
    """
    row_obj = {}
    row = self._clean_row_keys(row_data)
    if self.controller.job_id in [
        GenomicJob.LOAD_AW1_TO_RAW_TABLE,
        GenomicJob.LOAD_CVL_W3SS_TO_RAW_TABLE
    ]:
        # adjusting for biobank fieldnames: strip parenthesized suffixes
        # and slashes from the keys while preserving value order
        row = dict(zip([re.sub(r'\([^)]*\)', '', key)for key in row], row.values()))
        row = dict(zip([key.replace('/', '') for key in row], row.values()))
    genome_type = row.get('genometype', "")
    # Fall back to the member's genome type when the file omits it
    if not genome_type and row.get('sampleid'):
        member = self.member_dao.get_member_from_sample_id(row.get('sampleid'))
        genome_type = member.genomeType if member else ""
    row_obj['genome_type'] = genome_type
    row_obj['test_name'] = genome_type
    for column in model_columns:
        clean_column = self._clean_row_keys(column)
        row_value = row.get(clean_column)
        # keep empty strings but skip missing (None) columns;
        # truncate stored values to 512 characters
        if row_value or row_value == "":
            row_obj[column] = row_value[0:512]
    row_obj['file_path'] = self.target_file
    row_obj['created'] = clock.CLOCK.now()
    row_obj['modified'] = clock.CLOCK.now()
    return row_obj
def _process_gc_metrics_data_for_insert(self, rows):
    """ Since input files vary in column names,
    this standardizes the field-names before passing to the bulk inserter
    :param rows:
    :return result code
    """
    members_to_update = []
    for row in rows:
        # change all key names to lower
        row_copy = self._clean_row_keys(row)
        # Resolve genome-type-specific data file paths
        if row_copy['genometype'] in (GENOME_TYPE_ARRAY, GENOME_TYPE_ARRAY_INVESTIGATION):
            row_copy = self._set_metrics_array_data_file_paths(row_copy)
        elif row_copy['genometype'] in (GENOME_TYPE_WGS, GENOME_TYPE_WGS_INVESTIGATION):
            row_copy = self._set_metrics_wgs_data_file_paths(row_copy)
        member = self.member_dao.get_member_from_sample_id(
            int(row_copy['sampleid']),
        )
        if not member:
            bid = row_copy['biobankid']
            # Strip biobank prefix if it's there
            if bid[0] in [get_biobank_id_prefix(), 'T']:
                bid = bid[1:]
            # Couldn't find genomic set member based on either biobank ID or sample ID
            _message = f"{self.job_id.name}: Cannot find genomic set member for bid, sample_id: " \
                       f"{row_copy['biobankid']}, {row_copy['sampleid']}"
            self.controller.create_incident(source_job_run_id=self.job_run_id,
                                            source_file_processed_id=self.file_obj.id,
                                            code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
                                            message=_message,
                                            biobank_id=bid,
                                            sample_id=row_copy['sampleid'],
                                            )
            continue
        row_copy = self.prep_aw2_row_attributes(row_copy, member)
        if row_copy == GenomicSubProcessResult.ERROR:
            continue
        # check whether metrics object exists for that member
        existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
        metric_id = None
        if existing_metrics_obj:
            if self.controller.skip_updates:
                # when running tool, updates can be skipped
                continue
            else:
                metric_id = existing_metrics_obj.id
        else:
            # First metrics row for this member: queue a replate when the
            # contamination category requires extraction
            if member.genomeType in [GENOME_TYPE_ARRAY, GENOME_TYPE_WGS]:
                if row_copy['contamination_category'] in [GenomicContaminationCategory.EXTRACT_WGS,
                                                          GenomicContaminationCategory.EXTRACT_BOTH]:
                    # Insert a new member
                    self.insert_member_for_replating(member, row_copy['contamination_category'])
        # For feedback manifest loop
        # Get the genomic_manifest_file
        manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
        if manifest_file is not None and existing_metrics_obj is None:
            self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId)
        self.update_member_for_aw2(member)
        # set lists of members to update workflow state
        member_dict = {
            'id': member.id
        }
        if row_copy['genometype'] == GENOME_TYPE_ARRAY:
            member_dict['genomicWorkflowState'] = int(GenomicWorkflowState.GEM_READY)
            member_dict['genomicWorkflowStateStr'] = str(GenomicWorkflowState.GEM_READY)
            member_dict['genomicWorkflowStateModifiedTime'] = clock.CLOCK.now()
        elif row_copy['genometype'] == GENOME_TYPE_WGS:
            member_dict['genomicWorkflowState'] = int(GenomicWorkflowState.CVL_READY)
            member_dict['genomicWorkflowStateStr'] = str(GenomicWorkflowState.CVL_READY)
            member_dict['genomicWorkflowStateModifiedTime'] = clock.CLOCK.now()
        members_to_update.append(member_dict)
        # upsert metrics record via cloud task (category must be JSON-serializable)
        row_copy['contamination_category'] = int(row_copy['contamination_category'])
        self.controller.execute_cloud_task({
            'metric_id': metric_id,
            'payload_dict': row_copy,
        }, 'genomic_gc_metrics_upsert')
    if members_to_update:
        self.member_dao.bulk_update(members_to_update)
    return GenomicSubProcessResult.SUCCESS
def copy_member_for_replating(
    self,
    member,
    genome_type=None,
    set_id=None,
    block_research_reason=None,
    block_results_reason=None
):
    """
    Inserts a new member record for replating.
    :param member: GenomicSetMember
    :param genome_type:
    :param set_id:
    :param block_research_reason:
    :param block_results_reason:
    :return:
    """
    # Fall back to the source member's set and genome type when not overridden
    target_set_id = set_id if set_id else member.genomicSetId
    target_genome_type = genome_type if genome_type else member.genomeType
    replated = GenomicSetMember(
        biobankId=member.biobankId,
        genomicSetId=target_set_id,
        participantId=member.participantId,
        nyFlag=member.nyFlag,
        sexAtBirth=member.sexAtBirth,
        validationStatus=member.validationStatus,
        validationFlags=member.validationFlags,
        ai_an=member.ai_an,
        genomeType=target_genome_type,
        collectionTubeId=f'replated_{member.id}',
        genomicWorkflowState=GenomicWorkflowState.EXTRACT_REQUESTED,
        replatedMemberId=member.id,
        participantOrigin=member.participantOrigin,
        blockResearch=1 if block_research_reason else 0,
        blockResearchReason=block_research_reason if block_research_reason else None,
        blockResults=1 if block_results_reason else 0,
        blockResultsReason=block_results_reason if block_results_reason else None
    )
    self.member_dao.insert(replated)
def insert_member_for_replating(self, member, category):
    """
    Inserts a new member record for replating.
    :param member: GenomicSetMember
    :param category: GenomicContaminationCategory
    :return:
    """
    replated_wgs = GenomicSetMember(
        biobankId=member.biobankId,
        genomicSetId=member.genomicSetId,
        participantId=member.participantId,
        nyFlag=member.nyFlag,
        sexAtBirth=member.sexAtBirth,
        validationStatus=member.validationStatus,
        validationFlags=member.validationFlags,
        collectionTubeId=f'replated_{member.id}',
        ai_an=member.ai_an,
        genomeType=GENOME_TYPE_WGS,
        genomicWorkflowState=GenomicWorkflowState.EXTRACT_REQUESTED,
        genomicWorkflowStateStr=GenomicWorkflowState.EXTRACT_REQUESTED.name,
        participantOrigin=member.participantOrigin,
        created=clock.CLOCK.now(),
        modified=clock.CLOCK.now(),
        replatedMemberId=member.id,
    )
    # EXTRACT_BOTH also requires a second, array-genome replate record
    if category == GenomicContaminationCategory.EXTRACT_BOTH:
        replated_array = deepcopy(replated_wgs)
        replated_array.genomeType = GENOME_TYPE_ARRAY
        self.member_dao.insert(replated_array)
    self.member_dao.insert(replated_wgs)
@staticmethod
def get_result_module(module_str):
    """
    Maps a lowercase module string ('hdrv1' / 'pgxv1') to its
    ResultsModuleType; returns None for unknown strings.
    """
    return {
        'hdrv1': ResultsModuleType.HDRV1,
        'pgxv1': ResultsModuleType.PGXV1,
    }.get(module_str)
def _base_cvl_ingestion(self, **kwargs):
    """
    Shared CVL-manifest ingestion step: resolves the member from the row's
    biobank/sample ids, stamps the job run id onto the attribute named by
    kwargs['run_attr'], and optionally records a results-workflow state.
    :param kwargs: 'row' (dict), 'run_attr' (str), and optionally
        'result_state' + 'module_type' for the results workflow record
    :return: (cleaned row dict, member or None)
    """
    row_copy = self._clean_row_keys(kwargs.get('row'))
    biobank_id = row_copy.get('biobankid')
    sample_id = row_copy.get('sampleid')
    # Both ids are required to resolve a member
    if not (biobank_id and sample_id):
        return row_copy, None
    biobank_id = self._clean_alpha_values(biobank_id)
    member = self.member_dao.get_member_from_biobank_id_and_sample_id(
        biobank_id,
        sample_id
    )
    if not member:
        logging.warning(f'Can not find genomic member record for biobank_id: '
                        f'{biobank_id} and sample_id: {sample_id}, skipping...')
        return row_copy, None
    setattr(member, kwargs.get('run_attr'), self.job_run_id)
    self.member_dao.update(member)
    # result workflow state
    if kwargs.get('result_state') and kwargs.get('module_type'):
        self.results_workflow_dao.insert_new_result_record(
            member_id=member.id,
            module_type=kwargs.get('module_type'),
            state=kwargs.get('result_state')
        )
    return row_copy, member
def _base_cvl_analysis_ingestion(self, row_copy, member):
    """
    Builds and inserts a CVL analysis record for *member*, copying every
    analysis column that has a truthy value in the cleaned row.
    """
    analysis_obj = self.cvl_analysis_dao.model_type()
    setattr(analysis_obj, 'genomic_set_member_id', member.id)
    for column in self.analysis_cols:
        cleaned_key = self._clean_row_keys(column)
        value = row_copy.get(cleaned_key)
        if value:
            setattr(analysis_obj, column, value)
    self.cvl_analysis_dao.insert(analysis_obj)
def _ingest_cvl_w2sc_manifest(self, rows):
    """
    Processes the CVL W2SC manifest file data
    :param rows:
    :return: Result Code
    """
    ingestion_kwargs = dict(
        run_attr='cvlW2scManifestJobRunID',
        result_state=ResultsWorkflowState.CVL_W2SC,
        module_type=ResultsModuleType.HDRV1,
    )
    try:
        for row in rows:
            self._base_cvl_ingestion(row=row, **ingestion_kwargs)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_cvl_w3ns_manifest(self, rows):
    """
    Processes the CVL W3NS manifest file data
    :param rows:
    :return: Result Code
    """
    ingestion_kwargs = dict(
        run_attr='cvlW3nsManifestJobRunID',
        result_state=ResultsWorkflowState.CVL_W3NS,
        module_type=ResultsModuleType.HDRV1,
    )
    try:
        for row in rows:
            self._base_cvl_ingestion(row=row, **ingestion_kwargs)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_cvl_w3sc_manifest(self, rows):
    """
    Processes the CVL W3SC manifest file data
    :param rows:
    :return: Result Code
    """
    try:
        for row in rows:
            row_copy, member = self._base_cvl_ingestion(
                row=row,
                run_attr='cvlW3scManifestJobRunID',
                result_state=ResultsWorkflowState.CVL_W3SC,
                module_type=ResultsModuleType.HDRV1
            )
            if row_copy and member:
                member.cvlSecondaryConfFailure = row_copy['cvlsecondaryconffailure']
                # Clearing the W3SR run id allows the sample to be resent in a subsequent W3SR
                # https://docs.google.com/presentation/d/1QqXCzwz6MGLMhNwuXlV6ieoMLaJYuYai8csxagF_2-E/edit#slide=id.g10f369a487f_0_0
                member.cvlW3srManifestJobRunID = None
                self.member_dao.update(member)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_cvl_w3ss_manifest(self, rows):
    """
    Processes the CVL W3SS manifest file data
    :param rows:
    :return: Result Code
    """
    self.cvl_second_sample_dao = GenomicCVLSecondSampleDao()
    sample_cols = self.cvl_second_sample_dao.model_type.__table__.columns.keys()
    try:
        for row in rows:
            row_copy, member = self._base_cvl_ingestion(
                row=row,
                run_attr='cvlW3ssManifestJobRunID',
                result_state=ResultsWorkflowState.CVL_W3SS,
                module_type=ResultsModuleType.HDRV1
            )
            if not (row_copy and member):
                continue
            # Normalize keys: strip slashes and any parenthesized suffix
            row_copy = dict(zip([key.replace('/', '').split('(')[0] for key in row_copy],
                                row_copy.values()))
            # cvl second sample: copy matching columns onto a new record
            second_sample_obj = self.cvl_second_sample_dao.model_type()
            setattr(second_sample_obj, 'genomic_set_member_id', member.id)
            for col in sample_cols:
                cleaned_col = self._clean_row_keys(col)
                col_value = row_copy.get(cleaned_col)
                if col_value:
                    setattr(second_sample_obj, col, col_value)
            self.cvl_second_sample_dao.insert(second_sample_obj)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_cvl_w4wr_manifest(self, rows):
    """
    Processes the CVL W4WR manifest file data.
    The target run-id attribute and results module (HDR/PGx) are derived
    from the manifest file name.
    :param rows:
    :return: Result Code
    """
    run_attr_mapping = {
        'hdrv1': 'cvlW4wrHdrManifestJobRunID',
        'pgxv1': 'cvlW4wrPgxManifestJobRunID'
    }
    run_id, module = None, None
    for result_key in run_attr_mapping.keys():
        if result_key in self.file_obj.fileName.lower():
            run_id = run_attr_mapping[result_key]
            module = self.get_result_module(result_key)
            break
    # Guard: a file name matching no known module would otherwise crash in
    # setattr(member, None, ...) inside _base_cvl_ingestion
    if run_id is None:
        logging.warning(f'Cannot determine results module from file name: {self.file_obj.fileName}')
        return GenomicSubProcessResult.ERROR
    try:
        for row in rows:
            row_copy, member = self._base_cvl_ingestion(
                row=row,
                run_attr=run_id,
                result_state=ResultsWorkflowState.CVL_W4WR,
                module_type=module
            )
            if not (row_copy and member):
                continue
            self._base_cvl_analysis_ingestion(row_copy, member)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_cvl_w5nf_manifest(self, rows):
    """
    Processes the CVL W5NF manifest file data, marking the member's
    passed analysis for the module as failed with the given reason.
    The target run-id attribute and results module are derived from the
    manifest file name.
    :param rows:
    :return: Result Code
    """
    run_attr_mapping = {
        'hdrv1': 'cvlW5nfHdrManifestJobRunID',
        'pgxv1': 'cvlW5nfPgxManifestJobRunID'
    }
    run_id, module = None, None
    for result_key in run_attr_mapping.keys():
        if result_key in self.file_obj.fileName.lower():
            run_id = run_attr_mapping[result_key]
            module = self.get_result_module(result_key)
            break
    # Guard: a file name matching no known module would otherwise crash in
    # setattr(member, None, ...) inside _base_cvl_ingestion
    if run_id is None:
        logging.warning(f'Cannot determine results module from file name: {self.file_obj.fileName}')
        return GenomicSubProcessResult.ERROR
    try:
        for row in rows:
            row_copy, member = self._base_cvl_ingestion(
                row=row,
                run_attr=run_id,
                result_state=ResultsWorkflowState.CVL_W5NF,
                module_type=module,
            )
            if not (row_copy and member):
                continue
            current_analysis = self.cvl_analysis_dao.get_passed_analysis_member_module(
                member.id,
                module
            )
            # should have initial record
            if current_analysis:
                current_analysis.failed = 1
                current_analysis.failed_request_reason = row_copy['requestreason']
                # free-text reason is truncated to the 512-char column size
                current_analysis.failed_request_reason_free = row_copy['requestreasonfree'][0:512]
                self.cvl_analysis_dao.update(current_analysis)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_aw5_manifest(self, rows):
    """
    Processes the AW5 manifest file data, flagging deleted data files
    on each member's existing metrics record.
    :param rows:
    :return: Result Code
    """
    try:
        for row in rows:
            cleaned = self._clean_row_keys(row)
            biobank_id = self._clean_alpha_values(cleaned['biobankid'])
            sample_id = cleaned['sampleid']
            member = self.member_dao.get_member_from_biobank_id_and_sample_id(biobank_id, sample_id)
            if not member:
                logging.warning(f'Can not find genomic member record for biobank_id: '
                                f'{biobank_id} and sample_id: {sample_id}, skipping...')
                continue
            metrics = self.metrics_dao.get_metrics_by_member_id(member.id)
            if metrics is None:
                logging.warning(f'Can not find metrics record for member id: '
                                f'{member.id}, skipping...')
                continue
            self.metrics_dao.update_gc_validation_metrics_deleted_flags_from_dict(cleaned, metrics.id)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_aw1c_manifest(self, rows):
    """
    Processes the CVL AW1C manifest file data.

    For each row, resolves the WGS member by collection tube id, records
    the AW1C job run and genome type, captures any failure mode, and
    advances the genomic workflow state via the state handler.

    :param rows: iterable of row dicts from the AW1C CSV
    :return: Result Code (GenomicSubProcessResult)
    """
    try:
        for row in rows:
            row_copy = self._clean_row_keys(row)
            collection_tube_id = row_copy['collectiontubeid']
            member = self.member_dao.get_member_from_collection_tube(collection_tube_id, GENOME_TYPE_WGS)
            if member is None:
                # Currently ignoring invalid cases
                logging.warning(f'Invalid collection tube ID: {collection_tube_id}')
                continue
            # Update the AW1C job run ID and genome_type
            member.cvlAW1CManifestJobRunID = self.job_run_id
            member.genomeType = row_copy['genometype']
            # Handle genomic state
            _signal = "aw1c-reconciled"
            if row_copy['failuremode'] not in (None, ''):
                member.gcManifestFailureMode = row_copy['failuremode']
                member.gcManifestFailureDescription = row_copy['failuremodedesc']
                _signal = 'aw1c-failed'
            # Resolve the next state once (the original called get_new_state
            # twice with identical arguments; it is a deterministic state-map
            # lookup, so one call suffices). Update state and state modified
            # time only if changed.
            new_state = GenomicStateHandler.get_new_state(
                member.genomicWorkflowState, signal=_signal)
            if member.genomicWorkflowState != new_state:
                member.genomicWorkflowState = new_state
                member.genomicWorkflowStateStr = member.genomicWorkflowState.name
                member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
            self.member_dao.update(member)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _get_site_from_aw1(self):
    """
    Returns the Genomic Center's site ID from the AW1 filename.

    The site id is the first underscore-separated component of the
    file's base name, lower-cased.

    :return: GC site ID string
    """
    basename = self.file_obj.fileName.split('/')[-1]
    site_component = basename.split("_")[0]
    return site_component.lower()
def _validate_collection_tube_id(self, collection_tube_id, bid):
    """
    Returns true if biobank_ID is associated to biobank_stored_sample_id
    (collection_tube_id)
    :param collection_tube_id: biobank stored sample id
    :param bid: biobank id to check against
    :return: boolean
    """
    stored_sample = self.sample_dao.get(collection_tube_id)
    # No stored sample record means the tube id cannot be validated.
    if not stored_sample:
        return False
    return int(stored_sample.biobankId) == int(bid)
@staticmethod
def _get_qc_status_from_value(aw4_value):
    """
    Returns the GenomicQcStatus enum value for an AW4 QC value.
    :param aw4_value: string from AW4 file (PASS/FAIL)
    :return: GenomicQcStatus (UNSET when the value is unrecognized)
    """
    # Normalize once instead of re-stripping/lowering for each comparison.
    normalized = aw4_value.strip().lower()
    if normalized == 'pass':
        return GenomicQcStatus.PASS
    if normalized == 'fail':
        return GenomicQcStatus.FAIL
    # Unrecognized values are logged and mapped to UNSET.
    logging.warning(f'Value from AW4 "{aw4_value}" is not PASS/FAIL.')
    return GenomicQcStatus.UNSET
def create_new_member_from_aw1_control_sample(self, aw1_data: dict) -> GenomicSetMember:
    """
    Creates a new control sample GenomicSetMember in RDR based on AW1 data.
    These will look like regular GenomicSetMember samples.
    :param aw1_data: dict from aw1 row
    :return: inserted GenomicSetMember
    """
    # Control-sample members are attached to the current max genomic set.
    genomic_set_id = self.member_dao.get_collection_tube_max_set_id()[0]
    # Build the member from the AW1 row (participantId 0 marks a control
    # sample), then layer on the AW1-derived and RDR-derived attributes
    # before inserting.
    member = GenomicSetMember(
        genomicSetId=genomic_set_id,
        participantId=0,
        biobankId=aw1_data['biobankid'],
        collectionTubeId=aw1_data['collectiontubeid'],
        validationStatus=GenomicSetMemberStatus.VALID,
        genomeType=aw1_data['genometype'],
        genomicWorkflowState=GenomicWorkflowState.AW1,
        genomicWorkflowStateStr=GenomicWorkflowState.AW1.name
    )
    member = self._set_member_attributes_from_aw1(aw1_data, member)
    member = self._set_rdr_member_attributes_for_aw1(aw1_data, member)
    return self.member_dao.insert(member)
@staticmethod
def _participant_has_potentially_clean_samples(session, biobank_id):
    """Check for any stored sample for the participant that is not contaminated
    and is a 1ED04, 1ED10, or 1SAL2 test

    :param session: open SQLAlchemy session
    :param biobank_id: participant's biobank id
    :return: bool — True if at least one qualifying sample exists
    """
    # Received samples (status below SAMPLE_NOT_RECEIVED) with no matching
    # genomic_sample_contamination row (outer join + id IS NULL) and one of
    # the accepted test codes.
    query = session.query(BiobankStoredSample).filter(
        BiobankStoredSample.biobankId == biobank_id,
        BiobankStoredSample.status < SampleStatus.SAMPLE_NOT_RECEIVED
    ).outerjoin(GenomicSampleContamination).filter(
        GenomicSampleContamination.id.is_(None),
        BiobankStoredSample.test.in_(['1ED04', '1ED10', '1SAL2'])
    )
    # EXISTS wrapper lets the database short-circuit instead of fetching rows.
    exists_query = session.query(query.exists())
    return exists_query.scalar()
def _record_sample_as_contaminated(self, session, sample_id):
    """Adds a GenomicSampleContamination row tying the sample to this job."""
    contamination_record = GenomicSampleContamination(
        sampleId=sample_id,
        failedInJob=self.job_id
    )
    session.add(contamination_record)
def calculate_contamination_category(self, sample_id, raw_contamination, member: GenomicSetMember):
    """
    Takes contamination value from AW2 and calculates GenomicContaminationCategory
    :param sample_id: biobank stored sample id for the contaminated sample
    :param raw_contamination: contamination fraction from the AW2 metrics row
    :param member: GenomicSetMember the sample belongs to
    :return: GenomicContaminationCategory
    """
    ps_dao = ParticipantSummaryDao()
    # NOTE(review): ps may be None for control samples (participantId == 0);
    # the GROR branches below would then raise — confirm callers exclude them.
    ps = ps_dao.get(member.participantId)
    contamination_category = GenomicContaminationCategory.UNSET
    # No Extract if contamination <1%
    if raw_contamination < 0.01:
        contamination_category = GenomicContaminationCategory.NO_EXTRACT
    # Only extract WGS if contamination between 1 and 3 % inclusive AND ROR
    elif (0.01 <= raw_contamination <= 0.03) and ps.consentForGenomicsROR == QuestionnaireStatus.SUBMITTED:
        contamination_category = GenomicContaminationCategory.EXTRACT_WGS
    # No Extract if contamination between 1 and 3 % inclusive and GROR is not Yes
    elif (0.01 <= raw_contamination <= 0.03) and ps.consentForGenomicsROR != QuestionnaireStatus.SUBMITTED:
        contamination_category = GenomicContaminationCategory.NO_EXTRACT
    # Extract Both if contamination > 3%
    elif raw_contamination > 0.03:
        contamination_category = GenomicContaminationCategory.EXTRACT_BOTH
    with ps_dao.session() as session:
        if raw_contamination >= 0.01:
            # Record in the contamination table, regardless of GROR consent
            self._record_sample_as_contaminated(session, sample_id)
        # Escalate to TERMINAL when the participant has no other clean
        # sample left to extract from.
        if contamination_category != GenomicContaminationCategory.NO_EXTRACT and \
                not self._participant_has_potentially_clean_samples(session, member.biobankId):
            contamination_category = GenomicContaminationCategory.TERMINAL_NO_EXTRACT
    return contamination_category
def _set_metrics_array_data_file_paths(self, row: dict) -> dict:
    """
    Fills in the expected GC bucket paths for required array data files
    on the metrics row. Returns the row unchanged when no bucket is
    mapped for the submitting site.
    :param row: cleaned AW2 metrics row dict
    :return: the same row dict, with file-path attributes populated
    """
    gc_site_bucket_map = config.getSettingJson(config.GENOMIC_GC_SITE_BUCKET_MAP, {})
    site_id = self.file_obj.fileName.split('_')[0].lower()
    gc_bucket = config.getSetting(gc_site_bucket_map.get(site_id), None)
    if not gc_bucket:
        return row
    base_path = f'gs://{gc_bucket}/Genotyping_sample_raw_data/{row["chipwellbarcode"]}'
    for file_def in array_file_types_attributes:
        if not file_def['required']:
            continue
        # idat files are suffixed "<barcode>_<type>", all others "<barcode>.<type>"
        separator = '_' if 'idat' in file_def["file_type"] else '.'
        row[file_def['file_path_attribute']] = f'{base_path}{separator}{file_def["file_type"]}'
    return row
def _set_metrics_wgs_data_file_paths(self, row: dict) -> dict:
    """
    Fills in the expected GC bucket paths for required WGS data files
    on the metrics row. Returns the row unchanged when no bucket is
    mapped for the submitting site.
    :param row: cleaned AW2 metrics row dict
    :return: the same row dict, with file-path attributes populated
    """
    gc_site_bucket_map = config.getSettingJson(config.GENOMIC_GC_SITE_BUCKET_MAP, {})
    site_id = self.file_obj.fileName.split('_')[0].lower()
    gc_bucket = config.getSetting(gc_site_bucket_map.get(site_id), None)
    if not gc_bucket:
        return row
    for file_def in wgs_file_types_attributes:
        if not file_def['required']:
            continue
        file_type = file_def["file_type"]
        # Each site/file-type pair has its own datafile prefix folder.
        prefix = genome_center_datafile_prefix_map[site_id][file_type]
        file_path = (
            f'gs://{gc_bucket}/{prefix}/'
            f'{site_id.upper()}_{row["biobankid"]}_{row["sampleid"]}_{row["limsid"]}_1.'
            f'{file_type}'
        )
        row[file_def['file_path_attribute']] = file_path
    return row
def _update_member_state_after_aw2(self, member: GenomicSetMember):
    """
    Advances the member's workflow state after AW2 ingestion for array
    and WGS members; investigation genome types are left untouched.
    :param member: GenomicSetMember to update
    """
    signal_by_genome_type = {
        'aou_array': 'gem-ready',
        'aou_wgs': 'cvl-ready',
    }
    ready_signal = signal_by_genome_type.get(member.genomeType)
    if ready_signal is None:
        # Don't update state for investigation genome types
        return
    next_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState, signal=ready_signal)
    if next_state and next_state != member.genomicWorkflowState:
        self.member_dao.update_member_workflow_state(member, next_state)
class GenomicFileValidator:
    """
    This class validates the Genomic Centers files
    """
    # Maps the genome-type component of a file name to the internal genome type.
    GENOME_TYPE_MAPPINGS = {
        'gen': GENOME_TYPE_ARRAY,
        'seq': GENOME_TYPE_WGS,
    }

    def __init__(self, filename=None, data=None, schema=None, job_id=None, controller=None):
        """
        :param filename: name of the file to validate
        :param data: parsed CSV data, expected as {'fieldnames': [...], 'rows': [...]}
        :param schema: explicit schema tuple; if None it is derived from job_id
        :param job_id: GenomicJob enum value the file belongs to
        :param controller: parent job controller, used to create incidents
        """
        self.filename = filename
        self.data_to_validate = data
        self.valid_schema = schema
        self.job_id = job_id
        self.genome_type = None
        self.controller = controller
        self.gc_site_id = None

        # Expected (cleaned) column names for AW2 GC metrics files, per genome type.
        self.GC_METRICS_SCHEMAS = {
            GENOME_TYPE_WGS: (
                "biobankid",
                "sampleid",
                "biobankidsampleid",
                "limsid",
                "meancoverage",
                "genomecoverage",
                "aouhdrcoverage",
                "contamination",
                'samplesource',
                'mappedreadspct',
                "sexconcordance",
                "sexploidy",
                "alignedq30bases",
                "arrayconcordance",
                "processingstatus",
                "notes",
                "genometype"
            ),
            GENOME_TYPE_ARRAY: (
                "biobankid",
                "sampleid",
                "biobankidsampleid",
                "limsid",
                "chipwellbarcode",
                "callrate",
                "sexconcordance",
                "contamination",
                'samplesource',
                "processingstatus",
                "notes",
                "pipelineid",
                "genometype"
            ),
        }
        # Valid facility/site prefixes for CVL and GC file names.
        self.VALID_CVL_FACILITIES = ('rdr', 'co', 'uw', 'bcm')
        self.CVL_ANALYSIS_TYPES = ('hdrv1', 'pgxv1')
        self.VALID_GENOME_CENTERS = ('uw', 'bam', 'bcm', 'bi', 'jh', 'rdr')
        self.DRC_BROAD = 'drc_broad'

        # AW1 manifest columns (also reused for AW1F/AW1C/AW1CF).
        self.AW1_MANIFEST_SCHEMA = (
            "packageid",
            "biobankidsampleid",
            "boxstorageunitid",
            "boxid/plateid",
            "wellposition",
            "sampleid",
            "parentsampleid",
            "collectiontubeid",
            "matrixid",
            "collectiondate",
            "biobankid",
            "sexatbirth",
            "age",
            "nystate(y/n)",
            "sampletype",
            "treatments",
            "quantity(ul)",
            "totalconcentration(ng/ul)",
            "totaldna(ng)",
            "visitdescription",
            "samplesource",
            "study",
            "trackingnumber",
            "contact",
            "email",
            "studypi",
            "sitename",
            "genometype",
            "failuremode",
            "failuremodedesc"
        )
        # GEM A2 manifest columns.
        self.GEM_A2_SCHEMA = (
            "biobankid",
            "sampleid",
            "success",
            "dateofimport",
        )
        # GEM metrics aggregate columns.
        self.GEM_METRICS_SCHEMA = (
            "biobankid",
            "sampleid",
            "ancestryloopresponse",
            "availableresults",
            "resultsreleasedat",
        )
        # CVL W2 manifest columns.
        self.CVL_W2_SCHEMA = (
            "genomicsetname",
            "biobankid",
            "sexatbirth",
            "nyflag",
            "siteid",
            "secondaryvalidation",
            "datesubmitted",
            "testname",
        )
        # CVL W2SC (secondary confirmation) manifest columns.
        self.CVL_W2SC_SCHEMA = (
            "biobankid",
            "sampleid",
        )
        # CVL W3NS (sample not shipped) manifest columns.
        self.CVL_W3NS_SCHEMA = (
            "biobankid",
            "sampleid",
            "unavailablereason"
        )
        # CVL W3SC (secondary confirmation failure) manifest columns.
        self.CVL_W3SC_SCHEMA = (
            "biobankid",
            "sampleid",
            "cvlsecondaryconffailure"
        )
        # CVL W3SS (sample shipped) manifest columns.
        self.CVL_W3SS_SCHEMA = (
            "biobankid",
            "sampleid",
            "packageid",
            "version",
            "boxstorageunitid",
            "boxid/plateid",
            "wellposition",
            "cvlsampleid",
            "parentsampleid",
            "collectiontubeid",
            "matrixid",
            "collectiondate",
            "sexatbirth",
            "age",
            "nystate(y/n)",
            "sampletype",
            "treatments",
            "quantity(ul)",
            "totalconcentration(ng/ul)",
            "totaldna(ng)",
            "visitdescription",
            "samplesource",
            "study",
            "trackingnumber",
            "contact",
            "email",
            "studypi",
            "sitename",
            "genometype",
            "failuremode",
            "failuremodedesc"
        )
        # CVL W4WR (results ready) manifest columns.
        self.CVL_W4WR_SCHEMA = (
            "biobankid",
            "sampleid",
            "healthrelateddatafilename",
            "clinicalanalysistype"
        )
        # CVL W5NF (results failure) manifest columns.
        self.CVL_W5NF_SCHEMA = (
            "biobankid",
            "sampleid",
            "requestreason",
            "requestreasonfree",
            "healthrelateddatafilename",
            "clinicalanalysistype"
        )
        # AW4 array manifest columns.
        self.AW4_ARRAY_SCHEMA = (
            "biobankid",
            "sampleid",
            "sexatbirth",
            "siteid",
            "redidatpath",
            "redidatmd5path",
            "greenidatpath",
            "greenidatmd5path",
            "vcfpath",
            "vcfindexpath",
            "researchid",
            "qcstatus",
            "drcsexconcordance",
            "drccallrate",
            "passtoresearchpipeline"
        )
        # AW4 WGS manifest columns.
        self.AW4_WGS_SCHEMA = (
            "biobankid",
            "sampleid",
            "sexatbirth",
            "siteid",
            "vcfhfpath",
            "vcfhfmd5path",
            "vcfhfindexpath",
            "vcfrawpath",
            "vcfrawmd5path",
            "vcfrawindexpath",
            "crampath",
            "crammd5path",
            "craipath",
            "gvcfpath",
            "gvcfmd5path",
            "researchid",
            "qcstatus",
            "drcsexconcordance",
            "drccontamination",
            "drcmeancoverage",
            "drcfpconcordance",
            "passtoresearchpipeline"
        )
        # AW5 WGS manifest columns (set, not tuple — order is irrelevant here).
        self.AW5_WGS_SCHEMA = {
            "biobankid",
            "sampleid",
            "biobankidsampleid",
            "sexatbirth",
            "siteid",
            "vcfhf",
            "vcfhfindex",
            "vcfhfmd5",
            "vcfhfbasename",
            "vcfhfmd5hash",
            "vcfraw",
            "vcfrawindex",
            "vcfrawmd5",
            "vcfrawbasename",
            "vcfrawmd5hash",
            "cram",
            "crammd5",
            "crai",
            "crambasename",
            "crammd5hash",
            "gvcf",
            "gvcfmd5",
            "gvcfbasename",
            "gvcfmd5hash",
        }
        # AW5 array manifest columns (set, not tuple).
        self.AW5_ARRAY_SCHEMA = {
            "biobankid",
            "sampleid",
            "biobankidsampleid",
            "sexatbirth",
            "siteid",
            "redidat",
            "redidatmd5",
            "redidatbasename",
            "redidatmd5hash",
            "greenidat",
            "greenidatbasename",
            "greenidatmd5hash",
            "greenidatmd5",
            "vcf",
            "vcfindex",
            "vcfmd5",
            "vcfbasename",
            "vcfmd5hash",
        }
        # Per-job, per-genome-type allowed values for specific fields,
        # consumed by validate_values().
        self.values_for_validation = {
            GenomicJob.METRICS_INGESTION: {
                GENOME_TYPE_ARRAY: {
                    'pipelineid': ['cidr_egt_1', 'original_egt']
                },
            },
        }

    def set_genome_type(self):
        """Derives genome type from the 3rd file-name component (AW2 metrics only)."""
        if self.job_id in [GenomicJob.METRICS_INGESTION] and self.filename:
            file_type = self.filename.lower().split("_")[2]
            self.genome_type = self.GENOME_TYPE_MAPPINGS[file_type]

    def set_gc_site_id(self, fn_component):
        """
        Sets self.gc_site_id from the first file-name component for GC jobs,
        or to DRC_BROAD for AW4/AW5 jobs (which are always submitted by DRC).
        :param fn_component: first underscore-separated file-name component
        """
        if fn_component and \
                fn_component.lower() in self.VALID_GENOME_CENTERS and \
                self.job_id in [
                    GenomicJob.METRICS_INGESTION,
                    GenomicJob.AW1_MANIFEST,
                    GenomicJob.AW1C_INGEST,
                    GenomicJob.AW1CF_INGEST,
                    GenomicJob.AW1F_MANIFEST
                ]:
            self.gc_site_id = fn_component
        elif self.job_id in [
            GenomicJob.AW4_ARRAY_WORKFLOW,
            GenomicJob.AW4_WGS_WORKFLOW,
            GenomicJob.AW5_ARRAY_MANIFEST,
            GenomicJob.AW5_WGS_MANIFEST
        ]:
            self.gc_site_id = self.DRC_BROAD

    def validate_ingestion_file(self, *, filename, data_to_validate):
        """
        Procedure to validate an ingestion file.

        Runs three checks in order — file name, field values, file
        structure — creating an incident and returning early on the
        first failure.

        :param filename: file name (may include a path component)
        :param data_to_validate: {'fieldnames': [...], 'rows': [...]}
        :return: result code (GenomicSubProcessResult)
        """
        self.filename = filename
        self.set_genome_type()
        file_processed = self.controller. \
            file_processed_dao.get_record_from_filename(filename)
        # validates filenames for each job
        validated_filename = self.validate_filename(filename)
        if not validated_filename:
            # NOTE(review): adjacent f-strings below concatenate with no
            # separating space, producing "...due to anincorrect file name."
            self.controller.create_incident(
                source_job_run_id=self.controller.job_run.id,
                source_file_processed_id=file_processed.id,
                code=GenomicIncidentCode.FILE_VALIDATION_INVALID_FILE_NAME.name,
                message=f"{self.job_id.name}: File name {filename.split('/')[1]} has failed validation due to an"
                        f"incorrect file name.",
                slack=True,
                submitted_gc_site_id=self.gc_site_id,
                manifest_file_name=self.filename
            )
            return GenomicSubProcessResult.INVALID_FILE_NAME
        # validates values in fields if specified for job
        values_validation_failed, message = self.validate_values(data_to_validate)
        if values_validation_failed:
            self.controller.create_incident(
                source_job_run_id=self.controller.job_run.id,
                source_file_processed_id=file_processed.id,
                code=GenomicIncidentCode.FILE_VALIDATION_FAILED_VALUES.name,
                message=message,
                slack=True,
                submitted_gc_site_id=self.gc_site_id,
                manifest_file_name=self.filename
            )
            return GenomicSubProcessResult.ERROR
        # validates file structure rules
        struct_valid_result, missing_fields, extra_fields, expected = self._check_file_structure_valid(
            data_to_validate['fieldnames'])
        if not struct_valid_result:
            slack = True
            invalid_message = f"{self.job_id.name}: File structure of (unknown) is not valid."
            if extra_fields:
                invalid_message += f" Extra fields: {', '.join(extra_fields)}"
            if missing_fields:
                invalid_message += f" Missing fields: {', '.join(missing_fields)}"
                # If every expected field is missing, this is likely a totally
                # different file — suppress the slack alert.
                if len(missing_fields) == len(expected):
                    slack = False
            self.controller.create_incident(
                source_job_run_id=self.controller.job_run.id,
                source_file_processed_id=file_processed.id,
                code=GenomicIncidentCode.FILE_VALIDATION_FAILED_STRUCTURE.name,
                message=invalid_message,
                slack=slack,
                submitted_gc_site_id=self.gc_site_id,
                manifest_file_name=self.filename
            )
            return GenomicSubProcessResult.INVALID_FILE_STRUCTURE
        return GenomicSubProcessResult.SUCCESS

    def validate_filename(self, filename):
        """
        Applies a naming rule to an arbitrary filename
        Naming rules are defined as local functions and
        Mapped to a Genomic Job ID in naming_rules dict.
        :param filename: passed to each name rule as 'fn'
        :return: boolean
        """
        filename_components = [x.lower() for x in filename.split('/')[-1].split("_")]
        self.set_gc_site_id(filename_components[0])

        # Naming Rule Definitions
        def gc_validation_metrics_name_rule():
            """GC metrics file name rule"""
            return (
                filename_components[0] in self.VALID_GENOME_CENTERS and
                filename_components[1] == 'aou' and
                filename_components[2] in ('seq', 'gen') and
                filename.lower().endswith('csv')
            )

        def bb_to_gc_manifest_name_rule():
            """Biobank to GCs manifest name rule"""
            return (
                filename_components[0] in self.VALID_GENOME_CENTERS and
                filename_components[1] == 'aou' and
                filename_components[2] in ('seq', 'gen') and
                filename.lower().endswith('csv')
            )

        def aw1f_manifest_name_rule():
            """Biobank to GCs Failure (AW1F) manifest name rule"""
            return (
                len(filename_components) == 5 and
                filename_components[0] in self.VALID_GENOME_CENTERS and
                filename_components[1] == 'aou' and
                filename_components[2] in ('seq', 'gen') and
                re.search(r"pkg-[0-9]{4}-[0-9]{5,}$",
                          filename_components[3]) is not None and
                filename_components[4] == 'failure.csv' and
                filename.lower().endswith('csv')
            )

        def cvl_w2sc_manifest_name_rule():
            """
            CVL W2SC (secondary confirmation) manifest name rule
            """
            return (
                len(filename_components) == 5 and
                filename_components[0] in self.VALID_CVL_FACILITIES and
                filename_components[1] == 'aou' and
                filename_components[2] == 'cvl' and
                filename_components[3] == 'w2sc' and
                filename.lower().endswith('csv')
            )

        def cvl_w3ns_manifest_name_rule():
            """
            CVL W3NS manifest name rule
            """
            return (
                len(filename_components) == 5 and
                filename_components[0] in self.VALID_CVL_FACILITIES and
                filename_components[1] == 'aou' and
                filename_components[2] == 'cvl' and
                filename_components[3] == 'w3ns' and
                filename.lower().endswith('csv')
            )

        def cvl_w3sc_manifest_name_rule():
            """
            CVL W3SC manifest name rule
            """
            return (
                len(filename_components) == 5 and
                filename_components[0] in self.VALID_CVL_FACILITIES and
                filename_components[1] == 'aou' and
                filename_components[2] == 'cvl' and
                filename_components[3] == 'w3sc' and
                filename.lower().endswith('csv')
            )

        def cvl_w3ss_manifest_name_rule():
            """
            CVL W3SS manifest name rule
            """
            return (
                len(filename_components) == 4 and
                filename_components[0] in self.VALID_CVL_FACILITIES and
                filename_components[1] == 'aou' and
                filename_components[2] == 'cvl' and
                'pkg' in filename_components[3] and
                filename.lower().endswith('csv')
            )

        def cvl_w4wr_manifest_name_rule():
            """
            CVL W4WR manifest name rule
            """
            return (
                len(filename_components) == 6 and
                filename_components[0] in self.VALID_CVL_FACILITIES and
                filename_components[1] == 'aou' and
                filename_components[2] == 'cvl' and
                filename_components[3] == 'w4wr' and
                filename_components[4] in
                [k.lower() for k in ResultsModuleType.to_dict().keys()]
                and filename.lower().endswith('csv')
            )

        def cvl_w5nf_manifest_name_rule():
            """
            CVL W5NF manifest name rule
            """
            return (
                len(filename_components) == 7 and
                filename_components[0] in self.VALID_CVL_FACILITIES and
                filename_components[1] == 'aou' and
                filename_components[2] == 'cvl' and
                filename_components[3] == 'w5nf' and
                filename_components[4] in
                [k.lower() for k in ResultsModuleType.to_dict().keys()]
                and filename.lower().endswith('csv')
            )

        def gem_a2_manifest_name_rule():
            """GEM A2 manifest name rule: i.e. AoU_GEM_A2_manifest_2020-07-11-00-00-00.csv"""
            return (
                len(filename_components) == 5 and
                filename_components[0] == 'aou' and
                filename_components[1] == 'gem' and
                filename_components[2] == 'a2' and
                filename.lower().endswith('csv')
            )

        def gem_metrics_name_rule():
            """GEM Metrics name rule: i.e. AoU_GEM_metrics_aggregate_2020-07-11-00-00-00.csv"""
            return (
                filename_components[0] == 'aou' and
                filename_components[1] == 'gem' and
                filename_components[2] == 'metrics' and
                filename.lower().endswith('csv')
            )

        def aw4_arr_manifest_name_rule():
            """DRC Broad AW4 Array manifest name rule: i.e. AoU_DRCB_GEN_2020-07-11-00-00-00.csv"""
            return (
                filename_components[0] == 'aou' and
                filename_components[1] == 'drcb' and
                filename_components[2] == 'gen' and
                filename.lower().endswith('csv')
            )

        def aw4_wgs_manifest_name_rule():
            """DRC Broad AW4 WGS manifest name rule: i.e. AoU_DRCB_SEQ_2020-07-11-00-00-00.csv"""
            return (
                filename_components[0] == 'aou' and
                filename_components[1] == 'drcb' and
                filename_components[2] == 'seq' and
                filename.lower().endswith('csv')
            )

        def aw5_wgs_manifest_name_rule():
            # don't have name convention right now, if have in the future, add here
            return filename.lower().endswith('csv')

        def aw5_array_manifest_name_rule():
            # don't have name convention right now, if have in the future, add here
            return filename.lower().endswith('csv')

        # Dispatch: job id -> name rule.
        ingestion_name_rules = {
            GenomicJob.METRICS_INGESTION: gc_validation_metrics_name_rule,
            GenomicJob.AW1_MANIFEST: bb_to_gc_manifest_name_rule,
            GenomicJob.AW1F_MANIFEST: aw1f_manifest_name_rule,
            GenomicJob.GEM_A2_MANIFEST: gem_a2_manifest_name_rule,
            GenomicJob.AW4_ARRAY_WORKFLOW: aw4_arr_manifest_name_rule,
            GenomicJob.AW4_WGS_WORKFLOW: aw4_wgs_manifest_name_rule,
            GenomicJob.GEM_METRICS_INGEST: gem_metrics_name_rule,
            GenomicJob.AW5_WGS_MANIFEST: aw5_wgs_manifest_name_rule,
            GenomicJob.AW5_ARRAY_MANIFEST: aw5_array_manifest_name_rule,
            GenomicJob.CVL_W2SC_WORKFLOW: cvl_w2sc_manifest_name_rule,
            GenomicJob.CVL_W3NS_WORKFLOW: cvl_w3ns_manifest_name_rule,
            GenomicJob.CVL_W3SC_WORKFLOW: cvl_w3sc_manifest_name_rule,
            GenomicJob.CVL_W3SS_WORKFLOW: cvl_w3ss_manifest_name_rule,
            GenomicJob.CVL_W4WR_WORKFLOW: cvl_w4wr_manifest_name_rule,
            GenomicJob.CVL_W5NF_WORKFLOW: cvl_w5nf_manifest_name_rule
        }
        try:
            is_valid_filename = ingestion_name_rules[self.job_id]()
            return is_valid_filename
        except KeyError:
            # NOTE(review): returns GenomicSubProcessResult.ERROR (a truthy
            # enum member) rather than False for unmapped job ids, despite the
            # documented boolean return — confirm callers expect this.
            return GenomicSubProcessResult.ERROR

    def validate_values(self, data):
        """
        Validates specific field values against the per-job allow-lists in
        self.values_for_validation; stops at the first invalid value.
        :param data: {'fieldnames': [...], 'rows': [...]}
        :return: (is_invalid: bool, message: str or None)
        """
        is_invalid, message = False, None
        cleaned_fieldnames = [self._clean_field_name(fieldname) for fieldname in data['fieldnames']]
        try:
            if self.genome_type:
                values_to_check = self.values_for_validation[self.job_id][self.genome_type]
            else:
                values_to_check = self.values_for_validation[self.job_id]
        except KeyError:
            # No value rules configured for this job/genome type: nothing to do.
            return is_invalid, message
        for field_name, field_values in values_to_check.items():
            if field_name not in cleaned_fieldnames:
                continue
            pos = cleaned_fieldnames.index(field_name)
            for row in data['rows']:
                value_check = list(row.values())[pos]
                if value_check not in field_values:
                    message = f"{self.job_id.name}: Value for {data['fieldnames'][pos]} is invalid: {value_check}"
                    is_invalid = True
                    return is_invalid, message
        return is_invalid, message

    @staticmethod
    def _clean_field_name(fieldname):
        # Strip BOM, spaces and underscores and lower-case, so header
        # variations compare equal against the schemas.
        return fieldname.lower().replace('\ufeff', '').replace(' ', '').replace('_', '')

    def _check_file_structure_valid(self, fields):
        """
        Validates the structure of the CSV against a defined set of columns.
        :param fields: the data from the CSV file; dictionary per row.
        :return: (valid: bool, missing_fields, extra_fields, expected schema)
        """
        missing_fields, extra_fields = None, None
        if not self.valid_schema:
            self.valid_schema = self._set_schema()
        cases = tuple([self._clean_field_name(field) for field in fields])
        all_file_columns_valid = all([c in self.valid_schema for c in cases])
        all_expected_columns_in_file = all([c in cases for c in self.valid_schema])
        if not all_file_columns_valid:
            extra_fields = list(set(cases) - set(self.valid_schema))
        if not all_expected_columns_in_file:
            missing_fields = list(set(self.valid_schema) - set(cases))
        return \
            all([all_file_columns_valid, all_expected_columns_in_file]), \
            missing_fields, \
            extra_fields, \
            self.valid_schema

    def _set_schema(self):
        """
        Sets schema via the job id
        :return: schema_to_validate,
        (tuple from the CSV_SCHEMA or result code of INVALID_FILE_NAME).
        """
        try:
            if self.job_id == GenomicJob.METRICS_INGESTION:
                return self.GC_METRICS_SCHEMAS[self.genome_type]
            if self.job_id == GenomicJob.AW1_MANIFEST:
                return self.AW1_MANIFEST_SCHEMA
            if self.job_id == GenomicJob.GEM_A2_MANIFEST:
                return self.GEM_A2_SCHEMA
            if self.job_id == GenomicJob.AW1F_MANIFEST:
                return self.AW1_MANIFEST_SCHEMA  # AW1F and AW1 use same schema
            if self.job_id == GenomicJob.GEM_METRICS_INGEST:
                return self.GEM_METRICS_SCHEMA
            if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
                return self.AW4_ARRAY_SCHEMA
            if self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
                return self.AW4_WGS_SCHEMA
            if self.job_id in (GenomicJob.AW1C_INGEST, GenomicJob.AW1CF_INGEST):
                return self.AW1_MANIFEST_SCHEMA
            if self.job_id == GenomicJob.AW5_WGS_MANIFEST:
                # AW5 file names carry no genome-type component; set it here.
                self.genome_type = self.GENOME_TYPE_MAPPINGS['seq']
                return self.AW5_WGS_SCHEMA
            if self.job_id == GenomicJob.AW5_ARRAY_MANIFEST:
                self.genome_type = self.GENOME_TYPE_MAPPINGS['gen']
                return self.AW5_ARRAY_SCHEMA
            if self.job_id == GenomicJob.CVL_W2SC_WORKFLOW:
                return self.CVL_W2SC_SCHEMA
            if self.job_id == GenomicJob.CVL_W3NS_WORKFLOW:
                return self.CVL_W3NS_SCHEMA
            if self.job_id == GenomicJob.CVL_W3SC_WORKFLOW:
                return self.CVL_W3SC_SCHEMA
            if self.job_id == GenomicJob.CVL_W3SS_WORKFLOW:
                return self.CVL_W3SS_SCHEMA
            if self.job_id == GenomicJob.CVL_W4WR_WORKFLOW:
                return self.CVL_W4WR_SCHEMA
            if self.job_id == GenomicJob.CVL_W5NF_WORKFLOW:
                return self.CVL_W5NF_SCHEMA
        except (IndexError, KeyError):
            return GenomicSubProcessResult.ERROR
class GenomicFileMover:
    """
    This utility class moves files in the bucket by copying into an archive folder
    and deleting the old instance.
    """

    def __init__(self, archive_folder=None):
        """
        :param archive_folder: bucket subfolder files are archived into
        """
        self.archive_folder = archive_folder

    def archive_file(self, file_obj=None, file_path=None):
        """
        This method moves a file to an archive
        by copy and delete
        :param file_obj: a genomic_file_processed object to move
        :param file_path: source path string, used when no file_obj is supplied
        :return:
        """
        source_path = file_obj.filePath if file_obj else file_path
        file_name = source_path.split('/')[-1]
        archive_path = source_path.replace(file_name,
                                           f"{self.archive_folder}/"
                                           f"{file_name}")
        try:
            copy_cloud_file(source_path, archive_path)
            delete_cloud_file(source_path)
        except FileNotFoundError:
            # BUG FIX: log the resolved source_path. The original referenced
            # file_obj.filePath here, which raised AttributeError when the
            # method was called with file_path only (file_obj is None).
            logging.error(f"No file found at '{source_path}'")
class GenomicReconciler:
    """ This component handles reconciliation between genomic datasets """

    def __init__(self, run_id, job_id, archive_folder=None, file_mover=None,
                 bucket_name=None, storage_provider=None, controller=None):
        """
        :param run_id: genomic job run id for this reconciliation run
        :param job_id: GenomicJob enum value
        :param archive_folder: bucket subfolder used for archiving
        :param file_mover: GenomicFileMover instance
        :param bucket_name: bucket the reports are written to
        :param storage_provider: cloud storage provider
        :param controller: parent job controller (incidents, cloud tasks)
        """
        self.run_id = run_id
        self.job_id = job_id
        self.bucket_name = bucket_name
        self.archive_folder = archive_folder
        self.cvl_file_name = None
        self.file_list = None
        self.ready_signal = None
        # Dao components
        self.member_dao = GenomicSetMemberDao()
        self.metrics_dao = GenomicGCValidationMetricsDao()
        self.file_dao = GenomicFileProcessedDao()
        self.data_file_dao = GenomicGcDataFileDao()
        self.data_file_missing_dao = GenomicGcDataFileMissingDao()
        # Other components
        self.file_mover = file_mover
        self.storage_provider = storage_provider
        self.controller = controller

    def process_missing_data(self, metric, missing_data_files, genome_type):
        """
        Creates a MISSING_FILES incident for an AW2 metrics record whose
        expected data files are absent, after removing any file types
        configured to be skipped for this genome type.
        :param metric: GenomicGCValidationMetrics record
        :param missing_data_files: iterable of missing data-file type names
        :param genome_type: key into the skip-filetypes config
        """
        missing_files_config = config.getSettingJson(config.GENOMIC_SKIP_MISSING_FILETYPES, {})
        missing_files_config = missing_files_config.get(genome_type)
        if missing_files_config:
            missing_files_config = list(missing_files_config) if not type(missing_files_config) \
                is list else missing_files_config
            missing_data_files = [
                x for x in list(missing_data_files) if x not in missing_files_config
            ]
        if missing_data_files:
            file = self.file_dao.get(metric.genomicFileProcessedId)
            member = self.member_dao.get(metric.genomicSetMemberId)
            # Build the multi-line incident description.
            description = f"{self.job_id.name}: The following AW2 manifests are missing data files."
            description += f"\nGenomic Job Run ID: {self.run_id}"
            file_list = '\n'.join(missing_data_files)
            description += f"\nManifest File: {file.fileName}"
            description += "\nMissing Data File(s):"
            description += f"\n{file_list}"
            self.controller.create_incident(
                source_job_run_id=self.run_id,
                source_file_processed_id=file.id,
                code=GenomicIncidentCode.MISSING_FILES.name,
                message=description,
                genomic_set_member_id=member.id,
                biobank_id=member.biobankId,
                sample_id=member.sampleId if member.sampleId else "",
                collection_tube_id=member.collectionTubeId if member.collectionTubeId else "",
                slack=True
            )

    def generate_cvl_reconciliation_report(self):
        """
        The main method for the CVL Reconciliation report,
        outputs report file to the cvl subfolder and updates
        genomic_set_member
        :return: result code
        """
        members = self.member_dao.get_members_for_cvl_reconciliation()
        if members:
            cvl_subfolder = getSetting(GENOMIC_CVL_RECONCILIATION_REPORT_SUBFOLDER)
            self.cvl_file_name = f"{cvl_subfolder}/cvl_report_{self.run_id}.csv"
            self._write_cvl_report_to_file(members)
            # Record the reconciliation job run on each member asynchronously.
            self.controller.execute_cloud_task({
                'member_ids': [m.id for m in members],
                'field': 'reconcileCvlJobRunId',
                'value': self.run_id,
                'is_job_run': True,
            }, 'genomic_set_member_update_task')
            return GenomicSubProcessResult.SUCCESS
        return GenomicSubProcessResult.NO_FILES

    def update_report_states_for_consent_removal(self, workflow_states):
        """
        Updates report states if gror or primary consent is not yes
        :param workflow_states: list of GenomicWorkflowStates
        """
        # Get unconsented members to update
        unconsented_gror_members = self.member_dao.get_unconsented_gror_or_primary(workflow_states)
        # update each member with the new state and withdrawal time
        for member in unconsented_gror_members:
            new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
                                                          signal='unconsented')
            # BUG FIX: was `is not None or`, which also entered the branch
            # with new_state=None whenever the state differed, passing None
            # into update_member_workflow_state. `and` matches the
            # `next_state and next_state != ...` guard used elsewhere here.
            if new_state is not None and new_state != member.genomicWorkflowState:
                self.member_dao.update_member_workflow_state(member, new_state)
                # Handle withdrawal (gror/primary consent) for reportConsentRemovalDate
                removal_date = self.member_dao.get_consent_removal_date(member)
                if removal_date:
                    self.member_dao.update_report_consent_removal_date(member, removal_date)

    def update_report_state_for_reconsent(self, last_run_time):
        """
        This code is not currently executed, the reconsent has been deferred.
        :param last_run_time:
        :return:
        """
        # Get reconsented members to update (consent > last run time of job_id)
        reconsented_gror_members = self.member_dao.get_reconsented_gror_since_date(last_run_time)
        # update each member with the new state
        for member in reconsented_gror_members:
            new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
                                                          signal='reconsented')
            # BUG FIX: `and` instead of `or` — see
            # update_report_states_for_consent_removal above.
            if new_state is not None and new_state != member.genomicWorkflowState:
                self.member_dao.update_member_workflow_state(member, new_state)
                self.member_dao.update_report_consent_removal_date(member, None)

    def _write_cvl_report_to_file(self, members):
        """
        writes data to csv file in bucket
        :param members: members to include in the report
        :return: result code
        """
        try:
            # extract only columns we need
            cvl_columns = ('biobank_id', 'sample_id', 'member_id')
            report_data = ((m.biobankId, m.sampleId, m.id) for m in members)
            # Use SQL exporter
            exporter = SqlExporter(self.bucket_name)
            with exporter.open_cloud_writer(self.cvl_file_name) as writer:
                writer.write_header(cvl_columns)
                writer.write_rows(report_data)
            return GenomicSubProcessResult.SUCCESS
        except RuntimeError:
            return GenomicSubProcessResult.ERROR
class GenomicBiobankSamplesCoupler:
    """This component creates the source data for Cohort 3:
    new genomic set and members from the biobank samples pipeline.
    Class uses the manifest handler to create and upload a manifest"""

    # Maps participant sex-at-birth answer codes to manifest codes.
    _SEX_AT_BIRTH_CODES = {
        'male': 'M',
        'female': 'F',
        'none_intersex': 'NA'
    }
    # Validation flags applied when a member fails eligibility checks.
    _VALIDATION_FLAGS = (GenomicValidationFlag.INVALID_WITHDRAW_STATUS,
                         GenomicValidationFlag.INVALID_SUSPENSION_STATUS,
                         GenomicValidationFlag.INVALID_CONSENT,
                         GenomicValidationFlag.INVALID_AGE,
                         GenomicValidationFlag.INVALID_SEX_AT_BIRTH)
    # Genome type identifiers used when creating members.
    _ARRAY_GENOME_TYPE = "aou_array"
    _WGS_GENOME_TYPE = "aou_wgs"
    _LR_GENOME_TYPE = "long_read"
    # Cohort identifiers for the workflow entry points below.
    COHORT_1_ID = "C1"
    COHORT_2_ID = "C2"
    COHORT_3_ID = "C3"
    # Column-wise container for a batch of sample metadata; each field is a
    # sequence aligned by index across all fields.
    GenomicSampleMeta = namedtuple("GenomicSampleMeta", ["bids",
                                                         "pids",
                                                         "order_ids",
                                                         "site_ids",
                                                         "state_ids",
                                                         "sample_ids",
                                                         "valid_withdrawal_status",
                                                         "valid_suspension_status",
                                                         "gen_consents",
                                                         "valid_ages",
                                                         "sabs",
                                                         "gror",
                                                         "is_ai_an",
                                                         "origins"])
def __init__(self, run_id, controller=None):
    """
    :param run_id: genomic job run id recorded on created members
    :param controller: parent job controller (optional)
    """
    # DAO handles for biobank samples, genomic sets/members, sites,
    # participant summaries and codes.
    self.samples_dao = BiobankStoredSampleDao()
    self.set_dao = GenomicSetDao()
    self.member_dao = GenomicSetMemberDao()
    self.site_dao = SiteDao()
    self.ps_dao = ParticipantSummaryDao()
    self.code_dao = CodeDao()
    self.run_id = run_id
    self.controller = controller
    # Pre-built genomic SQL query helper.
    self.query = GenomicQueryClass()
def create_new_genomic_participants(self, from_date):
    """
    This method determines which samples to enter into the genomic system
    from Cohort 3 (New Participants).
    Validation is handled in the query that retrieves the newly consented
    participants' samples to process.
    :param: from_date : the date from which to lookup new biobank_ids
    :return: result
    """
    samples = self._get_new_biobank_samples(from_date)
    if not samples:
        logging.info(f'New Participant Workflow: No new samples to process.')
        return GenomicSubProcessResult.NO_FILES
    samples_meta = self.GenomicSampleMeta(*samples)
    return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_3_ID)
def create_saliva_genomic_participants(self, local=False, _config=None):
"""
This method determines which samples to enter into
the genomic system that are saliva only, via the
config obj passed in the argument.
:param: config : options for ror consent type and denoting if sample was generated in-home or in-clinic
:return: result
"""
participants = self._get_remaining_saliva_participants(_config)
if len(participants) > 0:
return self.create_matrix_and_process_samples(participants, cohort=None, local=local, saliva=True)
else:
logging.info(
f'Saliva Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_c2_genomic_participants(self, local=False):
"""
Creates Cohort 2 Participants in the genomic system.
Validation is handled in the query.
Refactored to first pull valid participants, then pull their samples,
applying the new business logic of prioritizing
collection date & blood over saliva.
:return: result
"""
samples = self._get_remaining_c2_samples()
if len(samples) > 0:
samples_meta = self.GenomicSampleMeta(*samples)
return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_2_ID, local=local)
else:
logging.info(f'Cohort 2 Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_c1_genomic_participants(self):
"""
Creates Cohort 1 Participants in the genomic system using reconsent.
Validation is handled in the query that retrieves the newly consented
participants. Only valid participants are currently sent.
:param: from_date : the date from which to lookup new participants
:return: result
"""
samples = self._get_remaining_c1_samples()
if len(samples) > 0:
samples_meta = self.GenomicSampleMeta(*samples)
return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_1_ID)
else:
logging.info(f'Cohort 1 Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_long_read_genomic_participants(self, limit=None):
"""
Create long_read participants that are already in the genomic system,
based on downstream filters.
:return:
"""
participants = self._get_long_read_participants(limit)
if len(participants) > 0:
return self.process_genomic_members_into_manifest(
participants=participants,
genome_type=self._LR_GENOME_TYPE
)
logging.info(f'Long Read Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
    def process_genomic_members_into_manifest(self, *, participants, genome_type):
        """
        Compiles AW0 Manifest from already submitted genomic members.
        :param participants: existing GenomicSetMember rows to duplicate into a new set
        :param genome_type: genome type string stamped on the duplicated members
        :return: the id of the newly created genomic set
        """
        new_genomic_set = self._create_new_genomic_set()
        processed_members = []
        count = 0
        # duplicate genomic set members
        with self.member_dao.session() as session:
            for i, participant in enumerate(participants):
                dup_member_obj = GenomicSetMember(
                    biobankId=participant.biobankId,
                    genomicSetId=new_genomic_set.id,
                    participantId=participant.participantId,
                    nyFlag=participant.nyFlag,
                    sexAtBirth=participant.sexAtBirth,
                    collectionTubeId=participant.collectionTubeId,
                    validationStatus=participant.validationStatus,
                    validationFlags=participant.validationFlags,
                    ai_an=participant.ai_an,
                    genomeType=genome_type,
                    genomicWorkflowState=GenomicWorkflowState.LR_PENDING,
                    genomicWorkflowStateStr=GenomicWorkflowState.LR_PENDING.name,
                    participantOrigin=participant.participantOrigin,
                    created=clock.CLOCK.now(),
                    modified=clock.CLOCK.now(),
                )
                processed_members.append(dup_member_obj)
                count = i + 1
                # Flush every 100 members to bound memory and transaction size.
                if count % 100 == 0:
                    self.genomic_members_insert(
                        members=processed_members,
                        session=session,
                    )
                    processed_members.clear()
            # Insert any remainder smaller than a full batch.
            if count and processed_members:
                self.genomic_members_insert(
                    members=processed_members,
                    session=session,
                )
        return new_genomic_set.id
def process_samples_into_manifest(self, samples_meta, cohort, saliva=False, local=False):
"""
Compiles AW0 Manifest from samples list.
:param samples_meta:
:param cohort:
:param saliva:
:param local: overrides automatic push to bucket
:return: job result code
"""
logging.info(f'{self.__class__.__name__}: Processing new biobank_ids {samples_meta.bids}')
new_genomic_set = self._create_new_genomic_set()
processed_array_wgs = []
count = 0
bids = []
# Create genomic set members
with self.member_dao.session() as session:
for i, bid in enumerate(samples_meta.bids):
# Don't write participant to table if no sample
if samples_meta.sample_ids[i] == 0:
continue
logging.info(f'Validating sample: {samples_meta.sample_ids[i]}')
validation_criteria = (
samples_meta.valid_withdrawal_status[i],
samples_meta.valid_suspension_status[i],
samples_meta.gen_consents[i],
samples_meta.valid_ages[i],
samples_meta.sabs[i] in self._SEX_AT_BIRTH_CODES.values()
)
valid_flags = self._calculate_validation_flags(validation_criteria)
logging.info(f'Creating genomic set members for PID: {samples_meta.pids[i]}')
# Get NY flag for collected-site
if samples_meta.site_ids[i]:
_ny_flag = self._get_new_york_flag_from_site(samples_meta.site_ids[i])
# Get NY flag for mail-kit
elif samples_meta.state_ids[i]:
_ny_flag = self._get_new_york_flag_from_state_id(samples_meta.state_ids[i])
# default ny flag if no state id
elif not samples_meta.state_ids[i]:
_ny_flag = 0
else:
logging.warning(f'No collection site or mail kit state. Skipping biobank_id: {bid}')
continue
new_array_member_obj = GenomicSetMember(
biobankId=bid,
genomicSetId=new_genomic_set.id,
participantId=samples_meta.pids[i],
nyFlag=_ny_flag,
sexAtBirth=samples_meta.sabs[i],
collectionTubeId=samples_meta.sample_ids[i],
validationStatus=(GenomicSetMemberStatus.INVALID if len(valid_flags) > 0
else GenomicSetMemberStatus.VALID),
validationFlags=valid_flags,
ai_an='Y' if samples_meta.is_ai_an[i] else 'N',
genomeType=self._ARRAY_GENOME_TYPE,
genomicWorkflowState=GenomicWorkflowState.AW0_READY,
genomicWorkflowStateStr=GenomicWorkflowState.AW0_READY.name,
participantOrigin=samples_meta.origins[i],
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
)
# Also create a WGS member
new_wgs_member_obj = deepcopy(new_array_member_obj)
new_wgs_member_obj.genomeType = self._WGS_GENOME_TYPE
bids.append(bid)
processed_array_wgs.extend([new_array_member_obj, new_wgs_member_obj])
count = i + 1
if count % 1000 == 0:
self.genomic_members_insert(
members=processed_array_wgs,
session=session
)
processed_array_wgs.clear()
bids.clear()
if count and processed_array_wgs:
self.genomic_members_insert(
members=processed_array_wgs,
session=session
)
# Create & transfer the Biobank Manifest based on the new genomic set
try:
if local:
return new_genomic_set.id
else:
create_and_upload_genomic_biobank_manifest_file(new_genomic_set.id,
cohort_id=cohort,
saliva=saliva)
# Handle Genomic States for manifests
for member in self.member_dao.get_members_from_set_id(new_genomic_set.id):
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='manifest-generated')
if new_state is not None or new_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, new_state)
logging.info(f'{self.__class__.__name__}: Genomic set members created ')
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
def create_matrix_and_process_samples(self, participants, cohort, local, saliva=False):
"""
Wrapper method for processing participants for C1 and C2 manifests
:param cohort:
:param participants:
:param local:
:param saliva:
:return:
"""
participant_matrix = self.GenomicSampleMeta(*participants)
for i, _bid in enumerate(participant_matrix.bids):
logging.info(f'Retrieving samples for PID: f{participant_matrix.pids[i]}')
blood_sample_data = None
if not saliva:
blood_sample_data = self._get_usable_blood_sample(pid=participant_matrix.pids[i],
bid=_bid)
saliva_sample_data = self._get_usable_saliva_sample(pid=participant_matrix.pids[i],
bid=_bid)
# Determine which sample ID to use
sample_data = self._determine_best_sample(blood_sample_data, saliva_sample_data)
# update the sample id, collected site, and biobank order
if sample_data is not None:
participant_matrix.sample_ids[i] = sample_data[0]
participant_matrix.site_ids[i] = sample_data[1]
participant_matrix.order_ids[i] = sample_data[2]
else:
logging.info(f'No valid samples for pid {participant_matrix.pids[i]}.')
# insert new members and make the manifest
return self.process_samples_into_manifest(
participant_matrix,
cohort=cohort,
saliva=saliva,
local=local
)
@staticmethod
def genomic_members_insert(*, members, session):
"""
Bulk save of member for genomic_set_member
batch updating of members
:param: members
:param: session
"""
try:
session.bulk_save_objects(members)
session.commit()
except Exception as e:
raise Exception("Error occurred on genomic member insert: {0}".format(e))
    def _get_new_biobank_samples(self, from_date):
        """
        Retrieves BiobankStoredSample objects with `rdr_created`
        after the last run of the new participant workflow job.
        The query filters out participants that do not match the
        genomic validation requirements.
        :param: from_date
        :return: list of tuples (bid, pid, biobank_identifier.value, collected_site_id)
        """
        _new_samples_sql = self.query.new_biobank_samples()
        params = {
            "sample_status_param": SampleStatus.RECEIVED.__int__(),
            "dob_param": GENOMIC_VALID_AGE,
            "general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
            "ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
            "from_date_param": from_date.strftime("%Y-%m-%d"),
            "withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
            "suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
            "cohort_3_param": ParticipantCohort.COHORT_3.__int__(),
            "ignore_param": GenomicWorkflowState.IGNORE.__int__(),
        }
        with self.samples_dao.session() as session:
            result = session.execute(_new_samples_sql, params).fetchall()
        # Collapse to at most one preferred sample per participant, then
        # transpose to column-major form for GenomicSampleMeta.
        result = self._prioritize_samples_by_participant(result)
        return list(zip(*result))[:-2]  # Slicing to remove the last two columns retrieved for prioritization
def _prioritize_samples_by_participant(self, sample_results):
preferred_samples = {}
for sample in sample_results:
preferred_sample = sample
previously_found_sample = preferred_samples.get(sample.participant_id, None)
if previously_found_sample is not None:
preferred_sample = self._determine_best_sample(previously_found_sample, sample)
preferred_samples[sample.participant_id] = preferred_sample
return [x for x in preferred_samples.values() if x is not None]
@staticmethod
def _determine_best_sample(sample_one, sample_two):
if sample_one is None:
return sample_two
if sample_two is None:
return sample_one
# Return the usable sample (status less than NOT_RECEIVED) if one is usable and the other isn't
if sample_one.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_two.status:
return sample_one
elif sample_two.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_two.status:
return sample_two
elif sample_one.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED) \
and sample_two.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED):
return None
# Both are usable
# Return the sample by the priority of the code: 1ED04, then 1ED10, and 1SAL2 last
test_codes_by_preference = ['1ED04', '1ED10', '1SAL2'] # most desirable first
samples_by_code = {}
for sample in [sample_one, sample_two]:
samples_by_code[sample.test] = sample
for test_code in test_codes_by_preference:
if samples_by_code.get(test_code):
return samples_by_code[test_code]
logging.error(f'Should have been able to select between '
f'{sample_one.biobank_stored_sample_id} and {sample_two.biobank_stored_sample_id}')
def _get_remaining_c2_samples(self):
_c2_participant_sql = self.query.remaining_c2_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_param": ParticipantCohort.COHORT_2.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_c2_participant_sql, params).fetchall()
result2 = self._prioritize_samples_by_participant(result)
return list(zip(*result2))[:-2]
def _get_remaining_c1_samples(self):
"""
Retrieves C1 participants and validation data.
"""
_c1_participant_sql = self.query.remaining_c1_samples()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_param": ParticipantCohort.COHORT_1.__int__(),
"c1_reconsent_param": COHORT_1_REVIEW_CONSENT_YES_CODE,
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_c1_participant_sql, params).fetchall()
result = self._prioritize_samples_by_participant(result)
return list(zip(*result))[:-2]
    def _get_long_read_participants(self, limit=None):
        """
        Retrieves participants based on filters that have
        been denoted to use in the long read pilot program
        :param limit: optional cap applied to the query
        :return: list of GenomicSetMember rows
        """
        with self.member_dao.session() as session:
            # Alias used to anti-join against any existing long_read member.
            gsm_alias = aliased(GenomicSetMember)
            result = session.query(GenomicSetMember).join(
                ParticipantSummary,
                GenomicSetMember.participantId == ParticipantSummary.participantId,
            ).join(
                ParticipantRaceAnswers,
                ParticipantRaceAnswers.participantId == ParticipantSummary.participantId,
            ).join(
                Code,
                ParticipantRaceAnswers.codeId == Code.codeId,
            ).join(
                GenomicGCValidationMetrics,
                GenomicSetMember.id == GenomicGCValidationMetrics.genomicSetMemberId,
            ).outerjoin(
                # Outer join + `gsm_alias.id.is_(None)` below excludes
                # participants who already have a long_read member.
                gsm_alias,
                sqlalchemy.and_(
                    gsm_alias.participantId == ParticipantSummary.participantId,
                    gsm_alias.genomeType == 'long_read'
                )
            ).filter(
                Code.value == 'WhatRaceEthnicity_Black',
                GenomicSetMember.genomeType.in_(['aou_wgs']),
                GenomicSetMember.genomicWorkflowState != GenomicWorkflowState.IGNORE,
                GenomicGCValidationMetrics.ignoreFlag == 0,
                GenomicGCValidationMetrics.contamination <= 0.01,
                ParticipantSummary.participantOrigin == 'vibrent',
                ParticipantSummary.ehrUpdateTime.isnot(None),
                gsm_alias.id.is_(None),
            ).distinct(gsm_alias.biobankId)
            if limit:
                result = result.limit(limit)
            return result.all()
def _get_usable_blood_sample(self, pid, bid):
"""
Select 1ED04 or 1ED10 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(blood_collected date, blood sample, blood site, blood order)
"""
_samples_sql = self.query.usable_blood_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_usable_saliva_sample(self, pid, bid):
"""
Select 1SAL2 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(saliva date, saliva sample, saliva site, saliva order)
"""
_samples_sql = self.query.usable_saliva_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_remaining_saliva_participants(self, _config):
_saliva_sql = self.query.remaining_saliva_participants(_config)
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_saliva_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _create_new_genomic_set(self):
"""Inserts a new genomic set for this run"""
attributes = {
'genomicSetName': f'new_participant_workflow_{self.run_id}',
'genomicSetCriteria': '.',
'genomicSetVersion': 1,
'genomicSetStatus': GenomicSetStatus.VALID,
}
new_set_obj = GenomicSet(**attributes)
inserted_set = self.set_dao.insert(new_set_obj)
return inserted_set
def _create_new_set_member(self, **kwargs):
"""Inserts new GenomicSetMember object"""
new_member_obj = GenomicSetMember(**kwargs)
return self.member_dao.insert(new_member_obj)
    def _get_new_york_flag_from_site(self, collected_site_id):
        """
        Looks up whether a collected site's state is NY
        :param collected_site_id: the id of the site
        :return: int (1 or 0 for NY or Not)
        """
        return int(self.site_dao.get(collected_site_id).state == 'NY')
    def _get_new_york_flag_from_state_id(self, state_id):
        """
        Looks up whether a collected site's state is NY
        :param state_id: the code ID for the state
        :return: int (1 or 0 for NY or Not)
        """
        # NOTE(review): assumes the code value is formatted like 'State_NY';
        # a value without an underscore-separated part would raise IndexError
        # here — confirm the code table guarantees this shape.
        return int(self.code_dao.get(state_id).value.split('_')[1] == 'NY')
def _calculate_validation_flags(self, validation_criteria):
"""
Determines validation and flags for genomic sample
:param validation_criteria:
:return: list of validation flags
"""
# Process validation flags for inserting into genomic_set_member
flags = [flag for (passing, flag) in
zip(validation_criteria, self._VALIDATION_FLAGS)
if not passing]
return flags
class ManifestDefinitionProvider:
    """
    Helper class to produce the definitions for each manifest
    """
    # Metadata for the various manifests
    # Bundles everything ManifestCompiler needs: which job-run field to stamp,
    # the source-data SQL (or DAO query + params), destination, filename,
    # output columns, and the workflow-state signal to fire after generation.
    ManifestDef = namedtuple('ManifestDef',
                             ["job_run_field",
                              "source_data",
                              "destination_bucket",
                              "output_filename",
                              "columns",
                              "signal",
                              "query",
                              "params"])
    def __init__(
        self,
        job_run_id=None,
        bucket_name=None,
        genome_type=None,
        cvl_site_id='rdr',
        **kwargs
    ):
        """
        :param job_run_id: id of the current job run
        :param bucket_name: destination bucket for generated manifests
        :param genome_type: genome type used to parameterize AW3 queries
        :param cvl_site_id: CVL site identifier embedded in CVL filenames
        """
        # Attributes
        self.job_run_id = job_run_id
        self.bucket_name = bucket_name
        self.cvl_site_id = cvl_site_id
        self.kwargs = kwargs
        self.genome_type = genome_type
        # NOTE(review): callers pass kwargs=kwargs, so the caller's options live
        # one level down under self.kwargs['kwargs'] — confirm against
        # ManifestCompiler.generate_and_transfer_manifest.
        self.query = GenomicQueryClass(
            input_manifest=self.kwargs['kwargs'].get('input_manifest'),
            genome_type=self.genome_type
        )
        self.query_dao = GenomicQueriesDao()
        # Output CSV column sets, keyed by manifest type.
        self.manifest_columns_config = {
            GenomicManifestTypes.GEM_A1: (
                'biobank_id',
                'sample_id',
                "sex_at_birth",
                "consent_for_ror",
                "date_of_consent_for_ror",
                "chipwellbarcode",
                "genome_center",
            ),
            GenomicManifestTypes.GEM_A3: (
                'biobank_id',
                'sample_id',
                'date_of_consent_removal',
            ),
            GenomicManifestTypes.CVL_W1IL_PGX: (
                'biobank_id',
                'sample_id',
                'vcf_raw_path',
                'vcf_raw_index_path',
                'vcf_raw_md5_path',
                'gvcf_path',
                'gvcf_md5_path',
                'cram_name',
                'sex_at_birth',
                'ny_flag',
                'genome_center',
                'consent_for_gror',
                'genome_type',
                'informing_loop_pgx',
                'aou_hdr_coverage',
                'contamination',
                'sex_ploidy'
            ),
            GenomicManifestTypes.CVL_W1IL_HDR: (
                'biobank_id',
                'sample_id',
                'vcf_raw_path',
                'vcf_raw_index_path',
                'vcf_raw_md5_path',
                'gvcf_path',
                'gvcf_md5_path',
                'cram_name',
                'sex_at_birth',
                'ny_flag',
                'genome_center',
                'consent_for_gror',
                'genome_type',
                'informing_loop_hdr',
                'aou_hdr_coverage',
                'contamination',
                'sex_ploidy'
            ),
            GenomicManifestTypes.CVL_W2W: (
                'biobank_id',
                'sample_id',
                'date_of_consent_removal'
            ),
            GenomicManifestTypes.CVL_W3SR: (
                "biobank_id",
                "sample_id",
                "parent_sample_id",
                "collection_tubeid",
                "sex_at_birth",
                "ny_flag",
                "genome_type",
                "site_name",
                "ai_an"
            ),
            GenomicManifestTypes.AW3_ARRAY: (
                "chipwellbarcode",
                "biobank_id",
                "sample_id",
                "biobankidsampleid",
                "sex_at_birth",
                "site_id",
                "red_idat_path",
                "red_idat_md5_path",
                "green_idat_path",
                "green_idat_md5_path",
                "vcf_path",
                "vcf_index_path",
                "vcf_md5_path",
                "callrate",
                "sex_concordance",
                "contamination",
                "processing_status",
                "research_id",
                "sample_source",
                "pipeline_id",
                "ai_an",
                "blocklisted",
                "blocklisted_reason"
            ),
            GenomicManifestTypes.AW3_WGS: (
                "biobank_id",
                "sample_id",
                "biobankidsampleid",
                "sex_at_birth",
                "site_id",
                "vcf_hf_path",
                "vcf_hf_index_path",
                "vcf_hf_md5_path",
                "cram_path",
                "cram_md5_path",
                "crai_path",
                "gvcf_path",
                "gvcf_md5_path",
                "contamination",
                "sex_concordance",
                "processing_status",
                "mean_coverage",
                "research_id",
                "sample_source",
                "mapped_reads_pct",
                "sex_ploidy",
                "ai_an",
                "blocklisted",
                "blocklisted_reason"
            ),
            GenomicManifestTypes.AW2F: (
                "PACKAGE_ID",
                "BIOBANKID_SAMPLEID",
                "BOX_STORAGEUNIT_ID",
                "BOX_ID/PLATE_ID",
                "WELL_POSITION",
                "SAMPLE_ID",
                "PARENT_SAMPLE_ID",
                "COLLECTION_TUBE_ID",
                "MATRIX_ID",
                "COLLECTION_DATE",
                "BIOBANK_ID",
                "SEX_AT_BIRTH",
                "AGE",
                "NY_STATE_(Y/N)",
                "SAMPLE_TYPE",
                "TREATMENTS",
                "QUANTITY_(uL)",
                "TOTAL_CONCENTRATION_(ng/uL)",
                "TOTAL_DNA(ng)",
                "VISIT_DESCRIPTION",
                "SAMPLE_SOURCE",
                "STUDY",
                "TRACKING_NUMBER",
                "CONTACT",
                "EMAIL",
                "STUDY_PI",
                "TEST_NAME",
                "FAILURE_MODE",
                "FAILURE_MODE_DESC",
                "PROCESSING_STATUS",
                "CONTAMINATION",
                "CONTAMINATION_CATEGORY",
                "CONSENT_FOR_ROR",
            ),
        }
    def _get_source_data_query(self, manifest_type):
        """
        Returns the query to use for manifest's source data
        :param manifest_type:
        :return: query object
        """
        return self.query.genomic_data_config.get(manifest_type)
    def get_def(self, manifest_type):
        """
        Returns the manifest definition based on manifest_type
        :param manifest_type:
        :return: ManifestDef()
        """
        # Timestamp embedded in every generated filename.
        now_formatted = clock.CLOCK.now().strftime("%Y-%m-%d-%H-%M-%S")
        def_config = {
            GenomicManifestTypes.GEM_A1: {
                'job_run_field': 'gemA1ManifestJobRunId',
                'output_filename': f'{GENOMIC_GEM_A1_MANIFEST_SUBFOLDER}/AoU_GEM_A1_manifest_{now_formatted}.csv',
                'signal': 'manifest-generated'
            },
            GenomicManifestTypes.GEM_A3: {
                'job_run_field': 'gemA3ManifestJobRunId',
                'output_filename': f'{GENOMIC_GEM_A3_MANIFEST_SUBFOLDER}/AoU_GEM_A3_manifest_{now_formatted}.csv',
                'signal': 'manifest-generated'
            },
            GenomicManifestTypes.CVL_W1IL_PGX: {
                'job_run_field': 'cvlW1ilPgxJobRunId',
                'output_filename':
                    f'{CVL_W1IL_PGX_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W1IL_'
                    f'{ResultsModuleType.PGXV1.name}_{now_formatted}.csv',
                'signal': 'manifest-generated',
                'query': self.query_dao.get_data_ready_for_w1il_manifest,
                'params': {
                    'module': 'pgx',
                    'cvl_id': self.cvl_site_id
                }
            },
            GenomicManifestTypes.CVL_W1IL_HDR: {
                'job_run_field': 'cvlW1ilHdrJobRunId',
                'output_filename':
                    f'{CVL_W1IL_HDR_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W1IL_'
                    f'{ResultsModuleType.HDRV1.name}_{now_formatted}.csv',
                'query': self.query_dao.get_data_ready_for_w1il_manifest,
                'params': {
                    'module': 'hdr',
                    'cvl_id': self.cvl_site_id
                }
            },
            GenomicManifestTypes.CVL_W2W: {
                'job_run_field': 'cvlW2wJobRunId',
                'output_filename':
                    f'{CVL_W2W_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W2W_{now_formatted}.csv',
                'query': self.query_dao.get_data_ready_for_w2w_manifest,
                'params': {
                    'cvl_id': self.cvl_site_id
                }
            },
            GenomicManifestTypes.CVL_W3SR: {
                'job_run_field': 'cvlW3srManifestJobRunID',
                'output_filename': f'{CVL_W3SR_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W3SR'
                                   f'_{now_formatted}.csv',
                'query': self.query_dao.get_w3sr_records,
                'params': {
                    'site_id': self.cvl_site_id
                }
            },
            GenomicManifestTypes.AW3_ARRAY: {
                'job_run_field': 'aw3ManifestJobRunID',
                'output_filename': f'{GENOMIC_AW3_ARRAY_SUBFOLDER}/AoU_DRCV_GEN_{now_formatted}.csv',
                'signal': 'bypass',
                'query': self.query_dao.get_aw3_array_records,
                'params': {
                    'genome_type': self.genome_type
                }
            },
            GenomicManifestTypes.AW3_WGS: {
                'job_run_field': 'aw3ManifestJobRunID',
                'output_filename': f'{GENOMIC_AW3_WGS_SUBFOLDER}/AoU_DRCV_SEQ_{now_formatted}.csv',
                'signal': 'bypass',
                'query': self.query_dao.get_aw3_wgs_records,
                'params': {
                    'genome_type': self.genome_type
                }
            },
            GenomicManifestTypes.AW2F: {
                # Placeholder filename; ManifestCompiler rewrites it from the
                # input AW1 manifest's file name at generation time.
                'job_run_field': 'aw2fManifestJobRunID',
                'output_filename': f'{BIOBANK_AW2F_SUBFOLDER}/GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv',
                'signal': 'bypass'
            }
        }
        def_config = def_config[manifest_type]
        # Missing keys (e.g. 'signal' for some CVL types) resolve to None.
        return self.ManifestDef(
            job_run_field=def_config.get('job_run_field'),
            source_data=self._get_source_data_query(manifest_type),
            destination_bucket=f'{self.bucket_name}',
            output_filename=def_config.get('output_filename'),
            columns=self.manifest_columns_config[manifest_type],
            signal=def_config.get('signal'),
            query=def_config.get('query'),
            params=def_config.get('params')
        )
class ManifestCompiler:
    """
    This component compiles Genomic manifests
    based on definitions provided by ManifestDefinitionProvider
    """
    def __init__(
        self,
        run_id=None,
        bucket_name=None,
        max_num=None,
        controller=None
    ):
        """
        :param run_id: id of the current job run
        :param bucket_name: destination bucket for generated manifests
        :param max_num: max records per output file; larger sets are chunked
        :param controller: job controller collecting generated-manifest info
        """
        self.run_id = run_id
        self.bucket_name = bucket_name
        self.max_num = max_num
        self.controller = controller
        # Set per-manifest during generate_and_transfer_manifest.
        self.output_file_name = None
        self.manifest_def = None
        self.def_provider = None
        # Dao components
        self.member_dao = GenomicSetMemberDao()
        self.metrics_dao = GenomicGCValidationMetricsDao()
        self.results_workflow_dao = GenomicResultWorkflowStateDao()
def generate_and_transfer_manifest(self, manifest_type, genome_type, version=None, **kwargs):
"""
Main execution method for ManifestCompiler
:return: result dict:
"code": (i.e. SUCCESS)
"feedback_file": None or feedback file record to update,
"record_count": integer
"""
self.def_provider = ManifestDefinitionProvider(
job_run_id=self.run_id,
bucket_name=self.bucket_name,
genome_type=genome_type,
cvl_site_id=self.controller.cvl_site_id,
kwargs=kwargs
)
self.manifest_def = self.def_provider.get_def(manifest_type)
source_data = self.pull_source_data()
if not source_data:
logging.info(f'No records found for manifest type: {manifest_type}.')
return {
"code": GenomicSubProcessResult.NO_FILES,
"record_count": 0,
}
validation_failed, message = self._validate_source_data(source_data, manifest_type)
if validation_failed:
message = f'{self.controller.job_id.name}: {message}'
self.controller.create_incident(
source_job_run_id=self.run_id,
code=GenomicIncidentCode.MANIFEST_GENERATE_DATA_VALIDATION_FAILED.name,
slack=True,
message=message
)
raise RuntimeError
if self.max_num and len(source_data) > self.max_num:
current_list, count = [], 0
for obj in source_data:
current_list.append(obj)
if len(current_list) == self.max_num:
count += 1
self.output_file_name = self.manifest_def.output_filename
self.output_file_name = f'{self.output_file_name.split(".csv")[0]}_{count}.csv'
file_path = f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{file_path}'
)
self._write_and_upload_manifest(current_list)
self.controller.manifests_generated.append({
'file_path': file_path,
'record_count': len(current_list)
})
current_list.clear()
if current_list:
count += 1
self.output_file_name = self.manifest_def.output_filename
self.output_file_name = f'{self.output_file_name.split(".csv")[0]}_{count}.csv'
file_path = f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{file_path}'
)
self._write_and_upload_manifest(current_list)
self.controller.manifests_generated.append({
'file_path': file_path,
'record_count': len(current_list)
})
else:
self.output_file_name = self.manifest_def.output_filename
# If the new manifest is a feedback manifest,
# it will have an input manifest
if "input_manifest" in kwargs.keys():
# AW2F manifest file name is based of of AW1
if manifest_type == GenomicManifestTypes.AW2F:
new_name = kwargs['input_manifest'].filePath.split('/')[-1]
new_name = new_name.replace('.csv', f'_contamination_{version}.csv')
self.output_file_name = self.manifest_def.output_filename.replace(
"GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv",
f"{new_name}"
)
file_path = f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{file_path}'
)
self._write_and_upload_manifest(source_data)
self.controller.manifests_generated.append({
'file_path': file_path,
'record_count': len(source_data)
})
for row in source_data:
sample_id = row.sampleId if hasattr(row, 'sampleId') else row.sample_id
member = self.member_dao.get_member_from_sample_id(sample_id, genome_type)
if not member:
raise NotFound(f"Cannot find genomic set member with sample ID {sample_id}")
if self.manifest_def.job_run_field:
self.controller.member_ids_for_update.append(member.id)
# Handle Genomic States for manifests
if self.manifest_def.signal != "bypass":
# genomic workflow state
new_wf_state = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=self.manifest_def.signal
)
if new_wf_state or new_wf_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, new_wf_state)
# result workflow state
cvl_manifest_data = CVLManifestData(manifest_type)
if cvl_manifest_data.is_cvl_manifest:
self.results_workflow_dao.insert_new_result_record(
member_id=member.id,
module_type=cvl_manifest_data.module_type,
state=cvl_manifest_data.result_state
)
# Updates job run field on set member
if self.controller.member_ids_for_update:
self.controller.execute_cloud_task({
'member_ids': list(set(self.controller.member_ids_for_update)),
'field': self.manifest_def.job_run_field,
'value': self.run_id,
'is_job_run': True
}, 'genomic_set_member_update_task')
return {
"code": GenomicSubProcessResult.SUCCESS,
}
def pull_source_data(self):
"""
Runs the source data query
:return: result set
"""
if self.manifest_def.query:
params = self.manifest_def.params or {}
return self.manifest_def.query(**params)
with self.member_dao.session() as session:
return session.execute(self.manifest_def.source_data).fetchall()
def _validate_source_data(self, data, manifest_type):
invalid = False
message = None
if manifest_type in [
GenomicManifestTypes.AW3_ARRAY,
GenomicManifestTypes.AW3_WGS
]:
prefix = get_biobank_id_prefix()
path_positions = []
biobank_ids, sample_ids, sex_at_birth = [], [], []
for i, col in enumerate(self.manifest_def.columns):
if 'sample_id' in col:
sample_ids = [row[i] for row in data]
if 'biobank_id' in col:
biobank_ids = [row[i] for row in data]
if 'sex_at_birth' in col:
sex_at_birth = [row[i] for row in data]
if '_path' in col:
path_positions.append(i)
needs_prefixes = any(bid for bid in biobank_ids if prefix not in bid)
if needs_prefixes:
message = 'Biobank IDs are missing correct prefix'
invalid = True
return invalid, message
biobank_ids.clear()
dup_sample_ids = {sample_id for sample_id in sample_ids if sample_ids.count(sample_id) > 1}
if dup_sample_ids:
message = f'Sample IDs {list(dup_sample_ids)} are not distinct'
invalid = True
return invalid, message
sample_ids.clear()
invalid_sex_values = any(val for val in sex_at_birth if val not in ['M', 'F', 'NA'])
if invalid_sex_values:
message = 'Invalid Sex at Birth values'
invalid = True
return invalid, message
sex_at_birth.clear()
for row in data:
for i, val in enumerate(row):
if i in path_positions and val:
if not val.startswith('gs://') \
or (val.startswith('gs://')
and len(val.split('gs://')[1].split('/')) < 3):
message = f'Path {val} is invalid formatting'
invalid = True
return invalid, message
return invalid, message
    def _write_and_upload_manifest(self, source_data):
        """
        writes data to csv file in bucket
        :param source_data: iterable of row tuples matching the manifest columns
        :return: result code
        """
        try:
            # Use SQL exporter
            exporter = SqlExporter(self.bucket_name)
            # self.output_file_name must be set by the caller before this runs.
            with exporter.open_cloud_writer(self.output_file_name) as writer:
                writer.write_header(self.manifest_def.columns)
                writer.write_rows(source_data)
            return GenomicSubProcessResult.SUCCESS
        except RuntimeError:
            return GenomicSubProcessResult.ERROR
class CVLManifestData:
    """Derives CVL result-workflow metadata from a manifest type."""
    result_state = None
    module_type = ResultsModuleType.HDRV1
    is_cvl_manifest = True

    def __init__(self, manifest_type: GenomicManifestTypes):
        self.manifest_type = manifest_type
        self.get_is_cvl_manifest()

    def get_is_cvl_manifest(self):
        """Flag non-CVL manifests; resolve module/result state for CVL ones."""
        if 'cvl' not in self.manifest_type.name.lower():
            self.is_cvl_manifest = False
            return
        self.get_module_type()
        self.get_result_state()

    def get_module_type(self) -> ResultsModuleType:
        """PGx manifests override the HDR default module type."""
        if 'pgx' in self.manifest_type.name.lower():
            self.module_type = ResultsModuleType.PGXV1
        return self.module_type

    def get_result_state(self) -> ResultsWorkflowState:
        """Map the manifest name (module suffix stripped) to a workflow state."""
        raw_name = self.manifest_type.name
        manifest_name = raw_name.rsplit('_', 1)[0] if raw_name.count('_') > 1 else raw_name
        self.result_state = ResultsWorkflowState.lookup_by_name(manifest_name)
        return self.result_state
| bsd-3-clause | 7fa3c3d895300a3a951fdd9eca2e26b4 | 38.630468 | 132 | 0.557239 | 4.009477 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/dao/code_dao.py | 1 | 12581 | from collections import defaultdict
import logging
from typing import Dict, List, Optional, Set
from werkzeug.exceptions import BadRequest
from sqlalchemy.orm import joinedload, Session
from rdr_service import clock
from rdr_service.dao.base_dao import BaseDao
from rdr_service.dao.cache_all_dao import CacheAllDao
from rdr_service.model.code import Code, CodeBook, CodeHistory, CodeType
from rdr_service.model.survey import Survey, SurveyQuestion, SurveyQuestionOption
from rdr_service.singletons import CODE_CACHE_INDEX
# Maps the "concept-type" property values found in codebook JSON concepts to
# the internal CodeType enum used when persisting Code rows.
_CODE_TYPE_MAP = {
    "Module Name": CodeType.MODULE,
    "Topic": CodeType.TOPIC,
    "Question": CodeType.QUESTION,
    "Answer": CodeType.ANSWER,
}
class CodeMap(object):
    """Maps (system, value) pairs to code ids; values match case-insensitively."""

    def __init__(self):
        self.codes = {}

    @staticmethod
    def _key(system, value):
        # Values are compared case-insensitively; systems are compared as-is.
        return (system, value.lower())

    def add(self, code: Code):
        """Register a code so it can later be looked up by its system and value."""
        self.codes[self._key(code.system, code.value)] = code.codeId

    def get(self, system, value):
        """Return the code id stored for (system, value), or None if absent."""
        return self.codes.get(self._key(system, value))

    def __len__(self):
        return len(self.codes)
class CodeBookDao(BaseDao):
    """DAO for CodeBook rows; also drives whole-codebook imports via CodeDao."""

    def __init__(self):
        super(CodeBookDao, self).__init__(CodeBook)
        self.code_dao = CodeDao()

    def insert_with_session(self, session, obj):
        """Insert a new codebook, marking it latest for its system and demoting
        the previously-latest one.

        Raises BadRequest if a codebook with the same system and version exists.
        """
        obj.created = clock.CLOCK.now()
        obj.latest = True
        old_latest = self.get_latest_with_session(session, obj.system)
        if old_latest:
            if old_latest.version == obj.version:
                raise BadRequest(f"Codebook with system {obj.system}, version {obj.version} already exists")
            # Demote the previously-latest codebook for this system.
            old_latest.latest = False
            session.merge(old_latest)
        super(CodeBookDao, self).insert_with_session(session, obj)
        return obj

    def get_latest_with_session(self, session, system):
        """Return the codebook currently flagged latest for the system, or None."""
        return session.query(CodeBook).filter(CodeBook.latest == True).filter(CodeBook.system == system).one_or_none()

    def get_id(self, obj):
        """Return the primary key of a CodeBook row."""
        return obj.codeBookId

    def _import_concept(self, session, existing_codes, concept, system, code_book_id, parent_id):
        """Recursively imports a concept and its descendants as codes.

        Existing codes will be updated; codes that weren't there before will be inserted. Codes that
        are in the database but not in the codebook will be left untouched.

        Returns the number of codes imported (0 when the concept type is unrecognized,
        in which case the whole subtree is skipped).
        """
        property_dict = {p["code"]: p["valueCode"] for p in concept["property"]}
        topic = property_dict["concept-topic"]
        value = concept["code"]
        # shortValue column is capped at 50 chars; prefer an explicit short-code.
        short_value = property_dict.get("short-code") or value[:50]
        display = concept["display"]
        code_type = _CODE_TYPE_MAP.get(property_dict["concept-type"])
        if code_type is None:
            logging.warning(
                f"Unrecognized concept type: {property_dict['concept-type']}, value: {value}; ignoring."
            )
            return 0
        code = Code(
            system=system,
            codeBookId=code_book_id,
            value=value,
            shortValue=short_value,
            display=display,
            topic=topic,
            codeType=code_type,
            mapped=True,
            parentId=parent_id,
        )
        existing_code = existing_codes.get((system, value))
        if existing_code:
            # Reuse the existing primary key so the row is updated in place.
            code.codeId = existing_code.codeId
            self.code_dao._do_update(session, code, existing_code)
        else:
            self.code_dao.insert_with_session(session, code)
        child_concepts = concept.get("concept")
        code_count = 1
        if child_concepts:
            # Flush so code.codeId is assigned before children reference it as parent.
            session.flush()
            for child_concept in child_concepts:
                code_count += self._import_concept(
                    session, existing_codes, child_concept, system, code_book_id, code.codeId
                )
        return code_count

    def import_codebook(self, codebook_json):
        """Imports a codebook and all codes inside it. Returns (new_codebook, imported_code_count)."""
        version = codebook_json["version"]
        num_concepts = len(codebook_json["concept"])
        logging.info(f"Importing {num_concepts} concepts into new CodeBook version {version}...")
        system = codebook_json["url"]
        codebook = CodeBook(name=codebook_json["name"], version=version, system=system)
        code_count = 0
        with self.session() as session:
            # Pre-fetch all Codes. This avoids any potential race conditions, and keeps a persistent
            # cache even though updates below invalidate the cache repeatedly.
            # Fetch within the session so later merges are faster.
            existing_codes = {
                (code.system, code.value): code for code in session.query(self.code_dao.model_type).all()
            }
            self.insert_with_session(session, codebook)
            session.flush()
            for i, concept in enumerate(codebook_json["concept"], start=1):
                logging.info(f"Importing root concept {i} of {num_concepts} ({concept.get('display')}).")
                code_count += self._import_concept(session, existing_codes, concept, system, codebook.codeBookId, None)
        logging.info(f"Finished, {code_count} codes imported.")
        return codebook, code_count
SYSTEM_AND_VALUE = ("system", "value")
class CodeDao(CacheAllDao):
    """DAO for Code rows, cached in memory and indexed by (system, value).

    Every insert/update also records a CodeHistory row so prior versions of a
    code are preserved.
    """

    def __init__(self, silent=False, use_cache=True):
        super(CodeDao, self).__init__(
            Code, cache_index=CODE_CACHE_INDEX, cache_ttl_seconds=600, index_field_keys=[SYSTEM_AND_VALUE]
        )
        self.silent = silent
        self.use_cache = use_cache
        # Lazily-built maps from code id to module / parent code values
        # (populated by _init_hierarchy_maps on first use).
        self._code_module_map: Optional[Dict[int, List[str]]] = None
        self._code_parent_map: Optional[Dict[int, List[str]]] = None

    def _load_cache(self):
        # Build the cache, then wire up parent/child links between cached codes.
        result = super(CodeDao, self)._load_cache()
        for code in list(result.id_to_entity.values()):
            if code.parentId is not None:
                parent = result.id_to_entity.get(code.parentId)
                if parent:
                    parent.children.append(code)
                    code.parent = parent
        return result

    def _add_history(self, session, obj):
        """Record the current state of a code as a CodeHistory row."""
        history = CodeHistory()
        history.fromdict(obj.asdict(), allow_pk=True)
        session.add(history)

    def insert_with_session(self, session, obj):
        obj.created = clock.CLOCK.now()
        super(CodeDao, self).insert_with_session(session, obj)
        # Flush the insert so that the code's ID gets assigned and can be copied to history.
        session.flush()
        self._add_history(session, obj)
        return obj

    def _validate_update(self, session, obj, existing_obj):
        # pylint: disable=unused-argument
        # Codes may only change as part of importing a new codebook version.
        if obj.codeBookId is None or existing_obj.codeBookId == obj.codeBookId:
            raise BadRequest("codeBookId must be set to a new value when updating a code")

    def _do_update(self, session, obj, existing_obj):
        # Preserve the original creation timestamp across updates.
        obj.created = existing_obj.created
        super(CodeDao, self)._do_update(session, obj, existing_obj)
        self._add_history(session, obj)

    def get_id(self, obj):
        return obj.codeId

    def get_code_with_session(self, session, system, value):
        return self._get_code_with_session(session, system, value)

    def _get_code_with_session(self, session, system, value):
        # In the context of an import, where this is called, don't use the cache.
        return session.query(Code).filter(Code.system == system).filter(Code.value == value).one_or_none()

    def get_code(self, system, value):
        """Return the Code for (system, value) from the cache when enabled,
        otherwise straight from the database. Returns None if not found."""
        if self.use_cache:
            return self._get_cache().index_maps[SYSTEM_AND_VALUE].get((system, value))
        else:
            with self.session() as session:
                # Fixed: a stray debug print() was left here; use lazy debug logging instead.
                logging.debug('looking for sys: %s, code: %s', system, value)
                return session.query(Code).filter(
                    Code.system == system,
                    Code.value == value
                ).one_or_none()

    def find_ancestor_of_type(self, code, code_type):
        """Walk up the parent chain and return the first code of the given type,
        or None when no ancestor (including the code itself) matches."""
        if code.codeType == code_type:
            return code
        if code.parentId:
            return self.find_ancestor_of_type(code.parent, code_type)
        return None

    def get_internal_id_code_map(self, code_map):
        """Accepts a map of (system, value) -> (display, code_type, parent_id) for codes found in a
        questionnaire or questionnaire response.

        Returns a map of (system, value) -> codeId for existing codes.

        Raises BadRequest when any of the requested codes are unrecognized.
        """
        # First get whatever is already in the cache.
        result_map = CodeMap()
        for system, value in list(code_map.keys()):
            code = self.get_code(system, value)
            if code:
                result_map.add(code)
        if len(result_map) == len(code_map):
            return result_map
        missing_codes = []
        with self.session() as session:
            for system, value in list(code_map.keys()):
                existing_code = result_map.get(system, value)
                if not existing_code:
                    # Check to see if it's in the database. (Normally it won't be.)
                    existing_code = self._get_code_with_session(session, system, value)
                    if existing_code:
                        result_map.add(existing_code)
                    else:
                        missing_codes.append(f'{value} (system: {system})')
        if missing_codes:
            raise BadRequest(
                f"The following code values were unrecognized: {', '.join(missing_codes)}"
            )
        return result_map

    def _init_hierarchy_maps(self, session: Session):
        """Constructs a set of data structures for accessing the parent and module code values for a given code id"""
        # Load all surveys with the question and option codes used
        query = session.query(
            Survey
        ).options(
            joinedload(Survey.code),
            joinedload(Survey.questions).joinedload(SurveyQuestion.code),
            joinedload(Survey.questions).joinedload(SurveyQuestion.options).joinedload(SurveyQuestionOption.code)
        )
        # For each question and option code id, store the parent and module code values
        module_map: Dict[int, Set[str]] = defaultdict(set)
        parent_map: Dict[int, Set[str]] = defaultdict(set)
        for survey in query.all():
            module_code_value = survey.code.value
            for question in survey.questions:
                question_code_id = question.code.codeId
                # A question's direct parent is its module.
                module_map[question_code_id].add(module_code_value)
                parent_map[question_code_id].add(module_code_value)
                question_code_value = question.code.value
                for option in question.options:
                    option_code_id = option.code.codeId
                    # An option belongs to the module, but its direct parent is the question.
                    module_map[option_code_id].add(module_code_value)
                    parent_map[option_code_id].add(question_code_value)
        # Go back through the sets of code values and sort them
        self._code_module_map = defaultdict(list)
        self._code_module_map.update(
            {code_id: sorted(module_value_set) for code_id, module_value_set in module_map.items()}
        )
        self._code_parent_map = defaultdict(list)
        self._code_parent_map.update(
            {code_id: sorted(parent_value_set) for code_id, parent_value_set in parent_map.items()}
        )

    def get_parent_values(self, code_id: int, session: Session) -> List[str]:
        """
        Returns the code values that have the given code id as a direct child.
        That will include any module code values where the given code is used as a question,
        and any question code values where the given code is used as an option.
        """
        # NOTE(review): an empty (but built) map is falsy, so this re-queries on
        # every call when there are no surveys — confirm that is intended.
        if not self._code_parent_map:
            self._init_hierarchy_maps(session)
        return self._code_parent_map[code_id]

    def get_module_values(self, code_id: int, session: Session) -> List[str]:
        """
        Returns the module code values where the given code id is used
        (either as a question or as an option to a question).
        """
        if not self._code_module_map:
            self._init_hierarchy_maps(session)
        return self._code_module_map[code_id]
class CodeHistoryDao(BaseDao):
    """DAO for the append-only CodeHistory table (written by CodeDao on every
    code insert/update)."""

    def __init__(self):
        super(CodeHistoryDao, self).__init__(CodeHistory)

    def get_id(self, obj):
        """Return the primary key of a CodeHistory row."""
        return obj.codeHistoryId
| bsd-3-clause | a1c40816b5e9107610121ea4d92498f7 | 40.65894 | 119 | 0.613226 | 3.996506 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/auditevent_tests.py | 1 | 28132 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import auditevent
from .fhirdate import FHIRDate
class AuditEventTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("AuditEvent", js["resourceType"])
return auditevent.AuditEvent(js)
def testAuditEvent1(self):
inst = self.instantiate_from("audit-event-example-login.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent1(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent1(inst2)
    def implAuditEvent1(self, inst):
        """Assert the expected field values for the login-example AuditEvent fixture."""
        self.assertEqual(inst.action, "E")
        self.assertEqual(inst.agent[0].altId, "601847123")
        self.assertEqual(inst.agent[0].name, "Grahame Grieve")
        self.assertEqual(inst.agent[0].network.address, "127.0.0.1")
        self.assertEqual(inst.agent[0].network.type, "2")
        self.assertTrue(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].userId.value, "95")
        self.assertEqual(inst.agent[1].altId, "6580")
        self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
        self.assertEqual(inst.agent[1].network.type, "1")
        self.assertFalse(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[1].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[1].userId.value, "2.16.840.1.113883.4.2|2.16.840.1.113883.4.2")
        self.assertEqual(inst.id, "example-login")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.recorded.date, FHIRDate("2013-06-20T23:41:23Z").date)
        self.assertEqual(inst.recorded.as_json(), "2013-06-20T23:41:23Z")
        self.assertEqual(inst.source.identifier.value, "hl7connect.healthintersections.com.au")
        self.assertEqual(inst.source.site, "Cloud")
        self.assertEqual(inst.source.type[0].code, "3")
        self.assertEqual(inst.source.type[0].display, "Web Server")
        self.assertEqual(inst.source.type[0].system, "http://hl7.org/fhir/security-source-type")
        self.assertEqual(inst.subtype[0].code, "110122")
        self.assertEqual(inst.subtype[0].display, "Login")
        self.assertEqual(inst.subtype[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "110114")
        self.assertEqual(inst.type.display, "User Authentication")
        self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent2(self):
inst = self.instantiate_from("audit-event-example-logout.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent2(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent2(inst2)
    def implAuditEvent2(self, inst):
        """Assert the expected field values for the logout-example AuditEvent fixture."""
        self.assertEqual(inst.action, "E")
        self.assertEqual(inst.agent[0].altId, "601847123")
        self.assertEqual(inst.agent[0].name, "Grahame Grieve")
        self.assertEqual(inst.agent[0].network.address, "127.0.0.1")
        self.assertEqual(inst.agent[0].network.type, "2")
        self.assertTrue(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].userId.value, "95")
        self.assertEqual(inst.agent[1].altId, "6580")
        self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
        self.assertEqual(inst.agent[1].network.type, "1")
        self.assertFalse(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[1].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[1].userId.value, "2.16.840.1.113883.4.2|2.16.840.1.113883.4.2")
        self.assertEqual(inst.id, "example-logout")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.recorded.date, FHIRDate("2013-06-20T23:46:41Z").date)
        self.assertEqual(inst.recorded.as_json(), "2013-06-20T23:46:41Z")
        self.assertEqual(inst.source.identifier.value, "hl7connect.healthintersections.com.au")
        self.assertEqual(inst.source.site, "Cloud")
        self.assertEqual(inst.source.type[0].code, "3")
        self.assertEqual(inst.source.type[0].display, "Web Server")
        self.assertEqual(inst.source.type[0].system, "http://hl7.org/fhir/security-source-type")
        self.assertEqual(inst.subtype[0].code, "110123")
        self.assertEqual(inst.subtype[0].display, "Logout")
        self.assertEqual(inst.subtype[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "110114")
        self.assertEqual(inst.type.display, "User Authentication")
        self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent3(self):
inst = self.instantiate_from("audit-event-example-media.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent3(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent3(inst2)
    def implAuditEvent3(self, inst):
        """Assert the expected field values for the media-export-example AuditEvent fixture."""
        self.assertEqual(inst.action, "R")
        self.assertFalse(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[0].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[0].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[0].userId.value, "ExportToMedia.app")
        self.assertEqual(inst.agent[1].altId, "601847123")
        self.assertEqual(inst.agent[1].name, "Grahame Grieve")
        self.assertTrue(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].userId.value, "95")
        self.assertEqual(inst.agent[2].media.code, "110033")
        self.assertEqual(inst.agent[2].media.display, "DVD")
        self.assertEqual(inst.agent[2].media.system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[2].name, "Media title: Hello World")
        self.assertFalse(inst.agent[2].requestor)
        self.assertEqual(inst.entity[0].identifier.value, "e3cdfc81a0d24bd^^^&2.16.840.1.113883.4.2&ISO")
        self.assertEqual(inst.entity[0].role.code, "1")
        self.assertEqual(inst.entity[0].role.display, "Patient")
        self.assertEqual(inst.entity[0].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[0].type.code, "1")
        self.assertEqual(inst.entity[0].type.display, "Person")
        self.assertEqual(inst.entity[0].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.entity[1].identifier.type.coding[0].code, "IHE XDS Metadata")
        self.assertEqual(inst.entity[1].identifier.type.coding[0].display, "submission set classificationNode")
        self.assertEqual(inst.entity[1].identifier.type.coding[0].system, "urn:uuid:a54d6aa5-d40d-43f9-88c5-b4633d873bdd")
        self.assertEqual(inst.entity[1].identifier.value, "e3cdfc81a0d24bd^^^&2.16.840.1.113883.4.2&ISO")
        self.assertEqual(inst.entity[1].role.code, "20")
        self.assertEqual(inst.entity[1].role.display, "Job")
        self.assertEqual(inst.entity[1].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[1].type.code, "2")
        self.assertEqual(inst.entity[1].type.display, "System Object")
        self.assertEqual(inst.entity[1].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.entity[2].type.code, "2")
        self.assertEqual(inst.entity[2].type.display, "System Object")
        self.assertEqual(inst.entity[2].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.id, "example-media")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-08-27T23:42:24Z").date)
        self.assertEqual(inst.recorded.as_json(), "2015-08-27T23:42:24Z")
        self.assertEqual(inst.source.identifier.value, "hl7connect.healthintersections.com.au")
        self.assertEqual(inst.subtype[0].code, "ITI-32")
        self.assertEqual(inst.subtype[0].display, "Distribute Document Set on Media")
        self.assertEqual(inst.subtype[0].system, "urn:oid:1.3.6.1.4.1.19376.1.2")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "110106")
        self.assertEqual(inst.type.display, "Export")
        self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent4(self):
inst = self.instantiate_from("audit-event-example-pixQuery.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent4(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent4(inst2)
    def implAuditEvent4(self, inst):
        """Assert the expected field values for the PIX-query-example AuditEvent fixture."""
        self.assertEqual(inst.action, "E")
        self.assertEqual(inst.agent[0].altId, "6580")
        self.assertEqual(inst.agent[0].network.address, "Workstation1.ehr.familyclinic.com")
        self.assertEqual(inst.agent[0].network.type, "1")
        self.assertFalse(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[0].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[0].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[0].userId.value, "2.16.840.1.113883.4.2|2.16.840.1.113883.4.2")
        self.assertEqual(inst.agent[1].altId, "601847123")
        self.assertEqual(inst.agent[1].name, "Grahame Grieve")
        self.assertTrue(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].userId.value, "95")
        self.assertEqual(inst.entity[0].identifier.value, "e3cdfc81a0d24bd^^^&2.16.840.1.113883.4.2&ISO")
        self.assertEqual(inst.entity[0].role.code, "1")
        self.assertEqual(inst.entity[0].role.display, "Patient")
        self.assertEqual(inst.entity[0].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[0].type.code, "1")
        self.assertEqual(inst.entity[0].type.display, "Person")
        self.assertEqual(inst.entity[0].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.entity[1].detail[0].type, "MSH-10")
        self.assertEqual(inst.entity[1].detail[0].value, "MS4yLjg0MC4xMTQzNTAuMS4xMy4wLjEuNy4xLjE=")
        self.assertEqual(inst.entity[1].role.code, "24")
        self.assertEqual(inst.entity[1].role.display, "Query")
        self.assertEqual(inst.entity[1].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[1].type.code, "2")
        self.assertEqual(inst.entity[1].type.display, "System Object")
        self.assertEqual(inst.entity[1].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.id, "example-pixQuery")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-08-26T23:42:24Z").date)
        self.assertEqual(inst.recorded.as_json(), "2015-08-26T23:42:24Z")
        self.assertEqual(inst.source.identifier.value, "hl7connect.healthintersections.com.au")
        self.assertEqual(inst.subtype[0].code, "ITI-9")
        self.assertEqual(inst.subtype[0].display, "PIX Query")
        self.assertEqual(inst.subtype[0].system, "urn:oid:1.3.6.1.4.1.19376.1.2")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "110112")
        self.assertEqual(inst.type.display, "Query")
        self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent5(self):
inst = self.instantiate_from("audit-event-example-search.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent5(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent5(inst2)
    def implAuditEvent5(self, inst):
        """Assert the expected field values for the search-example AuditEvent fixture."""
        self.assertEqual(inst.action, "E")
        self.assertEqual(inst.agent[0].altId, "601847123")
        self.assertEqual(inst.agent[0].name, "Grahame Grieve")
        self.assertTrue(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].userId.value, "95")
        self.assertEqual(inst.agent[1].altId, "6580")
        self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
        self.assertEqual(inst.agent[1].network.type, "1")
        self.assertFalse(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[1].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[1].userId.value, "2.16.840.1.113883.4.2|2.16.840.1.113883.4.2")
        self.assertEqual(inst.entity[0].query, "aHR0cDovL2ZoaXItZGV2LmhlYWx0aGludGVyc2VjdGlvbnMuY29tLmF1L29wZW4vRW5jb3VudGVyP3BhcnRpY2lwYW50PTEz")
        self.assertEqual(inst.entity[0].role.code, "24")
        self.assertEqual(inst.entity[0].role.display, "Query")
        self.assertEqual(inst.entity[0].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[0].type.code, "2")
        self.assertEqual(inst.entity[0].type.display, "System Object")
        self.assertEqual(inst.entity[0].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.id, "example-search")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-08-22T23:42:24Z").date)
        self.assertEqual(inst.recorded.as_json(), "2015-08-22T23:42:24Z")
        self.assertEqual(inst.source.identifier.value, "hl7connect.healthintersections.com.au")
        self.assertEqual(inst.source.site, "Cloud")
        self.assertEqual(inst.source.type[0].code, "3")
        self.assertEqual(inst.source.type[0].display, "Web Server")
        self.assertEqual(inst.source.type[0].system, "http://hl7.org/fhir/security-source-type")
        self.assertEqual(inst.subtype[0].code, "search")
        self.assertEqual(inst.subtype[0].display, "search")
        self.assertEqual(inst.subtype[0].system, "http://hl7.org/fhir/restful-interaction")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "rest")
        self.assertEqual(inst.type.display, "Restful Operation")
        self.assertEqual(inst.type.system, "http://hl7.org/fhir/audit-event-type")
def testAuditEvent6(self):
inst = self.instantiate_from("audit-event-example-vread.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent6(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent6(inst2)
    def implAuditEvent6(self, inst):
        """Assert the expected field values for the vread (REST) example AuditEvent fixture."""
        self.assertEqual(inst.action, "R")
        self.assertEqual(inst.agent[0].altId, "601847123")
        self.assertEqual(inst.agent[0].name, "Grahame Grieve")
        self.assertTrue(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].userId.value, "95")
        self.assertEqual(inst.agent[1].altId, "6580")
        self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
        self.assertEqual(inst.agent[1].network.type, "1")
        self.assertFalse(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[1].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[1].userId.value, "2.16.840.1.113883.4.2|2.16.840.1.113883.4.2")
        self.assertEqual(inst.entity[0].lifecycle.code, "6")
        self.assertEqual(inst.entity[0].lifecycle.display, "Access / Use")
        self.assertEqual(inst.entity[0].lifecycle.system, "http://hl7.org/fhir/dicom-audit-lifecycle")
        self.assertEqual(inst.entity[0].type.code, "2")
        self.assertEqual(inst.entity[0].type.display, "System Object")
        self.assertEqual(inst.entity[0].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.id, "example-rest")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.recorded.date, FHIRDate("2013-06-20T23:42:24Z").date)
        self.assertEqual(inst.recorded.as_json(), "2013-06-20T23:42:24Z")
        self.assertEqual(inst.source.identifier.value, "hl7connect.healthintersections.com.au")
        self.assertEqual(inst.source.site, "Cloud")
        self.assertEqual(inst.source.type[0].code, "3")
        self.assertEqual(inst.source.type[0].display, "Web Server")
        self.assertEqual(inst.source.type[0].system, "http://hl7.org/fhir/security-source-type")
        self.assertEqual(inst.subtype[0].code, "vread")
        self.assertEqual(inst.subtype[0].display, "vread")
        self.assertEqual(inst.subtype[0].system, "http://hl7.org/fhir/restful-interaction")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "rest")
        self.assertEqual(inst.type.display, "Restful Operation")
        self.assertEqual(inst.type.system, "http://hl7.org/fhir/audit-event-type")
def testAuditEvent7(self):
inst = self.instantiate_from("auditevent-example-disclosure.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent7(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent7(inst2)
    def implAuditEvent7(self, inst):
        """Assert the expected field values for the disclosure-example AuditEvent fixture.

        (Fixture strings, including the "Everthing" misspelling, come from the
        FHIR example data and must match exactly.)
        """
        self.assertEqual(inst.action, "R")
        self.assertEqual(inst.agent[0].altId, "notMe")
        self.assertEqual(inst.agent[0].name, "That guy everyone wishes would be caught")
        self.assertEqual(inst.agent[0].network.address, "custodian.net")
        self.assertEqual(inst.agent[0].network.type, "1")
        self.assertEqual(inst.agent[0].policy[0], "http://consent.com/yes")
        self.assertTrue(inst.agent[0].requestor)
        self.assertEqual(inst.agent[0].role[0].coding[0].code, "110153")
        self.assertEqual(inst.agent[0].role[0].coding[0].display, "Source Role ID")
        self.assertEqual(inst.agent[0].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[0].userId.value, "SomeIdiot@nowhere.com")
        self.assertEqual(inst.agent[1].network.address, "marketing.land")
        self.assertEqual(inst.agent[1].network.type, "1")
        self.assertEqual(inst.agent[1].purposeOfUse[0].coding[0].code, "HMARKT")
        self.assertEqual(inst.agent[1].purposeOfUse[0].coding[0].display, "healthcare marketing")
        self.assertEqual(inst.agent[1].purposeOfUse[0].coding[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertFalse(inst.agent[1].requestor)
        self.assertEqual(inst.agent[1].role[0].coding[0].code, "110152")
        self.assertEqual(inst.agent[1].role[0].coding[0].display, "Destination Role ID")
        self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.agent[1].userId.value, "Where")
        self.assertEqual(inst.entity[0].role.code, "1")
        self.assertEqual(inst.entity[0].role.display, "Patient")
        self.assertEqual(inst.entity[0].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[0].type.code, "1")
        self.assertEqual(inst.entity[0].type.display, "Person")
        self.assertEqual(inst.entity[0].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.entity[1].description, "data about Everthing important")
        self.assertEqual(inst.entity[1].identifier.value, "What.id")
        self.assertEqual(inst.entity[1].lifecycle.code, "11")
        self.assertEqual(inst.entity[1].lifecycle.display, "Disclosure")
        self.assertEqual(inst.entity[1].lifecycle.system, "http://hl7.org/fhir/dicom-audit-lifecycle")
        self.assertEqual(inst.entity[1].name, "Namne of What")
        self.assertEqual(inst.entity[1].role.code, "4")
        self.assertEqual(inst.entity[1].role.display, "Domain Resource")
        self.assertEqual(inst.entity[1].role.system, "http://hl7.org/fhir/object-role")
        self.assertEqual(inst.entity[1].securityLabel[0].code, "V")
        self.assertEqual(inst.entity[1].securityLabel[0].display, "very restricted")
        self.assertEqual(inst.entity[1].securityLabel[0].system, "http://hl7.org/fhir/v3/Confidentiality")
        self.assertEqual(inst.entity[1].securityLabel[1].code, "STD")
        self.assertEqual(inst.entity[1].securityLabel[1].display, "sexually transmitted disease information sensitivity")
        self.assertEqual(inst.entity[1].securityLabel[1].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.entity[1].securityLabel[2].code, "DELAU")
        self.assertEqual(inst.entity[1].securityLabel[2].display, "delete after use")
        self.assertEqual(inst.entity[1].securityLabel[2].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.entity[1].type.code, "2")
        self.assertEqual(inst.entity[1].type.display, "System Object")
        self.assertEqual(inst.entity[1].type.system, "http://hl7.org/fhir/object-type")
        self.assertEqual(inst.id, "example-disclosure")
        self.assertEqual(inst.outcome, "0")
        self.assertEqual(inst.outcomeDesc, "Successful Disclosure")
        self.assertEqual(inst.purposeOfEvent[0].coding[0].code, "HMARKT")
        self.assertEqual(inst.purposeOfEvent[0].coding[0].display, "healthcare marketing")
        self.assertEqual(inst.purposeOfEvent[0].coding[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.recorded.date, FHIRDate("2013-09-22T00:08:00Z").date)
        self.assertEqual(inst.recorded.as_json(), "2013-09-22T00:08:00Z")
        self.assertEqual(inst.source.identifier.value, "Watchers Accounting of Disclosures Application")
        self.assertEqual(inst.source.site, "Watcher")
        self.assertEqual(inst.source.type[0].code, "4")
        self.assertEqual(inst.source.type[0].display, "Application Server")
        self.assertEqual(inst.source.type[0].system, "http://hl7.org/fhir/security-source-type")
        self.assertEqual(inst.subtype[0].code, "Disclosure")
        self.assertEqual(inst.subtype[0].display, "HIPAA disclosure")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Disclosure by some idiot, for marketing reasons, to places unknown, of a Poor Sap, data about Everthing important.</div>")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.code, "110106")
        self.assertEqual(inst.type.display, "Export")
        self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent8(self):
    """Round-trip auditevent-example.json through the AuditEvent model.

    Parses the example file, checks the parsed values, serializes the
    instance back to JSON and re-checks the reconstructed copy.
    """
    instance = self.instantiate_from("auditevent-example.json")
    self.assertIsNotNone(instance, "Must have instantiated a AuditEvent instance")
    self.implAuditEvent8(instance)

    serialized = instance.as_json()
    self.assertEqual("AuditEvent", serialized["resourceType"])

    roundtripped = auditevent.AuditEvent(serialized)
    self.implAuditEvent8(roundtripped)
def implAuditEvent8(self, inst):
    """Assert the field values expected from auditevent-example.json.

    Shared helper so the same expectations run against both the freshly
    parsed instance and its JSON round-trip copy (see testAuditEvent8).
    """
    self.assertEqual(inst.action, "E")
    self.assertEqual(inst.agent[0].network.address, "127.0.0.1")
    self.assertEqual(inst.agent[0].network.type, "2")
    self.assertFalse(inst.agent[0].requestor)
    self.assertEqual(inst.agent[0].role[0].text, "Service User (Logon)")
    self.assertEqual(inst.agent[0].userId.value, "Grahame")
    self.assertEqual(inst.agent[1].altId, "6580")
    self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
    self.assertEqual(inst.agent[1].network.type, "1")
    self.assertFalse(inst.agent[1].requestor)
    self.assertEqual(inst.agent[1].role[0].coding[0].code, "110153")
    self.assertEqual(inst.agent[1].role[0].coding[0].display, "Source Role ID")
    self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
    self.assertEqual(inst.agent[1].userId.value, "2.16.840.1.113883.4.2|2.16.840.1.113883.4.2")
    self.assertEqual(inst.entity[0].identifier.type.coding[0].code, "SNO")
    self.assertEqual(inst.entity[0].identifier.type.coding[0].system, "http://hl7.org/fhir/identifier-type")
    self.assertEqual(inst.entity[0].identifier.type.text, "Dell Serial Number")
    self.assertEqual(inst.entity[0].identifier.value, "ABCDEF")
    self.assertEqual(inst.entity[0].lifecycle.code, "6")
    self.assertEqual(inst.entity[0].lifecycle.display, "Access / Use")
    self.assertEqual(inst.entity[0].lifecycle.system, "http://hl7.org/fhir/dicom-audit-lifecycle")
    self.assertEqual(inst.entity[0].name, "Grahame's Laptop")
    self.assertEqual(inst.entity[0].role.code, "4")
    self.assertEqual(inst.entity[0].role.display, "Domain Resource")
    self.assertEqual(inst.entity[0].role.system, "http://hl7.org/fhir/object-role")
    self.assertEqual(inst.entity[0].type.code, "4")
    self.assertEqual(inst.entity[0].type.display, "Other")
    self.assertEqual(inst.entity[0].type.system, "http://hl7.org/fhir/object-type")
    self.assertEqual(inst.id, "example")
    self.assertEqual(inst.outcome, "0")
    self.assertEqual(inst.recorded.date, FHIRDate("2012-10-25T22:04:27+11:00").date)
    self.assertEqual(inst.recorded.as_json(), "2012-10-25T22:04:27+11:00")
    self.assertEqual(inst.source.identifier.value, "Grahame's Laptop")
    self.assertEqual(inst.source.site, "Development")
    self.assertEqual(inst.source.type[0].code, "110122")
    self.assertEqual(inst.source.type[0].display, "Login")
    self.assertEqual(inst.source.type[0].system, "http://dicom.nema.org/resources/ontology/DCM")
    self.assertEqual(inst.subtype[0].code, "110120")
    self.assertEqual(inst.subtype[0].display, "Application Start")
    self.assertEqual(inst.subtype[0].system, "http://dicom.nema.org/resources/ontology/DCM")
    self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Application Start for under service login &quot;Grahame&quot; (id: Grahame's Test HL7Connect)</div>")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.type.code, "110100")
    self.assertEqual(inst.type.display, "Application Activity")
    self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
| bsd-3-clause | 016c4d872c91762f8e0a83b6769815e1 | 61.101545 | 207 | 0.679333 | 3.19319 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/visionprescription_tests.py | 1 | 6072 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import visionprescription
from .fhirdate import FHIRDate
class VisionPrescriptionTests(unittest.TestCase):
    """Round-trip unit tests for the VisionPrescription FHIR resource model.

    Each test parses a bundled example JSON file, asserts the parsed field
    values, serializes the instance back to JSON, then re-runs the same
    assertions against a copy rebuilt from that JSON.
    """

    def instantiate_from(self, filename):
        """Load *filename* from the FHIR test data directory and parse it.

        The directory is taken from the FHIR_UNITTEST_DATADIR environment
        variable (falling back to the current directory when unset).
        """
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("VisionPrescription", js["resourceType"])
        return visionprescription.VisionPrescription(js)

    def testVisionPrescription1(self):
        """Round-trip visionprescription-example-1.json (contact lens Rx)."""
        inst = self.instantiate_from("visionprescription-example-1.json")
        self.assertIsNotNone(inst, "Must have instantiated a VisionPrescription instance")
        self.implVisionPrescription1(inst)

        js = inst.as_json()
        self.assertEqual("VisionPrescription", js["resourceType"])
        inst2 = visionprescription.VisionPrescription(js)
        self.implVisionPrescription1(inst2)

    def implVisionPrescription1(self, inst):
        """Assert the field values expected from visionprescription-example-1.json."""
        self.assertEqual(inst.dateWritten.date, FHIRDate("2014-06-15").date)
        self.assertEqual(inst.dateWritten.as_json(), "2014-06-15")
        self.assertEqual(inst.dispense[0].add, 1.75)
        self.assertEqual(inst.dispense[0].axis, 160)
        self.assertEqual(inst.dispense[0].backCurve, 8.7)
        self.assertEqual(inst.dispense[0].brand, "OphthaGuard")
        self.assertEqual(inst.dispense[0].color, "green")
        self.assertEqual(inst.dispense[0].cylinder, -2.25)
        self.assertEqual(inst.dispense[0].diameter, 14.0)
        self.assertEqual(inst.dispense[0].duration.code, "month")
        self.assertEqual(inst.dispense[0].duration.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.dispense[0].duration.unit, "month")
        self.assertEqual(inst.dispense[0].duration.value, 1)
        self.assertEqual(inst.dispense[0].eye, "right")
        self.assertEqual(inst.dispense[0].note[0].text, "Shade treatment for extreme light sensitivity")
        self.assertEqual(inst.dispense[0].power, -2.75)
        self.assertEqual(inst.dispense[0].product.coding[0].code, "contact")
        self.assertEqual(inst.dispense[0].product.coding[0].system, "http://hl7.org/fhir/ex-visionprescriptionproduct")
        self.assertEqual(inst.dispense[1].add, 1.75)
        self.assertEqual(inst.dispense[1].axis, 160)
        self.assertEqual(inst.dispense[1].backCurve, 8.7)
        self.assertEqual(inst.dispense[1].brand, "OphthaGuard")
        self.assertEqual(inst.dispense[1].color, "green")
        self.assertEqual(inst.dispense[1].cylinder, -3.5)
        self.assertEqual(inst.dispense[1].diameter, 14.0)
        self.assertEqual(inst.dispense[1].duration.code, "month")
        self.assertEqual(inst.dispense[1].duration.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.dispense[1].duration.unit, "month")
        self.assertEqual(inst.dispense[1].duration.value, 1)
        self.assertEqual(inst.dispense[1].eye, "left")
        self.assertEqual(inst.dispense[1].note[0].text, "Shade treatment for extreme light sensitivity")
        self.assertEqual(inst.dispense[1].power, -2.75)
        self.assertEqual(inst.dispense[1].product.coding[0].code, "contact")
        self.assertEqual(inst.dispense[1].product.coding[0].system, "http://hl7.org/fhir/ex-visionprescriptionproduct")
        self.assertEqual(inst.id, "33124")
        self.assertEqual(inst.identifier[0].system, "http://www.happysight.com/prescription")
        self.assertEqual(inst.identifier[0].value, "15014")
        self.assertEqual(inst.reasonCodeableConcept.coding[0].code, "myopia")
        self.assertEqual(inst.reasonCodeableConcept.coding[0].system, "http://samplevisionreasoncodes.com")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Sample Contract Lens prescription</div>")
        self.assertEqual(inst.text.status, "generated")

    def testVisionPrescription2(self):
        """Round-trip visionprescription-example.json (spectacle lens Rx)."""
        inst = self.instantiate_from("visionprescription-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a VisionPrescription instance")
        self.implVisionPrescription2(inst)

        js = inst.as_json()
        self.assertEqual("VisionPrescription", js["resourceType"])
        inst2 = visionprescription.VisionPrescription(js)
        self.implVisionPrescription2(inst2)

    def implVisionPrescription2(self, inst):
        """Assert the field values expected from visionprescription-example.json."""
        self.assertEqual(inst.dateWritten.date, FHIRDate("2014-06-15").date)
        self.assertEqual(inst.dateWritten.as_json(), "2014-06-15")
        self.assertEqual(inst.dispense[0].add, 2.0)
        self.assertEqual(inst.dispense[0].base, "down")
        self.assertEqual(inst.dispense[0].eye, "right")
        self.assertEqual(inst.dispense[0].prism, 0.5)
        self.assertEqual(inst.dispense[0].product.coding[0].code, "lens")
        self.assertEqual(inst.dispense[0].product.coding[0].system, "http://hl7.org/fhir/ex-visionprescriptionproduct")
        self.assertEqual(inst.dispense[0].sphere, -2.0)
        self.assertEqual(inst.dispense[1].add, 2.0)
        self.assertEqual(inst.dispense[1].axis, 180)
        self.assertEqual(inst.dispense[1].base, "up")
        self.assertEqual(inst.dispense[1].cylinder, -0.5)
        self.assertEqual(inst.dispense[1].eye, "left")
        self.assertEqual(inst.dispense[1].prism, 0.5)
        self.assertEqual(inst.dispense[1].product.coding[0].code, "lens")
        self.assertEqual(inst.dispense[1].product.coding[0].system, "http://hl7.org/fhir/ex-visionprescriptionproduct")
        self.assertEqual(inst.dispense[1].sphere, -1.0)
        self.assertEqual(inst.id, "33123")
        self.assertEqual(inst.identifier[0].system, "http://www.happysight.com/prescription")
        self.assertEqual(inst.identifier[0].value, "15013")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | ea6e4546ddcaa9a4156e3c2f9ba79924 | 52.734513 | 126 | 0.689559 | 3.319847 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/client/client_libs/verify_environment.py | 1 | 3772 | #! /bin/env python
#
# Simply verify the environment is valid for running the client apps.
#
import argparse
# pylint: disable=superfluous-parens
# pylint: disable=broad-except
import importlib
import logging
import sys
from rdr_service.tools.tool_libs import GCPProcessContext
from rdr_service.services.gcp_utils import gcp_get_app_access_token, gcp_get_app_host_name
from rdr_service.services.system_utils import make_api_request, setup_logging, setup_i18n
# Shared application logger used throughout this tool.
_logger = logging.getLogger("rdr_logger")

# Sub-command name and description consumed by the tool framework,
# logging setup and the argparse parser below.
tool_cmd = "verify"
tool_desc = "test local environment"
class Verify(object):
    """Run sanity checks against the local client environment.

    Verifies that the required third-party python modules are importable
    and, when a real GCP project is targeted, that the RDR API is
    reachable and an OAuth2 application token can be obtained.
    """

    def __init__(self, args, gcp_env):
        """
        :param args: parsed command line arguments (see run() below).
        :param gcp_env: GCP environment context object.
        """
        self.args = args
        self.gcp_env = gcp_env

    def run(self):
        """
        Main program process
        :return: Exit code value; 0 on success, 1 if any check failed.
        """
        result = 0
        requests_mod = False
        #
        # python modules that need to be installed in the local system
        #
        modules = ["requests", "urllib3"]
        for module in modules:
            try:
                mod = importlib.import_module(module)
                _logger.info("found python module [{0}].".format(mod.__name__))
                if module == "requests":
                    requests_mod = True
            except ImportError:
                # Fixed: pip's install sub-command is "pip install <pkg>",
                # not "pip --install <pkg>".
                _logger.error('missing python [{0}] module, please run "pip install {0}"'.format(module))
                result = 1

        # Local targets have no API endpoint to exercise; stop after the
        # module checks.
        if self.args.project in ["localhost", "127.0.0.1"]:
            _logger.warning("unable to perform additional testing unless '--project' parameter is set.")
            return result

        # Try making some API calls to verify OAuth2 token functions.
        if requests_mod:
            host = gcp_get_app_host_name(self.args.project)
            url = "rdr/v1"
            _logger.info("attempting simple api request.")
            code, resp = make_api_request(host, url)
            if code != 200 or "version_id" not in resp:
                _logger.error("simple api request failed")
                return 1
            _logger.info("{0} version is [{1}].".format(host, resp["version_id"]))

            # verify OAuth2 token can be retrieved.  Google OAuth2 access
            # tokens conventionally start with "ya" -- a cheap plausibility
            # check, not a validation of the token itself.
            token = gcp_get_app_access_token()
            if token and token.startswith("ya"):
                _logger.info("verified app authentication token.")
                # TODO: Make an authenticated API call here. What APIs are avail for this?
            else:
                _logger.error("app authentication token verification failed.")
                result = 1
        else:
            # requests is missing, so the API checks cannot run.
            _logger.warning("skipping api request tests.")

        return result
def run():
    """Command line entry point for the environment verification tool.

    Configures logging/i18n, parses the command line, then executes the
    checks inside a GCP process context.

    :return: exit code produced by Verify.run().
    """
    # Set global debug value and setup application logging.
    debug_enabled = "--debug" in sys.argv
    log_file = "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
    setup_logging(_logger, tool_cmd, debug_enabled, log_file)
    setup_i18n()

    # Setup program arguments.
    parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
    parser.add_argument("--debug", help="Enable debug output", default=False, action="store_true")  # noqa
    parser.add_argument("--log-file", help="write output to a log file", default=False, action="store_true")  # noqa
    parser.add_argument("--project", help="gcp project name", default="localhost")  # noqa
    parser.add_argument("--account", help="pmi-ops account", default=None)  # noqa
    parser.add_argument("--service-account", help="gcp service account", default=None)  # noqa
    args = parser.parse_args()

    with GCPProcessContext(tool_cmd, args.project, args.account, args.service_account) as gcp_env:
        return Verify(args, gcp_env).run()
# --- Main Program Call ---
if __name__ == "__main__":
    # Propagate the tool's exit code to the shell.
    sys.exit(run())
| bsd-3-clause | c6ba25fae204def572202b1eb52526f7 | 32.981982 | 116 | 0.610551 | 4.034225 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/elementdefinition.py | 1 | 75790 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ElementDefinition) on 2019-05-07.
# 2019, SMART Health IT.
from . import backboneelement
class ElementDefinition(backboneelement.BackboneElement):
""" Definition of an element in a resource or extension.
Captures constraints on each element within the resource, profile, or
extension.
"""
resource_type = "ElementDefinition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.alias = None
""" Other names.
List of `str` items. """
self.base = None
""" Base definition information for tools.
Type `ElementDefinitionBase` (represented as `dict` in JSON). """
self.binding = None
""" ValueSet details if this is coded.
Type `ElementDefinitionBinding` (represented as `dict` in JSON). """
self.code = None
""" Corresponding codes in terminologies.
List of `Coding` items (represented as `dict` in JSON). """
self.comment = None
""" Comments about the use of this element.
Type `str`. """
self.condition = None
""" Reference to invariant about presence.
List of `str` items. """
self.constraint = None
""" Condition that must evaluate to true.
List of `ElementDefinitionConstraint` items (represented as `dict` in JSON). """
self.contentReference = None
""" Reference to definition of content for the element.
Type `str`. """
self.defaultValueAddress = None
""" Specified value if missing from instance.
Type `Address` (represented as `dict` in JSON). """
self.defaultValueAge = None
""" Specified value if missing from instance.
Type `Age` (represented as `dict` in JSON). """
self.defaultValueAnnotation = None
""" Specified value if missing from instance.
Type `Annotation` (represented as `dict` in JSON). """
self.defaultValueAttachment = None
""" Specified value if missing from instance.
Type `Attachment` (represented as `dict` in JSON). """
self.defaultValueBase64Binary = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueBoolean = None
""" Specified value if missing from instance.
Type `bool`. """
self.defaultValueCanonical = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueCode = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueCodeableConcept = None
""" Specified value if missing from instance.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.defaultValueCoding = None
""" Specified value if missing from instance.
Type `Coding` (represented as `dict` in JSON). """
self.defaultValueContactDetail = None
""" Specified value if missing from instance.
Type `ContactDetail` (represented as `dict` in JSON). """
self.defaultValueContactPoint = None
""" Specified value if missing from instance.
Type `ContactPoint` (represented as `dict` in JSON). """
self.defaultValueContributor = None
""" Specified value if missing from instance.
Type `Contributor` (represented as `dict` in JSON). """
self.defaultValueCount = None
""" Specified value if missing from instance.
Type `Count` (represented as `dict` in JSON). """
self.defaultValueDataRequirement = None
""" Specified value if missing from instance.
Type `DataRequirement` (represented as `dict` in JSON). """
self.defaultValueDate = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDateTime = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDecimal = None
""" Specified value if missing from instance.
Type `float`. """
self.defaultValueDistance = None
""" Specified value if missing from instance.
Type `Distance` (represented as `dict` in JSON). """
self.defaultValueDosage = None
""" Specified value if missing from instance.
Type `Dosage` (represented as `dict` in JSON). """
self.defaultValueDuration = None
""" Specified value if missing from instance.
Type `Duration` (represented as `dict` in JSON). """
self.defaultValueExpression = None
""" Specified value if missing from instance.
Type `Expression` (represented as `dict` in JSON). """
self.defaultValueHumanName = None
""" Specified value if missing from instance.
Type `HumanName` (represented as `dict` in JSON). """
self.defaultValueId = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueIdentifier = None
""" Specified value if missing from instance.
Type `Identifier` (represented as `dict` in JSON). """
self.defaultValueInstant = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueInteger = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueMarkdown = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueMoney = None
""" Specified value if missing from instance.
Type `Money` (represented as `dict` in JSON). """
self.defaultValueOid = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueParameterDefinition = None
""" Specified value if missing from instance.
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.defaultValuePeriod = None
""" Specified value if missing from instance.
Type `Period` (represented as `dict` in JSON). """
self.defaultValuePositiveInt = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueQuantity = None
""" Specified value if missing from instance.
Type `Quantity` (represented as `dict` in JSON). """
self.defaultValueRange = None
""" Specified value if missing from instance.
Type `Range` (represented as `dict` in JSON). """
self.defaultValueRatio = None
""" Specified value if missing from instance.
Type `Ratio` (represented as `dict` in JSON). """
self.defaultValueReference = None
""" Specified value if missing from instance.
Type `FHIRReference` (represented as `dict` in JSON). """
self.defaultValueRelatedArtifact = None
""" Specified value if missing from instance.
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.defaultValueSampledData = None
""" Specified value if missing from instance.
Type `SampledData` (represented as `dict` in JSON). """
self.defaultValueSignature = None
""" Specified value if missing from instance.
Type `Signature` (represented as `dict` in JSON). """
self.defaultValueString = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueTime = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueTiming = None
""" Specified value if missing from instance.
Type `Timing` (represented as `dict` in JSON). """
self.defaultValueTriggerDefinition = None
""" Specified value if missing from instance.
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.defaultValueUnsignedInt = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueUri = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueUrl = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueUsageContext = None
""" Specified value if missing from instance.
Type `UsageContext` (represented as `dict` in JSON). """
self.defaultValueUuid = None
""" Specified value if missing from instance.
Type `str`. """
self.definition = None
""" Full formal definition as narrative text.
Type `str`. """
self.example = None
""" Example value (as defined for type).
List of `ElementDefinitionExample` items (represented as `dict` in JSON). """
self.fixedAddress = None
""" Value must be exactly this.
Type `Address` (represented as `dict` in JSON). """
self.fixedAge = None
""" Value must be exactly this.
Type `Age` (represented as `dict` in JSON). """
self.fixedAnnotation = None
""" Value must be exactly this.
Type `Annotation` (represented as `dict` in JSON). """
self.fixedAttachment = None
""" Value must be exactly this.
Type `Attachment` (represented as `dict` in JSON). """
self.fixedBase64Binary = None
""" Value must be exactly this.
Type `str`. """
self.fixedBoolean = None
""" Value must be exactly this.
Type `bool`. """
self.fixedCanonical = None
""" Value must be exactly this.
Type `str`. """
self.fixedCode = None
""" Value must be exactly this.
Type `str`. """
self.fixedCodeableConcept = None
""" Value must be exactly this.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.fixedCoding = None
""" Value must be exactly this.
Type `Coding` (represented as `dict` in JSON). """
self.fixedContactDetail = None
""" Value must be exactly this.
Type `ContactDetail` (represented as `dict` in JSON). """
self.fixedContactPoint = None
""" Value must be exactly this.
Type `ContactPoint` (represented as `dict` in JSON). """
self.fixedContributor = None
""" Value must be exactly this.
Type `Contributor` (represented as `dict` in JSON). """
self.fixedCount = None
""" Value must be exactly this.
Type `Count` (represented as `dict` in JSON). """
self.fixedDataRequirement = None
""" Value must be exactly this.
Type `DataRequirement` (represented as `dict` in JSON). """
self.fixedDate = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDateTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDecimal = None
""" Value must be exactly this.
Type `float`. """
self.fixedDistance = None
""" Value must be exactly this.
Type `Distance` (represented as `dict` in JSON). """
self.fixedDosage = None
""" Value must be exactly this.
Type `Dosage` (represented as `dict` in JSON). """
self.fixedDuration = None
""" Value must be exactly this.
Type `Duration` (represented as `dict` in JSON). """
self.fixedExpression = None
""" Value must be exactly this.
Type `Expression` (represented as `dict` in JSON). """
self.fixedHumanName = None
""" Value must be exactly this.
Type `HumanName` (represented as `dict` in JSON). """
self.fixedId = None
""" Value must be exactly this.
Type `str`. """
self.fixedIdentifier = None
""" Value must be exactly this.
Type `Identifier` (represented as `dict` in JSON). """
self.fixedInstant = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedInteger = None
""" Value must be exactly this.
Type `int`. """
self.fixedMarkdown = None
""" Value must be exactly this.
Type `str`. """
self.fixedMoney = None
""" Value must be exactly this.
Type `Money` (represented as `dict` in JSON). """
self.fixedOid = None
""" Value must be exactly this.
Type `str`. """
self.fixedParameterDefinition = None
""" Value must be exactly this.
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.fixedPeriod = None
""" Value must be exactly this.
Type `Period` (represented as `dict` in JSON). """
self.fixedPositiveInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedQuantity = None
""" Value must be exactly this.
Type `Quantity` (represented as `dict` in JSON). """
self.fixedRange = None
""" Value must be exactly this.
Type `Range` (represented as `dict` in JSON). """
self.fixedRatio = None
""" Value must be exactly this.
Type `Ratio` (represented as `dict` in JSON). """
self.fixedReference = None
""" Value must be exactly this.
Type `FHIRReference` (represented as `dict` in JSON). """
self.fixedRelatedArtifact = None
""" Value must be exactly this.
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.fixedSampledData = None
""" Value must be exactly this.
Type `SampledData` (represented as `dict` in JSON). """
self.fixedSignature = None
""" Value must be exactly this.
Type `Signature` (represented as `dict` in JSON). """
self.fixedString = None
""" Value must be exactly this.
Type `str`. """
self.fixedTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedTiming = None
""" Value must be exactly this.
Type `Timing` (represented as `dict` in JSON). """
self.fixedTriggerDefinition = None
""" Value must be exactly this.
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.fixedUnsignedInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedUri = None
""" Value must be exactly this.
Type `str`. """
self.fixedUrl = None
""" Value must be exactly this.
Type `str`. """
self.fixedUsageContext = None
""" Value must be exactly this.
Type `UsageContext` (represented as `dict` in JSON). """
self.fixedUuid = None
""" Value must be exactly this.
Type `str`. """
self.isModifier = None
""" If this modifies the meaning of other elements.
Type `bool`. """
self.isModifierReason = None
""" Reason that this element is marked as a modifier.
Type `str`. """
self.isSummary = None
""" Include when _summary = true?.
Type `bool`. """
self.label = None
""" Name for element to display with or prompt for element.
Type `str`. """
self.mapping = None
""" Map element to another set of definitions.
List of `ElementDefinitionMapping` items (represented as `dict` in JSON). """
self.max = None
""" Maximum Cardinality (a number or *).
Type `str`. """
self.maxLength = None
""" Max length for strings.
Type `int`. """
self.maxValueDate = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDateTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDecimal = None
""" Maximum Allowed Value (for some types).
Type `float`. """
self.maxValueInstant = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueInteger = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValuePositiveInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValueQuantity = None
""" Maximum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.maxValueTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueUnsignedInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.meaningWhenMissing = None
""" Implicit meaning when this element is missing.
Type `str`. """
self.min = None
""" Minimum Cardinality.
Type `int`. """
self.minValueDate = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDateTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDecimal = None
""" Minimum Allowed Value (for some types).
Type `float`. """
self.minValueInstant = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueInteger = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValuePositiveInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValueQuantity = None
""" Minimum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.minValueTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueUnsignedInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.mustSupport = None
""" If the element must be supported.
Type `bool`. """
self.orderMeaning = None
""" What the order of the elements means.
Type `str`. """
self.path = None
""" Path of the element in the hierarchy of elements.
Type `str`. """
self.patternAddress = None
""" Value must have at least these property values.
Type `Address` (represented as `dict` in JSON). """
self.patternAge = None
""" Value must have at least these property values.
Type `Age` (represented as `dict` in JSON). """
self.patternAnnotation = None
""" Value must have at least these property values.
Type `Annotation` (represented as `dict` in JSON). """
self.patternAttachment = None
""" Value must have at least these property values.
Type `Attachment` (represented as `dict` in JSON). """
self.patternBase64Binary = None
""" Value must have at least these property values.
Type `str`. """
self.patternBoolean = None
""" Value must have at least these property values.
Type `bool`. """
self.patternCanonical = None
""" Value must have at least these property values.
Type `str`. """
self.patternCode = None
""" Value must have at least these property values.
Type `str`. """
self.patternCodeableConcept = None
""" Value must have at least these property values.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.patternCoding = None
""" Value must have at least these property values.
Type `Coding` (represented as `dict` in JSON). """
self.patternContactDetail = None
""" Value must have at least these property values.
Type `ContactDetail` (represented as `dict` in JSON). """
self.patternContactPoint = None
""" Value must have at least these property values.
Type `ContactPoint` (represented as `dict` in JSON). """
self.patternContributor = None
""" Value must have at least these property values.
Type `Contributor` (represented as `dict` in JSON). """
self.patternCount = None
""" Value must have at least these property values.
Type `Count` (represented as `dict` in JSON). """
self.patternDataRequirement = None
""" Value must have at least these property values.
Type `DataRequirement` (represented as `dict` in JSON). """
self.patternDate = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDateTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDecimal = None
""" Value must have at least these property values.
Type `float`. """
self.patternDistance = None
""" Value must have at least these property values.
Type `Distance` (represented as `dict` in JSON). """
self.patternDosage = None
""" Value must have at least these property values.
Type `Dosage` (represented as `dict` in JSON). """
self.patternDuration = None
""" Value must have at least these property values.
Type `Duration` (represented as `dict` in JSON). """
self.patternExpression = None
""" Value must have at least these property values.
Type `Expression` (represented as `dict` in JSON). """
self.patternHumanName = None
""" Value must have at least these property values.
Type `HumanName` (represented as `dict` in JSON). """
self.patternId = None
""" Value must have at least these property values.
Type `str`. """
self.patternIdentifier = None
""" Value must have at least these property values.
Type `Identifier` (represented as `dict` in JSON). """
self.patternInstant = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternInteger = None
""" Value must have at least these property values.
Type `int`. """
self.patternMarkdown = None
""" Value must have at least these property values.
Type `str`. """
self.patternMoney = None
""" Value must have at least these property values.
Type `Money` (represented as `dict` in JSON). """
self.patternOid = None
""" Value must have at least these property values.
Type `str`. """
self.patternParameterDefinition = None
""" Value must have at least these property values.
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.patternPeriod = None
""" Value must have at least these property values.
Type `Period` (represented as `dict` in JSON). """
self.patternPositiveInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternQuantity = None
""" Value must have at least these property values.
Type `Quantity` (represented as `dict` in JSON). """
self.patternRange = None
""" Value must have at least these property values.
Type `Range` (represented as `dict` in JSON). """
self.patternRatio = None
""" Value must have at least these property values.
Type `Ratio` (represented as `dict` in JSON). """
self.patternReference = None
""" Value must have at least these property values.
Type `FHIRReference` (represented as `dict` in JSON). """
self.patternRelatedArtifact = None
""" Value must have at least these property values.
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.patternSampledData = None
""" Value must have at least these property values.
Type `SampledData` (represented as `dict` in JSON). """
self.patternSignature = None
""" Value must have at least these property values.
Type `Signature` (represented as `dict` in JSON). """
self.patternString = None
""" Value must have at least these property values.
Type `str`. """
self.patternTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternTiming = None
""" Value must have at least these property values.
Type `Timing` (represented as `dict` in JSON). """
self.patternTriggerDefinition = None
""" Value must have at least these property values.
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.patternUnsignedInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternUri = None
""" Value must have at least these property values.
Type `str`. """
self.patternUrl = None
""" Value must have at least these property values.
Type `str`. """
self.patternUsageContext = None
""" Value must have at least these property values.
Type `UsageContext` (represented as `dict` in JSON). """
self.patternUuid = None
""" Value must have at least these property values.
Type `str`. """
self.representation = None
""" xmlAttr | xmlText | typeAttr | cdaText | xhtml.
List of `str` items. """
self.requirements = None
""" Why this resource has been created.
Type `str`. """
self.short = None
""" Concise definition for space-constrained presentation.
Type `str`. """
self.sliceIsConstraining = None
""" If this slice definition constrains an inherited slice definition
(or not).
Type `bool`. """
self.sliceName = None
""" Name for this particular element (in a set of slices).
Type `str`. """
self.slicing = None
""" This element is sliced - slices follow.
Type `ElementDefinitionSlicing` (represented as `dict` in JSON). """
self.type = None
""" Data type and Profile for this element.
List of `ElementDefinitionType` items (represented as `dict` in JSON). """
super(ElementDefinition, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Describe this resource's properties for JSON (de)serialization.

        Returns the inherited property list extended with every
        ElementDefinition property.  Each entry is a 6-tuple:
        (attribute name, JSON property name, value type, is a list,
        "one of many" choice-group name or None, required).
        """
        js = super(ElementDefinition, self).elementProperties()
        js.extend([
            ("alias", "alias", str, True, None, False),
            ("base", "base", ElementDefinitionBase, False, None, False),
            ("binding", "binding", ElementDefinitionBinding, False, None, False),
            ("code", "code", coding.Coding, True, None, False),
            ("comment", "comment", str, False, None, False),
            ("condition", "condition", str, True, None, False),
            ("constraint", "constraint", ElementDefinitionConstraint, True, None, False),
            ("contentReference", "contentReference", str, False, None, False),
            # defaultValue[x] choice: at most one of the "defaultValue" group may be set.
            ("defaultValueAddress", "defaultValueAddress", address.Address, False, "defaultValue", False),
            ("defaultValueAge", "defaultValueAge", age.Age, False, "defaultValue", False),
            ("defaultValueAnnotation", "defaultValueAnnotation", annotation.Annotation, False, "defaultValue", False),
            ("defaultValueAttachment", "defaultValueAttachment", attachment.Attachment, False, "defaultValue", False),
            ("defaultValueBase64Binary", "defaultValueBase64Binary", str, False, "defaultValue", False),
            ("defaultValueBoolean", "defaultValueBoolean", bool, False, "defaultValue", False),
            ("defaultValueCanonical", "defaultValueCanonical", str, False, "defaultValue", False),
            ("defaultValueCode", "defaultValueCode", str, False, "defaultValue", False),
            ("defaultValueCodeableConcept", "defaultValueCodeableConcept", codeableconcept.CodeableConcept, False, "defaultValue", False),
            ("defaultValueCoding", "defaultValueCoding", coding.Coding, False, "defaultValue", False),
            ("defaultValueContactDetail", "defaultValueContactDetail", contactdetail.ContactDetail, False, "defaultValue", False),
            ("defaultValueContactPoint", "defaultValueContactPoint", contactpoint.ContactPoint, False, "defaultValue", False),
            ("defaultValueContributor", "defaultValueContributor", contributor.Contributor, False, "defaultValue", False),
            ("defaultValueCount", "defaultValueCount", count.Count, False, "defaultValue", False),
            ("defaultValueDataRequirement", "defaultValueDataRequirement", datarequirement.DataRequirement, False, "defaultValue", False),
            ("defaultValueDate", "defaultValueDate", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueDateTime", "defaultValueDateTime", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueDecimal", "defaultValueDecimal", float, False, "defaultValue", False),
            ("defaultValueDistance", "defaultValueDistance", distance.Distance, False, "defaultValue", False),
            ("defaultValueDosage", "defaultValueDosage", dosage.Dosage, False, "defaultValue", False),
            ("defaultValueDuration", "defaultValueDuration", duration.Duration, False, "defaultValue", False),
            ("defaultValueExpression", "defaultValueExpression", expression.Expression, False, "defaultValue", False),
            ("defaultValueHumanName", "defaultValueHumanName", humanname.HumanName, False, "defaultValue", False),
            ("defaultValueId", "defaultValueId", str, False, "defaultValue", False),
            ("defaultValueIdentifier", "defaultValueIdentifier", identifier.Identifier, False, "defaultValue", False),
            ("defaultValueInstant", "defaultValueInstant", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueInteger", "defaultValueInteger", int, False, "defaultValue", False),
            ("defaultValueMarkdown", "defaultValueMarkdown", str, False, "defaultValue", False),
            ("defaultValueMoney", "defaultValueMoney", money.Money, False, "defaultValue", False),
            ("defaultValueOid", "defaultValueOid", str, False, "defaultValue", False),
            ("defaultValueParameterDefinition", "defaultValueParameterDefinition", parameterdefinition.ParameterDefinition, False, "defaultValue", False),
            ("defaultValuePeriod", "defaultValuePeriod", period.Period, False, "defaultValue", False),
            ("defaultValuePositiveInt", "defaultValuePositiveInt", int, False, "defaultValue", False),
            ("defaultValueQuantity", "defaultValueQuantity", quantity.Quantity, False, "defaultValue", False),
            ("defaultValueRange", "defaultValueRange", range.Range, False, "defaultValue", False),
            ("defaultValueRatio", "defaultValueRatio", ratio.Ratio, False, "defaultValue", False),
            ("defaultValueReference", "defaultValueReference", fhirreference.FHIRReference, False, "defaultValue", False),
            ("defaultValueRelatedArtifact", "defaultValueRelatedArtifact", relatedartifact.RelatedArtifact, False, "defaultValue", False),
            ("defaultValueSampledData", "defaultValueSampledData", sampleddata.SampledData, False, "defaultValue", False),
            ("defaultValueSignature", "defaultValueSignature", signature.Signature, False, "defaultValue", False),
            ("defaultValueString", "defaultValueString", str, False, "defaultValue", False),
            ("defaultValueTime", "defaultValueTime", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueTiming", "defaultValueTiming", timing.Timing, False, "defaultValue", False),
            ("defaultValueTriggerDefinition", "defaultValueTriggerDefinition", triggerdefinition.TriggerDefinition, False, "defaultValue", False),
            ("defaultValueUnsignedInt", "defaultValueUnsignedInt", int, False, "defaultValue", False),
            ("defaultValueUri", "defaultValueUri", str, False, "defaultValue", False),
            ("defaultValueUrl", "defaultValueUrl", str, False, "defaultValue", False),
            ("defaultValueUsageContext", "defaultValueUsageContext", usagecontext.UsageContext, False, "defaultValue", False),
            ("defaultValueUuid", "defaultValueUuid", str, False, "defaultValue", False),
            ("definition", "definition", str, False, None, False),
            ("example", "example", ElementDefinitionExample, True, None, False),
            # fixed[x] choice: at most one of the "fixed" group may be set.
            ("fixedAddress", "fixedAddress", address.Address, False, "fixed", False),
            ("fixedAge", "fixedAge", age.Age, False, "fixed", False),
            ("fixedAnnotation", "fixedAnnotation", annotation.Annotation, False, "fixed", False),
            ("fixedAttachment", "fixedAttachment", attachment.Attachment, False, "fixed", False),
            ("fixedBase64Binary", "fixedBase64Binary", str, False, "fixed", False),
            ("fixedBoolean", "fixedBoolean", bool, False, "fixed", False),
            ("fixedCanonical", "fixedCanonical", str, False, "fixed", False),
            ("fixedCode", "fixedCode", str, False, "fixed", False),
            ("fixedCodeableConcept", "fixedCodeableConcept", codeableconcept.CodeableConcept, False, "fixed", False),
            ("fixedCoding", "fixedCoding", coding.Coding, False, "fixed", False),
            ("fixedContactDetail", "fixedContactDetail", contactdetail.ContactDetail, False, "fixed", False),
            ("fixedContactPoint", "fixedContactPoint", contactpoint.ContactPoint, False, "fixed", False),
            ("fixedContributor", "fixedContributor", contributor.Contributor, False, "fixed", False),
            ("fixedCount", "fixedCount", count.Count, False, "fixed", False),
            ("fixedDataRequirement", "fixedDataRequirement", datarequirement.DataRequirement, False, "fixed", False),
            ("fixedDate", "fixedDate", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedDateTime", "fixedDateTime", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedDecimal", "fixedDecimal", float, False, "fixed", False),
            ("fixedDistance", "fixedDistance", distance.Distance, False, "fixed", False),
            ("fixedDosage", "fixedDosage", dosage.Dosage, False, "fixed", False),
            ("fixedDuration", "fixedDuration", duration.Duration, False, "fixed", False),
            ("fixedExpression", "fixedExpression", expression.Expression, False, "fixed", False),
            ("fixedHumanName", "fixedHumanName", humanname.HumanName, False, "fixed", False),
            ("fixedId", "fixedId", str, False, "fixed", False),
            ("fixedIdentifier", "fixedIdentifier", identifier.Identifier, False, "fixed", False),
            ("fixedInstant", "fixedInstant", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedInteger", "fixedInteger", int, False, "fixed", False),
            ("fixedMarkdown", "fixedMarkdown", str, False, "fixed", False),
            ("fixedMoney", "fixedMoney", money.Money, False, "fixed", False),
            ("fixedOid", "fixedOid", str, False, "fixed", False),
            ("fixedParameterDefinition", "fixedParameterDefinition", parameterdefinition.ParameterDefinition, False, "fixed", False),
            ("fixedPeriod", "fixedPeriod", period.Period, False, "fixed", False),
            ("fixedPositiveInt", "fixedPositiveInt", int, False, "fixed", False),
            ("fixedQuantity", "fixedQuantity", quantity.Quantity, False, "fixed", False),
            ("fixedRange", "fixedRange", range.Range, False, "fixed", False),
            ("fixedRatio", "fixedRatio", ratio.Ratio, False, "fixed", False),
            ("fixedReference", "fixedReference", fhirreference.FHIRReference, False, "fixed", False),
            ("fixedRelatedArtifact", "fixedRelatedArtifact", relatedartifact.RelatedArtifact, False, "fixed", False),
            ("fixedSampledData", "fixedSampledData", sampleddata.SampledData, False, "fixed", False),
            ("fixedSignature", "fixedSignature", signature.Signature, False, "fixed", False),
            ("fixedString", "fixedString", str, False, "fixed", False),
            ("fixedTime", "fixedTime", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedTiming", "fixedTiming", timing.Timing, False, "fixed", False),
            ("fixedTriggerDefinition", "fixedTriggerDefinition", triggerdefinition.TriggerDefinition, False, "fixed", False),
            ("fixedUnsignedInt", "fixedUnsignedInt", int, False, "fixed", False),
            ("fixedUri", "fixedUri", str, False, "fixed", False),
            ("fixedUrl", "fixedUrl", str, False, "fixed", False),
            ("fixedUsageContext", "fixedUsageContext", usagecontext.UsageContext, False, "fixed", False),
            ("fixedUuid", "fixedUuid", str, False, "fixed", False),
            ("isModifier", "isModifier", bool, False, None, False),
            ("isModifierReason", "isModifierReason", str, False, None, False),
            ("isSummary", "isSummary", bool, False, None, False),
            ("label", "label", str, False, None, False),
            ("mapping", "mapping", ElementDefinitionMapping, True, None, False),
            ("max", "max", str, False, None, False),
            ("maxLength", "maxLength", int, False, None, False),
            # maxValue[x] choice: at most one of the "maxValue" group may be set.
            ("maxValueDate", "maxValueDate", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueDateTime", "maxValueDateTime", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueDecimal", "maxValueDecimal", float, False, "maxValue", False),
            ("maxValueInstant", "maxValueInstant", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueInteger", "maxValueInteger", int, False, "maxValue", False),
            ("maxValuePositiveInt", "maxValuePositiveInt", int, False, "maxValue", False),
            ("maxValueQuantity", "maxValueQuantity", quantity.Quantity, False, "maxValue", False),
            ("maxValueTime", "maxValueTime", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueUnsignedInt", "maxValueUnsignedInt", int, False, "maxValue", False),
            ("meaningWhenMissing", "meaningWhenMissing", str, False, None, False),
            ("min", "min", int, False, None, False),
            # minValue[x] choice: at most one of the "minValue" group may be set.
            ("minValueDate", "minValueDate", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueDateTime", "minValueDateTime", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueDecimal", "minValueDecimal", float, False, "minValue", False),
            ("minValueInstant", "minValueInstant", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueInteger", "minValueInteger", int, False, "minValue", False),
            ("minValuePositiveInt", "minValuePositiveInt", int, False, "minValue", False),
            ("minValueQuantity", "minValueQuantity", quantity.Quantity, False, "minValue", False),
            ("minValueTime", "minValueTime", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueUnsignedInt", "minValueUnsignedInt", int, False, "minValue", False),
            ("mustSupport", "mustSupport", bool, False, None, False),
            ("orderMeaning", "orderMeaning", str, False, None, False),
            # "path" is the only required (non-optional) ElementDefinition property.
            ("path", "path", str, False, None, True),
            # pattern[x] choice: at most one of the "pattern" group may be set.
            ("patternAddress", "patternAddress", address.Address, False, "pattern", False),
            ("patternAge", "patternAge", age.Age, False, "pattern", False),
            ("patternAnnotation", "patternAnnotation", annotation.Annotation, False, "pattern", False),
            ("patternAttachment", "patternAttachment", attachment.Attachment, False, "pattern", False),
            ("patternBase64Binary", "patternBase64Binary", str, False, "pattern", False),
            ("patternBoolean", "patternBoolean", bool, False, "pattern", False),
            ("patternCanonical", "patternCanonical", str, False, "pattern", False),
            ("patternCode", "patternCode", str, False, "pattern", False),
            ("patternCodeableConcept", "patternCodeableConcept", codeableconcept.CodeableConcept, False, "pattern", False),
            ("patternCoding", "patternCoding", coding.Coding, False, "pattern", False),
            ("patternContactDetail", "patternContactDetail", contactdetail.ContactDetail, False, "pattern", False),
            ("patternContactPoint", "patternContactPoint", contactpoint.ContactPoint, False, "pattern", False),
            ("patternContributor", "patternContributor", contributor.Contributor, False, "pattern", False),
            ("patternCount", "patternCount", count.Count, False, "pattern", False),
            ("patternDataRequirement", "patternDataRequirement", datarequirement.DataRequirement, False, "pattern", False),
            ("patternDate", "patternDate", fhirdate.FHIRDate, False, "pattern", False),
            ("patternDateTime", "patternDateTime", fhirdate.FHIRDate, False, "pattern", False),
            ("patternDecimal", "patternDecimal", float, False, "pattern", False),
            ("patternDistance", "patternDistance", distance.Distance, False, "pattern", False),
            ("patternDosage", "patternDosage", dosage.Dosage, False, "pattern", False),
            ("patternDuration", "patternDuration", duration.Duration, False, "pattern", False),
            ("patternExpression", "patternExpression", expression.Expression, False, "pattern", False),
            ("patternHumanName", "patternHumanName", humanname.HumanName, False, "pattern", False),
            ("patternId", "patternId", str, False, "pattern", False),
            ("patternIdentifier", "patternIdentifier", identifier.Identifier, False, "pattern", False),
            ("patternInstant", "patternInstant", fhirdate.FHIRDate, False, "pattern", False),
            ("patternInteger", "patternInteger", int, False, "pattern", False),
            ("patternMarkdown", "patternMarkdown", str, False, "pattern", False),
            ("patternMoney", "patternMoney", money.Money, False, "pattern", False),
            ("patternOid", "patternOid", str, False, "pattern", False),
            ("patternParameterDefinition", "patternParameterDefinition", parameterdefinition.ParameterDefinition, False, "pattern", False),
            ("patternPeriod", "patternPeriod", period.Period, False, "pattern", False),
            ("patternPositiveInt", "patternPositiveInt", int, False, "pattern", False),
            ("patternQuantity", "patternQuantity", quantity.Quantity, False, "pattern", False),
            ("patternRange", "patternRange", range.Range, False, "pattern", False),
            ("patternRatio", "patternRatio", ratio.Ratio, False, "pattern", False),
            ("patternReference", "patternReference", fhirreference.FHIRReference, False, "pattern", False),
            ("patternRelatedArtifact", "patternRelatedArtifact", relatedartifact.RelatedArtifact, False, "pattern", False),
            ("patternSampledData", "patternSampledData", sampleddata.SampledData, False, "pattern", False),
            ("patternSignature", "patternSignature", signature.Signature, False, "pattern", False),
            ("patternString", "patternString", str, False, "pattern", False),
            ("patternTime", "patternTime", fhirdate.FHIRDate, False, "pattern", False),
            ("patternTiming", "patternTiming", timing.Timing, False, "pattern", False),
            ("patternTriggerDefinition", "patternTriggerDefinition", triggerdefinition.TriggerDefinition, False, "pattern", False),
            ("patternUnsignedInt", "patternUnsignedInt", int, False, "pattern", False),
            ("patternUri", "patternUri", str, False, "pattern", False),
            ("patternUrl", "patternUrl", str, False, "pattern", False),
            ("patternUsageContext", "patternUsageContext", usagecontext.UsageContext, False, "pattern", False),
            ("patternUuid", "patternUuid", str, False, "pattern", False),
            ("representation", "representation", str, True, None, False),
            ("requirements", "requirements", str, False, None, False),
            ("short", "short", str, False, None, False),
            ("sliceIsConstraining", "sliceIsConstraining", bool, False, None, False),
            ("sliceName", "sliceName", str, False, None, False),
            ("slicing", "slicing", ElementDefinitionSlicing, False, None, False),
            ("type", "type", ElementDefinitionType, True, None, False),
        ])
        return js
from . import element
class ElementDefinitionBase(element.Element):
    """ Base definition information for tools.

    Information about the base definition of the element, provided so that
    tools need not trace the deviation of the element through the derived
    and related profiles.  When the element definition is not the original
    definition of an element - i.g. either in a constraint on another type,
    or for elements from a super type in a snap shot - the information
    provided here may differ from the base definition.  On the original
    definition of the element it will be the same.
    """

    resource_type = "ElementDefinitionBase"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Max cardinality of the base element (`str`).
        self.max = None
        # Min cardinality of the base element (`int`).
        self.min = None
        # Path that identifies the base element (`str`).
        self.path = None
        super(ElementDefinitionBase, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Entries: (name, JSON name, type, is_list, "of many" group, required).
        # All three properties are mandatory here.
        props = super(ElementDefinitionBase, self).elementProperties()
        props += [
            ("max", "max", str, False, None, True),
            ("min", "min", int, False, None, True),
            ("path", "path", str, False, None, True),
        ]
        return props
class ElementDefinitionBinding(element.Element):
    """ ValueSet details if this is coded.

    Binds to a value set if this element is coded (code, Coding,
    CodeableConcept, Quantity), or the data types (string, uri).
    """

    resource_type = "ElementDefinitionBinding"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Human explanation of the value set (`str`).
        self.description = None
        # required | extensible | preferred | example (`str`).
        self.strength = None
        # Source of value set (`str`).
        self.valueSet = None
        super(ElementDefinitionBinding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Entries: (name, JSON name, type, is_list, "of many" group, required).
        # Only "strength" is mandatory.
        props = super(ElementDefinitionBinding, self).elementProperties()
        props += [
            ("description", "description", str, False, None, False),
            ("strength", "strength", str, False, None, True),
            ("valueSet", "valueSet", str, False, None, False),
        ]
        return props
class ElementDefinitionConstraint(element.Element):
    """ Condition that must evaluate to true.

    Formal constraints such as co-occurrence and other constraints that can
    be computationally evaluated within the context of the instance.
    """

    resource_type = "ElementDefinitionConstraint"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # FHIRPath expression of constraint (`str`).
        self.expression = None
        # Human description of constraint (`str`).
        self.human = None
        # Target of 'condition' reference above (`str`).
        self.key = None
        # Why this constraint is necessary or appropriate (`str`).
        self.requirements = None
        # error | warning (`str`).
        self.severity = None
        # Reference to original source of constraint (`str`).
        self.source = None
        # XPath expression of constraint (`str`).
        self.xpath = None
        super(ElementDefinitionConstraint, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Entries: (name, JSON name, type, is_list, "of many" group, required).
        # "human", "key" and "severity" are mandatory.
        props = super(ElementDefinitionConstraint, self).elementProperties()
        props += [
            ("expression", "expression", str, False, None, False),
            ("human", "human", str, False, None, True),
            ("key", "key", str, False, None, True),
            ("requirements", "requirements", str, False, None, False),
            ("severity", "severity", str, False, None, True),
            ("source", "source", str, False, None, False),
            ("xpath", "xpath", str, False, None, False),
        ]
        return props
class ElementDefinitionExample(element.Element):
    """ Example value (as defined for type).

    A sample value for this element demonstrating the type of information
    that would typically be found in the element.  The example value itself
    is a FHIR value[x] choice: exactly one of the 49 typed value attributes
    below is expected to be populated.
    """

    resource_type = "ElementDefinitionExample"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Describes the purpose of this example (`str`).
        self.label = None
        # One attribute per allowed value[x] type; all start out unset.  The
        # concrete value classes are resolved in elementProperties().
        for attr in (
            "valueAddress", "valueAge", "valueAnnotation", "valueAttachment",
            "valueBase64Binary", "valueBoolean", "valueCanonical", "valueCode",
            "valueCodeableConcept", "valueCoding", "valueContactDetail",
            "valueContactPoint", "valueContributor", "valueCount",
            "valueDataRequirement", "valueDate", "valueDateTime",
            "valueDecimal", "valueDistance", "valueDosage", "valueDuration",
            "valueExpression", "valueHumanName", "valueId", "valueIdentifier",
            "valueInstant", "valueInteger", "valueMarkdown", "valueMoney",
            "valueOid", "valueParameterDefinition", "valuePeriod",
            "valuePositiveInt", "valueQuantity", "valueRange", "valueRatio",
            "valueReference", "valueRelatedArtifact", "valueSampledData",
            "valueSignature", "valueString", "valueTime", "valueTiming",
            "valueTriggerDefinition", "valueUnsignedInt", "valueUri",
            "valueUrl", "valueUsageContext", "valueUuid",
        ):
            setattr(self, attr, None)
        super(ElementDefinitionExample, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # The value classes must be looked up at call time, not at class
        # creation time: the supporting model modules are imported further
        # down in this file (late binding, as in the generated original).
        choices = [
            ("valueAddress", address.Address),
            ("valueAge", age.Age),
            ("valueAnnotation", annotation.Annotation),
            ("valueAttachment", attachment.Attachment),
            ("valueBase64Binary", str),
            ("valueBoolean", bool),
            ("valueCanonical", str),
            ("valueCode", str),
            ("valueCodeableConcept", codeableconcept.CodeableConcept),
            ("valueCoding", coding.Coding),
            ("valueContactDetail", contactdetail.ContactDetail),
            ("valueContactPoint", contactpoint.ContactPoint),
            ("valueContributor", contributor.Contributor),
            ("valueCount", count.Count),
            ("valueDataRequirement", datarequirement.DataRequirement),
            ("valueDate", fhirdate.FHIRDate),
            ("valueDateTime", fhirdate.FHIRDate),
            ("valueDecimal", float),
            ("valueDistance", distance.Distance),
            ("valueDosage", dosage.Dosage),
            ("valueDuration", duration.Duration),
            ("valueExpression", expression.Expression),
            ("valueHumanName", humanname.HumanName),
            ("valueId", str),
            ("valueIdentifier", identifier.Identifier),
            ("valueInstant", fhirdate.FHIRDate),
            ("valueInteger", int),
            ("valueMarkdown", str),
            ("valueMoney", money.Money),
            ("valueOid", str),
            ("valueParameterDefinition", parameterdefinition.ParameterDefinition),
            ("valuePeriod", period.Period),
            ("valuePositiveInt", int),
            ("valueQuantity", quantity.Quantity),
            ("valueRange", range.Range),
            ("valueRatio", ratio.Ratio),
            ("valueReference", fhirreference.FHIRReference),
            ("valueRelatedArtifact", relatedartifact.RelatedArtifact),
            ("valueSampledData", sampleddata.SampledData),
            ("valueSignature", signature.Signature),
            ("valueString", str),
            ("valueTime", fhirdate.FHIRDate),
            ("valueTiming", timing.Timing),
            ("valueTriggerDefinition", triggerdefinition.TriggerDefinition),
            ("valueUnsignedInt", int),
            ("valueUri", str),
            ("valueUrl", str),
            ("valueUsageContext", usagecontext.UsageContext),
            ("valueUuid", str),
        ]
        js = super(ElementDefinitionExample, self).elementProperties()
        # Entries: (name, JSON name, type, is_list, "of many" group, required).
        js.append(("label", "label", str, False, None, True))
        # Every value[x] variant shares the "value" choice group and is
        # marked required: one (and only one) of them must be provided.
        js.extend(
            (name, name, typ, False, "value", True)
            for name, typ in choices
        )
        return js
class ElementDefinitionMapping(element.Element):
    """ Map element to another set of definitions.

    Identifies a concept from an external specification that roughly
    corresponds to this element.
    """

    resource_type = "ElementDefinitionMapping"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Comments about the mapping or its use (`str`).
        self.comment = None
        # Reference to mapping declaration (`str`).
        self.identity = None
        # Computable language of mapping (`str`).
        self.language = None
        # Details of the mapping (`str`).
        self.map = None
        super(ElementDefinitionMapping, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Entries: (name, JSON name, type, is_list, "of many" group, required).
        # "identity" and "map" are mandatory.
        props = super(ElementDefinitionMapping, self).elementProperties()
        props += [
            ("comment", "comment", str, False, None, False),
            ("identity", "identity", str, False, None, True),
            ("language", "language", str, False, None, False),
            ("map", "map", str, False, None, True),
        ]
        return props
class ElementDefinitionSlicing(element.Element):
    """ This element is sliced - slices follow.

    Indicates that the element is sliced into a set of alternative
    definitions (i.e. in a structure definition, there are multiple
    different constraints on a single element in the base resource).
    Slicing can be used in any resource that has cardinality ..* on the
    base resource, or any resource with a choice of types.  The set of
    slices is any elements that come after this in the element sequence
    that have the same path, until a shorter path occurs (the shorter path
    terminates the set).
    """

    resource_type = "ElementDefinitionSlicing"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Text description of how slicing works (or not) (`str`).
        self.description = None
        # Element values used to distinguish the slices
        # (list of `ElementDefinitionSlicingDiscriminator`).
        self.discriminator = None
        # If elements must be in same order as slices (`bool`).
        self.ordered = None
        # closed | open | openAtEnd (`str`).
        self.rules = None
        super(ElementDefinitionSlicing, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Entries: (name, JSON name, type, is_list, "of many" group, required).
        # Only "rules" is mandatory; "discriminator" is a list property.
        props = super(ElementDefinitionSlicing, self).elementProperties()
        props += [
            ("description", "description", str, False, None, False),
            ("discriminator", "discriminator", ElementDefinitionSlicingDiscriminator, True, None, False),
            ("ordered", "ordered", bool, False, None, False),
            ("rules", "rules", str, False, None, True),
        ]
        return props
class ElementDefinitionSlicingDiscriminator(element.Element):
    """ Element values that are used to distinguish the slices.
    Designates which child elements are used to discriminate between the slices
    when processing an instance. If one or more discriminators are provided,
    the value of the child elements in the instance data SHALL completely
    distinguish which slice the element in the resource matches based on the
    allowed values for those elements in each of the slices.
    """
    # NOTE(review): presumably generator-produced fhirclient model -- prefer
    # regenerating over hand-editing.
    resource_type = "ElementDefinitionSlicingDiscriminator"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.path = None
        """ Path to element value.
        Type `str`. """
        self.type = None
        """ value | exists | pattern | type | profile.
        Type `str`. """
        super(ElementDefinitionSlicingDiscriminator, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(ElementDefinitionSlicingDiscriminator, self).elementProperties()
        js.extend([
            ("path", "path", str, False, None, True),
            ("type", "type", str, False, None, True),
        ])
        return js
class ElementDefinitionType(element.Element):
    """ Data type and Profile for this element.
    The data type or resource that the value of this element is permitted to
    be.
    """
    # NOTE(review): presumably generator-produced fhirclient model -- prefer
    # regenerating over hand-editing.
    resource_type = "ElementDefinitionType"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.aggregation = None
        """ contained | referenced | bundled - how aggregated.
        List of `str` items. """
        self.code = None
        """ Data type or Resource (reference to definition).
        Type `str`. """
        self.profile = None
        """ Profiles (StructureDefinition or IG) - one must apply.
        List of `str` items. """
        self.targetProfile = None
        """ Profile (StructureDefinition or IG) on the Reference/canonical
        target - one must apply.
        List of `str` items. """
        self.versioning = None
        """ either | independent | specific.
        Type `str`. """
        super(ElementDefinitionType, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(ElementDefinitionType, self).elementProperties()
        js.extend([
            ("aggregation", "aggregation", str, True, None, False),
            ("code", "code", str, False, None, True),
            ("profile", "profile", str, True, None, False),
            ("targetProfile", "targetProfile", str, True, None, False),
            ("versioning", "versioning", str, False, None, False),
        ])
        return js
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import age
except ImportError:
age = sys.modules[__package__ + '.age']
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import contributor
except ImportError:
contributor = sys.modules[__package__ + '.contributor']
try:
from . import count
except ImportError:
count = sys.modules[__package__ + '.count']
try:
from . import datarequirement
except ImportError:
datarequirement = sys.modules[__package__ + '.datarequirement']
try:
from . import distance
except ImportError:
distance = sys.modules[__package__ + '.distance']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import expression
except ImportError:
expression = sys.modules[__package__ + '.expression']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import money
except ImportError:
money = sys.modules[__package__ + '.money']
try:
from . import parameterdefinition
except ImportError:
parameterdefinition = sys.modules[__package__ + '.parameterdefinition']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import sampleddata
except ImportError:
sampleddata = sys.modules[__package__ + '.sampleddata']
try:
from . import signature
except ImportError:
signature = sys.modules[__package__ + '.signature']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
try:
from . import triggerdefinition
except ImportError:
triggerdefinition = sys.modules[__package__ + '.triggerdefinition']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| bsd-3-clause | 62c270bc4144bcedc6fd95f631552029 | 42.407789 | 154 | 0.598601 | 4.601141 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/message_broker/message_broker.py | 1 | 6297 | import logging
from datetime import timedelta
import backoff
import requests
from werkzeug.exceptions import BadRequest, BadGateway, HTTPException
from rdr_service import clock
from rdr_service.model.message_broker import MessageBrokerDestAuthInfo
from rdr_service.model.utils import to_client_participant_id
from rdr_service.dao.database_utils import format_datetime
from rdr_service.dao.message_broker_metadata_dao import MessageBrokerMetadataDao
from rdr_service.dao.message_broker_dest_auth_info_dao import MessageBrokerDestAuthInfoDao
# this is added based on the document, PTSC's test env is not ready, no test on real env yet
class BaseMessageBroker:
    """Base client that delivers RDR message-broker events to an external
    destination, handling OAuth2 client-credentials tokens and
    connection-error retries.

    Subclasses supply the destination-specific payload via
    ``make_request_body``.
    """
    def __init__(self, message):
        # `message` carries eventType, messageDest, participant info and the
        # request body (see usages below).
        self.message = message
        self.message_metadata_dao = MessageBrokerMetadataDao()
        self.dest_auth_dao = MessageBrokerDestAuthInfoDao()
    # Calls from DRC will use a token obtained using the client credentials grant,
    # used for machine to machine authentication/authorization.
    def get_access_token(self):
        """Returns the access token for the API endpoint."""
        # Raises BadRequest when no auth record exists for the destination.
        auth_info = self.dest_auth_dao.get_auth_info(self.message.messageDest)
        if not auth_info:
            raise BadRequest(f'can not find auth info for dest: {self.message.messageDest}')
        now = clock.CLOCK.now()
        # the token will be expired in 300 secs, compare with the timestamp of 20 secs later
        # to make sure we use a valid token
        secs_later = now + timedelta(seconds=20)
        if auth_info.accessToken and auth_info.expiredAt > secs_later:
            return auth_info.accessToken
        else:
            return self._request_new_token(auth_info=auth_info)
    @backoff.on_exception(backoff.constant, HTTPException, max_tries=3)
    def _request_new_token(self, auth_info: MessageBrokerDestAuthInfo):
        """Fetch a new OAuth2 token from the destination's token endpoint.

        On success, persists the token and its expiry back onto ``auth_info``
        via the DAO and returns the token string. On a non-2xx response,
        raises BadGateway (retried up to 3 times by the backoff decorator,
        since BadGateway is an HTTPException).
        """
        token_endpoint = auth_info.tokenEndpoint
        payload = f'grant_type=client_credentials&client_id={auth_info.key}&client_secret={auth_info.secret}'
        response = requests.post(token_endpoint, data=payload,
                                 headers={"Content-type": "application/x-www-form-urlencoded"})
        if response.status_code in (200, 201):
            r_json = response.json()
            auth_info.accessToken = r_json['access_token']
            now = clock.CLOCK.now()
            expired_at = now + timedelta(seconds=r_json['expires_in'])
            auth_info.expiredAt = expired_at
            self.dest_auth_dao.update(auth_info)
            return r_json['access_token']
        else:
            logging.warning(
                f'received {response.status_code} from message broker auth endpoint for {self.message.messageDest}'
            )
            raise BadGateway(f'can not get access token for dest: {self.message.messageDest}, '
                             f'response error: {str(response.status_code)}')
    def _get_message_dest_url(self):
        # Look up the endpoint for this (event type, destination) pair;
        # BadRequest if no mapping is configured.
        dest_url = self.message_metadata_dao.get_dest_url(self.message.eventType, self.message.messageDest)
        if not dest_url:
            raise BadRequest(f'no destination url found for dest: {self.message.messageDest} '
                             f'and event: {self.message.eventType}')
        return dest_url
    def make_request_body(self):
        """Returns the request body that need to be sent to the destination. Must be overridden by subclasses."""
        raise NotImplementedError()
    def send_request(self):
        """Send the event to its destination.

        :returns: tuple ``(status_code, response_body, error_text)`` where
            ``response_body`` is parsed JSON on HTTP 200 (and ``error_text``
            is ``''``), otherwise the raw response text is returned in both
            slots.
        """
        dest_url = self._get_message_dest_url()
        token = self.get_access_token()
        request_body = self.make_request_body()
        response = self.send_request_with_retry_on_conn_error(dest_url, request_body, token)
        if response.status_code == 200:
            return response.status_code, response.json(), ''
        else:
            return response.status_code, response.text, response.text
    @backoff.on_exception(backoff.constant, requests.exceptions.ConnectionError, max_tries=3)
    def send_request_with_retry_on_conn_error(self, url, request_body, token):
        """POST ``request_body`` as JSON to ``url`` with a Bearer token,
        retrying up to 3 times on connection errors (see trace below)."""
        # retry 3 time for the following error:
        # urllib3.exceptions.ProtocolError:
        # ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
        # Traceback:
        # response = requests.post(url, json=request_body, headers={"Authorization": "Bearer " + token})
        # "/layers/google.python.pip/pip/lib/python3.7/site-packages/requests/api.py", line
        # in post
        # return request('post', url, data=data, json=json, **kwargs)
        # "/layers/google.python.pip/pip/lib/python3.7/site-packages/requests/api.py", line
        # in request
        # return session.request(method=method, url=url, **kwargs)
        # "/layers/google.python.pip/pip/lib/python3.7/site-packages/requests/sessions.py", line
        # in request
        # resp = self.send(prep, **send_kwargs)
        # "/layers/google.python.pip/pip/lib/python3.7/site-packages/requests/sessions.py", line
        # in send
        # r = adapter.send(request, **kwargs)
        # "/layers/google.python.pip/pip/lib/python3.7/site-packages/requests/adapters.py", line
        # in send
        # raise ConnectionError(err, request=request)
        # requests.exceptions.ConnectionError:
        # ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
        # Token should be included in the HTTP Authorization header using the Bearer scheme.
        return requests.post(url, json=request_body, headers={"Authorization": "Bearer " + token})
class PtscMessageBroker(BaseMessageBroker):
    """Message broker client for the PTSC ('vibrent') destination."""
    def __init__(self, message):
        super(PtscMessageBroker, self).__init__(message)
    def make_request_body(self):
        """Build the JSON payload expected by the PTSC event endpoint."""
        msg = self.message
        return {
            'event': msg.eventType,
            'eventAuthoredTime': format_datetime(msg.eventAuthoredTime),
            'participantId': to_client_participant_id(msg.participantId),
            'messageBody': msg.requestBody
        }
class MessageBrokerFactory:
    """Creates the broker implementation matching a message's destination."""
    @staticmethod
    def create(message):
        """Return a broker for ``message.messageDest``; BadRequest if unknown."""
        if message.messageDest != 'vibrent':
            raise BadRequest(f'no destination found: {message.messageDest}')
        return PtscMessageBroker(message)
| bsd-3-clause | 4bccf2e041b78d363031ca3675216a1a | 45.992537 | 115 | 0.665714 | 3.980405 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/534d805d5dcf_alter_biobank_dv_table.py | 1 | 1944 | """alter biobank dv table
Revision ID: 534d805d5dcf
Revises: dc971fc16861
Create Date: 2019-03-18 13:23:40.194824
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "534d805d5dcf"
down_revision = "dc971fc16861"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. ``upgrade_rdr``)."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. ``downgrade_rdr``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_rdr():
    """RDR schema changes for this revision: add a non-nullable ``version``
    column to biobank_dv_order, make ``modified`` non-nullable, add a unique
    constraint on ``biobank_order_id``, and drop ``biobank_reference``.

    Applied Alembic migration -- do not edit after it has run anywhere.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("biobank_dv_order", sa.Column("version", sa.Integer(), nullable=False))
    op.alter_column(
        "biobank_dv_order",
        "modified",
        existing_type=mysql.DATETIME(fsp=6),
        nullable=False,
        existing_server_default=sa.text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)"),
    )
    # None lets Alembic derive the constraint name from its naming convention.
    op.create_unique_constraint(None, "biobank_dv_order", ["biobank_order_id"])
    op.drop_column("biobank_dv_order", "biobank_reference")
    # ### end Alembic commands ###
def downgrade_rdr():
    """Reverse of upgrade_rdr: restore ``biobank_reference``, drop the unique
    constraint, make ``modified`` nullable again, and drop ``version``.

    Applied Alembic migration -- do not edit after it has run anywhere.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("biobank_dv_order", sa.Column("biobank_reference", mysql.VARCHAR(length=80), nullable=True))
    # NOTE(review): constraint name is None (autogenerate artifact); MySQL
    # typically needs the explicit constraint name here -- confirm before
    # relying on this downgrade.
    op.drop_constraint(None, "biobank_dv_order", type_="unique")
    op.alter_column(
        "biobank_dv_order",
        "modified",
        existing_type=mysql.DATETIME(fsp=6),
        nullable=True,
        existing_server_default=sa.text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)"),
    )
    op.drop_column("biobank_dv_order", "version")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
| bsd-3-clause | a982e653bbd5460173fce75e7f61d7ce | 28.454545 | 110 | 0.659465 | 3.452931 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/0e92151ebd4a_amend_biobank_orders.py | 1 | 3772 | """amend biobank orders
Revision ID: 0e92151ebd4a
Revises: 075b9eee88b7
Create Date: 2018-08-15 11:43:57.043915
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import BiobankOrderStatus
# revision identifiers, used by Alembic.
revision = "0e92151ebd4a"
down_revision = "075b9eee88b7"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. ``upgrade_rdr``)."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. ``downgrade_rdr``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_rdr():
    """RDR schema changes for this revision: add amendment/cancellation/
    restoration tracking columns to biobank_order (who/when/where plus
    order_status and last_modified) and the supporting foreign keys.

    Applied Alembic migration -- do not edit after it has run anywhere.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("biobank_order", sa.Column("amended_biobank_order_id", sa.String(length=80), nullable=True))
    op.add_column("biobank_order", sa.Column("amended_reason", sa.UnicodeText(), nullable=True))
    op.add_column("biobank_order", sa.Column("amended_site_id", sa.Integer(), nullable=True))
    op.add_column("biobank_order", sa.Column("amended_time", model.utils.UTCDateTime(), nullable=True))
    op.add_column("biobank_order", sa.Column("amended_username", sa.String(length=255), nullable=True))
    op.add_column("biobank_order", sa.Column("cancelled_site_id", sa.Integer(), nullable=True))
    op.add_column("biobank_order", sa.Column("cancelled_time", model.utils.UTCDateTime(), nullable=True))
    op.add_column("biobank_order", sa.Column("cancelled_username", sa.String(length=255), nullable=True))
    op.add_column("biobank_order", sa.Column("last_modified", model.utils.UTCDateTime(), nullable=True))
    op.add_column("biobank_order", sa.Column("order_status", model.utils.Enum(BiobankOrderStatus), nullable=True))
    op.add_column("biobank_order", sa.Column("restored_site_id", sa.Integer(), nullable=True))
    op.add_column("biobank_order", sa.Column("restored_time", model.utils.UTCDateTime(), nullable=True))
    op.add_column("biobank_order", sa.Column("restored_username", sa.String(length=255), nullable=True))
    # None lets Alembic derive FK names from its naming convention.
    op.create_foreign_key(None, "biobank_order", "site", ["amended_site_id"], ["site_id"])
    op.create_foreign_key(None, "biobank_order", "site", ["cancelled_site_id"], ["site_id"])
    op.create_foreign_key(None, "biobank_order", "site", ["restored_site_id"], ["site_id"])
    op.create_foreign_key(None, "biobank_order", "biobank_order", ["amended_biobank_order_id"], ["biobank_order_id"])
    # ### end Alembic commands ###
def downgrade_rdr():
    """Reverse of upgrade_rdr: drop the four foreign keys and every
    amendment/cancellation/restoration column added by this revision.

    Applied Alembic migration -- do not edit after it has run anywhere.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): FK names are None (autogenerate artifact); MySQL usually
    # requires explicit constraint names to drop -- confirm before relying on
    # this downgrade.
    op.drop_constraint(None, "biobank_order", type_="foreignkey")
    op.drop_constraint(None, "biobank_order", type_="foreignkey")
    op.drop_constraint(None, "biobank_order", type_="foreignkey")
    op.drop_constraint(None, "biobank_order", type_="foreignkey")
    op.drop_column("biobank_order", "restored_username")
    op.drop_column("biobank_order", "restored_time")
    op.drop_column("biobank_order", "restored_site_id")
    op.drop_column("biobank_order", "order_status")
    op.drop_column("biobank_order", "last_modified")
    op.drop_column("biobank_order", "cancelled_username")
    op.drop_column("biobank_order", "cancelled_time")
    op.drop_column("biobank_order", "cancelled_site_id")
    op.drop_column("biobank_order", "amended_username")
    op.drop_column("biobank_order", "amended_time")
    op.drop_column("biobank_order", "amended_site_id")
    op.drop_column("biobank_order", "amended_reason")
    op.drop_column("biobank_order", "amended_biobank_order_id")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
| bsd-3-clause | 8254a165576693cf083a923b94ec3f57 | 45 | 117 | 0.69141 | 3.159129 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/auditevent.py | 1 | 14315 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/AuditEvent) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class AuditEvent(domainresource.DomainResource):
    """ Event record kept for security purposes.
    A record of an event made for purposes of maintaining a security log.
    Typical uses include detection of intrusion attempts and monitoring for
    inappropriate usage.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEvent"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.event = None
        """ What was done.
        Type `AuditEventEvent` (represented as `dict` in JSON). """
        self.object = None
        """ Specific instances of data or objects that have been accessed.
        List of `AuditEventObject` items (represented as `dict` in JSON). """
        self.participant = None
        """ A person, a hardware device or software process.
        List of `AuditEventParticipant` items (represented as `dict` in JSON). """
        self.source = None
        """ Application systems and processes.
        Type `AuditEventSource` (represented as `dict` in JSON). """
        super(AuditEvent, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        # Each tuple appears to follow the fhirclient property-spec convention:
        # (name, json_name, type, is_list, of_many, required) -- confirm
        # against the base class.
        js = super(AuditEvent, self).elementProperties()
        js.extend([
            ("event", "event", AuditEventEvent, False, None, True),
            ("object", "object", AuditEventObject, True, None, False),
            ("participant", "participant", AuditEventParticipant, True, None, True),
            ("source", "source", AuditEventSource, False, None, True),
        ])
        return js
from . import backboneelement
class AuditEventEvent(backboneelement.BackboneElement):
    """ What was done.
    Identifies the name, action type, time, and disposition of the audited
    event.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEventEvent"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None
        """ Type of action performed during the event.
        Type `str`. """
        self.dateTime = None
        """ Time when the event occurred on source.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.outcome = None
        """ Whether the event succeeded or failed.
        Type `str`. """
        self.outcomeDesc = None
        """ Description of the event outcome.
        Type `str`. """
        self.purposeOfEvent = None
        """ The purposeOfUse of the event.
        List of `Coding` items (represented as `dict` in JSON). """
        self.subtype = None
        """ More specific type/id for the event.
        List of `Coding` items (represented as `dict` in JSON). """
        self.type = None
        """ Type/identifier of event.
        Type `Coding` (represented as `dict` in JSON). """
        super(AuditEventEvent, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(AuditEventEvent, self).elementProperties()
        js.extend([
            ("action", "action", str, False, None, False),
            ("dateTime", "dateTime", fhirdate.FHIRDate, False, None, True),
            ("outcome", "outcome", str, False, None, False),
            ("outcomeDesc", "outcomeDesc", str, False, None, False),
            ("purposeOfEvent", "purposeOfEvent", coding.Coding, True, None, False),
            ("subtype", "subtype", coding.Coding, True, None, False),
            ("type", "type", coding.Coding, False, None, True),
        ])
        return js
class AuditEventObject(backboneelement.BackboneElement):
    """ Specific instances of data or objects that have been accessed.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEventObject"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.description = None
        """ Descriptive text.
        Type `str`. """
        self.detail = None
        """ Additional Information about the Object.
        List of `AuditEventObjectDetail` items (represented as `dict` in JSON). """
        self.identifier = None
        """ Specific instance of object (e.g. versioned).
        Type `Identifier` (represented as `dict` in JSON). """
        self.lifecycle = None
        """ Life-cycle stage for the object.
        Type `Coding` (represented as `dict` in JSON). """
        self.name = None
        """ Instance-specific descriptor for Object.
        Type `str`. """
        self.query = None
        """ Actual query for object.
        Type `str`. """
        self.reference = None
        """ Specific instance of resource (e.g. versioned).
        Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
        self.role = None
        """ What role the Object played.
        Type `Coding` (represented as `dict` in JSON). """
        self.securityLabel = None
        """ Security labels applied to the object.
        List of `Coding` items (represented as `dict` in JSON). """
        self.type = None
        """ Type of object involved.
        Type `Coding` (represented as `dict` in JSON). """
        super(AuditEventObject, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(AuditEventObject, self).elementProperties()
        js.extend([
            ("description", "description", str, False, None, False),
            ("detail", "detail", AuditEventObjectDetail, True, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("lifecycle", "lifecycle", coding.Coding, False, None, False),
            ("name", "name", str, False, None, False),
            ("query", "query", str, False, None, False),
            ("reference", "reference", fhirreference.FHIRReference, False, None, False),
            ("role", "role", coding.Coding, False, None, False),
            ("securityLabel", "securityLabel", coding.Coding, True, None, False),
            ("type", "type", coding.Coding, False, None, False),
        ])
        return js
class AuditEventObjectDetail(backboneelement.BackboneElement):
    """ Additional Information about the Object.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEventObjectDetail"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.type = None
        """ Name of the property.
        Type `str`. """
        self.value = None
        """ Property value.
        Type `str`. """
        super(AuditEventObjectDetail, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(AuditEventObjectDetail, self).elementProperties()
        js.extend([
            ("type", "type", str, False, None, True),
            ("value", "value", str, False, None, True),
        ])
        return js
class AuditEventParticipant(backboneelement.BackboneElement):
    """ A person, a hardware device or software process.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEventParticipant"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.altId = None
        """ Alternative User id e.g. authentication.
        Type `str`. """
        self.location = None
        """ Where.
        Type `FHIRReference` referencing `Location` (represented as `dict` in JSON). """
        self.media = None
        """ Type of media.
        Type `Coding` (represented as `dict` in JSON). """
        self.name = None
        """ Human-meaningful name for the user.
        Type `str`. """
        self.network = None
        """ Logical network location for application activity.
        Type `AuditEventParticipantNetwork` (represented as `dict` in JSON). """
        self.policy = None
        """ Policy that authorized event.
        List of `str` items. """
        self.purposeOfUse = None
        """ Reason given for this user.
        List of `Coding` items (represented as `dict` in JSON). """
        self.reference = None
        """ Direct reference to resource.
        Type `FHIRReference` referencing `Practitioner, Organization, Device, Patient, RelatedPerson` (represented as `dict` in JSON). """
        self.requestor = None
        """ Whether user is initiator.
        Type `bool`. """
        self.role = None
        """ User roles (e.g. local RBAC codes).
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.userId = None
        """ Unique identifier for the user.
        Type `Identifier` (represented as `dict` in JSON). """
        super(AuditEventParticipant, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(AuditEventParticipant, self).elementProperties()
        js.extend([
            ("altId", "altId", str, False, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, False),
            ("media", "media", coding.Coding, False, None, False),
            ("name", "name", str, False, None, False),
            ("network", "network", AuditEventParticipantNetwork, False, None, False),
            ("policy", "policy", str, True, None, False),
            ("purposeOfUse", "purposeOfUse", coding.Coding, True, None, False),
            ("reference", "reference", fhirreference.FHIRReference, False, None, False),
            ("requestor", "requestor", bool, False, None, True),
            ("role", "role", codeableconcept.CodeableConcept, True, None, False),
            ("userId", "userId", identifier.Identifier, False, None, False),
        ])
        return js
class AuditEventParticipantNetwork(backboneelement.BackboneElement):
    """ Logical network location for application activity.
    Logical network location for application activity, if the activity has a
    network location.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEventParticipantNetwork"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.address = None
        """ Identifier for the network access point of the user device.
        Type `str`. """
        self.type = None
        """ The type of network access point.
        Type `str`. """
        super(AuditEventParticipantNetwork, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(AuditEventParticipantNetwork, self).elementProperties()
        js.extend([
            ("address", "address", str, False, None, False),
            ("type", "type", str, False, None, False),
        ])
        return js
class AuditEventSource(backboneelement.BackboneElement):
    """ Application systems and processes.
    """
    # Generated from FHIR 1.0.2.7202 (see file header); regenerate rather
    # than hand-edit.
    resource_name = "AuditEventSource"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.identifier = None
        """ The identity of source detecting the event.
        Type `Identifier` (represented as `dict` in JSON). """
        self.site = None
        """ Logical source location within the enterprise.
        Type `str`. """
        self.type = None
        """ The type of source where event originated.
        List of `Coding` items (represented as `dict` in JSON). """
        super(AuditEventSource, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property specs for JSON (de)serialization; extends the superclass list."""
        js = super(AuditEventSource, self).elementProperties()
        js.extend([
            ("identifier", "identifier", identifier.Identifier, False, None, True),
            ("site", "site", str, False, None, False),
            ("type", "type", coding.Coding, True, None, False),
        ])
        return js
from . import codeableconcept
from . import coding
from . import fhirdate
from . import fhirreference
from . import identifier
| bsd-3-clause | fb34075a185849017ed2e5e273a8d392 | 36.473822 | 138 | 0.594202 | 4.487461 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/offline/retention_eligible_import.py | 1 | 15568 | import logging
import datetime
from dateutil.parser import parse
from rdr_service.clock import CLOCK
from rdr_service.app_util import nonprod
from rdr_service.storage import GoogleCloudStorageCSVReader
from rdr_service.dao.retention_eligible_metrics_dao import RetentionEligibleMetricsDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.retention_eligible_metrics import RetentionEligibleMetrics
from rdr_service.participant_enums import RetentionType, RetentionStatus
_BATCH_SIZE = 1000
# Note: depreciated to switch to using GoogleCloudStorageCSVReader version.
# def import_retention_eligible_metrics_file(task_data):
# """
# Import PTSC retention eligible metric file from bucket.
# :param task_data: Cloud function event dict.
# """
# csv_file_cloud_path = task_data["file_path"]
# upload_date = task_data["upload_date"]
# dao = RetentionEligibleMetricsDao()
#
# # Copy bucket file to local temp file.
# logging.info(f"Opening gs://{csv_file_cloud_path}.")
# tmp_file = tempfile.NamedTemporaryFile(prefix='ptsc_')
# with GoogleCloudStorageProvider().open(csv_file_cloud_path, 'rt') as csv_file:
# while True:
# chunk = csv_file.read(_CHUNK_SIZE)
# tmp_file.write(chunk)
# if not chunk or len(chunk) < _CHUNK_SIZE:
# break
# tmp_file.seek(0)
#
# header = tmp_file.readline().decode('utf-8')
# missing_cols = set(RetentionEligibleMetricCsvColumns.ALL) - set(header.split(','))
# if missing_cols:
# raise DataError(f"CSV is missing columns {missing_cols}, had columns {header}.")
#
# strio = io.StringIO()
# strio.write(header + '\n')
# batch_count = upsert_count = 0
# records = list()
#
# with dao.session() as session:
# while True:
# # Create a mini-csv file with a _BATCH_SIZE number of records in the StringIO obj.
# line = tmp_file.readline().decode('utf-8')
# if line:
# strio.write(line + '\n')
# batch_count += 1
#
# if batch_count == _BATCH_SIZE or not line:
# strio.seek(0)
# csv_reader = csv.DictReader(strio, delimiter=",")
# for row in csv_reader:
# if not row[RetentionEligibleMetricCsvColumns.PARTICIPANT_ID]:
# continue
# record = _create_retention_eligible_metrics_obj_from_row(row, upload_date)
# records.append(record)
# upsert_count += dao.upsert_all_with_session(session, records)
# if not line:
# break
# # reset for next batch, re-use objects.
# batch_count = 0
# records.clear()
# strio.seek(0)
# strio.truncate(0)
# strio.write(header + '\n')
#
# tmp_file.close()
#
# logging.info(f"Updating participant summary retention eligible flags for {upsert_count} participants...")
# ParticipantSummaryDao().bulk_update_retention_eligible_flags(upload_date)
# logging.info(f"Import and update completed for gs://{csv_file_cloud_path}")
def import_retention_eligible_metrics_file(task_data):
    """
    Import PTSC retention eligible metric file from bucket.
    :param task_data: Cloud function event dict; must contain 'file_path'
                      (bucket path of the CSV) and 'upload_date'.
    """
    csv_file_cloud_path = task_data["file_path"]
    upload_date = task_data["upload_date"]
    dao = RetentionEligibleMetricsDao()
    # Stream the CSV straight from the bucket (replaces the old temp-file copy).
    logging.info(f"Reading gs://{csv_file_cloud_path}.")
    csv_reader = GoogleCloudStorageCSVReader(csv_file_cloud_path)
    batch_count = upsert_count = 0
    records = list()
    with dao.session() as session:
        for row in csv_reader:
            # Skip rows with no participant id.
            if not row[RetentionEligibleMetricCsvColumns.PARTICIPANT_ID]:
                continue
            record = _create_retention_eligible_metrics_obj_from_row(row, upload_date)
            records.append(record)
            batch_count += 1
            # Flush a full batch to keep memory and transaction size bounded.
            if batch_count == _BATCH_SIZE:
                upsert_count += dao.upsert_all_with_session(session, records)
                records.clear()
                batch_count = 0
        # Flush the final partial batch, if any.
        if records:
            upsert_count += dao.upsert_all_with_session(session, records)
    logging.info(f"Updating participant summary retention eligible flags for {upsert_count} participants...")
    ParticipantSummaryDao().bulk_update_retention_eligible_flags(upload_date)
    logging.info(f"Import and update completed for gs://{csv_file_cloud_path}")
@nonprod
def calculate_retention_eligible_metrics():
    """Derive retention-eligibility columns directly in the database.

    Lower environments only (hence ``@nonprod``); production imports these
    values from the PTSC file via ``import_retention_eligible_metrics_file``.
    Updates three participant_summary columns in a single UPDATE statement:
    retention_eligible_status, retention_eligible_time and retention_type.
    """
    # "Active" retention looks back 18 months (547 days).
    retention_window = datetime.timedelta(days=547)
    eighteen_month_ago = CLOCK.now() - retention_window
    eighteen_month_ago_str = eighteen_month_ago.strftime('%Y-%m-%d %H:%M:%S')
    # NOTE(review): the numeric literals below (status 2/1, type 3/1/2/0)
    # presumably map to RetentionStatus / RetentionType enum values -- confirm
    # against rdr_service.participant_enums before changing them.
    # str.format into the SQL is not an injection risk here: the only value
    # interpolated is a server-generated timestamp, never user input.
    update_sql = """
        UPDATE participant_summary
        SET retention_eligible_status =
          CASE WHEN
            consent_for_study_enrollment = 1
            AND (consent_for_electronic_health_records = 1 OR consent_for_dv_electronic_health_records_sharing = 1)
            AND questionnaire_on_the_basics = 1
            AND questionnaire_on_overall_health = 1
            AND questionnaire_on_lifestyle = 1
            AND withdrawal_status = 1
            AND suspension_status = 1
            AND samples_to_isolate_dna = 1
          THEN 2 ELSE 1
          END,
        retention_eligible_time =
          CASE WHEN
            consent_for_study_enrollment = 1
            AND (consent_for_electronic_health_records = 1 OR consent_for_dv_electronic_health_records_sharing = 1)
            AND questionnaire_on_the_basics = 1
            AND questionnaire_on_overall_health = 1
            AND questionnaire_on_lifestyle = 1
            AND withdrawal_status = 1
            AND suspension_status = 1
            AND samples_to_isolate_dna = 1
            AND
            COALESCE(sample_status_1ed10_time, sample_status_2ed10_time, sample_status_1ed04_time,
                     sample_status_1sal_time, sample_status_1sal2_time, 0) != 0
          THEN GREATEST(
            GREATEST (consent_for_study_enrollment_authored,
                      questionnaire_on_the_basics_authored,
                      questionnaire_on_overall_health_authored,
                      questionnaire_on_lifestyle_authored,
                      COALESCE(consent_for_electronic_health_records_authored, consent_for_study_enrollment_authored),
                      COALESCE(consent_for_dv_electronic_health_records_sharing_authored, consent_for_study_enrollment_authored)
                      ),
            LEAST(COALESCE(sample_status_1ed10_time, '9999-01-01'),
                  COALESCE(sample_status_2ed10_time, '9999-01-01'),
                  COALESCE(sample_status_1ed04_time, '9999-01-01'),
                  COALESCE(sample_status_1sal_time, '9999-01-01'),
                  COALESCE(sample_status_1sal2_time, '9999-01-01')
                  )
          )
          ELSE NULL
          END,
        retention_type =
          CASE WHEN
            consent_for_study_enrollment = 1
            AND (consent_for_electronic_health_records = 1 OR consent_for_dv_electronic_health_records_sharing = 1)
            AND questionnaire_on_the_basics = 1
            AND questionnaire_on_overall_health = 1
            AND questionnaire_on_lifestyle = 1
            AND withdrawal_status = 1
            AND suspension_status = 1
            AND samples_to_isolate_dna = 1
            AND (
                (questionnaire_on_healthcare_access_authored is not null and
                questionnaire_on_healthcare_access_authored > '{eighteen_month_ago}') or
                (questionnaire_on_family_health_authored is not null and
                questionnaire_on_family_health_authored > '{eighteen_month_ago}') or
                (questionnaire_on_medical_history_authored is not null and
                questionnaire_on_medical_history_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_nov_authored is not null
                and questionnaire_on_cope_nov_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_july_authored is not null
                and questionnaire_on_cope_july_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_june_authored is not null
                and questionnaire_on_cope_june_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_dec_authored is not null
                and questionnaire_on_cope_dec_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_may_authored is not null
                and questionnaire_on_cope_may_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_feb_authored is not null
                and questionnaire_on_cope_feb_authored > '{eighteen_month_ago}') or
                (consent_cohort = 1 and consent_for_study_enrollment_authored !=
                participant_summary.consent_for_study_enrollment_first_yes_authored and
                consent_for_study_enrollment_authored > '{eighteen_month_ago}') or
                (consent_cohort = 1 and consent_for_genomics_ror_authored is not null and
                consent_for_genomics_ror_authored > '{eighteen_month_ago}') or
                (consent_cohort = 2 and consent_for_genomics_ror_authored is not null and
                consent_for_genomics_ror_authored > '{eighteen_month_ago}')
            )
            AND ehr_update_time is not null and ehr_update_time>'{eighteen_month_ago}'
          THEN 3
          WHEN
            consent_for_study_enrollment = 1
            AND (consent_for_electronic_health_records = 1 OR consent_for_dv_electronic_health_records_sharing = 1)
            AND questionnaire_on_the_basics = 1
            AND questionnaire_on_overall_health = 1
            AND questionnaire_on_lifestyle = 1
            AND withdrawal_status = 1
            AND suspension_status = 1
            AND samples_to_isolate_dna = 1
            AND (
                (questionnaire_on_healthcare_access_authored is not null and
                questionnaire_on_healthcare_access_authored > '{eighteen_month_ago}') or
                (questionnaire_on_family_health_authored is not null and
                questionnaire_on_family_health_authored > '{eighteen_month_ago}') or
                (questionnaire_on_medical_history_authored is not null and
                questionnaire_on_medical_history_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_nov_authored is not null
                and questionnaire_on_cope_nov_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_july_authored is not null
                and questionnaire_on_cope_july_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_june_authored is not null
                and questionnaire_on_cope_june_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_dec_authored is not null
                and questionnaire_on_cope_dec_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_may_authored is not null
                and questionnaire_on_cope_may_authored > '{eighteen_month_ago}') or
                (questionnaire_on_cope_feb_authored is not null
                and questionnaire_on_cope_feb_authored > '{eighteen_month_ago}') or
                (consent_cohort = 1 and consent_for_study_enrollment_authored !=
                participant_summary.consent_for_study_enrollment_first_yes_authored and
                consent_for_study_enrollment_authored > '{eighteen_month_ago}') or
                (consent_cohort = 1 and consent_for_genomics_ror_authored is not null and
                consent_for_genomics_ror_authored > '{eighteen_month_ago}') or
                (consent_cohort = 2 and consent_for_genomics_ror_authored is not null and
                consent_for_genomics_ror_authored > '{eighteen_month_ago}')
            )
          THEN 1
          WHEN
            consent_for_study_enrollment = 1
            AND (consent_for_electronic_health_records = 1 OR consent_for_dv_electronic_health_records_sharing = 1)
            AND questionnaire_on_the_basics = 1
            AND questionnaire_on_overall_health = 1
            AND questionnaire_on_lifestyle = 1
            AND withdrawal_status = 1
            AND suspension_status = 1
            AND samples_to_isolate_dna = 1
          THEN 2
          ELSE 0
          END
        WHERE 1=1
    """.format(eighteen_month_ago=eighteen_month_ago_str)
    dao = ParticipantSummaryDao()
    with dao.session() as session:
        session.execute(update_sql)
def _parse_field(parser_func, field_str):
return parser_func(field_str) if field_str not in ('', 'NULL') else None
def _create_retention_eligible_metrics_obj_from_row(row, upload_date):
    """Build one RetentionEligibleMetrics record from a parsed CSV row.

    :param row: dict keyed by the CSV headers in RetentionEligibleMetricCsvColumns
    :param upload_date: date the source file was uploaded to the bucket
    :return: an unsaved RetentionEligibleMetrics model instance
    """
    cols = RetentionEligibleMetricCsvColumns
    is_eligible = _parse_field(int, row[cols.RETENTION_ELIGIBLE])
    eligible_time = _parse_field(parse, row[cols.RETENTION_ELIGIBLE_TIME])
    last_activity_time = _parse_field(parse, row[cols.LAST_ACTIVE_RETENTION_ACTIVITY_TIME])
    active = _parse_field(int, row[cols.ACTIVELY_RETAINED])
    passive = _parse_field(int, row[cols.PASSIVELY_RETAINED])

    # Collapse the two CSV flags into a single enum value.
    if active and passive:
        retention_type = RetentionType.ACTIVE_AND_PASSIVE
    elif active:
        retention_type = RetentionType.ACTIVE
    elif passive:
        retention_type = RetentionType.PASSIVE
    else:
        retention_type = RetentionType.UNSET

    return RetentionEligibleMetrics(
        participantId=row[cols.PARTICIPANT_ID],
        retentionEligible=is_eligible,
        retentionEligibleTime=eligible_time,
        lastActiveRetentionActivityTime=last_activity_time,
        activelyRetained=active,
        passivelyRetained=passive,
        fileUploadDate=upload_date,
        retentionEligibleStatus=RetentionStatus.ELIGIBLE if is_eligible else RetentionStatus.NOT_ELIGIBLE,
        retentionType=retention_type,
    )
class RetentionEligibleMetricCsvColumns(object):
    """Column headers expected in the PTSC retention-eligible metrics CSV."""
    PARTICIPANT_ID = "participant_id"
    RETENTION_ELIGIBLE = "retention_eligible"
    # Note: the *_TIME constants map to "..._date" headers in the file.
    RETENTION_ELIGIBLE_TIME = "retention_eligible_date"
    LAST_ACTIVE_RETENTION_ACTIVITY_TIME = "last_active_retention_activity_date"
    ACTIVELY_RETAINED = "actively_retained"
    PASSIVELY_RETAINED = "passively_retained"
    # Complete header set (used for header validation by the file-based importer).
    ALL = (PARTICIPANT_ID, RETENTION_ELIGIBLE, RETENTION_ELIGIBLE_TIME, LAST_ACTIVE_RETENTION_ACTIVITY_TIME,
           ACTIVELY_RETAINED, PASSIVELY_RETAINED)
class DataError(RuntimeError):
    """Raised for bad or unexpected source data.

    :param msg: human-readable description of the problem
    :param external: presumably flags data that originated outside RDR
        (e.g. a partner file upload) -- confirm with callers
    """

    def __init__(self, msg, external=False):
        # Modern zero-argument super(); the file already uses 3.6+ features.
        super().__init__(msg)
        self.external = external
| bsd-3-clause | 2f4fca9e3d2348253c42471391aceb6b | 49.219355 | 123 | 0.619604 | 3.622993 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/client.py | 1 | 8525 | # -*- coding: utf-8 -*-
import logging
from .server import FHIRNotFoundException, FHIRServer, FHIRUnauthorizedException
__version__ = '3.0.0'
__author__ = 'SMART Platforms Team'
__license__ = 'APACHE2'
__copyright__ = "Copyright 2017 Boston Children's Hospital"
scope_default = 'user/*.* patient/*.read openid profile'
scope_haslaunch = 'launch'
scope_patientlaunch = 'launch/patient'
logger = logging.getLogger(__name__)
class FHIRClient(object):
    """ Instances of this class handle authorizing and talking to SMART on FHIR
    servers.

    The settings dictionary supports:

    - `app_id`*: Your app/client-id, e.g. 'my_web_app'
    - `app_secret`*: Your app/client-secret
    - `api_base`*: The FHIR service to connect to, e.g. 'https://fhir-api-dstu2.smarthealthit.org'
    - `redirect_uri`: The callback/redirect URL for your app, e.g. 'http://localhost:8000/fhir-app/' when testing locally
    - `patient_id`: The patient id against which to operate, if already known
    - `scope`: Space-separated list of scopes to request, if other than default
    - `launch_token`: The launch token
    """

    def __init__(self, settings=None, state=None, save_func=lambda x: x):
        """ Create a client from either a `settings` dict or a saved `state`.

        :param dict settings: Must contain 'app_id' and 'api_base'; may contain
            'app_secret', 'redirect_uri', 'patient_id', 'scope', 'launch_token'
        :param dict state: A state dict previously produced by `self.state`
        :param save_func: Callable invoked with the client state whenever it
            should be persisted; must not be None
        :raises Exception: when neither settings nor state is supplied, when
            required settings keys are missing, or when save_func is None
        """
        # The app-id/secret for the app this client is used in.
        self.app_id = None
        self.app_secret = None
        self.server = None
        self.scope = scope_default
        # The redirect-uri that will be used to redirect after authorization.
        self.redirect = None
        # The token/id provided at launch, if any.
        self.launch_token = None
        # Context parameters supplied by the server during launch.
        self.launch_context = None
        # If true and launched without patient, will add the correct scope
        # to indicate that the server should prompt for a patient after login.
        self.wants_patient = True
        self.patient_id = None
        self._patient = None
        if save_func is None:
            raise Exception("Must supply a save_func when initializing the SMART client")
        self._save_func = save_func

        # init from state
        if state is not None:
            self.from_state(state)
        # init from settings dict
        elif settings is not None:
            if 'app_id' not in settings:
                raise Exception("Must provide 'app_id' in settings dictionary")
            if 'api_base' not in settings:
                raise Exception("Must provide 'api_base' in settings dictionary")
            self.app_id = settings['app_id']
            self.app_secret = settings.get('app_secret')
            self.redirect = settings.get('redirect_uri')
            self.patient_id = settings.get('patient_id')
            self.scope = settings.get('scope', self.scope)
            self.launch_token = settings.get('launch_token')
            self.server = FHIRServer(self, base_uri=settings['api_base'])
        else:
            raise Exception("Must either supply settings or a state upon client initialization")

    # MARK: Authorization

    @property
    def desired_scope(self):
        """ Ensures `self.scope` is completed with launch scopes, according to
        current client settings.
        """
        scope = self.scope
        if self.launch_token is not None:
            scope = ' '.join([scope_haslaunch, scope])
        elif self.patient_id is None and self.wants_patient:
            scope = ' '.join([scope_patientlaunch, scope])
        return scope

    @property
    def ready(self):
        """ Returns True if the client is ready to make API calls (e.g. there
        is an access token or this is an open server).

        :returns: True if the server can make authenticated calls
        """
        return self.server.ready if self.server is not None else False

    def prepare(self):
        """ Returns True if the client is ready to make API calls (e.g. there
        is an access token or this is an open server). In contrast to the
        `ready` property, this method will fetch the server's capability
        statement if it hasn't yet been fetched.

        :returns: True if the server can make authenticated calls
        """
        if self.server:
            if self.server.ready:
                return True
            return self.server.prepare()
        return False

    @property
    def authorize_url(self):
        """ The URL to use to receive an authorization token.
        """
        return self.server.authorize_uri if self.server is not None else None

    def handle_callback(self, url):
        """ You can call this to have the client automatically handle the
        auth callback after the user has logged in.

        :param str url: The complete callback URL
        """
        ctx = self.server.handle_callback(url) if self.server is not None else None
        self._handle_launch_context(ctx)

    def reauthorize(self):
        """ Try to reauthorize with the server.

        :returns: A bool indicating reauthorization success
        """
        ctx = self.server.reauthorize() if self.server is not None else None
        self._handle_launch_context(ctx)
        return self.launch_context is not None

    def _handle_launch_context(self, ctx):
        """ Store launch context received from the server and persist state.

        :param dict ctx: Launch context parameters, or None when no server
            was available (then nothing is extracted and reauthorization
            is reported as failed via `launch_context` staying None)
        """
        logger.debug("SMART: Handling launch context: {0}".format(ctx))
        # Guard: ctx is None when there is no server; the original code would
        # raise TypeError on `'patient' in ctx` in that case.
        if ctx is not None:
            if 'patient' in ctx:
                self.patient_id = ctx['patient']  # TODO: TEST THIS!
            if 'id_token' in ctx:
                logger.warning("SMART: Received an id_token, ignoring")
        self.launch_context = ctx
        self.save_state()

    # MARK: Current Patient

    @property
    def patient(self):
        """ The Patient resource the client operates on, fetched lazily.

        Reads the Patient from the server on first access; reauthorizes once
        on a 401 and clears `patient_id` on a 404.
        """
        if self._patient is None and self.patient_id is not None and self.ready:
            # Bugfix: `from . import models.patient` is a SyntaxError (a dotted
            # name is not allowed after `import` in a from-import); import the
            # submodule explicitly instead.
            from .models import patient as patient_model
            try:
                logger.debug("SMART: Attempting to read Patient {0}".format(self.patient_id))
                self._patient = patient_model.Patient.read(self.patient_id, self.server)
            except FHIRUnauthorizedException:
                if self.reauthorize():
                    logger.debug("SMART: Attempting to read Patient {0} after reauthorizing"
                        .format(self.patient_id))
                    self._patient = patient_model.Patient.read(self.patient_id, self.server)
            except FHIRNotFoundException:
                logger.warning("SMART: Patient with id {0} not found".format(self.patient_id))
                self.patient_id = None
            self.save_state()
        return self._patient

    def human_name(self, human_name_instance):
        """ Formats a `HumanName` instance into a string.

        :param human_name_instance: An element exposing `prefix`, `given`,
            `family` and `suffix` list attributes, or None
        :returns: A display string; 'Unknown' when None was given, 'Unnamed'
            when the name has no parts
        """
        if human_name_instance is None:
            return 'Unknown'

        parts = []
        for n in [human_name_instance.prefix, human_name_instance.given, human_name_instance.family]:
            if n is not None:
                parts.extend(n)
        if len(human_name_instance.suffix) > 0:
            if len(parts) > 0:
                # Join name and suffix with a comma, e.g. "Jane Doe, Jr.".
                parts[len(parts)-1] = parts[len(parts)-1]+','
            parts.extend(human_name_instance.suffix)
        return ' '.join(parts) if len(parts) > 0 else 'Unnamed'

    # MARK: State

    def reset_patient(self):
        """ Forget the current patient and any launch context, then persist. """
        self.launch_token = None
        self.launch_context = None
        self.patient_id = None
        self._patient = None
        self.save_state()

    @property
    def state(self):
        """ A JSON-serializable dict capturing the client's full state. """
        return {
            'app_id': self.app_id,
            'app_secret': self.app_secret,
            'scope': self.scope,
            'redirect': self.redirect,
            'patient_id': self.patient_id,
            'server': self.server.state,
            'launch_token': self.launch_token,
            'launch_context': self.launch_context,
        }

    def from_state(self, state):
        """ Restore client attributes from a previously saved state dict. """
        assert state
        self.app_id = state.get('app_id') or self.app_id
        self.app_secret = state.get('app_secret') or self.app_secret
        self.scope = state.get('scope') or self.scope
        self.redirect = state.get('redirect') or self.redirect
        self.patient_id = state.get('patient_id') or self.patient_id
        self.launch_token = state.get('launch_token') or self.launch_token
        self.launch_context = state.get('launch_context') or self.launch_context
        self.server = FHIRServer(self, state=state.get('server'))

    def save_state(self):
        """ Persist the current state via the configured save function. """
        self._save_func(self.state)
| bsd-3-clause | ce44beec2e9647d2ed5df0793d2a909f | 36.227074 | 125 | 0.606217 | 4.071156 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/supplyrequest.py | 1 | 8678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/SupplyRequest) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
# NOTE: generated FHIR model code -- regenerate rather than hand-edit logic.
class SupplyRequest(domainresource.DomainResource):
    """ Request for a medication, substance or device.
    A record of a request for a medication, substance or device used in the
    healthcare setting.
    """
    resource_type = "SupplyRequest"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.authoredOn = None
        """ When the request was made.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.category = None
        """ The kind of supply (central, non-stock, etc.).
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.deliverFrom = None
        """ The origin of the supply.
        Type `FHIRReference` referencing `Organization, Location` (represented as `dict` in JSON). """
        self.deliverTo = None
        """ The destination of the supply.
        Type `FHIRReference` referencing `Organization, Location, Patient` (represented as `dict` in JSON). """
        self.identifier = None
        """ Unique identifier.
        Type `Identifier` (represented as `dict` in JSON). """
        self.occurrenceDateTime = None
        """ When the request should be fulfilled.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.occurrencePeriod = None
        """ When the request should be fulfilled.
        Type `Period` (represented as `dict` in JSON). """
        self.occurrenceTiming = None
        """ When the request should be fulfilled.
        Type `Timing` (represented as `dict` in JSON). """
        self.orderedItem = None
        """ The item being requested.
        Type `SupplyRequestOrderedItem` (represented as `dict` in JSON). """
        self.priority = None
        """ routine | urgent | asap | stat.
        Type `str`. """
        self.reasonCodeableConcept = None
        """ Why the supply item was requested.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.reasonReference = None
        """ Why the supply item was requested.
        Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
        self.requester = None
        """ Who/what is requesting service.
        Type `SupplyRequestRequester` (represented as `dict` in JSON). """
        self.status = None
        """ draft | active | suspended +.
        Type `str`. """
        self.supplier = None
        """ Who is intended to fulfill the request.
        List of `FHIRReference` items referencing `Organization` (represented as `dict` in JSON). """
        super(SupplyRequest, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(SupplyRequest, self).elementProperties()
        # Tuple layout: (name, json_name, type, is_list, of_many, not_optional);
        # the "occurrence" / "reason" groups are mutually exclusive choice types.
        js.extend([
            ("authoredOn", "authoredOn", fhirdate.FHIRDate, False, None, False),
            ("category", "category", codeableconcept.CodeableConcept, False, None, False),
            ("deliverFrom", "deliverFrom", fhirreference.FHIRReference, False, None, False),
            ("deliverTo", "deliverTo", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("occurrenceDateTime", "occurrenceDateTime", fhirdate.FHIRDate, False, "occurrence", False),
            ("occurrencePeriod", "occurrencePeriod", period.Period, False, "occurrence", False),
            ("occurrenceTiming", "occurrenceTiming", timing.Timing, False, "occurrence", False),
            ("orderedItem", "orderedItem", SupplyRequestOrderedItem, False, None, False),
            ("priority", "priority", str, False, None, False),
            ("reasonCodeableConcept", "reasonCodeableConcept", codeableconcept.CodeableConcept, False, "reason", False),
            ("reasonReference", "reasonReference", fhirreference.FHIRReference, False, "reason", False),
            ("requester", "requester", SupplyRequestRequester, False, None, False),
            ("status", "status", str, False, None, False),
            ("supplier", "supplier", fhirreference.FHIRReference, True, None, False),
        ])
        return js
from . import backboneelement
class SupplyRequestOrderedItem(backboneelement.BackboneElement):
    """ The item being requested.
    """
    resource_type = "SupplyRequestOrderedItem"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.itemCodeableConcept = None
        """ Medication, Substance, or Device requested to be supplied.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.itemReference = None
        """ Medication, Substance, or Device requested to be supplied.
        Type `FHIRReference` referencing `Medication, Substance, Device` (represented as `dict` in JSON). """
        self.quantity = None
        """ The requested amount of the item indicated.
        Type `Quantity` (represented as `dict` in JSON). """
        super(SupplyRequestOrderedItem, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(SupplyRequestOrderedItem, self).elementProperties()
        # Tuple layout: (name, json_name, type, is_list, of_many, not_optional);
        # "item" is a choice type and quantity is required.
        js.extend([
            ("itemCodeableConcept", "itemCodeableConcept", codeableconcept.CodeableConcept, False, "item", False),
            ("itemReference", "itemReference", fhirreference.FHIRReference, False, "item", False),
            ("quantity", "quantity", quantity.Quantity, False, None, True),
        ])
        return js
class SupplyRequestRequester(backboneelement.BackboneElement):
    """ Who/what is requesting service.
    The individual who initiated the request and has responsibility for its
    activation.
    """
    resource_type = "SupplyRequestRequester"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.agent = None
        """ Individual making the request.
        Type `FHIRReference` referencing `Practitioner, Organization, Patient, RelatedPerson, Device` (represented as `dict` in JSON). """
        self.onBehalfOf = None
        """ Organization agent is acting for.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
        super(SupplyRequestRequester, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(SupplyRequestRequester, self).elementProperties()
        # Tuple layout: (name, json_name, type, is_list, of_many, not_optional);
        # agent is required.
        js.extend([
            ("agent", "agent", fhirreference.FHIRReference, False, None, True),
            ("onBehalfOf", "onBehalfOf", fhirreference.FHIRReference, False, None, False),
        ])
        return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
| bsd-3-clause | 466071558fbed1109340af7b2018fdda | 39.362791 | 138 | 0.630445 | 4.341171 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/484c5d15ac06_bigquery_sync.py | 1 | 2419 | """bigquery sync
Revision ID: 484c5d15ac06
Revises: b662c5bb00cc
Create Date: 2019-05-31 16:22:55.821536
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "484c5d15ac06"
down_revision = "b662c5bb00cc"
branch_labels = None  # no named branch for this revision
depends_on = None  # no cross-branch dependency
def upgrade(engine_name):
    """Dispatch to this module's ``upgrade_<engine_name>`` function."""
    migration = globals()["upgrade_%s" % engine_name]
    migration()
def downgrade(engine_name):
    """Dispatch to this module's ``downgrade_<engine_name>`` function."""
    migration = globals()["downgrade_%s" % engine_name]
    migration()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # New table tracking per-participant resources synced to BigQuery.
    op.create_table(
        "bigquery_sync",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        # DATETIME(6) keeps microsecond precision on MySQL.
        sa.Column("created", mysql.DATETIME(fsp=6), nullable=True),
        sa.Column("modified", mysql.DATETIME(fsp=6), nullable=True),
        sa.Column("dataset", sa.String(length=80), nullable=False),
        sa.Column("table", sa.String(length=80), nullable=False),
        sa.Column("participant_id", sa.Integer(), nullable=False),
        sa.Column("resource", mysql.JSON(), nullable=False),
        sa.ForeignKeyConstraint(["participant_id"], ["participant.participant_id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_bigquery_sync_created"), "bigquery_sync", ["created"], unique=False)
    op.create_index(op.f("ix_bigquery_sync_modified"), "bigquery_sync", ["modified"], unique=False)
    # Composite lookup index; explicitly named "ix_participant_ds_table".
    op.create_index("ix_participant_ds_table", "bigquery_sync", ["participant_id", "dataset", "table"], unique=False)
    # Widen patient_status timestamps to microsecond precision as well.
    op.execute("ALTER TABLE patient_status CHANGE COLUMN `created` `created` DATETIME(6) NULL;")
    op.execute("ALTER TABLE patient_status CHANGE COLUMN `modified` `modified` DATETIME(6) NULL")
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Bugfix: the composite index is created as "ix_participant_ds_table" in
    # upgrade_rdr; dropping the non-existent "ix_ds_table" would make the
    # downgrade fail on MySQL with "Can't DROP ...; check that it exists".
    op.drop_index("ix_participant_ds_table", table_name="bigquery_sync")
    op.drop_index(op.f("ix_bigquery_sync_modified"), table_name="bigquery_sync")
    op.drop_index(op.f("ix_bigquery_sync_created"), table_name="bigquery_sync")
    op.drop_table("bigquery_sync")
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
| bsd-3-clause | a6ded14920e1fc78382832e18c8f6e80 | 34.573529 | 117 | 0.668458 | 3.521106 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/person.py | 1 | 5297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Person) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
# NOTE: generated FHIR model code -- regenerate rather than hand-edit logic.
class Person(domainresource.DomainResource):
    """ A generic person record.
    Demographics and administrative information about a person independent of a
    specific health-related context.
    """
    resource_type = "Person"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.active = None
        """ This person's record is in active use.
        Type `bool`. """
        self.address = None
        """ One or more addresses for the person.
        List of `Address` items (represented as `dict` in JSON). """
        self.birthDate = None
        """ The date on which the person was born.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.gender = None
        """ male | female | other | unknown.
        Type `str`. """
        self.identifier = None
        """ A human identifier for this person.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.link = None
        """ Link to a resource that concerns the same actual person.
        List of `PersonLink` items (represented as `dict` in JSON). """
        self.managingOrganization = None
        """ The organization that is the custodian of the person record.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.name = None
        """ A name associated with the person.
        List of `HumanName` items (represented as `dict` in JSON). """
        self.photo = None
        """ Image of the person.
        Type `Attachment` (represented as `dict` in JSON). """
        self.telecom = None
        """ A contact detail for the person.
        List of `ContactPoint` items (represented as `dict` in JSON). """
        super(Person, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(Person, self).elementProperties()
        # Tuple layout: (name, json_name, type, is_list, of_many, not_optional)
        js.extend([
            ("active", "active", bool, False, None, False),
            ("address", "address", address.Address, True, None, False),
            ("birthDate", "birthDate", fhirdate.FHIRDate, False, None, False),
            ("gender", "gender", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("link", "link", PersonLink, True, None, False),
            ("managingOrganization", "managingOrganization", fhirreference.FHIRReference, False, None, False),
            ("name", "name", humanname.HumanName, True, None, False),
            ("photo", "photo", attachment.Attachment, False, None, False),
            ("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
        ])
        return js
from . import backboneelement
class PersonLink(backboneelement.BackboneElement):
    """ Link to a resource that concerns the same actual person.
    """

    resource_type = "PersonLink"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Assurance of the link: level1 | level2 | level3 | level4 (str).
        self.assurance = None

        # The resource this person record is associated with
        # (FHIRReference, represented as a dict in JSON).
        self.target = None

        super(PersonLink, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(PersonLink, self).elementProperties()
        properties += [
            ("assurance", "assurance", str, False, None, False),
            ("target", "target", fhirreference.FHIRReference, False, None, True),
        ]
        return properties
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
| bsd-3-clause | 8466b24cecb148e9f4f21a2753c877ba | 34.550336 | 110 | 0.608646 | 4.275222 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/tools/tool_libs/biobank_orders.py | 1 | 2552 | from datetime import datetime
from typing import Optional
import argparse
from sqlalchemy.orm import Session
from rdr_service.dao.mail_kit_order_dao import MailKitOrderDao
from rdr_service.model.biobank_mail_kit_order import BiobankMailKitOrder
from rdr_service.model.participant import Participant
from rdr_service.services.biobank_order import BiobankOrderService
from rdr_service.tools.tool_libs.tool_base import cli_run, ToolBase, logger
# Command name and description registered with the RDR tool runner (consumed
# by cli_run in run() at the bottom of this module).
tool_cmd = 'biobank_orders'
tool_desc = 'Utility script for managing biobank orders'
class BiobankOrdersTool(ToolBase):
    """Command-line tool for managing biobank orders.

    Currently supports one sub-command, ``send-to-mayolink``, which looks up a
    mail-kit order by barcode and posts it to MayoLINK.
    """

    def run(self):
        """Dispatch to the handler for the sub-command parsed from the CLI."""
        super(BiobankOrdersTool, self).run()

        if self.args.command == 'send-to-mayolink':
            self.upload_order_to_mayolink(barcode=self.args.barcode)

    def _load_mail_kit(self, barcode: str, session: Session) -> Optional[BiobankMailKitOrder]:
        """Return the single mail-kit order matching *barcode*, or None.

        Logs an error and returns None when no order — or more than one
        order — matches the barcode.
        """
        mail_kit_orders = MailKitOrderDao.get_with_barcode(barcode=barcode, session=session)
        # Lazy %-style arguments are the logging-module convention: the message
        # is only formatted if the record is actually emitted.
        if not mail_kit_orders:
            logger.error('Unable to find order with barcode "%s"', barcode)
            return None
        elif len(mail_kit_orders) > 1:
            logger.error('Found too many orders with barcode "%s"', barcode)
            return None
        else:
            return mail_kit_orders[0]

    def upload_order_to_mayolink(self, barcode):
        """Look up the mail-kit order for *barcode* and post it to MayoLINK."""
        with self.get_session() as session:
            mail_kit_order = self._load_mail_kit(barcode=barcode, session=session)
            if mail_kit_order is None:
                logger.error('Unable to send order')
                return

            # The delivery call needs the client system the participant
            # originated from.
            participant_origin = session.query(Participant.participantOrigin).filter(
                Participant.participantId == mail_kit_order.participantId
            ).scalar()

            logger.info('Posting order %s to MayoLINK...', mail_kit_order.order_id)
            BiobankOrderService.post_mailkit_order_delivery(
                mailkit_order=mail_kit_order,
                collected_time_utc=datetime.utcnow(),
                order_origin_client_id=participant_origin,
                report_notes=mail_kit_order.orderType
            )
            logger.info('Order sent to MayoLINK.')
def add_additional_arguments(parser: argparse.ArgumentParser):
    """Register this tool's sub-commands and their options on *parser*."""
    command_parsers = parser.add_subparsers(dest='command', required=True)

    send_parser = command_parsers.add_parser('send-to-mayolink')
    send_parser.add_argument(
        '--barcode',
        help='The barcode for the order to push to MayoLINK'
    )
def run():
    # Console entry point: hands the tool's command name, description,
    # implementation class and extra CLI arguments to the shared runner.
    return cli_run(tool_cmd, tool_desc, BiobankOrdersTool, add_additional_arguments)
| bsd-3-clause | 8250fffbbd75a51d5d17623a0569a97e | 37.089552 | 94 | 0.681426 | 3.725547 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/3da004006210_add_new_biosample_codes.py | 1 | 2734 | """add new biosample codes
Revision ID: 3da004006210
Revises: c2dd2332a63f
Create Date: 2018-03-01 09:20:45.647001
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus, SampleStatus
# revision identifiers, used by Alembic.
revision = "3da004006210"
down_revision = "c2dd2332a63f"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Run the engine-specific upgrade routine for *engine_name*.

    Looks up ``upgrade_<engine_name>`` in this module's globals; raises
    KeyError when no such routine is defined.
    """
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Run the engine-specific downgrade routine for *engine_name*.

    Looks up ``downgrade_<engine_name>`` in this module's globals; raises
    KeyError when no such routine is defined.
    """
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_rdr():
    """Add status columns for the new 2PST8/2SST8 biosample codes.

    For each code the participant_summary table gains an order-status enum
    column, a sample-status enum column, and a UTC timestamp companion for
    each of them. All columns are nullable.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # (name, column type) pairs, in the same order the generated code added them.
    new_columns = [
        ("sample_order_status_2pst8", model.utils.Enum(OrderStatus)),
        ("sample_order_status_2pst8_time", model.utils.UTCDateTime()),
        ("sample_order_status_2sst8", model.utils.Enum(OrderStatus)),
        ("sample_order_status_2sst8_time", model.utils.UTCDateTime()),
        ("sample_status_2pst8", model.utils.Enum(SampleStatus)),
        ("sample_status_2pst8_time", model.utils.UTCDateTime()),
        ("sample_status_2sst8", model.utils.Enum(SampleStatus)),
        ("sample_status_2sst8_time", model.utils.UTCDateTime()),
    ]
    for column_name, column_type in new_columns:
        op.add_column("participant_summary", sa.Column(column_name, column_type, nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the 2PST8/2SST8 status columns added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in the same order the generated code used (reverse of creation).
    for column_name in (
        "sample_status_2sst8_time",
        "sample_status_2sst8",
        "sample_status_2pst8_time",
        "sample_status_2pst8",
        "sample_order_status_2sst8_time",
        "sample_order_status_2sst8",
        "sample_order_status_2pst8_time",
        "sample_order_status_2pst8",
    ):
        op.drop_column("participant_summary", column_name)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes are needed for this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert for this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 2c575c3f122e240e3954a735077abc3c | 33.175 | 116 | 0.686906 | 3.338217 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/nutritionorder.py | 1 | 18114 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/NutritionOrder) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class NutritionOrder(domainresource.DomainResource):
    """ Diet, formula or nutritional supplement request.

    A request to supply a diet, formula feeding (enteral) or oral nutritional
    supplement to a patient/resident.
    """

    resource_type = "NutritionOrder"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # The patient's food and nutrition-related allergies and intolerances
        # (list of FHIRReference to AllergyIntolerance, dicts in JSON).
        self.allergyIntolerance = None
        # Date and time the nutrition order was requested (FHIRDate, str in JSON).
        self.dateTime = None
        # The encounter associated with this nutrition order
        # (FHIRReference to Encounter).
        self.encounter = None
        # Enteral formula components (NutritionOrderEnteralFormula).
        self.enteralFormula = None
        # Order-specific modifiers about food that should NOT be given
        # (list of CodeableConcept).
        self.excludeFoodModifier = None
        # Order-specific modifiers about food that should be given
        # (list of CodeableConcept).
        self.foodPreferenceModifier = None
        # Identifiers assigned to this order (list of Identifier).
        self.identifier = None
        # Oral diet components (NutritionOrderOralDiet).
        self.oralDiet = None
        # Who ordered the diet, formula or nutritional supplement
        # (FHIRReference to Practitioner).
        self.orderer = None
        # The person who requires the diet, formula or nutritional supplement
        # (FHIRReference to Patient).
        self.patient = None
        # Workflow status: proposed | draft | planned | requested | active |
        # on-hold | completed | cancelled | entered-in-error (str).
        self.status = None
        # Supplement components (list of NutritionOrderSupplement).
        self.supplement = None

        super(NutritionOrder, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this resource's element property tuples, extending the
        superclass's."""
        properties = super(NutritionOrder, self).elementProperties()
        properties += [
            ("allergyIntolerance", "allergyIntolerance", fhirreference.FHIRReference, True, None, False),
            ("dateTime", "dateTime", fhirdate.FHIRDate, False, None, True),
            ("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
            ("enteralFormula", "enteralFormula", NutritionOrderEnteralFormula, False, None, False),
            ("excludeFoodModifier", "excludeFoodModifier", codeableconcept.CodeableConcept, True, None, False),
            ("foodPreferenceModifier", "foodPreferenceModifier", codeableconcept.CodeableConcept, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("oralDiet", "oralDiet", NutritionOrderOralDiet, False, None, False),
            ("orderer", "orderer", fhirreference.FHIRReference, False, None, False),
            ("patient", "patient", fhirreference.FHIRReference, False, None, True),
            ("status", "status", str, False, None, False),
            ("supplement", "supplement", NutritionOrderSupplement, True, None, False),
        ]
        return properties
from . import backboneelement
class NutritionOrderEnteralFormula(backboneelement.BackboneElement):
    """ Enteral formula components.

    Feeding provided through the gastrointestinal tract via a tube, catheter,
    or stoma that delivers nutrition distal to the oral cavity.
    """

    resource_type = "NutritionOrderEnteralFormula"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Product or brand name of the modular additive (str).
        self.additiveProductName = None
        # Type of modular component to add to the feeding (CodeableConcept).
        self.additiveType = None
        # Formula feeding instruction as structured data
        # (list of NutritionOrderEnteralFormulaAdministration).
        self.administration = None
        # Formula feeding instructions expressed as text (str).
        self.administrationInstruction = None
        # Product or brand name of the enteral or infant formula (str).
        self.baseFormulaProductName = None
        # Type of enteral or infant formula (CodeableConcept).
        self.baseFormulaType = None
        # Amount of energy per specified volume that is required (Quantity).
        self.caloricDensity = None
        # Upper limit on formula volume per unit of time (Quantity).
        self.maxVolumeToDeliver = None
        # How the formula should enter the patient's gastrointestinal tract
        # (CodeableConcept).
        self.routeofAdministration = None

        super(NutritionOrderEnteralFormula, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(NutritionOrderEnteralFormula, self).elementProperties()
        properties += [
            ("additiveProductName", "additiveProductName", str, False, None, False),
            ("additiveType", "additiveType", codeableconcept.CodeableConcept, False, None, False),
            ("administration", "administration", NutritionOrderEnteralFormulaAdministration, True, None, False),
            ("administrationInstruction", "administrationInstruction", str, False, None, False),
            ("baseFormulaProductName", "baseFormulaProductName", str, False, None, False),
            ("baseFormulaType", "baseFormulaType", codeableconcept.CodeableConcept, False, None, False),
            ("caloricDensity", "caloricDensity", quantity.Quantity, False, None, False),
            ("maxVolumeToDeliver", "maxVolumeToDeliver", quantity.Quantity, False, None, False),
            ("routeofAdministration", "routeofAdministration", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
class NutritionOrderEnteralFormulaAdministration(backboneelement.BackboneElement):
    """ Formula feeding instruction as structured data.

    Formula administration instructions as structured data. This repeating
    structure allows for changing the administration rate or volume over time
    for both bolus and continuous feeding. An example of this would be an
    instruction to increase the rate of continuous feeding every 2 hours.
    """

    resource_type = "NutritionOrderEnteralFormulaAdministration"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # The volume of formula to provide (Quantity).
        self.quantity = None
        # Speed with which the formula is provided per period of time, as a
        # Quantity; part of the "rate" choice group together with rateRatio.
        self.rateQuantity = None
        # Speed with which the formula is provided per period of time, as a
        # Ratio; part of the "rate" choice group together with rateQuantity.
        self.rateRatio = None
        # Scheduled frequency of enteral feeding (Timing).
        self.schedule = None

        super(NutritionOrderEnteralFormulaAdministration, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(NutritionOrderEnteralFormulaAdministration, self).elementProperties()
        properties += [
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("rateQuantity", "rateQuantity", quantity.Quantity, False, "rate", False),
            ("rateRatio", "rateRatio", ratio.Ratio, False, "rate", False),
            ("schedule", "schedule", timing.Timing, False, None, False),
        ]
        return properties
class NutritionOrderOralDiet(backboneelement.BackboneElement):
    """ Oral diet components.

    Diet given orally in contrast to enteral (tube) feeding.
    """

    resource_type = "NutritionOrderOralDiet"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # The required consistency of fluids and liquids provided to the
        # patient (list of CodeableConcept).
        self.fluidConsistencyType = None
        # Instructions or additional information about the oral diet (str).
        self.instruction = None
        # Required nutrient modifications
        # (list of NutritionOrderOralDietNutrient).
        self.nutrient = None
        # Scheduled frequency of diet (list of Timing).
        self.schedule = None
        # Required texture modifications
        # (list of NutritionOrderOralDietTexture).
        self.texture = None
        # Type of oral diet or diet restrictions that describe what can be
        # consumed orally (list of CodeableConcept).
        self.type = None

        super(NutritionOrderOralDiet, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(NutritionOrderOralDiet, self).elementProperties()
        properties += [
            ("fluidConsistencyType", "fluidConsistencyType", codeableconcept.CodeableConcept, True, None, False),
            ("instruction", "instruction", str, False, None, False),
            ("nutrient", "nutrient", NutritionOrderOralDietNutrient, True, None, False),
            ("schedule", "schedule", timing.Timing, True, None, False),
            ("texture", "texture", NutritionOrderOralDietTexture, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, True, None, False),
        ]
        return properties
class NutritionOrderOralDietNutrient(backboneelement.BackboneElement):
    """ Required nutrient modifications.

    Class that defines the quantity and type of nutrient modifications (for
    example carbohydrate, fiber or sodium) required for the oral diet.
    """

    resource_type = "NutritionOrderOralDietNutrient"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Quantity of the specified nutrient (Quantity).
        self.amount = None
        # Type of nutrient that is being modified (CodeableConcept).
        self.modifier = None

        super(NutritionOrderOralDietNutrient, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(NutritionOrderOralDietNutrient, self).elementProperties()
        properties += [
            ("amount", "amount", quantity.Quantity, False, None, False),
            ("modifier", "modifier", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
class NutritionOrderOralDietTexture(backboneelement.BackboneElement):
    """ Required texture modifications.

    Class that describes any texture modifications required for the patient to
    safely consume various types of solid foods.
    """

    resource_type = "NutritionOrderOralDietTexture"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Concepts that are used to identify an entity that is ingested for
        # nutritional purposes (CodeableConcept).
        self.foodType = None
        # How to alter the texture of the foods, e.g. pureed (CodeableConcept).
        self.modifier = None

        super(NutritionOrderOralDietTexture, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(NutritionOrderOralDietTexture, self).elementProperties()
        properties += [
            ("foodType", "foodType", codeableconcept.CodeableConcept, False, None, False),
            ("modifier", "modifier", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
class NutritionOrderSupplement(backboneelement.BackboneElement):
    """ Supplement components.

    Oral nutritional products given in order to add further nutritional value
    to the patient's diet.
    """

    resource_type = "NutritionOrderSupplement"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Instructions or additional information about the oral supplement (str).
        self.instruction = None
        # Product or brand name of the nutritional supplement (str).
        self.productName = None
        # Amount of the nutritional supplement (Quantity).
        self.quantity = None
        # Scheduled frequency of supplement (list of Timing).
        self.schedule = None
        # Type of supplement product requested (CodeableConcept).
        self.type = None

        super(NutritionOrderSupplement, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return this element's property tuples, extending the superclass's."""
        properties = super(NutritionOrderSupplement, self).elementProperties()
        properties += [
            ("instruction", "instruction", str, False, None, False),
            ("productName", "productName", str, False, None, False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("schedule", "schedule", timing.Timing, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
| bsd-3-clause | 754b3c6f5162e308a2068bd0fb2b4f21 | 41.223776 | 117 | 0.640333 | 4.271162 | false | false | false | false |
django/django-localflavor | tests/test_se.py | 4 | 7806 | import datetime
from django.test import SimpleTestCase
from localflavor.se.forms import (SECountySelect, SEOrganisationNumberField, SEPersonalIdentityNumberField,
SEPostalCodeField)
class SELocalFlavorTests(SimpleTestCase):
    """Tests for the Swedish (SE) localflavor form fields and widgets."""

    def setUp(self):
        """Pin datetime.date.today() to 2008-05-14 so the age/century logic in
        localflavor.se.utils.validate_id_birthday is deterministic."""
        # Mocking datetime.date to make sure
        # localflavor.se.utils.validate_id_birthday works

        class MockDate(datetime.date):
            @classmethod
            def today(cls):
                return datetime.date(2008, 5, 14)

        self._olddate = datetime.date
        datetime.date = MockDate

    def tearDown(self):
        """Restore the real datetime.date patched out in setUp()."""
        datetime.date = self._olddate

    def test_SECountySelect(self):
        """The county widget renders every county and marks 'E' selected."""
        f = SECountySelect()
        out = '''<select name="swedish_county">
<option value="AB">Stockholm</option>
<option value="AC">V\xe4sterbotten</option>
<option value="BD">Norrbotten</option>
<option value="C">Uppsala</option>
<option value="D">S\xf6dermanland</option>
<option value="E" selected="selected">\xd6sterg\xf6tland</option>
<option value="F">J\xf6nk\xf6ping</option>
<option value="G">Kronoberg</option>
<option value="H">Kalmar</option>
<option value="I">Gotland</option>
<option value="K">Blekinge</option>
<option value="M">Sk\xe5ne</option>
<option value="N">Halland</option>
<option value="O">V\xe4stra G\xf6taland</option>
<option value="S">V\xe4rmland</option>
<option value="T">\xd6rebro</option>
<option value="U">V\xe4stmanland</option>
<option value="W">Dalarna</option>
<option value="X">G\xe4vleborg</option>
<option value="Y">V\xe4sternorrland</option>
<option value="Z">J\xe4mtland</option>
</select>'''
        self.assertHTMLEqual(f.render('swedish_county', 'E'), out)

    def test_SEOrganizationNumberField(self):
        """Valid organisation numbers normalize; personal-id-style and
        checksum-broken inputs are rejected."""
        error_invalid = ['Enter a valid Swedish organisation number.']
        valid = {
            '870512-1989': '198705121989',
            '19870512-1989': '198705121989',
            '870512-2128': '198705122128',
            '081015-6315': '190810156315',
            '081015+6315': '180810156315',
            '0810156315': '190810156315',
            # Test some different organisation numbers
            # IKEA Linköping
            '556074-7569': '5560747569',
            # Volvo Personvagnar
            '556074-3089': '5560743089',
            # LJS (organisation)
            '822001-5476': '8220015476',
            # LJS (organisation)
            '8220015476': '8220015476',
            # Katedralskolan Linköping (school)
            '2120000449': '2120000449',
            # Faux organisation number, which tests that the checksum can be 0
            '232518-5060': '2325185060',
        }
        invalid = {
            # Ordinary personal identity numbers for sole proprietors
            # The same rules as for SEPersonalIdentityField applies here
            '081015 6315': error_invalid,
            '950231-4496': error_invalid,
            '6914104499': error_invalid,
            '950d314496': error_invalid,
            'invalid!!!': error_invalid,
            '870514-1111': error_invalid,
            # Co-ordination number checking
            # Co-ordination numbers are not valid organisation numbers
            '870574-1315': error_invalid,
            '870573-1311': error_invalid,
            # Volvo Personvagnar, bad format
            '556074+3089': error_invalid,
            # Invalid checksum
            '2120000441': error_invalid,
            # Valid checksum but invalid organisation type
            '1120000441': error_invalid,
        }
        self.assertFieldOutput(SEOrganisationNumberField, valid, invalid)

    def test_SEPersonalIdentityNumberField(self):
        """Personal identity numbers: default behavior, then with
        coordination_number=False, then with interim_number=True."""
        error_invalid = ['Enter a valid Swedish personal identity number.']
        error_coord = ['Co-ordination numbers are not allowed.']
        valid = {
            '870512-1989': '198705121989',
            '870512-2128': '198705122128',
            '19870512-1989': '198705121989',
            '198705121989': '198705121989',
            '081015-6315': '190810156315',
            '0810156315': '190810156315',
            # This is a "special-case" in the checksum calculation,
            # where the sum is divisible by 10 (the checksum digit == 0)
            '8705141060': '198705141060',
            # + means that the person is older than 100 years
            '081015+6315': '180810156315',
            # Co-ordination number checking
            '870574-1315': '198705741315',
            '870574+1315': '188705741315',
            '198705741315': '198705741315',
        }
        invalid = {
            '081015 6315': error_invalid,
            '950d314496': error_invalid,
            'invalid!!!': error_invalid,
            # Invalid dates
            # February 31st does not exist
            '950231-4496': error_invalid,
            # Month 14 does not exist
            '6914104499': error_invalid,
            # There are no Swedish personal id numbers where year < 1800
            '17430309-7135': error_invalid,
            # Invalid checksum
            '870514-1111': error_invalid,
            # Co-ordination number with bad checksum
            '870573-1311': error_invalid,
            # Interim numbers should be rejected by default, even though they are valid
            '901129-T003': error_invalid,
            '19901129T003': error_invalid,
        }
        self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid)

        valid = {}
        invalid = {
            # Check valid co-ordination numbers that should not be accepted
            # because of coordination_number=False
            '870574-1315': error_coord,
            '870574+1315': error_coord,
            '8705741315': error_coord,
            # Invalid co-ordination numbers should be treated as invalid, and not
            # as co-ordination numbers
            '870573-1311': error_invalid,
        }
        kwargs = {'coordination_number': False}
        self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid,
                               field_kwargs=kwargs)

        valid = {
            # All ordinary numbers should work when switching the first serial
            # digit to a letter. Additionally, lower case letters should be
            # accepted and normalized to upper case.
            '870512-T989': '19870512T989',
            '870512-r120': '19870512R120',
            '19870512-S989': '19870512S989',
            '19870512u989': '19870512U989',
            '081015-W316': '19081015W316',
            '081015x316': '19081015X316',
            '870514J060': '19870514J060',
            '081015+k316': '18081015K316',
        }
        invalid = {
            # The concepts of interim and coordination numbers can not be
            # combined and should be considered invalid
            '870574-L315': error_invalid,
            '870574+m315': error_invalid,
            '870574N315': error_invalid,
            # Invalid interim numbers should be reported as invalid
            '870512-T988': error_invalid,
            '870512-r121': error_invalid,
        }
        kwargs = {'interim_number': True}
        self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid,
                               field_kwargs=kwargs)

    def test_SEPostalCodeField(self):
        """Postal codes accept an optional single space and normalize to
        five digits."""
        error_format = ['Enter a Swedish postal code in the format XXXXX.']
        valid = {
            '589 37': '58937',
            '58937': '58937',
        }
        invalid = {
            'abcasfassadf': error_format,
            # Only one space is allowed for separation
            '589  37': error_format,
            # The postal code must not start with 0
            '01234': error_format,
        }
        self.assertFieldOutput(SEPostalCodeField, valid, invalid)
| bsd-3-clause | a384a9f0665739deb83b4594f3137c1d | 39.435233 | 107 | 0.59021 | 3.684608 | false | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.