after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def propagate_to_anomaly(self, value):
"""Propagates an orbit to a specific true anomaly.
Parameters
----------
value : ~astropy.units.Quantity
Returns
-------
Orbit
Resulting orbit after propagation.
"""
# Silently wrap anomaly
nu = (value + np.pi * u.rad) % (2 * np.pi * u.rad) - np.pi * u.rad
# Compute time of flight for correct epoch
time_of_flight = self.time_to_anomaly(nu)
if time_of_flight < 0:
if self.ecc >= 1:
raise ValueError("True anomaly {:.2f} not reachable".format(value))
else:
# For a closed orbit, instead of moving backwards
# we need to do another revolution
time_of_flight = self.period - time_of_flight
return self.from_classical(
self.attractor,
self.a,
self.ecc,
self.inc,
self.raan,
self.argp,
nu,
epoch=self.epoch + time_of_flight,
plane=self.plane,
)
|
def propagate_to_anomaly(self, value):
"""Propagates an orbit to a specific true anomaly.
Parameters
----------
value : ~astropy.units.Quantity
Returns
-------
Orbit
Resulting orbit after propagation.
"""
# Compute time of flight for correct epoch
time_of_flight = self.time_to_anomaly(value)
return self.from_classical(
self.attractor,
self.a,
self.ecc,
self.inc,
self.raan,
self.argp,
value,
epoch=self.epoch + time_of_flight,
plane=self.plane,
)
|
https://github.com/poliastro/poliastro/issues/475
|
$ NUMBA_DISABLE_JIT=1 ipython --no-banner
In [1]: import numpy as np
In [2]: np.seterr(all="raise")
Out[2]: {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
In [3]: import numpy as np
...: import math
...: from astropy import units as u
...: from poliastro.bodies import Earth, Moon
...: from poliastro.twobody import Orbit
...:
...: r=[8.e3, 1.e3, 0.]*u.km
...: v=[-0.5, -0.5, 0.]*u.km/u.s
...: orbit1=Orbit.from_vectors(Earth,r,v)
...: orbit2=orbit1.propagate(1.*u.h)
...:
...:
---------------------------------------------------------------------------
FloatingPointError Traceback (most recent call last)
<ipython-input-3-08d7b74965c9> in <module>()
8 v=[-0.5, -0.5, 0.]*u.km/u.s
9 orbit1=Orbit.from_vectors(Earth,r,v)
---> 10 orbit2=orbit1.propagate(1.*u.h)
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/twobody/orbit.py in propagate(self, value, method, rtol, **kwargs)
403 time_of_flight = time.TimeDelta(value)
404
--> 405 return propagate(self, time_of_flight, method=method, rtol=rtol, **kwargs)
406
407 def sample(self, values=None, method=mean_motion):
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/twobody/propagation.py in propagate(orbit, time_of_flight, method, rtol, **kwargs)
177
178 """
--> 179 r, v = method(orbit, time_of_flight.to(u.s).value, rtol=rtol, **kwargs)
180 return orbit.from_vectors(orbit.attractor, r * u.km, v * u.km / u.s, orbit.epoch + time_of_flight, orbit.plane)
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/twobody/propagation.py in mean_motion(orbit, tofs, **kwargs)
118
119 if not hasattr(tofs, '__len__'):
--> 120 return mean_motion_fast(k, r0, v0, tofs)
121
122 results = [mean_motion_fast(k, r0, v0, tof) for tof in tofs]
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/propagation.py in mean_motion(k, r0, v0, tof)
33
34 # get the initial mean anomaly
---> 35 M0 = nu_to_M(nu0, ecc)
36 # strong elliptic or strong hyperbolic orbits
37 if np.abs(ecc - 1.0) > 1e-2:
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in nu_to_M(nu, ecc, delta)
183 else:
184 D = nu_to_D(nu)
--> 185 M = D_to_M(D, ecc)
186 return M
187
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in D_to_M(D, ecc)
155 @jit
156 def D_to_M(D, ecc):
--> 157 M = _kepler_equation_parabolic(D, 0.0, ecc)
158 return M
159
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in _kepler_equation_parabolic(D, M, ecc)
26 @jit
27 def _kepler_equation_parabolic(D, M, ecc):
---> 28 return M_parabolic(ecc, D) - M
29
30
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in M_parabolic(ecc, D, tolerance)
41 k = 0
42 while not small_term:
---> 43 term = (ecc - 1.0 / (2.0 * k + 3.0)) * (x ** k)
44 small_term = np.abs(term) < tolerance
45 S += term
FloatingPointError: overflow encountered in double_scalars
|
FloatingPointError
|
def _generate_time_values(self, nu_vals):
# Subtract current anomaly to start from the desired point
ecc = self.ecc.value
k = self.attractor.k.to_value(u.km**3 / u.s**2)
q = self.r_p.to_value(u.km)
time_values = [
delta_t_from_nu_fast(nu_val, ecc, k, q) for nu_val in nu_vals.to(u.rad).value
] * u.s - self.t_p
return time_values
|
def _generate_time_values(self, nu_vals):
# Subtract current anomaly to start from the desired point
ecc = self.ecc.value
nu = self.nu.to(u.rad).value
M_vals = [
nu_to_M_fast(nu_val, ecc) - nu_to_M_fast(nu, ecc)
for nu_val in nu_vals.to(u.rad).value
] * u.rad
time_values = (M_vals / self.n).decompose()
return time_values
|
https://github.com/poliastro/poliastro/issues/475
|
$ NUMBA_DISABLE_JIT=1 ipython --no-banner
In [1]: import numpy as np
In [2]: np.seterr(all="raise")
Out[2]: {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
In [3]: import numpy as np
...: import math
...: from astropy import units as u
...: from poliastro.bodies import Earth, Moon
...: from poliastro.twobody import Orbit
...:
...: r=[8.e3, 1.e3, 0.]*u.km
...: v=[-0.5, -0.5, 0.]*u.km/u.s
...: orbit1=Orbit.from_vectors(Earth,r,v)
...: orbit2=orbit1.propagate(1.*u.h)
...:
...:
---------------------------------------------------------------------------
FloatingPointError Traceback (most recent call last)
<ipython-input-3-08d7b74965c9> in <module>()
8 v=[-0.5, -0.5, 0.]*u.km/u.s
9 orbit1=Orbit.from_vectors(Earth,r,v)
---> 10 orbit2=orbit1.propagate(1.*u.h)
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/twobody/orbit.py in propagate(self, value, method, rtol, **kwargs)
403 time_of_flight = time.TimeDelta(value)
404
--> 405 return propagate(self, time_of_flight, method=method, rtol=rtol, **kwargs)
406
407 def sample(self, values=None, method=mean_motion):
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/twobody/propagation.py in propagate(orbit, time_of_flight, method, rtol, **kwargs)
177
178 """
--> 179 r, v = method(orbit, time_of_flight.to(u.s).value, rtol=rtol, **kwargs)
180 return orbit.from_vectors(orbit.attractor, r * u.km, v * u.km / u.s, orbit.epoch + time_of_flight, orbit.plane)
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/twobody/propagation.py in mean_motion(orbit, tofs, **kwargs)
118
119 if not hasattr(tofs, '__len__'):
--> 120 return mean_motion_fast(k, r0, v0, tofs)
121
122 results = [mean_motion_fast(k, r0, v0, tof) for tof in tofs]
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/propagation.py in mean_motion(k, r0, v0, tof)
33
34 # get the initial mean anomaly
---> 35 M0 = nu_to_M(nu0, ecc)
36 # strong elliptic or strong hyperbolic orbits
37 if np.abs(ecc - 1.0) > 1e-2:
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in nu_to_M(nu, ecc, delta)
183 else:
184 D = nu_to_D(nu)
--> 185 M = D_to_M(D, ecc)
186 return M
187
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in D_to_M(D, ecc)
155 @jit
156 def D_to_M(D, ecc):
--> 157 M = _kepler_equation_parabolic(D, 0.0, ecc)
158 return M
159
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in _kepler_equation_parabolic(D, M, ecc)
26 @jit
27 def _kepler_equation_parabolic(D, M, ecc):
---> 28 return M_parabolic(ecc, D) - M
29
30
~/.miniconda36/envs/poliastro37/lib/python3.7/site-packages/poliastro/core/angles.py in M_parabolic(ecc, D, tolerance)
41 k = 0
42 while not small_term:
---> 43 term = (ecc - 1.0 / (2.0 * k + 3.0)) * (x ** k)
44 small_term = np.abs(term) < tolerance
45 S += term
FloatingPointError: overflow encountered in double_scalars
|
FloatingPointError
|
def from_sbdb(cls, name, **kwargs):
"""Return osculating `Orbit` by using `SBDB` from Astroquery.
Parameters
----------
name: string
Name of the body to make the request.
Returns
-------
ss: poliastro.twobody.orbit.Orbit
Orbit corresponding to body_name
Examples
--------
>>> from poliastro.twobody.orbit import Orbit
>>> apophis_orbit = Orbit.from_sbdb('apophis') # doctest: +REMOTE_DATA
"""
from poliastro.bodies import Sun
obj = SBDB.query(name, full_precision=True, **kwargs)
if "count" in obj:
# no error till now ---> more than one object has been found
# contains all the name of the objects
objects_name = obj["list"]["name"]
objects_name_in_str = "" # used to store them in string form each in new line
for i in objects_name:
objects_name_in_str += i + "\n"
raise ValueError(
str(obj["count"]) + " different objects found: \n" + objects_name_in_str
)
if "object" not in obj.keys():
raise ValueError("Object {} not found".format(name))
a = obj["orbit"]["elements"]["a"].to(u.AU) * u.AU
ecc = float(obj["orbit"]["elements"]["e"]) * u.one
inc = obj["orbit"]["elements"]["i"].to(u.deg) * u.deg
raan = obj["orbit"]["elements"]["om"].to(u.deg) * u.deg
argp = obj["orbit"]["elements"]["w"].to(u.deg) * u.deg
# Since JPL provides Mean Anomaly (M) we need to make
# the conversion to the true anomaly (\nu)
nu = M_to_nu(obj["orbit"]["elements"]["ma"].to(u.deg) * u.deg, ecc)
epoch = time.Time(obj["orbit"]["epoch"].to(u.d), format="jd")
ss = cls.from_classical(
Sun,
a,
ecc,
inc,
raan,
argp,
nu,
epoch=epoch.tdb,
plane=Planes.EARTH_ECLIPTIC,
)
return ss
|
def from_sbdb(cls, name, **kwargs):
"""Return osculating `Orbit` by using `SBDB` from Astroquery.
Parameters
----------
name: string
Name of the body to make the request.
Returns
-------
ss: poliastro.twobody.orbit.Orbit
Orbit corresponding to body_name
Examples
--------
>>> from poliastro.twobody.orbit import Orbit
>>> apophis_orbit = Orbit.from_sbdb('apophis') # doctest: +REMOTE_DATA
"""
from poliastro.bodies import Sun
obj = SBDB.query(name, full_precision=True, **kwargs)
if "count" in obj:
# no error till now ---> more than one object has been found
# contains all the name of the objects
objects_name = obj["list"]["name"]
objects_name_in_str = "" # used to store them in string form each in new line
for i in objects_name:
objects_name_in_str += i + "\n"
raise ValueError(
str(obj["count"]) + " different objects found: \n" + objects_name_in_str
)
a = obj["orbit"]["elements"]["a"].to(u.AU) * u.AU
ecc = float(obj["orbit"]["elements"]["e"]) * u.one
inc = obj["orbit"]["elements"]["i"].to(u.deg) * u.deg
raan = obj["orbit"]["elements"]["om"].to(u.deg) * u.deg
argp = obj["orbit"]["elements"]["w"].to(u.deg) * u.deg
# Since JPL provides Mean Anomaly (M) we need to make
# the conversion to the true anomaly (\nu)
nu = M_to_nu(obj["orbit"]["elements"]["ma"].to(u.deg) * u.deg, ecc)
epoch = time.Time(obj["orbit"]["epoch"].to(u.d), format="jd")
ss = cls.from_classical(
Sun,
a,
ecc,
inc,
raan,
argp,
nu,
epoch=epoch.tdb,
plane=Planes.EARTH_ECLIPTIC,
)
return ss
|
https://github.com/poliastro/poliastro/issues/916
|
In [9]: Orbit.from_sbdb("67/P")
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-9-75d3da3ffd57> in <module>
----> 1 Orbit.from_sbdb("67/P")
~/.pyenv/versions/poliastro37_4/lib/python3.7/site-packages/poliastro/twobody/orbit.py in from_sbdb(cls, name, **kargs)
572 )
573
--> 574 a = obj["orbit"]["elements"]["a"].to(u.AU) * u.AU
575 ecc = float(obj["orbit"]["elements"]["e"]) * u.one
576 inc = obj["orbit"]["elements"]["i"].to(u.deg) * u.deg
KeyError: 'orbit'
|
KeyError
|
def record_from_name(name):
"""Search `dastcom.idx` and return logical records that match a given string.
Body name, SPK-ID, or alternative designations can be used.
Parameters
----------
name : str
Body name.
Returns
-------
records : list (int)
DASTCOM5 database logical records matching str.
"""
records = []
lines = string_record_from_name(name)
for line in lines:
records.append(int(line[:8].lstrip()))
return records
|
def record_from_name(name):
"""Search `dastcom.idx` and return logical records that match a given string.
Body name, SPK-ID, or alternative designations can be used.
Parameters
----------
name : str
Body name.
Returns
-------
records : list (int)
DASTCOM5 database logical records matching str.
"""
records = []
lines = string_record_from_name(name)
for line in lines:
records.append(int(line[:6].lstrip()))
return records
|
https://github.com/poliastro/poliastro/issues/902
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-9-3bb5fa7c99be> in <module>
----> 1 halleys = dastcom5.orbit_from_name("1P")
2
3 frame = StaticOrbitPlotter()
4 frame.plot(halleys[0], label="Halley")
5 frame.plot(halleys[5], label="Halley")
~/.pyenv/versions/3.8.0/envs/poliastro38/lib/python3.8/site-packages/poliastro/neos/dastcom5.py in orbit_from_name(name)
339 orbits = []
340 for record in records:
--> 341 orbits.append(orbit_from_record(record))
342 return orbits
343
~/.pyenv/versions/3.8.0/envs/poliastro38/lib/python3.8/site-packages/poliastro/neos/dastcom5.py in orbit_from_record(record)
359
360 """
--> 361 body_data = read_record(record)
362 a = body_data["A"].item() * u.au
363 ecc = body_data["EC"].item() * u.one
~/.pyenv/versions/3.8.0/envs/poliastro38/lib/python3.8/site-packages/poliastro/neos/dastcom5.py in read_record(record)
508
509 with open(ast_path, "rb") as f:
--> 510 f.seek(phis_rec, os.SEEK_SET)
511 body_data = np.fromfile(f, dtype=AST_DTYPE, count=1)
512 else:
OSError: [Errno 22] Invalid argument
|
OSError
|
def norm(vec):
r"""Returns the norm of a 3 dimension vector.
.. math::
\left \| \vec{v} \right \| = \sqrt{\sum_{i=1}^{n}v_{i}^2}
Parameters
----------
vec: ndarray
Dimension 3 vector.
Examples
--------
>>> vec = np.array([1, 1, 1])
>>> norm(vec)
1.7320508075688772
"""
vec = 1.0 * vec # Cast to float
return np.sqrt(vec.dot(vec))
|
def norm(vec):
r"""Returns the norm of a 3 dimension vector.
.. math::
\left \| \vec{v} \right \| = \sqrt{\sum_{i=1}^{n}v_{i}^2}
Parameters
----------
vec: ndarray
Dimension 3 vector.
Examples
--------
>>> from poliastro.core.util import norm
>>> from astropy import units as u
>>> vec = [1, 1, 1] * u.m
>>> norm(vec)
1.7320508075688772
"""
vec = 1.0 * vec # Cast to float
return np.sqrt(vec.dot(vec))
|
https://github.com/poliastro/poliastro/issues/761
|
_____________________ [doctest] poliastro.core.util.cross ______________________
143 b : ndarray
144 3 Dimension vector.
145
146 Examples
147 --------
148 >>> from poliastro.core.util import cross
149 >>> from astropy import units as u
150 >>> i = [1, 0, 0] * u.m
151 >>> j = [0, 1, 0] * u.m
152 >>> cross(i, j)
UNEXPECTED EXCEPTION: TypeError('only dimensionless scalar quantities can be converted to Python scalars')
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/astropy/units/quantity.py", line 715, in to_value
scale = self.unit._to(unit)
File "/usr/lib/python3/dist-packages/astropy/units/core.py", line 953, in _to
"'{0!r}' is not a scaled version of '{1!r}'".format(self, other))
astropy.units.core.UnitConversionError: 'Unit("m2")' is not a scaled version of 'Unit(dimensionless)'
|
astropy.units.core.UnitConversionError
|
def cross(a, b):
r"""Computes cross product between two vectors.
.. math::
\vec{w} = \vec{u} \times \vec{v} = \begin{vmatrix}
u_{y} & y_{z} \\
v_{y} & v_{z}
\end{vmatrix}\vec{i} - \begin{vmatrix}
u_{x} & u_{z} \\
v_{x} & v_{z}
\end{vmatrix}\vec{j} + \begin{vmatrix}
u_{x} & u_{y} \\
v_{x} & v_{y}
\end{vmatrix}\vec{k}
Parameters
----------
a : ndarray
3 Dimension vector.
b : ndarray
3 Dimension vector.
Examples
--------
>>> i = np.array([1., 0., 0.])
>>> j = np.array([0., 1., 0.])
>>> cross(i, j)
array([0., 0., 1.])
Note
-----
np.cross is not supported in numba nopython mode, see
https://github.com/numba/numba/issues/2978
"""
return np.array(
(
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0],
)
)
|
def cross(a, b):
r"""Computes cross product between two vectors.
.. math::
\vec{w} = \vec{u} \times \vec{v} = \begin{vmatrix}
u_{y} & y_{z} \\
v_{y} & v_{z}
\end{vmatrix}\vec{i} - \begin{vmatrix}
u_{x} & u_{z} \\
v_{x} & v_{z}
\end{vmatrix}\vec{j} + \begin{vmatrix}
u_{x} & u_{y} \\
v_{x} & v_{y}
\end{vmatrix}\vec{k}
Parameters
----------
a : ndarray
3 Dimension vector.
b : ndarray
3 Dimension vector.
Examples
--------
>>> from poliastro.core.util import cross
>>> from astropy import units as u
>>> i = [1, 0, 0] * u.m
>>> j = [0, 1, 0] * u.m
>>> cross(i, j)
array([0., 0., 1.])
Note
-----
np.cross is not supported in numba nopython mode, see
https://github.com/numba/numba/issues/2978
"""
return np.array(
(
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0],
)
)
|
https://github.com/poliastro/poliastro/issues/761
|
_____________________ [doctest] poliastro.core.util.cross ______________________
143 b : ndarray
144 3 Dimension vector.
145
146 Examples
147 --------
148 >>> from poliastro.core.util import cross
149 >>> from astropy import units as u
150 >>> i = [1, 0, 0] * u.m
151 >>> j = [0, 1, 0] * u.m
152 >>> cross(i, j)
UNEXPECTED EXCEPTION: TypeError('only dimensionless scalar quantities can be converted to Python scalars')
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/astropy/units/quantity.py", line 715, in to_value
scale = self.unit._to(unit)
File "/usr/lib/python3/dist-packages/astropy/units/core.py", line 953, in _to
"'{0!r}' is not a scaled version of '{1!r}'".format(self, other))
astropy.units.core.UnitConversionError: 'Unit("m2")' is not a scaled version of 'Unit(dimensionless)'
|
astropy.units.core.UnitConversionError
|
def propagate(self, value, method=mean_motion, rtol=1e-10, **kwargs):
"""Propagates an orbit a specified time.
If value is true anomaly, propagate orbit to this anomaly and return the result.
Otherwise, if time is provided, propagate this `Orbit` some `time` and return the result.
Parameters
----------
value : ~astropy.units.Quantity, ~astropy.time.Time, ~astropy.time.TimeDelta
Scalar time to propagate.
rtol : float, optional
Relative tolerance for the propagation algorithm, default to 1e-10.
method : function, optional
Method used for propagation
**kwargs
parameters used in perturbation models
Returns
-------
Orbit
New orbit after propagation.
"""
if isinstance(value, time.Time) and not isinstance(value, time.TimeDelta):
time_of_flight = value - self.epoch
else:
# Works for both Quantity and TimeDelta objects
time_of_flight = time.TimeDelta(value)
cartesian = propagate(self, time_of_flight, method=method, rtol=rtol, **kwargs)
new_epoch = self.epoch + time_of_flight
# TODO: Unify with sample
# If the frame supports obstime, set the time values
try:
kwargs = {}
if "obstime" in self.frame.frame_attributes:
kwargs["obstime"] = new_epoch
# Use of a protected method instead of frame.realize_frame
# because the latter does not let the user choose the representation type
# in one line despite its parameter names, see
# https://github.com/astropy/astropy/issues/7784
coords = self.frame._replicate(
cartesian, representation_type="cartesian", **kwargs
)
return self.from_coords(self.attractor, coords, plane=self.plane)
except NotImplementedError:
return self.from_vectors(
self.attractor,
cartesian.xyz,
cartesian.differentials["s"].d_xyz,
new_epoch,
)
|
def propagate(self, value, method=mean_motion, rtol=1e-10, **kwargs):
"""Propagates an orbit a specified time.
If value is true anomaly, propagate orbit to this anomaly and return the result.
Otherwise, if time is provided, propagate this `Orbit` some `time` and return the result.
Parameters
----------
value : ~astropy.units.Quantity, ~astropy.time.Time, ~astropy.time.TimeDelta
Scalar time to propagate.
rtol : float, optional
Relative tolerance for the propagation algorithm, default to 1e-10.
method : function, optional
Method used for propagation
**kwargs
parameters used in perturbation models
Returns
-------
Orbit
New orbit after propagation.
"""
if isinstance(value, time.Time) and not isinstance(value, time.TimeDelta):
time_of_flight = value - self.epoch
else:
# Works for both Quantity and TimeDelta objects
time_of_flight = time.TimeDelta(value)
cartesian = propagate(self, time_of_flight, method=method, rtol=rtol, **kwargs)
# If the frame supports obstime, set the time values
kwargs = {}
if "obstime" in self.frame.frame_attributes:
kwargs["obstime"] = self.epoch + time_of_flight
else:
warn(
"Frame {} does not support 'obstime', time values were not returned".format(
self.frame.__class__
)
)
# Use of a protected method instead of frame.realize_frame
# because the latter does not let the user choose the representation type
# in one line despite its parameter names, see
# https://github.com/astropy/astropy/issues/7784
coords = self.frame._replicate(cartesian, representation_type="cartesian", **kwargs)
return self.from_coords(self.attractor, coords, plane=self.plane)
|
https://github.com/poliastro/poliastro/issues/654
|
from poliastro.twobody import Orbit
from poliastro.bodies import Moon
from astropy import units as u
orb = Orbit.from_vectors(Moon, [-17621.48704193, 9218.72943252, -10947.19144579] * u.km, [ -6985.91793854, -13970.02807282, -7272.34554685] * u.km / u.day)
orb
2211 x 22792 km x 36.0 deg orbit around Moon (☾) at epoch J2000.000 (TT)
orb.sample(3)
/home/juanlu/Development/poliastro/poliastro-library/src/poliastro/twobody/orbit.py:1082: UserWarning:
No frame found for attractor Moon (☾), returning only cartesian coordinates instead
<CartesianRepresentation (x, y, z) in km
[( 1738.77087758, -814.03020271, 1096.10391796),
(-17926.29064628, 8392.44675468, -11300.55584963),
( 1738.77087758, -814.03020271, 1096.10391796)]
(has differentials w.r.t.: 's')>
orb.propagate(orb.period / 2)
Traceback (most recent call last):
File "/home/juanlu/Development/poliastro/poliastro-library/src/poliastro/frames.py", line 337, in get_frame
frames = _FRAME_MAPPING[attractor]
KeyError: Moon (☾)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/juanlu/Development/poliastro/poliastro-library/src/poliastro/twobody/orbit.py", line 913, in propagate
if "obstime" in self.frame.frame_attributes:
File "/home/juanlu/Development/poliastro/poliastro-library/src/poliastro/twobody/orbit.py", line 101, in frame
self._frame = get_frame(self.attractor, self._plane, self.epoch)
File "/home/juanlu/Development/poliastro/poliastro-library/src/poliastro/frames.py", line 340, in get_frame
"Frames for orbits around custom bodies are not yet supported"
NotImplementedError: Frames for orbits around custom bodies are not yet supported
|
KeyError
|
def plot(self, orbit, label=None, color=None):
"""Plots state and osculating orbit in their plane."""
if not self._frame:
self.set_frame(*orbit.pqw())
if (orbit, label) not in self._orbits:
self._orbits.append((orbit, label))
# if new attractor radius is smaller, plot it
new_radius = max(orbit.attractor.R.to(u.km).value, orbit.r_p.to(u.km).value / 6)
if not self._attractor_radius:
self.set_attractor(orbit)
elif new_radius < self._attractor_radius:
self.set_attractor(orbit)
lines = []
_, positions = orbit.sample(self.num_points)
rr = positions.get_xyz().transpose()
# Project on OrbitPlotter frame
# x_vec, y_vec, z_vec = self._frame
x, y = self._project(rr)
x0, y0 = self._project(orbit.r[None])
# Plot current position
(l,) = self.ax.plot(x0.to(u.km).value, y0.to(u.km).value, "o", mew=0, color=color)
lines.append(l)
# Plot trajectory
(l,) = self.ax.plot(x.to(u.km).value, y.to(u.km).value, "--", color=l.get_color())
lines.append(l)
if label:
# This will apply the label to either the point or the osculating
# orbit depending on the last plotted line, as they share variable
if not self.ax.get_legend():
size = self.ax.figure.get_size_inches() + [8, 0]
self.ax.figure.set_size_inches(size)
orbit.epoch.out_subfmt = "date_hm"
label = "{} ({})".format(orbit.epoch.iso, label)
l.set_label(label)
self.ax.legend(bbox_to_anchor=(1.05, 1), title="Names and epochs")
self.ax.set_xlabel("$x$ (km)")
self.ax.set_ylabel("$y$ (km)")
self.ax.set_aspect(1)
return lines
|
def plot(self, orbit, label=None, color=None):
"""Plots state and osculating orbit in their plane."""
if not self._frame:
self.set_frame(*orbit.pqw())
if (orbit, label) not in self._orbits:
self._orbits.append((orbit, label))
# if new attractor radius is smaller, plot it
new_radius = max(orbit.attractor.R.to(u.km).value, orbit.r_p.to(u.km).value / 6)
if not self._attractor_radius:
self.set_attractor(orbit)
elif new_radius < self._attractor_radius:
self.set_attractor(orbit)
lines = []
_, positions = orbit.sample(self.num_points)
rr = positions.get_xyz().transpose()
# Project on OrbitPlotter frame
# x_vec, y_vec, z_vec = self._frame
rr_proj = rr - rr.dot(self._frame[2])[:, None] * self._frame[2]
x = rr_proj.dot(self._frame[0])
y = rr_proj.dot(self._frame[1])
# Plot current position
(l,) = self.ax.plot(
x[0].to(u.km).value, y[0].to(u.km).value, "o", mew=0, color=color
)
lines.append(l)
(l,) = self.ax.plot(x.to(u.km).value, y.to(u.km).value, "--", color=l.get_color())
lines.append(l)
if label:
# This will apply the label to either the point or the osculating
# orbit depending on the last plotted line, as they share variable
if not self.ax.get_legend():
size = self.ax.figure.get_size_inches() + [8, 0]
self.ax.figure.set_size_inches(size)
orbit.epoch.out_subfmt = "date_hm"
label = "{} ({})".format(orbit.epoch.iso, label)
l.set_label(label)
self.ax.legend(bbox_to_anchor=(1.05, 1), title="Names and epochs")
self.ax.set_xlabel("$x$ (km)")
self.ax.set_ylabel("$y$ (km)")
self.ax.set_aspect(1)
return lines
|
https://github.com/poliastro/poliastro/issues/326
|
/usr/local/lib/python3.6/site-packages/astropy/units/quantity.py:639: RuntimeWarning:
invalid value encountered in log
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1369 try:
-> 1370 other = TimeDelta(other)
1371 except Exception:
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __init__(self, val, val2, format, scale, copy)
1539
-> 1540 self._init_from_vals(val, val2, format, scale, copy)
1541
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _init_from_vals(self, val, val2, format, scale, copy, precision, in_subfmt, out_subfmt)
329 self._time = self._get_time_fmt(val, val2, format, scale,
--> 330 precision, in_subfmt, out_subfmt)
331 self._format = self._time.name
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt)
373 else:
--> 374 raise ValueError('Input values did not match {0}'.format(err_msg))
375
ValueError: Input values did not match the format class jd
During handling of the above exception, another exception occurred:
OperandTypeError Traceback (most recent call last)
<ipython-input-5-f1d654cf6abc> in <module>()
2
3 op.plot(parking)
----> 4 op.plot(exit)
5
6 plt.xlim(-8000, 8000)
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/plotting.py in plot(self, orbit, label, color)
152 lines = []
153
--> 154 _, positions = orbit.sample(self.num_points)
155 rr = positions.get_xyz().transpose()
156
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
308 nu_values = np.insert(nu_values, 0, self.ecc)
309
--> 310 return self.sample(nu_values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
--> 313 values = self._generate_time_values(values)
314 return (values, self._sample(values, function))
315
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in _generate_time_values(self, nu_vals)
324 def _generate_time_values(self, nu_vals):
325 M_vals = nu_to_M(nu_vals, self.ecc)
--> 326 time_values = self.epoch + (M_vals / self.n).decompose()
327 return time_values
328
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1370 other = TimeDelta(other)
1371 except Exception:
-> 1372 raise OperandTypeError(self, other, '+')
1373
1374 # Tdelta + something is dealt with in TimeDelta, so we have
OperandTypeError: Unsupported operand type(s) for +: 'Time' and 'Quantity'
|
ValueError
|
def sample(self, values=None, method=mean_motion):
"""Samples an orbit to some specified time values.
.. versionadded:: 0.8.0
Parameters
----------
values : Multiple options
Number of interval points (default to 100),
True anomaly values,
Time values.
Returns
-------
(Time, CartesianRepresentation)
A tuple containing Time and Position vector in each
given value.
Notes
-----
When specifying a number of points, the initial and final
position is present twice inside the result (first and
last row). This is more useful for plotting.
Examples
--------
>>> from astropy import units as u
>>> from poliastro.examples import iss
>>> iss.sample()
>>> iss.sample(10)
>>> iss.sample([0, 180] * u.deg)
>>> iss.sample([0, 10, 20] * u.minute)
>>> iss.sample([iss.epoch + iss.period / 2])
"""
if values is None:
return self.sample(100, method)
elif isinstance(values, int):
if self.ecc < 1:
# first sample eccentric anomaly, then transform into true anomaly
# why sampling eccentric anomaly uniformly to minimize error in the apocenter, see
# http://www.dtic.mil/dtic/tr/fulltext/u2/a605040.pdf
# Start from pericenter
E_values = np.linspace(0, 2 * np.pi, values) * u.rad
nu_values = E_to_nu(E_values, self.ecc)
else:
# Select a sensible limiting value for non-closed orbits
# This corresponds to max(r = 3p, r = self.r)
# We have to wrap nu in [-180, 180) to compare it with the output of
# the arc cosine, which is in the range [0, 180)
# Start from -nu_limit
wrapped_nu = self.nu if self.nu < 180 * u.deg else self.nu - 360 * u.deg
nu_limit = max(np.arccos(-(1 - 1 / 3.0) / self.ecc), wrapped_nu)
nu_values = np.linspace(-nu_limit, nu_limit, values)
return self.sample(nu_values, method)
elif hasattr(values, "unit") and values.unit in ("rad", "deg"):
values = self._generate_time_values(values)
return values, self._sample(values, method)
|
def sample(self, values=None, method=mean_motion):
"""Samples an orbit to some specified time values.
.. versionadded:: 0.8.0
Parameters
----------
values : Multiple options
Number of interval points (default to 100),
True anomaly values,
Time values.
Returns
-------
(Time, CartesianRepresentation)
A tuple containing Time and Position vector in each
given value.
Notes
-----
When specifying a number of points, the initial and final
position is present twice inside the result (first and
last row). This is more useful for plotting.
Examples
--------
>>> from astropy import units as u
>>> from poliastro.examples import iss
>>> iss.sample()
>>> iss.sample(10)
>>> iss.sample([0, 180] * u.deg)
>>> iss.sample([0, 10, 20] * u.minute)
>>> iss.sample([iss.epoch + iss.period / 2])
"""
if values is None:
return self.sample(100, method)
elif isinstance(values, int):
if self.ecc < 1:
# first sample eccentric anomaly, then transform into true anomaly
# why sampling eccentric anomaly uniformly to minimize error in the apocenter, see
# http://www.dtic.mil/dtic/tr/fulltext/u2/a605040.pdf
E_values = np.linspace(0, 2 * np.pi, values) * u.rad
nu_values = E_to_nu(E_values, self.ecc)
else:
# Select a sensible limiting value for non-closed orbits
# This corresponds to r = 3p
nu_limit = np.arccos(-(1 - 1 / 3.0) / self.ecc)
nu_values = np.linspace(-nu_limit, nu_limit, values)
nu_values = np.insert(nu_values, 0, self.ecc)
return self.sample(nu_values, method)
elif hasattr(values, "unit") and values.unit in ("rad", "deg"):
values = self._generate_time_values(values)
return (values, self._sample(values, method))
|
https://github.com/poliastro/poliastro/issues/326
|
/usr/local/lib/python3.6/site-packages/astropy/units/quantity.py:639: RuntimeWarning:
invalid value encountered in log
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1369 try:
-> 1370 other = TimeDelta(other)
1371 except Exception:
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __init__(self, val, val2, format, scale, copy)
1539
-> 1540 self._init_from_vals(val, val2, format, scale, copy)
1541
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _init_from_vals(self, val, val2, format, scale, copy, precision, in_subfmt, out_subfmt)
329 self._time = self._get_time_fmt(val, val2, format, scale,
--> 330 precision, in_subfmt, out_subfmt)
331 self._format = self._time.name
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt)
373 else:
--> 374 raise ValueError('Input values did not match {0}'.format(err_msg))
375
ValueError: Input values did not match the format class jd
During handling of the above exception, another exception occurred:
OperandTypeError Traceback (most recent call last)
<ipython-input-5-f1d654cf6abc> in <module>()
2
3 op.plot(parking)
----> 4 op.plot(exit)
5
6 plt.xlim(-8000, 8000)
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/plotting.py in plot(self, orbit, label, color)
152 lines = []
153
--> 154 _, positions = orbit.sample(self.num_points)
155 rr = positions.get_xyz().transpose()
156
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
308 nu_values = np.insert(nu_values, 0, self.ecc)
309
--> 310 return self.sample(nu_values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
--> 313 values = self._generate_time_values(values)
314 return (values, self._sample(values, function))
315
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in _generate_time_values(self, nu_vals)
324 def _generate_time_values(self, nu_vals):
325 M_vals = nu_to_M(nu_vals, self.ecc)
--> 326 time_values = self.epoch + (M_vals / self.n).decompose()
327 return time_values
328
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1370 other = TimeDelta(other)
1371 except Exception:
-> 1372 raise OperandTypeError(self, other, '+')
1373
1374 # Tdelta + something is dealt with in TimeDelta, so we have
OperandTypeError: Unsupported operand type(s) for +: 'Time' and 'Quantity'
|
ValueError
|
def _generate_time_values(self, nu_vals):
# Subtract current anomaly to start from the desired point
M_vals = nu_to_M(nu_vals, self.ecc) - nu_to_M(self.nu, self.ecc)
time_values = self.epoch + (M_vals / self.n).decompose()
return time_values
|
def _generate_time_values(self, nu_vals):
M_vals = nu_to_M(nu_vals, self.ecc)
time_values = self.epoch + (M_vals / self.n).decompose()
return time_values
|
https://github.com/poliastro/poliastro/issues/326
|
/usr/local/lib/python3.6/site-packages/astropy/units/quantity.py:639: RuntimeWarning:
invalid value encountered in log
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1369 try:
-> 1370 other = TimeDelta(other)
1371 except Exception:
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __init__(self, val, val2, format, scale, copy)
1539
-> 1540 self._init_from_vals(val, val2, format, scale, copy)
1541
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _init_from_vals(self, val, val2, format, scale, copy, precision, in_subfmt, out_subfmt)
329 self._time = self._get_time_fmt(val, val2, format, scale,
--> 330 precision, in_subfmt, out_subfmt)
331 self._format = self._time.name
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt)
373 else:
--> 374 raise ValueError('Input values did not match {0}'.format(err_msg))
375
ValueError: Input values did not match the format class jd
During handling of the above exception, another exception occurred:
OperandTypeError Traceback (most recent call last)
<ipython-input-5-f1d654cf6abc> in <module>()
2
3 op.plot(parking)
----> 4 op.plot(exit)
5
6 plt.xlim(-8000, 8000)
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/plotting.py in plot(self, orbit, label, color)
152 lines = []
153
--> 154 _, positions = orbit.sample(self.num_points)
155 rr = positions.get_xyz().transpose()
156
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
308 nu_values = np.insert(nu_values, 0, self.ecc)
309
--> 310 return self.sample(nu_values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
--> 313 values = self._generate_time_values(values)
314 return (values, self._sample(values, function))
315
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in _generate_time_values(self, nu_vals)
324 def _generate_time_values(self, nu_vals):
325 M_vals = nu_to_M(nu_vals, self.ecc)
--> 326 time_values = self.epoch + (M_vals / self.n).decompose()
327 return time_values
328
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1370 other = TimeDelta(other)
1371 except Exception:
-> 1372 raise OperandTypeError(self, other, '+')
1373
1374 # Tdelta + something is dealt with in TimeDelta, so we have
OperandTypeError: Unsupported operand type(s) for +: 'Time' and 'Quantity'
|
ValueError
|
def get_param_decl(param):
def to_string(node):
"""Convert Doxygen node content to a string."""
result = []
if node is not None:
for p in node.content_:
value = p.value
if not isinstance(value, six.text_type):
value = value.valueOf_
result.append(value)
return " ".join(result)
param_type = to_string(param.type_)
param_name = param.declname if param.declname else param.defname
if not param_name:
param_decl = param_type
else:
param_decl, number_of_subs = re.subn(
r"(\([*&]+)(\))", r"\g<1>" + param_name + r"\g<2>", param_type
)
if number_of_subs == 0:
param_decl = param_type + " " + param_name
if param.array:
param_decl += param.array
if param.defval:
param_decl += " = " + to_string(param.defval)
return param_decl
|
def get_param_decl(param):
def to_string(node):
"""Convert Doxygen node content to a string."""
result = []
for p in node.content_:
value = p.value
if not isinstance(value, six.text_type):
value = value.valueOf_
result.append(value)
return " ".join(result)
param_type = to_string(param.type_)
param_name = param.declname if param.declname else param.defname
if not param_name:
param_decl = param_type
else:
param_decl, number_of_subs = re.subn(
r"(\([*&]+)(\))", r"\g<1>" + param_name + r"\g<2>", param_type
)
if number_of_subs == 0:
param_decl = param_type + " " + param_name
if param.array:
param_decl += param.array
if param.defval:
param_decl += " = " + to_string(param.defval)
return param_decl
|
https://github.com/michaeljones/breathe/issues/402
|
# Sphinx version: 1.8.2
# Python version: 2.7.12 (CPython)
# Docutils version: 0.14
# Jinja2 version: 2.10
# Last messages:
# Running Sphinx v1.8.2
# making output directory...
# building [mo]: targets for 0 po files that are out of date
# building [html]: targets for 2 source files that are out of date
# updating environment:
# 2 added, 0 changed, 0 removed
# reading sources... [ 50%] doxydoc
# Loaded extensions:
# sphinx.ext.mathjax (1.8.2) from /home/stbr6072/.local/lib/python2.7/site-packages/sphinx/ext/mathjax.pyc
# alabaster (0.7.12) from /home/stbr6072/.local/lib/python2.7/site-packages/alabaster/__init__.pyc
# breathe (4.11.0) from /usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/__init__.pyc
Traceback (most recent call last):
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/cmd/build.py", line 304, in build_main
app.build(args.force_all, filenames)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/application.py", line 341, in build
self.builder.build_update()
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 347, in build_update
len(to_build))
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 360, in build
updated_docnames = set(self.read())
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 468, in read
self._read_serial(docnames)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 490, in _read_serial
self.read_doc(docname)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 534, in read_doc
doctree = read_doc(self.app, self.env, self.env.doc2path(docname))
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/io.py", line 318, in read_doc
pub.publish()
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/core.py", line 217, in publish
self.settings)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/readers/__init__.py", line 72, in read
self.parse()
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/readers/__init__.py", line 78, in parse
self.parser.parse(self.input, document)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/parsers.py", line 88, in parse
self.statemachine.run(inputstring, document, inliner=self.inliner)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 171, in run
input_source=document['source'])
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 239, in run
context, state, transitions)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 460, in check_line
return method(match, context, next_state)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2753, in underline
self.section(title, source, style, lineno - 1, messages)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 327, in section
self.new_subsection(title, lineno, messages)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 395, in new_subsection
node=section_node, match_titles=True)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 282, in nested_parse
node=node, match_titles=match_titles)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 196, in run
results = StateMachineWS.run(self, input_lines, input_offset)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 239, in run
context, state, transitions)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 460, in check_line
return method(match, context, next_state)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2328, in explicit_markup
self.explicit_list(blank_finish)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2358, in explicit_list
match_titles=self.state_machine.match_titles)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 319, in nested_list_parse
node=node, match_titles=match_titles)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 196, in run
results = StateMachineWS.run(self, input_lines, input_offset)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 239, in run
context, state, transitions)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 460, in check_line
return method(match, context, next_state)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2631, in explicit_markup
nodelist, blank_finish = self.explicit_construct(match)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2338, in explicit_construct
return method(self, expmatch)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2081, in directive
directive_class, match, type_name, option_presets)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2130, in run_directive
result = directive_instance.run()
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/directive/index.py", line 90, in run
return self.handle_contents(project_info)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/directive/index.py", line 59, in handle_contents
node_list = object_renderer.render(context.node_stack[0], context)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 455, in visit_doxygen
nodelist.extend(self.render(compound))
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1235, in dispatch_compound
return self.visit_file(node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 540, in visit_file
return self.visit_compound(node, render_signature=render_signature)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 469, in visit_compound
rendered_data = self.render(file_data, parent_context)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 460, in visit_doxygendef
return self.render(node.compounddef)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 611, in visit_compounddef
child_nodes = self.render(sectiondef)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 645, in visit_sectiondef
node_list.extend(self.render_iterable(node.memberdef))
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1315, in render_iterable
output.extend(self.render(entry))
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1242, in dispatch_memberdef
return self.visit_function(node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 985, in visit_function
param_decl = get_param_decl(param)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 181, in get_param_decl
param_type = to_string(param.type_)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 174, in to_string
for p in node.content_:
AttributeError: 'NoneType' object has no attribute 'content_'
|
AttributeError
|
def to_string(node):
"""Convert Doxygen node content to a string."""
result = []
if node is not None:
for p in node.content_:
value = p.value
if not isinstance(value, six.text_type):
value = value.valueOf_
result.append(value)
return " ".join(result)
|
def to_string(node):
"""Convert Doxygen node content to a string."""
result = []
for p in node.content_:
value = p.value
if not isinstance(value, six.text_type):
value = value.valueOf_
result.append(value)
return " ".join(result)
|
https://github.com/michaeljones/breathe/issues/402
|
# Sphinx version: 1.8.2
# Python version: 2.7.12 (CPython)
# Docutils version: 0.14
# Jinja2 version: 2.10
# Last messages:
# Running Sphinx v1.8.2
# making output directory...
# building [mo]: targets for 0 po files that are out of date
# building [html]: targets for 2 source files that are out of date
# updating environment:
# 2 added, 0 changed, 0 removed
# reading sources... [ 50%] doxydoc
# Loaded extensions:
# sphinx.ext.mathjax (1.8.2) from /home/stbr6072/.local/lib/python2.7/site-packages/sphinx/ext/mathjax.pyc
# alabaster (0.7.12) from /home/stbr6072/.local/lib/python2.7/site-packages/alabaster/__init__.pyc
# breathe (4.11.0) from /usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/__init__.pyc
Traceback (most recent call last):
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/cmd/build.py", line 304, in build_main
app.build(args.force_all, filenames)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/application.py", line 341, in build
self.builder.build_update()
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 347, in build_update
len(to_build))
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 360, in build
updated_docnames = set(self.read())
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 468, in read
self._read_serial(docnames)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 490, in _read_serial
self.read_doc(docname)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/builders/__init__.py", line 534, in read_doc
doctree = read_doc(self.app, self.env, self.env.doc2path(docname))
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/io.py", line 318, in read_doc
pub.publish()
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/core.py", line 217, in publish
self.settings)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/readers/__init__.py", line 72, in read
self.parse()
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/readers/__init__.py", line 78, in parse
self.parser.parse(self.input, document)
File "/home/stbr6072/.local/lib/python2.7/site-packages/sphinx/parsers.py", line 88, in parse
self.statemachine.run(inputstring, document, inliner=self.inliner)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 171, in run
input_source=document['source'])
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 239, in run
context, state, transitions)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 460, in check_line
return method(match, context, next_state)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2753, in underline
self.section(title, source, style, lineno - 1, messages)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 327, in section
self.new_subsection(title, lineno, messages)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 395, in new_subsection
node=section_node, match_titles=True)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 282, in nested_parse
node=node, match_titles=match_titles)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 196, in run
results = StateMachineWS.run(self, input_lines, input_offset)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 239, in run
context, state, transitions)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 460, in check_line
return method(match, context, next_state)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2328, in explicit_markup
self.explicit_list(blank_finish)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2358, in explicit_list
match_titles=self.state_machine.match_titles)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 319, in nested_list_parse
node=node, match_titles=match_titles)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 196, in run
results = StateMachineWS.run(self, input_lines, input_offset)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 239, in run
context, state, transitions)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/statemachine.py", line 460, in check_line
return method(match, context, next_state)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2631, in explicit_markup
nodelist, blank_finish = self.explicit_construct(match)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2338, in explicit_construct
return method(self, expmatch)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2081, in directive
directive_class, match, type_name, option_presets)
File "/home/stbr6072/.local/lib/python2.7/site-packages/docutils/parsers/rst/states.py", line 2130, in run_directive
result = directive_instance.run()
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/directive/index.py", line 90, in run
return self.handle_contents(project_info)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/directive/index.py", line 59, in handle_contents
node_list = object_renderer.render(context.node_stack[0], context)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 455, in visit_doxygen
nodelist.extend(self.render(compound))
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1235, in dispatch_compound
return self.visit_file(node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 540, in visit_file
return self.visit_compound(node, render_signature=render_signature)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 469, in visit_compound
rendered_data = self.render(file_data, parent_context)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 460, in visit_doxygendef
return self.render(node.compounddef)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 611, in visit_compounddef
child_nodes = self.render(sectiondef)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 645, in visit_sectiondef
node_list.extend(self.render_iterable(node.memberdef))
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1315, in render_iterable
output.extend(self.render(entry))
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1305, in render
result = method(self, node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 1242, in dispatch_memberdef
return self.visit_function(node)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 985, in visit_function
param_decl = get_param_decl(param)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 181, in get_param_decl
param_type = to_string(param.type_)
File "/usr/local/lib/python2.7/dist-packages/breathe-4.11.0-py2.7.egg/breathe/renderer/sphinxrenderer.py", line 174, in to_string
for p in node.content_:
AttributeError: 'NoneType' object has no attribute 'content_'
|
AttributeError
|
def _wrap_init_error(init_error):
# type: (F) -> F
def sentry_init_error(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return init_error(*args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
exc_info = sys.exc_info()
if exc_info and all(exc_info):
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
return init_error(*args, **kwargs)
return sentry_init_error # type: ignore
|
def _wrap_init_error(init_error):
# type: (F) -> F
def sentry_init_error(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return init_error(*args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
exc_info = sys.exc_info()
if exc_info and all(exc_info):
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(event, hint=hint)
return init_error(*args, **kwargs)
return sentry_init_error # type: ignore
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def sentry_init_error(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return init_error(*args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
exc_info = sys.exc_info()
if exc_info and all(exc_info):
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
return init_error(*args, **kwargs)
|
def sentry_init_error(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return init_error(*args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
exc_info = sys.exc_info()
if exc_info and all(exc_info):
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(event, hint=hint)
return init_error(*args, **kwargs)
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def _wrap_handler(handler):
# type: (F) -> F
def sentry_handler(aws_event, context, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
# Per https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html,
# `event` here is *likely* a dictionary, but also might be a number of
# other types (str, int, float, None).
#
# In some cases, it is a list (if the user is batch-invoking their
# function, for example), in which case we'll use the first entry as a
# representative from which to try pulling request data. (Presumably it
# will be the same for all events in the list, since they're all hitting
# the lambda in the same request.)
if isinstance(aws_event, list):
request_data = aws_event[0]
batch_size = len(aws_event)
else:
request_data = aws_event
batch_size = 1
if not isinstance(request_data, dict):
# If we're not dealing with a dictionary, we won't be able to get
# headers, path, http method, etc in any case, so it's fine that
# this is empty
request_data = {}
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return handler(aws_event, context, *args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
configured_time = context.get_remaining_time_in_millis()
with hub.push_scope() as scope:
with capture_internal_exceptions():
scope.clear_breadcrumbs()
scope.add_event_processor(
_make_request_event_processor(
request_data, context, configured_time
)
)
scope.set_tag("aws_region", context.invoked_function_arn.split(":")[3])
if batch_size > 1:
scope.set_tag("batch_request", True)
scope.set_tag("batch_size", batch_size)
timeout_thread = None
# Starting the Timeout thread only if the configured time is greater than Timeout warning
# buffer and timeout_warning parameter is set True.
if (
integration.timeout_warning
and configured_time > TIMEOUT_WARNING_BUFFER
):
waiting_time = (
configured_time - TIMEOUT_WARNING_BUFFER
) / MILLIS_TO_SECONDS
timeout_thread = TimeoutThread(
waiting_time,
configured_time / MILLIS_TO_SECONDS,
)
# Starting the thread to raise timeout warning exception
timeout_thread.start()
headers = request_data.get("headers", {})
transaction = Transaction.continue_from_headers(
headers, op="serverless.function", name=context.function_name
)
with hub.start_transaction(transaction):
try:
return handler(aws_event, context, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
reraise(*exc_info)
finally:
if timeout_thread:
timeout_thread.stop()
return sentry_handler # type: ignore
|
def _wrap_handler(handler):
# type: (F) -> F
def sentry_handler(event, context, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return handler(event, context, *args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
configured_time = context.get_remaining_time_in_millis()
with hub.push_scope() as scope:
with capture_internal_exceptions():
scope.clear_breadcrumbs()
scope.add_event_processor(
_make_request_event_processor(event, context, configured_time)
)
scope.set_tag("aws_region", context.invoked_function_arn.split(":")[3])
timeout_thread = None
# Starting the Timeout thread only if the configured time is greater than Timeout warning
# buffer and timeout_warning parameter is set True.
if (
integration.timeout_warning
and configured_time > TIMEOUT_WARNING_BUFFER
):
waiting_time = (
configured_time - TIMEOUT_WARNING_BUFFER
) / MILLIS_TO_SECONDS
timeout_thread = TimeoutThread(
waiting_time,
configured_time / MILLIS_TO_SECONDS,
)
# Starting the thread to raise timeout warning exception
timeout_thread.start()
headers = event.get("headers", {})
transaction = Transaction.continue_from_headers(
headers, op="serverless.function", name=context.function_name
)
with hub.start_transaction(transaction):
try:
return handler(event, context, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(event, hint=hint)
reraise(*exc_info)
finally:
if timeout_thread:
timeout_thread.stop()
return sentry_handler # type: ignore
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def sentry_handler(aws_event, context, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
# Per https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html,
# `event` here is *likely* a dictionary, but also might be a number of
# other types (str, int, float, None).
#
# In some cases, it is a list (if the user is batch-invoking their
# function, for example), in which case we'll use the first entry as a
# representative from which to try pulling request data. (Presumably it
# will be the same for all events in the list, since they're all hitting
# the lambda in the same request.)
if isinstance(aws_event, list):
request_data = aws_event[0]
batch_size = len(aws_event)
else:
request_data = aws_event
batch_size = 1
if not isinstance(request_data, dict):
# If we're not dealing with a dictionary, we won't be able to get
# headers, path, http method, etc in any case, so it's fine that
# this is empty
request_data = {}
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return handler(aws_event, context, *args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
configured_time = context.get_remaining_time_in_millis()
with hub.push_scope() as scope:
with capture_internal_exceptions():
scope.clear_breadcrumbs()
scope.add_event_processor(
_make_request_event_processor(request_data, context, configured_time)
)
scope.set_tag("aws_region", context.invoked_function_arn.split(":")[3])
if batch_size > 1:
scope.set_tag("batch_request", True)
scope.set_tag("batch_size", batch_size)
timeout_thread = None
# Starting the Timeout thread only if the configured time is greater than Timeout warning
# buffer and timeout_warning parameter is set True.
if integration.timeout_warning and configured_time > TIMEOUT_WARNING_BUFFER:
waiting_time = (
configured_time - TIMEOUT_WARNING_BUFFER
) / MILLIS_TO_SECONDS
timeout_thread = TimeoutThread(
waiting_time,
configured_time / MILLIS_TO_SECONDS,
)
# Starting the thread to raise timeout warning exception
timeout_thread.start()
headers = request_data.get("headers", {})
transaction = Transaction.continue_from_headers(
headers, op="serverless.function", name=context.function_name
)
with hub.start_transaction(transaction):
try:
return handler(aws_event, context, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
reraise(*exc_info)
finally:
if timeout_thread:
timeout_thread.stop()
|
def sentry_handler(event, context, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return handler(event, context, *args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
configured_time = context.get_remaining_time_in_millis()
with hub.push_scope() as scope:
with capture_internal_exceptions():
scope.clear_breadcrumbs()
scope.add_event_processor(
_make_request_event_processor(event, context, configured_time)
)
scope.set_tag("aws_region", context.invoked_function_arn.split(":")[3])
timeout_thread = None
# Starting the Timeout thread only if the configured time is greater than Timeout warning
# buffer and timeout_warning parameter is set True.
if integration.timeout_warning and configured_time > TIMEOUT_WARNING_BUFFER:
waiting_time = (
configured_time - TIMEOUT_WARNING_BUFFER
) / MILLIS_TO_SECONDS
timeout_thread = TimeoutThread(
waiting_time,
configured_time / MILLIS_TO_SECONDS,
)
# Starting the thread to raise timeout warning exception
timeout_thread.start()
headers = event.get("headers", {})
transaction = Transaction.continue_from_headers(
headers, op="serverless.function", name=context.function_name
)
with hub.start_transaction(transaction):
try:
return handler(event, context, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(event, hint=hint)
reraise(*exc_info)
finally:
if timeout_thread:
timeout_thread.stop()
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def _make_request_event_processor(aws_event, aws_context, configured_timeout):
# type: (Any, Any, Any) -> EventProcessor
start_time = datetime.utcnow()
def event_processor(sentry_event, hint, start_time=start_time):
# type: (Event, Hint, datetime) -> Optional[Event]
remaining_time_in_milis = aws_context.get_remaining_time_in_millis()
exec_duration = configured_timeout - remaining_time_in_milis
extra = sentry_event.setdefault("extra", {})
extra["lambda"] = {
"function_name": aws_context.function_name,
"function_version": aws_context.function_version,
"invoked_function_arn": aws_context.invoked_function_arn,
"aws_request_id": aws_context.aws_request_id,
"execution_duration_in_millis": exec_duration,
"remaining_time_in_millis": remaining_time_in_milis,
}
extra["cloudwatch logs"] = {
"url": _get_cloudwatch_logs_url(aws_context, start_time),
"log_group": aws_context.log_group_name,
"log_stream": aws_context.log_stream_name,
}
request = sentry_event.get("request", {})
if "httpMethod" in aws_event:
request["method"] = aws_event["httpMethod"]
request["url"] = _get_url(aws_event, aws_context)
if "queryStringParameters" in aws_event:
request["query_string"] = aws_event["queryStringParameters"]
if "headers" in aws_event:
request["headers"] = _filter_headers(aws_event["headers"])
if _should_send_default_pii():
user_info = sentry_event.setdefault("user", {})
id = aws_event.get("identity", {}).get("userArn")
if id is not None:
user_info.setdefault("id", id)
ip = aws_event.get("identity", {}).get("sourceIp")
if ip is not None:
user_info.setdefault("ip_address", ip)
if "body" in aws_event:
request["data"] = aws_event.get("body", "")
else:
if aws_event.get("body", None):
# Unfortunately couldn't find a way to get structured body from AWS
# event. Meaning every body is unstructured to us.
request["data"] = AnnotatedValue("", {"rem": [["!raw", "x", 0, 0]]})
sentry_event["request"] = request
return sentry_event
return event_processor
|
def _make_request_event_processor(aws_event, aws_context, configured_timeout):
# type: (Any, Any, Any) -> EventProcessor
start_time = datetime.utcnow()
def event_processor(event, hint, start_time=start_time):
# type: (Event, Hint, datetime) -> Optional[Event]
remaining_time_in_milis = aws_context.get_remaining_time_in_millis()
exec_duration = configured_timeout - remaining_time_in_milis
extra = event.setdefault("extra", {})
extra["lambda"] = {
"function_name": aws_context.function_name,
"function_version": aws_context.function_version,
"invoked_function_arn": aws_context.invoked_function_arn,
"aws_request_id": aws_context.aws_request_id,
"execution_duration_in_millis": exec_duration,
"remaining_time_in_millis": remaining_time_in_milis,
}
extra["cloudwatch logs"] = {
"url": _get_cloudwatch_logs_url(aws_context, start_time),
"log_group": aws_context.log_group_name,
"log_stream": aws_context.log_stream_name,
}
request = event.get("request", {})
if "httpMethod" in aws_event:
request["method"] = aws_event["httpMethod"]
request["url"] = _get_url(aws_event, aws_context)
if "queryStringParameters" in aws_event:
request["query_string"] = aws_event["queryStringParameters"]
if "headers" in aws_event:
request["headers"] = _filter_headers(aws_event["headers"])
if _should_send_default_pii():
user_info = event.setdefault("user", {})
id = aws_event.get("identity", {}).get("userArn")
if id is not None:
user_info.setdefault("id", id)
ip = aws_event.get("identity", {}).get("sourceIp")
if ip is not None:
user_info.setdefault("ip_address", ip)
if "body" in aws_event:
request["data"] = aws_event.get("body", "")
else:
if aws_event.get("body", None):
# Unfortunately couldn't find a way to get structured body from AWS
# event. Meaning every body is unstructured to us.
request["data"] = AnnotatedValue("", {"rem": [["!raw", "x", 0, 0]]})
event["request"] = request
return event
return event_processor
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def event_processor(sentry_event, hint, start_time=start_time):
# type: (Event, Hint, datetime) -> Optional[Event]
remaining_time_in_milis = aws_context.get_remaining_time_in_millis()
exec_duration = configured_timeout - remaining_time_in_milis
extra = sentry_event.setdefault("extra", {})
extra["lambda"] = {
"function_name": aws_context.function_name,
"function_version": aws_context.function_version,
"invoked_function_arn": aws_context.invoked_function_arn,
"aws_request_id": aws_context.aws_request_id,
"execution_duration_in_millis": exec_duration,
"remaining_time_in_millis": remaining_time_in_milis,
}
extra["cloudwatch logs"] = {
"url": _get_cloudwatch_logs_url(aws_context, start_time),
"log_group": aws_context.log_group_name,
"log_stream": aws_context.log_stream_name,
}
request = sentry_event.get("request", {})
if "httpMethod" in aws_event:
request["method"] = aws_event["httpMethod"]
request["url"] = _get_url(aws_event, aws_context)
if "queryStringParameters" in aws_event:
request["query_string"] = aws_event["queryStringParameters"]
if "headers" in aws_event:
request["headers"] = _filter_headers(aws_event["headers"])
if _should_send_default_pii():
user_info = sentry_event.setdefault("user", {})
id = aws_event.get("identity", {}).get("userArn")
if id is not None:
user_info.setdefault("id", id)
ip = aws_event.get("identity", {}).get("sourceIp")
if ip is not None:
user_info.setdefault("ip_address", ip)
if "body" in aws_event:
request["data"] = aws_event.get("body", "")
else:
if aws_event.get("body", None):
# Unfortunately couldn't find a way to get structured body from AWS
# event. Meaning every body is unstructured to us.
request["data"] = AnnotatedValue("", {"rem": [["!raw", "x", 0, 0]]})
sentry_event["request"] = request
return sentry_event
|
def event_processor(event, hint, start_time=start_time):
# type: (Event, Hint, datetime) -> Optional[Event]
remaining_time_in_milis = aws_context.get_remaining_time_in_millis()
exec_duration = configured_timeout - remaining_time_in_milis
extra = event.setdefault("extra", {})
extra["lambda"] = {
"function_name": aws_context.function_name,
"function_version": aws_context.function_version,
"invoked_function_arn": aws_context.invoked_function_arn,
"aws_request_id": aws_context.aws_request_id,
"execution_duration_in_millis": exec_duration,
"remaining_time_in_millis": remaining_time_in_milis,
}
extra["cloudwatch logs"] = {
"url": _get_cloudwatch_logs_url(aws_context, start_time),
"log_group": aws_context.log_group_name,
"log_stream": aws_context.log_stream_name,
}
request = event.get("request", {})
if "httpMethod" in aws_event:
request["method"] = aws_event["httpMethod"]
request["url"] = _get_url(aws_event, aws_context)
if "queryStringParameters" in aws_event:
request["query_string"] = aws_event["queryStringParameters"]
if "headers" in aws_event:
request["headers"] = _filter_headers(aws_event["headers"])
if _should_send_default_pii():
user_info = event.setdefault("user", {})
id = aws_event.get("identity", {}).get("userArn")
if id is not None:
user_info.setdefault("id", id)
ip = aws_event.get("identity", {}).get("sourceIp")
if ip is not None:
user_info.setdefault("ip_address", ip)
if "body" in aws_event:
request["data"] = aws_event.get("body", "")
else:
if aws_event.get("body", None):
# Unfortunately couldn't find a way to get structured body from AWS
# event. Meaning every body is unstructured to us.
request["data"] = AnnotatedValue("", {"rem": [["!raw", "x", 0, 0]]})
event["request"] = request
return event
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def _get_url(aws_event, aws_context):
# type: (Any, Any) -> str
path = aws_event.get("path", None)
headers = aws_event.get("headers", {})
host = headers.get("Host", None)
proto = headers.get("X-Forwarded-Proto", None)
if proto and host and path:
return "{}://{}{}".format(proto, host, path)
return "awslambda:///{}".format(aws_context.function_name)
|
def _get_url(event, context):
# type: (Any, Any) -> str
path = event.get("path", None)
headers = event.get("headers", {})
host = headers.get("Host", None)
proto = headers.get("X-Forwarded-Proto", None)
if proto and host and path:
return "{}://{}{}".format(proto, host, path)
return "awslambda:///{}".format(context.function_name)
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def _get_cloudwatch_logs_url(aws_context, start_time):
# type: (Any, datetime) -> str
"""
Generates a CloudWatchLogs console URL based on the context object
Arguments:
aws_context {Any} -- context from lambda handler
Returns:
str -- AWS Console URL to logs.
"""
formatstring = "%Y-%m-%dT%H:%M:%SZ"
url = (
"https://console.aws.amazon.com/cloudwatch/home?region={region}"
"#logEventViewer:group={log_group};stream={log_stream}"
";start={start_time};end={end_time}"
).format(
region=environ.get("AWS_REGION"),
log_group=aws_context.log_group_name,
log_stream=aws_context.log_stream_name,
start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),
end_time=(datetime.utcnow() + timedelta(seconds=2)).strftime(formatstring),
)
return url
|
def _get_cloudwatch_logs_url(context, start_time):
# type: (Any, datetime) -> str
"""
Generates a CloudWatchLogs console URL based on the context object
Arguments:
context {Any} -- context from lambda handler
Returns:
str -- AWS Console URL to logs.
"""
formatstring = "%Y-%m-%dT%H:%M:%SZ"
url = (
"https://console.aws.amazon.com/cloudwatch/home?region={region}"
"#logEventViewer:group={log_group};stream={log_stream}"
";start={start_time};end={end_time}"
).format(
region=environ.get("AWS_REGION"),
log_group=context.log_group_name,
log_stream=context.log_stream_name,
start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),
end_time=(datetime.utcnow() + timedelta(seconds=2)).strftime(formatstring),
)
return url
|
https://github.com/getsentry/sentry-python/issues/891
|
[ERROR] AttributeError: 'list' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/sentry_sdk/integrations/aws_lambda.py", line 106, in sentry_handler
headers = event.get("headers",
{}
)
|
AttributeError
|
def patch_channels_asgi_handler_impl(cls):
# type: (Any) -> None
import channels # type: ignore
from sentry_sdk.integrations.django import DjangoIntegration
if channels.__version__ < "3.0.0":
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, receive, send):
# type: (Any, Any, Any) -> Any
if Hub.current.get_integration(DjangoIntegration) is None:
return await old_app(self, receive, send)
middleware = SentryAsgiMiddleware(
lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True
)
return await middleware(self.scope)(receive, send)
cls.__call__ = sentry_patched_asgi_handler
else:
# The ASGI handler in Channels >= 3 has the same signature as
# the Django handler.
patch_django_asgi_handler_impl(cls)
|
def patch_channels_asgi_handler_impl(cls):
# type: (Any) -> None
from sentry_sdk.integrations.django import DjangoIntegration
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, receive, send):
# type: (Any, Any, Any) -> Any
if Hub.current.get_integration(DjangoIntegration) is None:
return await old_app(self, receive, send)
middleware = SentryAsgiMiddleware(
lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True
)
return await middleware(self.scope)(receive, send)
cls.__call__ = sentry_patched_asgi_handler
|
https://github.com/getsentry/sentry-python/issues/911
|
Exception inside application: sentry_patched_asgi_handler() takes 3 positional arguments but 4 were given
Traceback (most recent call last):
File "env/lib/python3.8/site-packages/channels/routing.py", line 71, in __call__
return await application(scope, receive, send)
TypeError: sentry_patched_asgi_handler() takes 3 positional arguments but 4 were given
HTTP POST /api/v2/auth/login 500 [0.67, 127.0.0.1:37750]
|
TypeError
|
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
hub = Hub.current
if hub.client is None:
return
client_options = hub.client.options
# exc_info might be None or (None, None, None)
#
# exc_info may also be any falsy value due to Python stdlib being
# liberal with what it receives and Celery's billiard being "liberal"
# with what it sends. See
# https://github.com/getsentry/sentry-python/issues/904
if record.exc_info and record.exc_info[0] is not None:
event, hint = event_from_exception(
record.exc_info,
client_options=client_options,
mechanism={"type": "logging", "handled": True},
)
elif record.exc_info and record.exc_info[0] is None:
event = {}
hint = {}
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(client_options["with_locals"]),
"crashed": False,
"current": True,
}
]
}
else:
event = {}
hint = {}
hint["log_record"] = record
event["level"] = _logging_to_event_level(record.levelname)
event["logger"] = record.name
event["logentry"] = {"message": to_string(record.msg), "params": record.args}
event["extra"] = _extra_from_record(record)
hub.capture_event(event, hint=hint)
|
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
hub = Hub.current
if hub.client is None:
return
client_options = hub.client.options
# exc_info might be None or (None, None, None)
if record.exc_info is not None and record.exc_info[0] is not None:
event, hint = event_from_exception(
record.exc_info,
client_options=client_options,
mechanism={"type": "logging", "handled": True},
)
elif record.exc_info and record.exc_info[0] is None:
event = {}
hint = {}
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(client_options["with_locals"]),
"crashed": False,
"current": True,
}
]
}
else:
event = {}
hint = {}
hint["log_record"] = record
event["level"] = _logging_to_event_level(record.levelname)
event["logger"] = record.name
event["logentry"] = {"message": to_string(record.msg), "params": record.args}
event["extra"] = _extra_from_record(record)
hub.capture_event(event, hint=hint)
|
https://github.com/getsentry/sentry-python/issues/904
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/logging.py", line 164, in emit
return self._emit(record)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/logging.py", line 178, in _emit
if record.exc_info is not None and record.exc_info[0] is not None:
TypeError: 'int' object is not subscriptable
|
TypeError
|
def pure_eval_frame(frame):
# type: (FrameType) -> Dict[str, Any]
source = executing.Source.for_frame(frame)
if not source.tree:
return {}
statements = source.statements_at_line(frame.f_lineno)
if not statements:
return {}
scope = stmt = list(statements)[0]
while True:
# Get the parent first in case the original statement is already
# a function definition, e.g. if we're calling a decorator
# In that case we still want the surrounding scope, not that function
scope = scope.parent
if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):
break
evaluator = pure_eval.Evaluator.from_frame(frame)
expressions = evaluator.interesting_expressions_grouped(scope)
def closeness(expression):
# type: (Tuple[List[Any], Any]) -> Tuple[int, int]
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
def start(n):
# type: (ast.expr) -> Tuple[int, int]
return (n.lineno, n.col_offset)
nodes_before_stmt = [
node for node in nodes if start(node) < stmt.last_token.end
]
if nodes_before_stmt:
# The position of the last node before or in the statement
return max(start(node) for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
lineno, col_offset = min(start(node) for node in nodes)
return (-lineno, -col_offset)
# This adds the first_token and last_token attributes to nodes
atok = source.asttokens()
expressions.sort(key=closeness, reverse=True)
return {
atok.get_text(nodes[0]): value
for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]
}
|
def pure_eval_frame(frame):
# type: (FrameType) -> Dict[str, Any]
source = executing.Source.for_frame(frame)
if not source.tree:
return {}
statements = source.statements_at_line(frame.f_lineno)
if not statements:
return {}
scope = stmt = list(statements)[0]
while True:
# Get the parent first in case the original statement is already
# a function definition, e.g. if we're calling a decorator
# In that case we still want the surrounding scope, not that function
scope = scope.parent
if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):
break
evaluator = pure_eval.Evaluator.from_frame(frame)
expressions = evaluator.interesting_expressions_grouped(scope)
def closeness(expression):
# type: (Tuple[List[Any], Any]) -> int
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
nodes_before_stmt = [
node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
]
if nodes_before_stmt:
# The position of the last node before or in the statement
return max(node.first_token.startpos for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
return -min(node.first_token.startpos for node in nodes)
# This adds the first_token and last_token attributes to nodes
atok = source.asttokens()
expressions.sort(key=closeness, reverse=True)
return {
atok.get_text(nodes[0]): value
for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]
}
|
https://github.com/getsentry/sentry-python/issues/893
|
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 443, in fetch
return await self._execute(query, args, 0, timeout)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1445, in _execute
result, _ = await self.__execute(
File "/server/athenian/api/db.py", line 191, in _asyncpg_execute
result = await self._execute_original(query, args, limit, timeout, return_status)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1454, in __execute
return await self._do_execute(query, executor, timeout)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1476, in _do_execute
result = await executor(stmt, None)
File "asyncpg/protocol/protocol.pyx", line 196, in bind_execute
return await waiter
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/scope.py", line 353, in apply_to_event
new_event = event_processor(event, hint)
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 79, in add_executing_info
pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 128, in pure_eval_frame
expressions.sort(key=closeness, reverse=True)
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 113, in closeness
nodes_before_stmt = [
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 114, in <listcomp>
node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
AttributeError: 'Name' object has no attribute 'first_token'
|
asyncpg.exceptions.ConnectionDoesNotExistError
|
def closeness(expression):
# type: (Tuple[List[Any], Any]) -> Tuple[int, int]
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
def start(n):
# type: (ast.expr) -> Tuple[int, int]
return (n.lineno, n.col_offset)
nodes_before_stmt = [node for node in nodes if start(node) < stmt.last_token.end]
if nodes_before_stmt:
# The position of the last node before or in the statement
return max(start(node) for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
lineno, col_offset = min(start(node) for node in nodes)
return (-lineno, -col_offset)
|
def closeness(expression):
# type: (Tuple[List[Any], Any]) -> int
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
nodes_before_stmt = [
node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
]
if nodes_before_stmt:
# The position of the last node before or in the statement
return max(node.first_token.startpos for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
return -min(node.first_token.startpos for node in nodes)
|
https://github.com/getsentry/sentry-python/issues/893
|
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 443, in fetch
return await self._execute(query, args, 0, timeout)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1445, in _execute
result, _ = await self.__execute(
File "/server/athenian/api/db.py", line 191, in _asyncpg_execute
result = await self._execute_original(query, args, limit, timeout, return_status)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1454, in __execute
return await self._do_execute(query, executor, timeout)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1476, in _do_execute
result = await executor(stmt, None)
File "asyncpg/protocol/protocol.pyx", line 196, in bind_execute
return await waiter
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/scope.py", line 353, in apply_to_event
new_event = event_processor(event, hint)
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 79, in add_executing_info
pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 128, in pure_eval_frame
expressions.sort(key=closeness, reverse=True)
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 113, in closeness
nodes_before_stmt = [
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 114, in <listcomp>
node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
AttributeError: 'Name' object has no attribute 'first_token'
|
asyncpg.exceptions.ConnectionDoesNotExistError
|
def _sentry_start_response(
old_start_response, # type: StartResponse
span, # type: Span
status, # type: str
response_headers, # type: WsgiResponseHeaders
exc_info=None, # type: Optional[WsgiExcInfo]
):
# type: (...) -> WsgiResponseIter
with capture_internal_exceptions():
status_int = int(status.split(" ", 1)[0])
span.set_http_status(status_int)
if exc_info is None:
# The Django Rest Framework WSGI test client, and likely other
# (incorrect) implementations, cannot deal with the exc_info argument
# if one is present. Avoid providing a third argument if not necessary.
return old_start_response(status, response_headers)
else:
return old_start_response(status, response_headers, exc_info)
|
def _sentry_start_response(
old_start_response, span, status, response_headers, exc_info=None
):
# type: (Callable[[str, U, Optional[E]], T], Span, str, U, Optional[E]) -> T
with capture_internal_exceptions():
status_int = int(status.split(" ", 1)[0])
span.set_http_status(status_int)
return old_start_response(status, response_headers, exc_info)
|
https://github.com/getsentry/sentry-python/issues/585
|
Traceback (most recent call last):
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/apscheduler/executors/base_py3.py", line 29, in run_coroutine_job
retval = await job.func(*job.args, **job.kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/backend/scheduler.py", line 96, in refresh_task
req = await fetch_resource_list(loop=loop)
File "/var/www/pixiu.40huo.cn/pixiu/backend/scheduler.py", line 27, in fetch_resource_list
req = await loop.run_in_executor(executor, send_req, "get", reverse(viewname="resource-list"))
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/utils/http_req.py", line 37, in send_req
return client.get(url=abs_url, headers=headers)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/requests/sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/rest_framework/test.py", line 115, in request
return super().request(method, url, *args, **kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/rest_framework/test.py", line 93, in send
wsgi_response = self.app(environ, start_response)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/django/__init__.py", line 101, in sentry_patched_wsgi_handler
return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/wsgi.py", line 106, in __call__
reraise(*_capture_exception(hub))
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/_compat.py", line 54, in reraise
raise value
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/wsgi.py", line 101, in __call__
rv = self.app(
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/django/__init__.py", line 101, in <lambda>
return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/django/core/handlers/wsgi.py", line 142, in __call__
start_response(status, response_headers)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/wsgi.py", line 121, in _sentry_start_response
return old_start_response(status, response_headers, exc_info)
TypeError: start_response() takes 2 positional arguments but 3 were given
|
TypeError
|
def __call__(self, status, response_headers, exc_info=None):
# type: (str, WsgiResponseHeaders, Optional[WsgiExcInfo]) -> WsgiResponseIter
pass
|
def __call__(self, environ, start_response):
# type: (Dict[str, str], Callable[..., Any]) -> _ScopedResponse
if _wsgi_middleware_applied.get(False):
return self.app(environ, start_response)
_wsgi_middleware_applied.set(True)
try:
hub = Hub(Hub.current)
with hub:
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope._name = "wsgi"
scope.add_event_processor(_make_wsgi_event_processor(environ))
span = Span.continue_from_environ(environ)
span.op = "http.server"
span.transaction = "generic WSGI request"
with hub.start_span(span) as span:
try:
rv = self.app(
environ,
functools.partial(_sentry_start_response, start_response, span),
)
except BaseException:
reraise(*_capture_exception(hub))
finally:
_wsgi_middleware_applied.set(False)
return _ScopedResponse(hub, rv)
|
https://github.com/getsentry/sentry-python/issues/585
|
Traceback (most recent call last):
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/apscheduler/executors/base_py3.py", line 29, in run_coroutine_job
retval = await job.func(*job.args, **job.kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/backend/scheduler.py", line 96, in refresh_task
req = await fetch_resource_list(loop=loop)
File "/var/www/pixiu.40huo.cn/pixiu/backend/scheduler.py", line 27, in fetch_resource_list
req = await loop.run_in_executor(executor, send_req, "get", reverse(viewname="resource-list"))
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/utils/http_req.py", line 37, in send_req
return client.get(url=abs_url, headers=headers)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/requests/sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/rest_framework/test.py", line 115, in request
return super().request(method, url, *args, **kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/rest_framework/test.py", line 93, in send
wsgi_response = self.app(environ, start_response)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/django/__init__.py", line 101, in sentry_patched_wsgi_handler
return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/wsgi.py", line 106, in __call__
reraise(*_capture_exception(hub))
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/_compat.py", line 54, in reraise
raise value
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/wsgi.py", line 101, in __call__
rv = self.app(
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/django/__init__.py", line 101, in <lambda>
return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/django/core/handlers/wsgi.py", line 142, in __call__
start_response(status, response_headers)
File "/var/www/pixiu.40huo.cn/pixiu/venv/lib/python3.8/site-packages/sentry_sdk/integrations/wsgi.py", line 121, in _sentry_start_response
return old_start_response(status, response_headers, exc_info)
TypeError: start_response() takes 2 positional arguments but 3 were given
|
TypeError
|
def __init__(self, app):
# type: (Any) -> None
self.app = app
if _looks_like_asgi3(app):
self.__call__ = self._run_asgi3 # type: Callable[..., Any]
else:
self.__call__ = self._run_asgi2
|
def __init__(self, app):
# type: (Any) -> None
self.app = app
|
https://github.com/getsentry/sentry-python/issues/556
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/usr/local/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/usr/local/lib/python3.7/site-packages/uvicorn/middleware/asgi2.py", line 7, in __call__
await instance(receive, send)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 54, in run_asgi2
scope, lambda: self.app(scope)(receive, send)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 93, in _run_app
raise exc from None
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 90, in _run_app
return await callback()
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 54, in <lambda>
scope, lambda: self.app(scope)(receive, send)
TypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'
|
TypeError
|
def _timed_queue_join(self, timeout):
# type: (float) -> bool
deadline = time() + timeout
queue = self._queue
real_all_tasks_done = getattr(queue, "all_tasks_done", None) # type: Optional[Any]
if real_all_tasks_done is not None:
real_all_tasks_done.acquire()
all_tasks_done = real_all_tasks_done # type: Optional[Any]
elif queue.__module__.startswith("eventlet."):
all_tasks_done = getattr(queue, "_cond", None)
else:
all_tasks_done = None
try:
while queue.unfinished_tasks: # type: ignore
delay = deadline - time()
if delay <= 0:
return False
if all_tasks_done is not None:
all_tasks_done.wait(timeout=delay)
else:
# worst case, we just poll the number of remaining tasks
sleep(0.1)
return True
finally:
if real_all_tasks_done is not None:
real_all_tasks_done.release() # type: ignore
|
def _timed_queue_join(self, timeout):
# type: (float) -> bool
deadline = time() + timeout
queue = self._queue
queue.all_tasks_done.acquire() # type: ignore
try:
while queue.unfinished_tasks: # type: ignore
delay = deadline - time()
if delay <= 0:
return False
queue.all_tasks_done.wait(timeout=delay) # type: ignore
return True
finally:
queue.all_tasks_done.release() # type: ignore
|
https://github.com/getsentry/sentry-python/issues/471
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/Users/jibanez/API/.conda/envs/cimrender/lib/python3.6/site-packages/sentry_sdk/worker.py", line 84, in flush
self._wait_flush(timeout, callback)
File "/Users/jibanez/API/.conda/envs/cimrender/lib/python3.6/site-packages/sentry_sdk/worker.py", line 90, in _wait_flush
if not self._timed_queue_join(initial_timeout):
File "/Users/jibanez/API/.conda/envs/cimrender/lib/python3.6/site-packages/sentry_sdk/worker.py", line 48, in _timed_queue_join
queue.all_tasks_done.acquire() # type: ignore
AttributeError: 'Queue' object has no attribute 'all_tasks_done'
|
AttributeError
|
def setup_once():
# type: () -> None
old_start = Thread.start
def sentry_start(self, *a, **kw):
hub = Hub.current
integration = hub.get_integration(ThreadingIntegration)
if integration is not None:
if not integration.propagate_hub:
hub_ = None
else:
hub_ = Hub(hub)
# Patching instance methods in `start()` creates a reference cycle if
# done in a naive way. See
# https://github.com/getsentry/sentry-python/pull/434
#
# In threading module, using current_thread API will access current thread instance
# without holding it to avoid a reference cycle in an easier way.
self.run = _wrap_run(hub_, self.run.__func__)
return old_start(self, *a, **kw) # type: ignore
Thread.start = sentry_start # type: ignore
|
def setup_once():
# type: () -> None
old_start = Thread.start
def sentry_start(self, *a, **kw):
hub = Hub.current
integration = hub.get_integration(ThreadingIntegration)
if integration is not None:
if not integration.propagate_hub:
hub_ = None
else:
hub_ = Hub(hub)
self.run = _wrap_run(hub_, self.run)
return old_start(self, *a, **kw) # type: ignore
Thread.start = sentry_start # type: ignore
|
https://github.com/getsentry/sentry-python/issues/423
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 101, in _python_exit
thread_wakeup.wakeup()
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 89, in wakeup
self._writer.send_bytes(b"")
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 183, in send_bytes
self._check_closed()
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
|
OSError
|
def sentry_start(self, *a, **kw):
hub = Hub.current
integration = hub.get_integration(ThreadingIntegration)
if integration is not None:
if not integration.propagate_hub:
hub_ = None
else:
hub_ = Hub(hub)
# Patching instance methods in `start()` creates a reference cycle if
# done in a naive way. See
# https://github.com/getsentry/sentry-python/pull/434
#
# In threading module, using current_thread API will access current thread instance
# without holding it to avoid a reference cycle in an easier way.
self.run = _wrap_run(hub_, self.run.__func__)
return old_start(self, *a, **kw) # type: ignore
|
def sentry_start(self, *a, **kw):
hub = Hub.current
integration = hub.get_integration(ThreadingIntegration)
if integration is not None:
if not integration.propagate_hub:
hub_ = None
else:
hub_ = Hub(hub)
self.run = _wrap_run(hub_, self.run)
return old_start(self, *a, **kw) # type: ignore
|
https://github.com/getsentry/sentry-python/issues/423
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 101, in _python_exit
thread_wakeup.wakeup()
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 89, in wakeup
self._writer.send_bytes(b"")
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 183, in send_bytes
self._check_closed()
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
|
OSError
|
def _wrap_run(parent_hub, old_run_func):
def run(*a, **kw):
hub = parent_hub or Hub.current
with hub:
try:
self = current_thread()
return old_run_func(self, *a, **kw)
except Exception:
reraise(*_capture_exception())
return run
|
def _wrap_run(parent_hub, old_run):
def run(*a, **kw):
hub = parent_hub or Hub.current
with hub:
try:
return old_run(*a, **kw)
except Exception:
reraise(*_capture_exception())
return run
|
https://github.com/getsentry/sentry-python/issues/423
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 101, in _python_exit
thread_wakeup.wakeup()
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 89, in wakeup
self._writer.send_bytes(b"")
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 183, in send_bytes
self._check_closed()
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
|
OSError
|
def run(*a, **kw):
hub = parent_hub or Hub.current
with hub:
try:
self = current_thread()
return old_run_func(self, *a, **kw)
except Exception:
reraise(*_capture_exception())
|
def run(*a, **kw):
hub = parent_hub or Hub.current
with hub:
try:
return old_run(*a, **kw)
except Exception:
reraise(*_capture_exception())
|
https://github.com/getsentry/sentry-python/issues/423
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 101, in _python_exit
thread_wakeup.wakeup()
File "/Users/tony.li/miniconda3/lib/python3.7/concurrent/futures/process.py", line 89, in wakeup
self._writer.send_bytes(b"")
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 183, in send_bytes
self._check_closed()
File "/Users/tony.li/miniconda3/lib/python3.7/multiprocessing/connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
|
OSError
|
def format_sql(sql, params):
# type: (Any, Any) -> Tuple[str, List[str]]
rv = []
if isinstance(params, dict):
# convert sql with named parameters to sql with unnamed parameters
conv = _FormatConverter(params)
if params:
sql = sql % conv
params = conv.params
else:
params = ()
for param in params or ():
if param is None:
rv.append("NULL")
param = safe_repr(param)
rv.append(param)
return sql, rv
|
def format_sql(sql, params):
# type: (Any, Any) -> Tuple[str, List[str]]
rv = []
if isinstance(params, dict):
# convert sql with named parameters to sql with unnamed parameters
conv = _FormatConverter(params)
if params:
sql = sql_to_string(sql)
sql = sql % conv
params = conv.params
else:
params = ()
for param in params or ():
if param is None:
rv.append("NULL")
param = safe_repr(param)
rv.append(param)
return sql, rv
|
https://github.com/getsentry/sentry-python/issues/201
|
$ ./manage.py test-sentry
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/ph/ph/app/management/commands/test-sentry.py", line 18, in handle
'foo': 'foo',
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 273, in execute
record_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 240, in record_sql
real_sql, real_params = format_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 222, in format_sql
sql = sql % conv
TypeError: unsupported operand type(s) for %: 'Composed' and '_FormatConverter'
$
|
TypeError
|
def record_sql(sql, params, cursor=None):
# type: (Any, Any, Any) -> None
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return
with capture_internal_exceptions():
if cursor and hasattr(cursor, "mogrify"): # psycopg2
real_sql = cursor.mogrify(sql, params)
with capture_internal_exceptions():
if isinstance(real_sql, bytes):
real_sql = real_sql.decode(cursor.connection.encoding)
else:
real_sql, real_params = format_sql(sql, params)
if real_params:
try:
real_sql = format_and_strip(real_sql, real_params)
except Exception:
pass
hub.add_breadcrumb(message=real_sql, category="query")
|
def record_sql(sql, params):
# type: (Any, Any) -> None
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return
real_sql, real_params = format_sql(sql, params)
if real_params:
try:
real_sql = format_and_strip(real_sql, real_params)
except Exception:
pass
hub.add_breadcrumb(message=real_sql, category="query")
|
https://github.com/getsentry/sentry-python/issues/201
|
$ ./manage.py test-sentry
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/ph/ph/app/management/commands/test-sentry.py", line 18, in handle
'foo': 'foo',
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 273, in execute
record_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 240, in record_sql
real_sql, real_params = format_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 222, in format_sql
sql = sql % conv
TypeError: unsupported operand type(s) for %: 'Composed' and '_FormatConverter'
$
|
TypeError
|
def install_sql_hook():
# type: () -> None
"""If installed this causes Django's queries to be captured."""
try:
from django.db.backends.utils import CursorWrapper # type: ignore
except ImportError:
from django.db.backends.util import CursorWrapper # type: ignore
try:
real_execute = CursorWrapper.execute
real_executemany = CursorWrapper.executemany
except AttributeError:
# This won't work on Django versions < 1.6
return
def record_many_sql(sql, param_list, cursor):
for params in param_list:
record_sql(sql, params, cursor)
def execute(self, sql, params=None):
try:
return real_execute(self, sql, params)
finally:
record_sql(sql, params, self.cursor)
def executemany(self, sql, param_list):
try:
return real_executemany(self, sql, param_list)
finally:
record_many_sql(sql, param_list, self.cursor)
CursorWrapper.execute = execute
CursorWrapper.executemany = executemany
ignore_logger("django.db.backends")
|
def install_sql_hook():
# type: () -> None
"""If installed this causes Django's queries to be captured."""
try:
from django.db.backends.utils import CursorWrapper # type: ignore
except ImportError:
from django.db.backends.util import CursorWrapper # type: ignore
try:
real_execute = CursorWrapper.execute
real_executemany = CursorWrapper.executemany
except AttributeError:
# This won't work on Django versions < 1.6
return
def record_many_sql(sql, param_list):
for params in param_list:
record_sql(sql, params)
def execute(self, sql, params=None):
try:
return real_execute(self, sql, params)
finally:
record_sql(sql, params)
def executemany(self, sql, param_list):
try:
return real_executemany(self, sql, param_list)
finally:
record_many_sql(sql, param_list)
CursorWrapper.execute = execute
CursorWrapper.executemany = executemany
ignore_logger("django.db.backends")
|
https://github.com/getsentry/sentry-python/issues/201
|
$ ./manage.py test-sentry
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/ph/ph/app/management/commands/test-sentry.py", line 18, in handle
'foo': 'foo',
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 273, in execute
record_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 240, in record_sql
real_sql, real_params = format_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 222, in format_sql
sql = sql % conv
TypeError: unsupported operand type(s) for %: 'Composed' and '_FormatConverter'
$
|
TypeError
|
def record_many_sql(sql, param_list, cursor):
for params in param_list:
record_sql(sql, params, cursor)
|
def record_many_sql(sql, param_list):
for params in param_list:
record_sql(sql, params)
|
https://github.com/getsentry/sentry-python/issues/201
|
$ ./manage.py test-sentry
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/ph/ph/app/management/commands/test-sentry.py", line 18, in handle
'foo': 'foo',
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 273, in execute
record_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 240, in record_sql
real_sql, real_params = format_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 222, in format_sql
sql = sql % conv
TypeError: unsupported operand type(s) for %: 'Composed' and '_FormatConverter'
$
|
TypeError
|
def execute(self, sql, params=None):
try:
return real_execute(self, sql, params)
finally:
record_sql(sql, params, self.cursor)
|
def execute(self, sql, params=None):
try:
return real_execute(self, sql, params)
finally:
record_sql(sql, params)
|
https://github.com/getsentry/sentry-python/issues/201
|
$ ./manage.py test-sentry
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/ph/ph/app/management/commands/test-sentry.py", line 18, in handle
'foo': 'foo',
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 273, in execute
record_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 240, in record_sql
real_sql, real_params = format_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 222, in format_sql
sql = sql % conv
TypeError: unsupported operand type(s) for %: 'Composed' and '_FormatConverter'
$
|
TypeError
|
def executemany(self, sql, param_list):
try:
return real_executemany(self, sql, param_list)
finally:
record_many_sql(sql, param_list, self.cursor)
|
def executemany(self, sql, param_list):
try:
return real_executemany(self, sql, param_list)
finally:
record_many_sql(sql, param_list)
|
https://github.com/getsentry/sentry-python/issues/201
|
$ ./manage.py test-sentry
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/ph/ph/app/management/commands/test-sentry.py", line 18, in handle
'foo': 'foo',
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 273, in execute
record_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 240, in record_sql
real_sql, real_params = format_sql(sql, params)
File "/usr/local/lib/python3.6/site-packages/sentry_sdk/integrations/django/__init__.py", line 222, in format_sql
sql = sql % conv
TypeError: unsupported operand type(s) for %: 'Composed' and '_FormatConverter'
$
|
TypeError
|
def setup_once():
import celery.app.trace as trace # type: ignore
old_build_tracer = trace.build_tracer
def sentry_build_tracer(name, task, *args, **kwargs):
# Need to patch both methods because older celery sometimes
# short-circuits to task.run if it thinks it's safe.
task.__call__ = _wrap_task_call(task, task.__call__)
task.run = _wrap_task_call(task, task.run)
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
trace.build_tracer = sentry_build_tracer
_patch_worker_exit()
# This logger logs every status of every task that ran on the worker.
# Meaning that every task's breadcrumbs are full of stuff like "Task
# <foo> raised unexpected <bar>".
ignore_logger("celery.worker.job")
|
def setup_once():
import celery.app.trace as trace # type: ignore
old_build_tracer = trace.build_tracer
def sentry_build_tracer(name, task, *args, **kwargs):
# Need to patch both methods because older celery sometimes
# short-circuits to task.run if it thinks it's safe.
task.__call__ = _wrap_task_call(task.__call__)
task.run = _wrap_task_call(task.run)
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
trace.build_tracer = sentry_build_tracer
# This logger logs every status of every task that ran on the worker.
# Meaning that every task's breadcrumbs are full of stuff like "Task
# <foo> raised unexpected <bar>".
ignore_logger("celery.worker.job")
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def sentry_build_tracer(name, task, *args, **kwargs):
# Need to patch both methods because older celery sometimes
# short-circuits to task.run if it thinks it's safe.
task.__call__ = _wrap_task_call(task, task.__call__)
task.run = _wrap_task_call(task, task.run)
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
|
def sentry_build_tracer(name, task, *args, **kwargs):
# Need to patch both methods because older celery sometimes
# short-circuits to task.run if it thinks it's safe.
task.__call__ = _wrap_task_call(task.__call__)
task.run = _wrap_task_call(task.run)
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def _wrap_task_call(task, f):
# Need to wrap task call because the exception is caught before we get to
# see it. Also celery's reported stacktrace is untrustworthy.
def _inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
exc_info = sys.exc_info()
with capture_internal_exceptions():
_capture_exception(task, exc_info)
reraise(*exc_info)
return _inner
|
def _wrap_task_call(f):
# Need to wrap task call because the exception is caught before we get to
# see it. Also celery's reported stacktrace is untrustworthy.
def _inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
reraise(*_capture_exception())
return _inner
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def _inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
exc_info = sys.exc_info()
with capture_internal_exceptions():
_capture_exception(task, exc_info)
reraise(*exc_info)
|
def _inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
reraise(*_capture_exception())
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def _make_event_processor(task, uuid, args, kwargs, request=None):
def event_processor(event, hint):
with capture_internal_exceptions():
event["transaction"] = task.name
with capture_internal_exceptions():
extra = event.setdefault("extra", {})
extra["celery-job"] = {
"task_name": task.name,
"args": args,
"kwargs": kwargs,
}
if "exc_info" in hint:
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
"celery",
"SoftTimeLimitExceeded",
getattr(task, "name", task),
]
return event
return event_processor
|
def _make_event_processor(task, uuid, args, kwargs, request=None):
def event_processor(event, hint):
with capture_internal_exceptions():
event["transaction"] = task.name
with capture_internal_exceptions():
extra = event.setdefault("extra", {})
extra["celery-job"] = {
"task_name": task.name,
"args": args,
"kwargs": kwargs,
}
if "exc_info" in hint:
with capture_internal_exceptions():
if isinstance(hint["exc_info"][1], Retry):
return None
if hasattr(task, "throws") and isinstance(
hint["exc_info"][1], task.throws
):
return None
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
"celery",
"SoftTimeLimitExceeded",
getattr(task, "name", task),
]
return event
return event_processor
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def event_processor(event, hint):
with capture_internal_exceptions():
event["transaction"] = task.name
with capture_internal_exceptions():
extra = event.setdefault("extra", {})
extra["celery-job"] = {
"task_name": task.name,
"args": args,
"kwargs": kwargs,
}
if "exc_info" in hint:
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
"celery",
"SoftTimeLimitExceeded",
getattr(task, "name", task),
]
return event
|
def event_processor(event, hint):
with capture_internal_exceptions():
event["transaction"] = task.name
with capture_internal_exceptions():
extra = event.setdefault("extra", {})
extra["celery-job"] = {
"task_name": task.name,
"args": args,
"kwargs": kwargs,
}
if "exc_info" in hint:
with capture_internal_exceptions():
if isinstance(hint["exc_info"][1], Retry):
return None
if hasattr(task, "throws") and isinstance(hint["exc_info"][1], task.throws):
return None
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
"celery",
"SoftTimeLimitExceeded",
getattr(task, "name", task),
]
return event
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def _capture_exception(task, exc_info):
hub = Hub.current
if hub.get_integration(CeleryIntegration) is None:
return
if isinstance(exc_info[1], Retry):
return
if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
return
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options,
mechanism={"type": "celery", "handled": False},
)
hub.capture_event(event, hint=hint)
|
def _capture_exception():
hub = Hub.current
exc_info = sys.exc_info()
if hub.get_integration(CeleryIntegration) is not None:
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options,
mechanism={"type": "celery", "handled": False},
)
hub.capture_event(event, hint=hint)
return exc_info
|
https://github.com/getsentry/sentry-python/issues/285
|
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
|
RuntimeError
|
def get_by_scope_and_name(cls, scope, name):
"""
Get a key value store given a scope and name.
:param scope: Scope which the key belongs to.
:type scope: ``str``
:param name: Name of the key.
:type key: ``str``
:rtype: :class:`KeyValuePairDB` or ``None``
"""
query_result = cls.impl.query(scope=scope, name=name)
if not query_result:
msg = 'The key "%s" does not exist in the StackStorm datastore.'
raise DataStoreKeyNotFoundError(msg % name)
return query_result.first() if query_result else None
|
def get_by_scope_and_name(cls, scope, name):
"""
Get a key value store given a scope and name.
:param scope: Scope which the key belongs to.
:type scope: ``str``
:param name: Name of the key.
:type key: ``str``
:rtype: :class:`KeyValuePairDB` or ``None``
"""
query_result = cls.impl.query(scope=scope, name=name)
if not query_result:
msg = 'The key "%s" does not exist in the StackStorm datastore.'
raise StackStormDBObjectNotFoundError(msg % name)
return query_result.first() if query_result else None
|
https://github.com/StackStorm/st2/issues/4979
|
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: 2020-06-26 18:27:35,134 ERROR [-] Failed to call controller function "get_one" for operation "st2api.controllers.v1.keyvalue:key_value_pair_controller.get_one": The key "aws_inspector.AWSInspector:us-east-2.last_start_time_" does not exist in the StackStorm datastore.
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: Traceback (most recent call last):
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2common/router.py", line 515, in __call__
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: resp = func(**kw)
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2api/controllers/v1/keyvalue.py", line 124, in get_one
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: from_model_kwargs=from_model_kwargs
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2api/controllers/resource.py", line 402, in _get_one_by_scope_and_name
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: instance = self.access.get_by_scope_and_name(scope=scope, name=name)
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2common/persistence/keyvalue.py", line 115, in get_by_scope_and_name
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: raise StackStormDBObjectNotFoundError(msg % name)
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: st2common.exceptions.db.StackStormDBObjectNotFoundError: The key "aws_inspector.AWSInspector:us-east-2.last_start_time_" does not exist in the StackStorm datastore.sod
|
StackStormDBObjectNotFoundError
|
def __call__(self, req):
"""
The method is invoked on every request and shows the lifecycle of the request received from
the middleware.
Although some middleware may use parts of the API spec, it is safe to assume that if you're
looking for the particular spec property handler, it's most likely a part of this method.
At the time of writing, the only property being utilized by middleware was `x-log-result`.
"""
LOG.debug("Received call with WebOb: %s", req)
endpoint, path_vars = self.match(req)
LOG.debug("Parsed endpoint: %s", endpoint)
LOG.debug("Parsed path_vars: %s", path_vars)
context = copy.copy(getattr(self, "mock_context", {}))
cookie_token = None
# Handle security
if "security" in endpoint:
security = endpoint.get("security")
else:
security = self.spec.get("security", [])
if self.auth and security:
try:
security_definitions = self.spec.get("securityDefinitions", {})
for statement in security:
declaration, options = statement.copy().popitem()
definition = security_definitions[declaration]
if definition["type"] == "apiKey":
if definition["in"] == "header":
token = req.headers.get(definition["name"])
elif definition["in"] == "query":
token = req.GET.get(definition["name"])
elif definition["in"] == "cookie":
token = req.cookies.get(definition["name"])
else:
token = None
if token:
_, auth_func = op_resolver(definition["x-operationId"])
auth_resp = auth_func(token)
# Include information on how user authenticated inside the context
if "auth-token" in definition["name"].lower():
auth_method = "authentication token"
elif "api-key" in definition["name"].lower():
auth_method = "API key"
context["user"] = User.get_by_name(auth_resp.user)
context["auth_info"] = {
"method": auth_method,
"location": definition["in"],
}
# Also include token expiration time when authenticated via auth token
if "auth-token" in definition["name"].lower():
context["auth_info"]["token_expire"] = auth_resp.expiry
if "x-set-cookie" in definition:
max_age = (
auth_resp.expiry - date_utils.get_datetime_utc_now()
)
cookie_token = cookies.make_cookie(
definition["x-set-cookie"],
token,
max_age=max_age,
httponly=True,
)
break
if "user" not in context:
raise auth_exc.NoAuthSourceProvidedError(
"One of Token or API key required."
)
except (
auth_exc.NoAuthSourceProvidedError,
auth_exc.MultipleAuthSourcesError,
) as e:
LOG.error(six.text_type(e))
return abort_unauthorized(six.text_type(e))
except auth_exc.TokenNotProvidedError as e:
LOG.exception("Token is not provided.")
return abort_unauthorized(six.text_type(e))
except auth_exc.TokenNotFoundError as e:
LOG.exception("Token is not found.")
return abort_unauthorized(six.text_type(e))
except auth_exc.TokenExpiredError as e:
LOG.exception("Token has expired.")
return abort_unauthorized(six.text_type(e))
except auth_exc.ApiKeyNotProvidedError as e:
LOG.exception("API key is not provided.")
return abort_unauthorized(six.text_type(e))
except auth_exc.ApiKeyNotFoundError as e:
LOG.exception("API key is not found.")
return abort_unauthorized(six.text_type(e))
except auth_exc.ApiKeyDisabledError as e:
LOG.exception("API key is disabled.")
return abort_unauthorized(six.text_type(e))
if cfg.CONF.rbac.enable:
user_db = context["user"]
permission_type = endpoint.get("x-permissions", None)
if permission_type:
rbac_backend = get_rbac_backend()
resolver = rbac_backend.get_resolver_for_permission_type(
permission_type
)
has_permission = resolver.user_has_permission(user_db, permission_type)
if not has_permission:
raise rbac_exc.ResourceTypeAccessDeniedError(
user_db, permission_type
)
# Collect parameters
kw = {}
for param in endpoint.get("parameters", []) + endpoint.get("x-parameters", []):
name = param["name"]
argument_name = param.get("x-as", None) or name
source = param["in"]
default = param.get("default", None)
# Collecting params from different sources
if source == "query":
kw[argument_name] = req.GET.get(name, default)
elif source == "path":
kw[argument_name] = path_vars[name]
elif source == "header":
kw[argument_name] = req.headers.get(name, default)
elif source == "formData":
kw[argument_name] = req.POST.get(name, default)
elif source == "environ":
kw[argument_name] = req.environ.get(name.upper(), default)
elif source == "context":
kw[argument_name] = context.get(name, default)
elif source == "request":
kw[argument_name] = getattr(req, name)
elif source == "body":
content_type = req.headers.get("Content-Type", "application/json")
content_type = parse_content_type_header(content_type=content_type)[0]
schema = param["schema"]
# NOTE: HACK: Workaround for eventlet wsgi server which sets Content-Type to
# text/plain if Content-Type is not provided in the request.
# All ouf our API endpoints except /v1/workflows/inspection and
# /exp/validation/mistral expect application/json so we explicitly set it to that
# if not provided (set to text/plain by the base http server) and if it's not
# /v1/workflows/inspection and /exp/validation/mistral API endpoints.
if not self.is_gunicorn and content_type == "text/plain":
operation_id = endpoint["operationId"]
if (
"workflow_inspection_controller" not in operation_id
and "mistral_validation_controller" not in operation_id
):
content_type = "application/json"
# Note: We also want to perform validation if no body is explicitly provided - in a
# lot of POST, PUT scenarios, body is mandatory
if not req.body and content_type == "application/json":
req.body = b"{}"
try:
if content_type == "application/json":
data = req.json
elif content_type == "text/plain":
data = req.body
elif content_type in [
"application/x-www-form-urlencoded",
"multipart/form-data",
]:
data = urlparse.parse_qs(req.body)
else:
raise ValueError('Unsupported Content-Type: "%s"' % (content_type))
except Exception as e:
detail = "Failed to parse request body: %s" % six.text_type(e)
raise exc.HTTPBadRequest(detail=detail)
# Special case for Python 3
if (
six.PY3
and content_type == "text/plain"
and isinstance(data, six.binary_type)
):
# Convert bytes to text type (string / unicode)
data = data.decode("utf-8")
try:
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=getattr(e, "message", six.text_type(e)),
comment=traceback.format_exc(),
)
if content_type == "text/plain":
kw[argument_name] = data
else:
class Body(object):
def __init__(self, **entries):
self.__dict__.update(entries)
ref = schema.get("$ref", None)
if ref:
with self.spec_resolver.resolving(ref) as resolved:
schema = resolved
if "x-api-model" in schema:
input_type = schema.get("type", [])
_, Model = op_resolver(schema["x-api-model"])
if input_type and not isinstance(input_type, (list, tuple)):
input_type = [input_type]
# root attribute is not an object, we need to use wrapper attribute to
# make it work with **kwarg expansion
if input_type and "array" in input_type:
data = {"data": data}
instance = self._get_model_instance(model_cls=Model, data=data)
# Call validate on the API model - note we should eventually move all
# those model schema definitions into openapi.yaml
try:
instance = instance.validate()
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=getattr(e, "message", six.text_type(e)),
comment=traceback.format_exc(),
)
else:
LOG.debug(
"Missing x-api-model definition for %s, using generic Body "
"model." % (endpoint["operationId"])
)
model = Body
instance = self._get_model_instance(model_cls=model, data=data)
kw[argument_name] = instance
# Making sure all required params are present
required = param.get("required", False)
if required and kw[argument_name] is None:
detail = 'Required parameter "%s" is missing' % name
raise exc.HTTPBadRequest(detail=detail)
# Validating and casting param types
param_type = param.get("type", None)
if kw[argument_name] is not None:
if param_type == "boolean":
positive = ("true", "1", "yes", "y")
negative = ("false", "0", "no", "n")
if str(kw[argument_name]).lower() not in positive + negative:
detail = 'Parameter "%s" is not of type boolean' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = str(kw[argument_name]).lower() in positive
elif param_type == "integer":
regex = r"^-?[0-9]+$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type integer' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = int(kw[argument_name])
elif param_type == "number":
regex = r"^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type float' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = float(kw[argument_name])
elif (
param_type == "array"
and param.get("items", {}).get("type", None) == "string"
):
if kw[argument_name] is None:
kw[argument_name] = []
elif isinstance(kw[argument_name], (list, tuple)):
# argument is already an array
pass
else:
kw[argument_name] = kw[argument_name].split(",")
# Call the controller
try:
controller_instance, func = op_resolver(endpoint["operationId"])
except Exception as e:
LOG.exception(
'Failed to load controller for operation "%s": %s'
% (endpoint["operationId"], six.text_type(e))
)
raise e
try:
resp = func(**kw)
except DataStoreKeyNotFoundError as e:
LOG.warning(
'Failed to call controller function "%s" for operation "%s": %s'
% (func.__name__, endpoint["operationId"], six.text_type(e))
)
raise e
except Exception as e:
LOG.exception(
'Failed to call controller function "%s" for operation "%s": %s'
% (func.__name__, endpoint["operationId"], six.text_type(e))
)
raise e
# Handle response
if resp is None:
resp = Response()
if not hasattr(resp, "__call__"):
resp = Response(json=resp)
operation_id = endpoint["operationId"]
# Process the response removing attributes based on the exclude_attribute and
# include_attributes query param filter values (if specified)
include_attributes = kw.get("include_attributes", None)
exclude_attributes = kw.get("exclude_attributes", None)
has_include_or_exclude_attributes = bool(include_attributes) or bool(
exclude_attributes
)
# NOTE: We do NOT want to process stream controller response
is_streamming_controller = endpoint.get(
"x-is-streaming-endpoint", bool("st2stream" in operation_id)
)
if not is_streamming_controller and resp.body and has_include_or_exclude_attributes:
# NOTE: We need to check for response.body attribute since resp.json throws if JSON
# response is not available
mandatory_include_fields = getattr(
controller_instance, "mandatory_include_fields_response", []
)
data = self._process_response(
data=resp.json,
mandatory_include_fields=mandatory_include_fields,
include_attributes=include_attributes,
exclude_attributes=exclude_attributes,
)
resp.json = data
responses = endpoint.get("responses", {})
response_spec = responses.get(str(resp.status_code), None)
default_response_spec = responses.get("default", None)
if not response_spec and default_response_spec:
LOG.debug(
'No custom response spec found for endpoint "%s", using a default one'
% (endpoint["operationId"])
)
response_spec_name = "default"
else:
response_spec_name = str(resp.status_code)
response_spec = response_spec or default_response_spec
if (
response_spec
and "schema" in response_spec
and not has_include_or_exclude_attributes
):
# NOTE: We don't perform response validation when include or exclude attributes are
# provided because this means partial response which likely won't pass the validation
LOG.debug(
'Using response spec "%s" for endpoint %s and status code %s'
% (response_spec_name, endpoint["operationId"], resp.status_code)
)
try:
validator = CustomValidator(
response_spec["schema"], resolver=self.spec_resolver
)
response_type = response_spec["schema"].get("type", "json")
if response_type == "string":
validator.validate(resp.text)
else:
validator.validate(resp.json)
except (jsonschema.ValidationError, ValueError):
LOG.exception("Response validation failed.")
resp.headers.add("Warning", '199 OpenAPI "Response validation failed"')
else:
LOG.debug(
'No response spec found for endpoint "%s"' % (endpoint["operationId"])
)
if cookie_token:
resp.headerlist.append(("Set-Cookie", cookie_token))
return resp
|
def __call__(self, req):
"""
The method is invoked on every request and shows the lifecycle of the request received from
the middleware.
Although some middleware may use parts of the API spec, it is safe to assume that if you're
looking for the particular spec property handler, it's most likely a part of this method.
At the time of writing, the only property being utilized by middleware was `x-log-result`.
"""
LOG.debug("Received call with WebOb: %s", req)
endpoint, path_vars = self.match(req)
LOG.debug("Parsed endpoint: %s", endpoint)
LOG.debug("Parsed path_vars: %s", path_vars)
context = copy.copy(getattr(self, "mock_context", {}))
cookie_token = None
# Handle security
if "security" in endpoint:
security = endpoint.get("security")
else:
security = self.spec.get("security", [])
if self.auth and security:
try:
security_definitions = self.spec.get("securityDefinitions", {})
for statement in security:
declaration, options = statement.copy().popitem()
definition = security_definitions[declaration]
if definition["type"] == "apiKey":
if definition["in"] == "header":
token = req.headers.get(definition["name"])
elif definition["in"] == "query":
token = req.GET.get(definition["name"])
elif definition["in"] == "cookie":
token = req.cookies.get(definition["name"])
else:
token = None
if token:
_, auth_func = op_resolver(definition["x-operationId"])
auth_resp = auth_func(token)
# Include information on how user authenticated inside the context
if "auth-token" in definition["name"].lower():
auth_method = "authentication token"
elif "api-key" in definition["name"].lower():
auth_method = "API key"
context["user"] = User.get_by_name(auth_resp.user)
context["auth_info"] = {
"method": auth_method,
"location": definition["in"],
}
# Also include token expiration time when authenticated via auth token
if "auth-token" in definition["name"].lower():
context["auth_info"]["token_expire"] = auth_resp.expiry
if "x-set-cookie" in definition:
max_age = (
auth_resp.expiry - date_utils.get_datetime_utc_now()
)
cookie_token = cookies.make_cookie(
definition["x-set-cookie"],
token,
max_age=max_age,
httponly=True,
)
break
if "user" not in context:
raise auth_exc.NoAuthSourceProvidedError(
"One of Token or API key required."
)
except (
auth_exc.NoAuthSourceProvidedError,
auth_exc.MultipleAuthSourcesError,
) as e:
LOG.error(six.text_type(e))
return abort_unauthorized(six.text_type(e))
except auth_exc.TokenNotProvidedError as e:
LOG.exception("Token is not provided.")
return abort_unauthorized(six.text_type(e))
except auth_exc.TokenNotFoundError as e:
LOG.exception("Token is not found.")
return abort_unauthorized(six.text_type(e))
except auth_exc.TokenExpiredError as e:
LOG.exception("Token has expired.")
return abort_unauthorized(six.text_type(e))
except auth_exc.ApiKeyNotProvidedError as e:
LOG.exception("API key is not provided.")
return abort_unauthorized(six.text_type(e))
except auth_exc.ApiKeyNotFoundError as e:
LOG.exception("API key is not found.")
return abort_unauthorized(six.text_type(e))
except auth_exc.ApiKeyDisabledError as e:
LOG.exception("API key is disabled.")
return abort_unauthorized(six.text_type(e))
if cfg.CONF.rbac.enable:
user_db = context["user"]
permission_type = endpoint.get("x-permissions", None)
if permission_type:
rbac_backend = get_rbac_backend()
resolver = rbac_backend.get_resolver_for_permission_type(
permission_type
)
has_permission = resolver.user_has_permission(user_db, permission_type)
if not has_permission:
raise rbac_exc.ResourceTypeAccessDeniedError(
user_db, permission_type
)
# Collect parameters
kw = {}
for param in endpoint.get("parameters", []) + endpoint.get("x-parameters", []):
name = param["name"]
argument_name = param.get("x-as", None) or name
source = param["in"]
default = param.get("default", None)
# Collecting params from different sources
if source == "query":
kw[argument_name] = req.GET.get(name, default)
elif source == "path":
kw[argument_name] = path_vars[name]
elif source == "header":
kw[argument_name] = req.headers.get(name, default)
elif source == "formData":
kw[argument_name] = req.POST.get(name, default)
elif source == "environ":
kw[argument_name] = req.environ.get(name.upper(), default)
elif source == "context":
kw[argument_name] = context.get(name, default)
elif source == "request":
kw[argument_name] = getattr(req, name)
elif source == "body":
content_type = req.headers.get("Content-Type", "application/json")
content_type = parse_content_type_header(content_type=content_type)[0]
schema = param["schema"]
# NOTE: HACK: Workaround for eventlet wsgi server which sets Content-Type to
# text/plain if Content-Type is not provided in the request.
# All ouf our API endpoints except /v1/workflows/inspection and
# /exp/validation/mistral expect application/json so we explicitly set it to that
# if not provided (set to text/plain by the base http server) and if it's not
# /v1/workflows/inspection and /exp/validation/mistral API endpoints.
if not self.is_gunicorn and content_type == "text/plain":
operation_id = endpoint["operationId"]
if (
"workflow_inspection_controller" not in operation_id
and "mistral_validation_controller" not in operation_id
):
content_type = "application/json"
# Note: We also want to perform validation if no body is explicitly provided - in a
# lot of POST, PUT scenarios, body is mandatory
if not req.body and content_type == "application/json":
req.body = b"{}"
try:
if content_type == "application/json":
data = req.json
elif content_type == "text/plain":
data = req.body
elif content_type in [
"application/x-www-form-urlencoded",
"multipart/form-data",
]:
data = urlparse.parse_qs(req.body)
else:
raise ValueError('Unsupported Content-Type: "%s"' % (content_type))
except Exception as e:
detail = "Failed to parse request body: %s" % six.text_type(e)
raise exc.HTTPBadRequest(detail=detail)
# Special case for Python 3
if (
six.PY3
and content_type == "text/plain"
and isinstance(data, six.binary_type)
):
# Convert bytes to text type (string / unicode)
data = data.decode("utf-8")
try:
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=getattr(e, "message", six.text_type(e)),
comment=traceback.format_exc(),
)
if content_type == "text/plain":
kw[argument_name] = data
else:
class Body(object):
def __init__(self, **entries):
self.__dict__.update(entries)
ref = schema.get("$ref", None)
if ref:
with self.spec_resolver.resolving(ref) as resolved:
schema = resolved
if "x-api-model" in schema:
input_type = schema.get("type", [])
_, Model = op_resolver(schema["x-api-model"])
if input_type and not isinstance(input_type, (list, tuple)):
input_type = [input_type]
# root attribute is not an object, we need to use wrapper attribute to
# make it work with **kwarg expansion
if input_type and "array" in input_type:
data = {"data": data}
instance = self._get_model_instance(model_cls=Model, data=data)
# Call validate on the API model - note we should eventually move all
# those model schema definitions into openapi.yaml
try:
instance = instance.validate()
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=getattr(e, "message", six.text_type(e)),
comment=traceback.format_exc(),
)
else:
LOG.debug(
"Missing x-api-model definition for %s, using generic Body "
"model." % (endpoint["operationId"])
)
model = Body
instance = self._get_model_instance(model_cls=model, data=data)
kw[argument_name] = instance
# Making sure all required params are present
required = param.get("required", False)
if required and kw[argument_name] is None:
detail = 'Required parameter "%s" is missing' % name
raise exc.HTTPBadRequest(detail=detail)
# Validating and casting param types
param_type = param.get("type", None)
if kw[argument_name] is not None:
if param_type == "boolean":
positive = ("true", "1", "yes", "y")
negative = ("false", "0", "no", "n")
if str(kw[argument_name]).lower() not in positive + negative:
detail = 'Parameter "%s" is not of type boolean' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = str(kw[argument_name]).lower() in positive
elif param_type == "integer":
regex = r"^-?[0-9]+$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type integer' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = int(kw[argument_name])
elif param_type == "number":
regex = r"^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type float' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = float(kw[argument_name])
elif (
param_type == "array"
and param.get("items", {}).get("type", None) == "string"
):
if kw[argument_name] is None:
kw[argument_name] = []
elif isinstance(kw[argument_name], (list, tuple)):
# argument is already an array
pass
else:
kw[argument_name] = kw[argument_name].split(",")
# Call the controller
try:
controller_instance, func = op_resolver(endpoint["operationId"])
except Exception as e:
LOG.exception(
'Failed to load controller for operation "%s": %s'
% (endpoint["operationId"], six.text_type(e))
)
raise e
try:
resp = func(**kw)
except Exception as e:
LOG.exception(
'Failed to call controller function "%s" for operation "%s": %s'
% (func.__name__, endpoint["operationId"], six.text_type(e))
)
raise e
# Handle response
if resp is None:
resp = Response()
if not hasattr(resp, "__call__"):
resp = Response(json=resp)
operation_id = endpoint["operationId"]
# Process the response removing attributes based on the exclude_attribute and
# include_attributes query param filter values (if specified)
include_attributes = kw.get("include_attributes", None)
exclude_attributes = kw.get("exclude_attributes", None)
has_include_or_exclude_attributes = bool(include_attributes) or bool(
exclude_attributes
)
# NOTE: We do NOT want to process stream controller response
is_streamming_controller = endpoint.get(
"x-is-streaming-endpoint", bool("st2stream" in operation_id)
)
if not is_streamming_controller and resp.body and has_include_or_exclude_attributes:
# NOTE: We need to check for response.body attribute since resp.json throws if JSON
# response is not available
mandatory_include_fields = getattr(
controller_instance, "mandatory_include_fields_response", []
)
data = self._process_response(
data=resp.json,
mandatory_include_fields=mandatory_include_fields,
include_attributes=include_attributes,
exclude_attributes=exclude_attributes,
)
resp.json = data
responses = endpoint.get("responses", {})
response_spec = responses.get(str(resp.status_code), None)
default_response_spec = responses.get("default", None)
if not response_spec and default_response_spec:
LOG.debug(
'No custom response spec found for endpoint "%s", using a default one'
% (endpoint["operationId"])
)
response_spec_name = "default"
else:
response_spec_name = str(resp.status_code)
response_spec = response_spec or default_response_spec
if (
response_spec
and "schema" in response_spec
and not has_include_or_exclude_attributes
):
# NOTE: We don't perform response validation when include or exclude attributes are
# provided because this means partial response which likely won't pass the validation
LOG.debug(
'Using response spec "%s" for endpoint %s and status code %s'
% (response_spec_name, endpoint["operationId"], resp.status_code)
)
try:
validator = CustomValidator(
response_spec["schema"], resolver=self.spec_resolver
)
response_type = response_spec["schema"].get("type", "json")
if response_type == "string":
validator.validate(resp.text)
else:
validator.validate(resp.json)
except (jsonschema.ValidationError, ValueError):
LOG.exception("Response validation failed.")
resp.headers.add("Warning", '199 OpenAPI "Response validation failed"')
else:
LOG.debug(
'No response spec found for endpoint "%s"' % (endpoint["operationId"])
)
if cookie_token:
resp.headerlist.append(("Set-Cookie", cookie_token))
return resp
|
https://github.com/StackStorm/st2/issues/4979
|
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: 2020-06-26 18:27:35,134 ERROR [-] Failed to call controller function "get_one" for operation "st2api.controllers.v1.keyvalue:key_value_pair_controller.get_one": The key "aws_inspector.AWSInspector:us-east-2.last_start_time_" does not exist in the StackStorm datastore.
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: Traceback (most recent call last):
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2common/router.py", line 515, in __call__
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: resp = func(**kw)
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2api/controllers/v1/keyvalue.py", line 124, in get_one
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: from_model_kwargs=from_model_kwargs
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2api/controllers/resource.py", line 402, in _get_one_by_scope_and_name
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: instance = self.access.get_by_scope_and_name(scope=scope, name=name)
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2common/persistence/keyvalue.py", line 115, in get_by_scope_and_name
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: raise StackStormDBObjectNotFoundError(msg % name)
Jun 26 18:27:35 ip-10-15-1-121 gunicorn[184385]: st2common.exceptions.db.StackStormDBObjectNotFoundError: The key "aws_inspector.AWSInspector:us-east-2.last_start_time_" does not exist in the StackStorm datastore.sod
|
StackStormDBObjectNotFoundError
|
def to_serializable_dict(self, mask_secrets=False):
"""
Serialize database model to a dictionary.
:param mask_secrets: True to mask secrets in the resulting dict.
:type mask_secrets: ``boolean``
:rtype: ``dict``
"""
serializable_dict = {}
for k in sorted(six.iterkeys(self._fields)):
v = getattr(self, k)
if isinstance(v, JSON_UNFRIENDLY_TYPES):
v = str(v)
elif isinstance(v, me.EmbeddedDocument):
v = json.loads(v.to_json())
serializable_dict[k] = v
if mask_secrets and cfg.CONF.log.mask_secrets:
serializable_dict = self.mask_secrets(value=serializable_dict)
return serializable_dict
|
def to_serializable_dict(self, mask_secrets=False):
"""
Serialize database model to a dictionary.
:param mask_secrets: True to mask secrets in the resulting dict.
:type mask_secrets: ``boolean``
:rtype: ``dict``
"""
serializable_dict = {}
for k in sorted(six.iterkeys(self._fields)):
v = getattr(self, k)
v = str(v) if isinstance(v, JSON_UNFRIENDLY_TYPES) else v
serializable_dict[k] = v
if mask_secrets and cfg.CONF.log.mask_secrets:
serializable_dict = self.mask_secrets(value=serializable_dict)
return serializable_dict
|
https://github.com/StackStorm/st2/issues/4934
|
2020-05-04 14:18:49,473 140709056184880 INFO engine [-] Found 1 rules defined for trigger core.38d1bbdd-a659-4038-b3f4-ce250eb79db8
2020-05-04 14:18:49,475 140709056184880 ERROR traceback [-] Traceback (most recent call last):
2020-05-04 14:18:49,476 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 851, in emit
2020-05-04 14:18:49,476 140709056184880 ERROR traceback [-] msg = self.format(record)
2020-05-04 14:18:49,477 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 724, in format
2020-05-04 14:18:49,477 140709056184880 ERROR traceback [-] return fmt.format(record)
2020-05-04 14:18:49,478 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 176, in format
2020-05-04 14:18:49,478 140709056184880 ERROR traceback [-] attributes = self._format_extra_attributes(attributes=attributes)
2020-05-04 14:18:49,479 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 151, in _format_extra_attributes
2020-05-04 14:18:49,480 140709056184880 ERROR traceback [-] value = serialize_object(obj=value)
2020-05-04 14:18:49,480 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 65, in serialize_object
2020-05-04 14:18:49,481 140709056184880 ERROR traceback [-] value = obj.to_serializable_dict(mask_secrets=True)
2020-05-04 14:18:49,481 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/stormbase.py", line 105, in to_serializable_dict
2020-05-04 14:18:49,482 140709056184880 ERROR traceback [-] serializable_dict = self.mask_secrets(value=serializable_dict)
2020-05-04 14:18:49,482 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/rule.py", line 122, in mask_secrets
2020-05-04 14:18:49,483 140709056184880 ERROR traceback [-] action_ref = result.get('action', {}).get('ref', None)
2020-05-04 14:18:49,484 140709056184880 ERROR traceback [-] AttributeError: 'str' object has no attribute 'get'
2020-05-04 14:18:49,484 140709056184880 ERROR filter [-] Logged from file filter.py, line 72
2020-05-04 14:18:49,486 140709056184880 ERROR traceback [-] Traceback (most recent call last):
2020-05-04 14:18:49,487 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 851, in emit
2020-05-04 14:18:49,488 140709056184880 ERROR traceback [-] msg = self.format(record)
2020-05-04 14:18:49,488 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 724, in format
2020-05-04 14:18:49,489 140709056184880 ERROR traceback [-] return fmt.format(record)
2020-05-04 14:18:49,489 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 176, in format
2020-05-04 14:18:49,490 140709056184880 ERROR traceback [-] attributes = self._format_extra_attributes(attributes=attributes)
2020-05-04 14:18:49,490 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 151, in _format_extra_attributes
2020-05-04 14:18:49,491 140709056184880 ERROR traceback [-] value = serialize_object(obj=value)
2020-05-04 14:18:49,491 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 65, in serialize_object
2020-05-04 14:18:49,492 140709056184880 ERROR traceback [-] value = obj.to_serializable_dict(mask_secrets=True)
2020-05-04 14:18:49,493 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/stormbase.py", line 105, in to_serializable_dict
2020-05-04 14:18:49,493 140709056184880 ERROR traceback [-] serializable_dict = self.mask_secrets(value=serializable_dict)
2020-05-04 14:18:49,494 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/rule.py", line 122, in mask_secrets
2020-05-04 14:18:49,494 140709056184880 ERROR traceback [-] action_ref = result.get('action', {}).get('ref', None)
2020-05-04 14:18:49,495 140709056184880 ERROR traceback [-] AttributeError: 'str' object has no attribute 'get'
2020-05-04 14:18:49,495 140709056184880 ERROR handlers [-] Logged from file filter.py, line 72
2020-05-04 14:18:49,497 140709056184880 ERROR traceback [-] Traceback (most recent call last):
2020-05-04 14:18:49,498 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 851, in emit
2020-05-04 14:18:49,499 140709056184880 ERROR traceback [-] msg = self.format(record)
2020-05-04 14:18:49,499 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 724, in format
2020-05-04 14:18:49,500 140709056184880 ERROR traceback [-] return fmt.format(record)
2020-05-04 14:18:49,500 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 241, in format
2020-05-04 14:18:49,501 140709056184880 ERROR traceback [-] attributes = self._format_extra_attributes(attributes=attributes)
2020-05-04 14:18:49,501 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 151, in _format_extra_attributes
2020-05-04 14:18:49,502 140709056184880 ERROR traceback [-] value = serialize_object(obj=value)
2020-05-04 14:18:49,503 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 65, in serialize_object
2020-05-04 14:18:49,503 140709056184880 ERROR traceback [-] value = obj.to_serializable_dict(mask_secrets=True)
2020-05-04 14:18:49,504 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/stormbase.py", line 105, in to_serializable_dict
2020-05-04 14:18:49,504 140709056184880 ERROR traceback [-] serializable_dict = self.mask_secrets(value=serializable_dict)
2020-05-04 14:18:49,505 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/rule.py", line 122, in mask_secrets
2020-05-04 14:18:49,505 140709056184880 ERROR traceback [-] action_ref = result.get('action', {}).get('ref', None)
2020-05-04 14:18:49,506 140709056184880 ERROR traceback [-] AttributeError: 'str' object has no attribute 'get'
2020-05-04 14:18:49,506 140709056184880 ERROR handlers [-] Logged from file filter.py, line 72
2020-05-04 14:18:49,508 140709056184880 INFO matcher [-] 1 rule(s) found to enforce for 38d1bbdd-a659-4038-b3f4-ce250eb79db8.
2020-05-04 14:18:49,508 140709056184880 INFO engine [-] Matched 1 rule(s) for trigger_instance 5eb05c896077d5bca5610507 (trigger=core.38d1bbdd-a659-4038-b3f4-ce250eb79db8)
2020-05-04 14:18:49,524 140709056184880 INFO enforcer [-] Invoking action core.local for trigger_instance 5eb05c896077d5bca5610507 with params {"cmd": "echo \"{{trigger.executed_at}}\""}.
2020-05-04 14:18:49,739 140709056184880 ERROR traceback [-] Traceback (most recent call last):
2020-05-04 14:18:49,740 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 851, in emit
2020-05-04 14:18:49,740 140709056184880 ERROR traceback [-] msg = self.format(record)
2020-05-04 14:18:49,741 140709056184880 ERROR traceback [-] File "/usr/lib64/python2.7/logging/__init__.py", line 724, in format
2020-05-04 14:18:49,741 140709056184880 ERROR traceback [-] return fmt.format(record)
2020-05-04 14:18:49,742 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 241, in format
2020-05-04 14:18:49,743 140709056184880 ERROR traceback [-] attributes = self._format_extra_attributes(attributes=attributes)
2020-05-04 14:18:49,743 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 151, in _format_extra_attributes
2020-05-04 14:18:49,744 140709056184880 ERROR traceback [-] value = serialize_object(obj=value)
2020-05-04 14:18:49,744 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/logging/formatters.py", line 65, in serialize_object
2020-05-04 14:18:49,745 140709056184880 ERROR traceback [-] value = obj.to_serializable_dict(mask_secrets=True)
2020-05-04 14:18:49,745 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/stormbase.py", line 105, in to_serializable_dict
2020-05-04 14:18:49,746 140709056184880 ERROR traceback [-] serializable_dict = self.mask_secrets(value=serializable_dict)
2020-05-04 14:18:49,748 140709056184880 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/models/db/rule.py", line 122, in mask_secrets
2020-05-04 14:18:49,748 140709056184880 ERROR traceback [-] action_ref = result.get('action', {}).get('ref', None)
2020-05-04 14:18:49,749 140709056184880 ERROR traceback [-] AttributeError: 'str' object has no attribute 'get'
2020-05-04 14:18:49,750 140709056184880 ERROR handlers [-] Logged from file enforcer.py, line 118
2020-05-04 14:18:50,419 140709057006000 INFO engine [-] Found 0 rules defined for trigger core.st2.generic.actiontrigger
2020-05-04 14:18:50,424 140709057006000 INFO engine [-] No matching rules found for trigger instance 5eb05c8a6077d5bca561050c.
|
AttributeError
|
def close(self):
if self.socket:
self.socket.close()
if self.client:
self.client.close()
if self.sftp_client:
self.sftp_client.close()
if self.bastion_socket:
self.bastion_socket.close()
if self.bastion_client:
self.bastion_client.close()
return True
|
def close(self):
self.logger.debug("Closing server connection")
self.client.close()
if self.socket:
self.logger.debug("Closing proxycommand socket connection")
# https://github.com/paramiko/paramiko/issues/789 Avoid zombie ssh processes
self.socket.process.kill()
self.socket.process.poll()
if self.sftp_client:
self.sftp_client.close()
if self.bastion_client:
self.bastion_client.close()
return True
|
https://github.com/StackStorm/st2/issues/4973
|
st2actionrunner-59c47ddcc8-9jjbl r1 2020-06-25 15:30:17,009 ERROR [-] Failed shutting down SSH connection to host: rndcas402
st2actionrunner-59c47ddcc8-9jjbl r1 Traceback (most recent call last):
st2actionrunner-59c47ddcc8-9jjbl r1 File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/runners/parallel_ssh.py", line 218, in close
st2actionrunner-59c47ddcc8-9jjbl r1 self._hosts_client[host].close()
st2actionrunner-59c47ddcc8-9jjbl r1 File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/runners/paramiko_ssh.py", line 462, in close
st2actionrunner-59c47ddcc8-9jjbl r1 self.socket.process.kill()
st2actionrunner-59c47ddcc8-9jjbl r1 AttributeError: 'Channel' object has no attribute 'process'
|
AttributeError
|
def process(self, message):
handler_function = self.message_types.get(type(message), None)
if not handler_function:
msg = 'Handler function for message type "%s" is not defined.' % type(message)
raise ValueError(msg)
try:
handler_function(message)
except Exception as e:
# If the exception is caused by DB connection error, then the following
# error handling routine will fail as well because it will try to update
# the database and fail the workflow execution gracefully. In this case,
# the garbage collector will find and cancel these workflow executions.
self.fail_workflow_execution(message, e)
|
def process(self, message):
handler_function = self.message_types.get(type(message), None)
if not handler_function:
msg = 'Handler function for message type "%s" is not defined.' % type(message)
raise ValueError(msg)
handler_function(message)
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def register_opts(ignore_errors=False):
rbac_opts = [
cfg.BoolOpt("enable", default=False, help="Enable RBAC."),
cfg.StrOpt("backend", default="noop", help="RBAC backend to use."),
cfg.BoolOpt(
"sync_remote_groups",
default=False,
help="True to synchronize remote groups returned by the auth backed for each "
"StackStorm user with local StackStorm roles based on the group to role "
"mapping definition files.",
),
cfg.BoolOpt(
"permission_isolation",
default=False,
help="Isolate resources by user. For now, these resources only include rules and "
"executions. All resources can only be viewed or executed by the owning user "
"except the admin and system_user who can view or run everything.",
),
]
do_register_opts(rbac_opts, "rbac", ignore_errors)
system_user_opts = [
cfg.StrOpt("user", default="stanley", help="Default system user."),
cfg.StrOpt(
"ssh_key_file",
default="/home/stanley/.ssh/stanley_rsa",
help="SSH private key for the system user.",
),
]
do_register_opts(system_user_opts, "system_user", ignore_errors)
schema_opts = [
cfg.IntOpt("version", default=4, help="Version of JSON schema to use."),
cfg.StrOpt(
"draft",
default="http://json-schema.org/draft-04/schema#",
help="URL to the JSON schema draft.",
),
]
do_register_opts(schema_opts, "schema", ignore_errors)
system_opts = [
cfg.BoolOpt("debug", default=False, help="Enable debug mode."),
cfg.StrOpt(
"base_path",
default="/opt/stackstorm",
help="Base path to all st2 artifacts.",
),
cfg.BoolOpt(
"validate_trigger_parameters",
default=True,
help="True to validate parameters for non-system trigger types when creating"
"a rule. By default, only parameters for system triggers are validated.",
),
cfg.BoolOpt(
"validate_trigger_payload",
default=True,
help="True to validate payload for non-system trigger types when dispatching a trigger "
"inside the sensor. By default, only payload for system triggers is validated.",
),
cfg.BoolOpt(
"validate_output_schema",
default=False,
help="True to validate action and runner output against schema.",
),
]
do_register_opts(system_opts, "system", ignore_errors)
system_packs_base_path = os.path.join(cfg.CONF.system.base_path, "packs")
system_runners_base_path = os.path.join(cfg.CONF.system.base_path, "runners")
content_opts = [
cfg.StrOpt(
"pack_group",
default="st2packs",
help="User group that can write to packs directory.",
),
cfg.StrOpt(
"system_packs_base_path",
default=system_packs_base_path,
help="Path to the directory which contains system packs.",
),
cfg.StrOpt(
"system_runners_base_path",
default=system_runners_base_path,
help="Path to the directory which contains system runners. "
"NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0",
),
cfg.StrOpt(
"packs_base_paths",
default=None,
help="Paths which will be searched for integration packs.",
),
cfg.StrOpt(
"runners_base_paths",
default=None,
help="Paths which will be searched for runners. "
"NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0",
),
cfg.ListOpt(
"index_url",
default=["https://index.stackstorm.org/v1/index.json"],
help="A URL pointing to the pack index. StackStorm Exchange is used by "
"default. Use a comma-separated list for multiple indexes if you "
'want to get other packs discovered with "st2 pack search".',
),
]
do_register_opts(content_opts, "content", ignore_errors)
webui_opts = [
cfg.StrOpt(
"webui_base_url",
default="https://%s" % socket.getfqdn(),
help="Base https URL to access st2 Web UI. This is used to construct history URLs "
"that are sent out when chatops is used to kick off executions.",
)
]
do_register_opts(webui_opts, "webui", ignore_errors)
db_opts = [
cfg.StrOpt("host", default="127.0.0.1", help="host of db server"),
cfg.IntOpt("port", default=27017, help="port of db server"),
cfg.StrOpt("db_name", default="st2", help="name of database"),
cfg.StrOpt("username", help="username for db login"),
cfg.StrOpt("password", help="password for db login"),
cfg.IntOpt(
"connection_retry_max_delay_m",
default=3,
help="Connection retry total time (minutes).",
),
cfg.IntOpt(
"connection_retry_backoff_max_s",
default=10,
help="Connection retry backoff max (seconds).",
),
cfg.IntOpt(
"connection_retry_backoff_mul",
default=1,
help="Backoff multiplier (seconds).",
),
cfg.BoolOpt(
"ssl", default=False, help="Create the connection to mongodb using SSL"
),
cfg.StrOpt(
"ssl_keyfile",
default=None,
help="Private keyfile used to identify the local connection against MongoDB.",
),
cfg.StrOpt(
"ssl_certfile",
default=None,
help="Certificate file used to identify the localconnection",
),
cfg.StrOpt(
"ssl_cert_reqs",
default=None,
choices="none, optional, required",
help="Specifies whether a certificate is required from the other side of the "
"connection, and whether it will be validated if provided",
),
cfg.StrOpt(
"ssl_ca_certs",
default=None,
help="ca_certs file contains a set of concatenated CA certificates, which are "
"used to validate certificates passed from MongoDB.",
),
cfg.BoolOpt(
"ssl_match_hostname",
default=True,
help="If True and `ssl_cert_reqs` is not None, enables hostname verification",
),
cfg.StrOpt(
"authentication_mechanism",
default=None,
help="Specifies database authentication mechanisms. "
"By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, "
"MONGODB-CR (MongoDB Challenge Response protocol) for older servers.",
),
]
do_register_opts(db_opts, "database", ignore_errors)
messaging_opts = [
# It would be nice to be able to deprecate url and completely switch to using
# url. However, this will be a breaking change and will have impact so allowing both.
cfg.StrOpt(
"url",
default="amqp://guest:guest@127.0.0.1:5672//",
help="URL of the messaging server.",
),
cfg.ListOpt(
"cluster_urls",
default=[],
help="URL of all the nodes in a messaging service cluster.",
),
cfg.IntOpt(
"connection_retries",
default=10,
help="How many times should we retry connection before failing.",
),
cfg.IntOpt(
"connection_retry_wait",
default=10000,
help="How long should we wait between connection retries.",
),
cfg.BoolOpt(
"ssl",
default=False,
help="Use SSL / TLS to connect to the messaging server. Same as "
'appending "?ssl=true" at the end of the connection URL string.',
),
cfg.StrOpt(
"ssl_keyfile",
default=None,
help="Private keyfile used to identify the local connection against RabbitMQ.",
),
cfg.StrOpt(
"ssl_certfile",
default=None,
help="Certificate file used to identify the local connection (client).",
),
cfg.StrOpt(
"ssl_cert_reqs",
default=None,
choices="none, optional, required",
help="Specifies whether a certificate is required from the other side of the "
"connection, and whether it will be validated if provided.",
),
cfg.StrOpt(
"ssl_ca_certs",
default=None,
help="ca_certs file contains a set of concatenated CA certificates, which are "
"used to validate certificates passed from RabbitMQ.",
),
cfg.StrOpt(
"login_method",
default=None,
help="Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).",
),
]
do_register_opts(messaging_opts, "messaging", ignore_errors)
syslog_opts = [
cfg.StrOpt("host", default="127.0.0.1", help="Host for the syslog server."),
cfg.IntOpt("port", default=514, help="Port for the syslog server."),
cfg.StrOpt("facility", default="local7", help="Syslog facility level."),
cfg.StrOpt(
"protocol", default="udp", help="Transport protocol to use (udp / tcp)."
),
]
do_register_opts(syslog_opts, "syslog", ignore_errors)
log_opts = [
cfg.ListOpt("excludes", default="", help="Exclusion list of loggers to omit."),
cfg.BoolOpt(
"redirect_stderr",
default=False,
help="Controls if stderr should be redirected to the logs.",
),
cfg.BoolOpt(
"mask_secrets", default=True, help="True to mask secrets in the log files."
),
cfg.ListOpt(
"mask_secrets_blacklist",
default=[],
help="Blacklist of additional attribute names to mask in the log messages.",
),
]
do_register_opts(log_opts, "log", ignore_errors)
# Common API options
api_opts = [
cfg.StrOpt("host", default="127.0.0.1", help="StackStorm API server host"),
cfg.IntOpt("port", default=9101, help="StackStorm API server port"),
cfg.ListOpt(
"allow_origin",
default=["http://127.0.0.1:3000"],
help="List of origins allowed for api, auth and stream",
),
cfg.BoolOpt(
"mask_secrets",
default=True,
help="True to mask secrets in the API responses",
),
]
do_register_opts(api_opts, "api", ignore_errors)
# Key Value store options
keyvalue_opts = [
cfg.BoolOpt(
"enable_encryption",
default=True,
help='Allow encryption of values in key value stored qualified as "secret".',
),
cfg.StrOpt(
"encryption_key_path",
default="",
help="Location of the symmetric encryption key for encrypting values in kvstore. "
"This key should be in JSON and should've been generated using "
"st2-generate-symmetric-crypto-key tool.",
),
]
do_register_opts(keyvalue_opts, group="keyvalue")
# Common auth options
auth_opts = [
cfg.StrOpt(
"api_url",
default=None,
help="Base URL to the API endpoint excluding the version",
),
cfg.BoolOpt("enable", default=True, help="Enable authentication middleware."),
cfg.IntOpt(
"token_ttl", default=(24 * 60 * 60), help="Access token ttl in seconds."
),
# This TTL is used for tokens which belong to StackStorm services
cfg.IntOpt(
"service_token_ttl",
default=(24 * 60 * 60),
help="Service token ttl in seconds.",
),
]
do_register_opts(auth_opts, "auth", ignore_errors)
# Runner options
default_python_bin_path = sys.executable
default_python3_bin_path = find_executable("python3")
base_dir = os.path.dirname(os.path.realpath(default_python_bin_path))
default_virtualenv_bin_path = os.path.join(base_dir, "virtualenv")
action_runner_opts = [
# Common runner options
cfg.StrOpt(
"logging",
default="/etc/st2/logging.actionrunner.conf",
help="location of the logging.conf file",
),
# Python runner options
cfg.StrOpt(
"python_binary",
default=default_python_bin_path,
help="Python binary which will be used by Python actions.",
),
cfg.StrOpt(
"python3_binary",
default=default_python3_bin_path,
help="Python 3 binary which will be used by Python actions for packs which "
"use Python 3 virtual environment.",
),
cfg.StrOpt(
"python3_prefix",
default=None,
help="Prefix for Python 3 installation (e.g. /opt/python3.6). If not specified, it "
"tries to find Python 3 libraries in /usr/lib and /usr/local/lib.",
),
cfg.StrOpt(
"virtualenv_binary",
default=default_virtualenv_bin_path,
help="Virtualenv binary which should be used to create pack virtualenvs.",
),
cfg.StrOpt(
"python_runner_log_level",
default=PYTHON_RUNNER_DEFAULT_LOG_LEVEL,
help="Default log level to use for Python runner actions. Can be overriden on "
'invocation basis using "log_level" runner parameter.',
),
cfg.ListOpt(
"virtualenv_opts",
default=["--system-site-packages"],
help='List of virtualenv options to be passsed to "virtualenv" command that '
"creates pack virtualenv.",
),
cfg.BoolOpt(
"stream_output",
default=True,
help="True to store and stream action output (stdout and stderr) in real-time.",
),
]
do_register_opts(action_runner_opts, group="actionrunner")
dispatcher_pool_opts = [
cfg.IntOpt(
"workflows_pool_size",
default=40,
help="Internal pool size for dispatcher used by workflow actions.",
),
cfg.IntOpt(
"actions_pool_size",
default=60,
help="Internal pool size for dispatcher used by regular actions.",
),
]
do_register_opts(dispatcher_pool_opts, group="actionrunner")
ssh_runner_opts = [
cfg.StrOpt(
"remote_dir",
default="/tmp",
help="Location of the script on the remote filesystem.",
),
cfg.BoolOpt(
"allow_partial_failure",
default=False,
help="How partial success of actions run on multiple nodes should be treated.",
),
cfg.IntOpt(
"max_parallel_actions",
default=50,
help="Max number of parallel remote SSH actions that should be run. "
"Works only with Paramiko SSH runner.",
),
cfg.BoolOpt(
"use_ssh_config",
default=False,
help="Use the .ssh/config file. Useful to override ports etc.",
),
cfg.StrOpt(
"ssh_config_file_path",
default="~/.ssh/config",
help="Path to the ssh config file.",
),
]
do_register_opts(ssh_runner_opts, group="ssh_runner")
# Common options (used by action runner and sensor container)
action_sensor_opts = [
cfg.BoolOpt(
"enable",
default=True,
help="Whether to enable or disable the ability to post a trigger on action.",
),
cfg.ListOpt(
"emit_when",
default=LIVEACTION_COMPLETED_STATES,
help="List of execution statuses for which a trigger will be emitted. ",
),
]
do_register_opts(action_sensor_opts, group="action_sensor")
# Common options for content
pack_lib_opts = [
cfg.BoolOpt(
"enable_common_libs",
default=False,
help="Enable/Disable support for pack common libs. "
"Setting this config to ``True`` would allow you to "
"place common library code for sensors and actions in lib/ folder "
"in packs and use them in python sensors and actions. "
"See https://docs.stackstorm.com/reference/"
"sharing_code_sensors_actions.html "
"for details.",
)
]
do_register_opts(pack_lib_opts, group="packs")
# Coordination options
coord_opts = [
cfg.StrOpt("url", default=None, help="Endpoint for the coordination server."),
cfg.IntOpt(
"lock_timeout", default=60, help="TTL for the lock if backend suports it."
),
cfg.BoolOpt(
"service_registry",
default=False,
help="True to register StackStorm services in a service registry.",
),
]
do_register_opts(coord_opts, "coordination", ignore_errors)
# Mistral options
mistral_opts = [
cfg.StrOpt(
"v2_base_url",
default="http://127.0.0.1:8989/v2",
help="v2 API root endpoint.",
),
cfg.IntOpt(
"retry_exp_msec",
default=1000,
help="Multiplier for the exponential backoff.",
),
cfg.IntOpt(
"retry_exp_max_msec",
default=300000,
help="Max time for each set of backoff.",
),
cfg.IntOpt(
"retry_stop_max_msec", default=600000, help="Max time to stop retrying."
),
cfg.StrOpt(
"keystone_username", default=None, help="Username for authentication."
),
cfg.StrOpt(
"keystone_password", default=None, help="Password for authentication."
),
cfg.StrOpt(
"keystone_project_name", default=None, help="OpenStack project scope."
),
cfg.StrOpt(
"keystone_auth_url", default=None, help="Auth endpoint for Keystone."
),
cfg.StrOpt(
"cacert", default=None, help="Optional certificate to validate endpoint."
),
cfg.BoolOpt(
"insecure", default=False, help="Allow insecure communication with Mistral."
),
cfg.BoolOpt(
"enable_polling",
default=False,
help="Enable results tracking and disable callbacks.",
),
cfg.FloatOpt(
"jitter_interval",
default=0.1,
help="Jitter interval to smooth out HTTP requests "
"to mistral tasks and executions API.",
),
cfg.StrOpt(
"api_url",
default=None,
help="URL Mistral uses to talk back to the API."
"If not provided it defaults to public API URL. "
"Note: This needs to be a base URL without API "
"version (e.g. http://127.0.0.1:9101)",
),
]
do_register_opts(mistral_opts, group="mistral", ignore_errors=ignore_errors)
# Results Tracker query module options
# Note that these are currently used only by mistral query module.
query_opts = [
cfg.IntOpt(
"thread_pool_size",
default=10,
help="Number of threads to use to query external workflow systems.",
),
cfg.FloatOpt(
"query_interval",
default=5,
help="Time interval between queries to external workflow system.",
),
cfg.FloatOpt(
"empty_q_sleep_time",
default=1,
help="Sleep delay in between queries when query queue is empty.",
),
cfg.FloatOpt(
"no_workers_sleep_time",
default=1,
help="Sleep delay for query when there is no more worker in pool.",
),
]
do_register_opts(query_opts, group="resultstracker", ignore_errors=ignore_errors)
# XXX: This is required for us to support deprecated config group results_tracker
query_opts = [
cfg.IntOpt(
"thread_pool_size",
help="Number of threads to use to query external workflow systems.",
),
cfg.FloatOpt(
"query_interval",
help="Time interval between subsequent queries for a context "
"to external workflow system.",
),
]
do_register_opts(query_opts, group="results_tracker", ignore_errors=ignore_errors)
# Common stream options
stream_opts = [
cfg.IntOpt(
"heartbeat",
default=25,
help="Send empty message every N seconds to keep connection open",
)
]
do_register_opts(stream_opts, group="stream", ignore_errors=ignore_errors)
# Common CLI options
cli_opts = [
cfg.BoolOpt(
"debug",
default=False,
help="Enable debug mode. By default this will set all log levels to DEBUG.",
),
cfg.BoolOpt(
"profile",
default=False,
help="Enable profile mode. In the profile mode all the MongoDB queries and "
"related profile data are logged.",
),
cfg.BoolOpt(
"use-debugger",
default=True,
help="Enables debugger. Note that using this option changes how the "
"eventlet library is used to support async IO. This could result in "
"failures that do not occur under normal operation.",
),
]
do_register_cli_opts(cli_opts, ignore_errors=ignore_errors)
# Metrics Options stream options
metrics_opts = [
cfg.StrOpt(
"driver", default="noop", help="Driver type for metrics collection."
),
cfg.StrOpt(
"host",
default="127.0.0.1",
help="Destination server to connect to if driver requires connection.",
),
cfg.IntOpt(
"port",
default=8125,
help="Destination port to connect to if driver requires connection.",
),
cfg.StrOpt(
"prefix",
default=None,
help="Optional prefix which is prepended to all the metric names. Comes handy when "
"you want to submit metrics from various environment to the same metric "
"backend instance.",
),
cfg.FloatOpt(
"sample_rate",
default=1,
help="Randomly sample and only send metrics for X% of metric operations to the "
"backend. Default value of 1 means no sampling is done and all the metrics are "
"sent to the backend. E.g. 0.1 would mean 10% of operations are sampled.",
),
]
do_register_opts(metrics_opts, group="metrics", ignore_errors=ignore_errors)
# Common timers engine options
timer_logging_opts = [
cfg.StrOpt(
"logging",
default=None,
help="Location of the logging configuration file. "
"NOTE: Deprecated in favor of timersengine.logging",
),
]
timers_engine_logging_opts = [
cfg.StrOpt(
"logging",
default="/etc/st2/logging.timersengine.conf",
help="Location of the logging configuration file.",
)
]
do_register_opts(timer_logging_opts, group="timer", ignore_errors=ignore_errors)
do_register_opts(
timers_engine_logging_opts, group="timersengine", ignore_errors=ignore_errors
)
# NOTE: We default old style deprecated "timer" options to None so our code
# works correclty and "timersengine" has precedence over "timers"
# NOTE: "timer" section will be removed in v3.1
timer_opts = [
cfg.StrOpt(
"local_timezone",
default=None,
help="Timezone pertaining to the location where st2 is run. "
"NOTE: Deprecated in favor of timersengine.local_timezone",
),
cfg.BoolOpt(
"enable",
default=None,
help="Specify to enable timer service. "
"NOTE: Deprecated in favor of timersengine.enable",
),
]
timers_engine_opts = [
cfg.StrOpt(
"local_timezone",
default="America/Los_Angeles",
help="Timezone pertaining to the location where st2 is run.",
),
cfg.BoolOpt("enable", default=True, help="Specify to enable timer service."),
]
do_register_opts(timer_opts, group="timer", ignore_errors=ignore_errors)
do_register_opts(
timers_engine_opts, group="timersengine", ignore_errors=ignore_errors
)
# Workflow engine options
workflow_engine_opts = [
cfg.IntOpt(
"retry_stop_max_msec", default=60000, help="Max time to stop retrying."
),
cfg.IntOpt(
"retry_wait_fixed_msec", default=1000, help="Interval inbetween retries."
),
cfg.FloatOpt(
"retry_max_jitter_msec",
default=1000,
help="Max jitter interval to smooth out retries.",
),
cfg.IntOpt(
"gc_max_idle_sec",
default=900,
help="Max seconds to allow workflow execution be idled before it is identified as "
"orphaned and cancelled by the garbage collector.",
),
]
do_register_opts(
workflow_engine_opts, group="workflow_engine", ignore_errors=ignore_errors
)
|
def register_opts(ignore_errors=False):
rbac_opts = [
cfg.BoolOpt("enable", default=False, help="Enable RBAC."),
cfg.StrOpt("backend", default="noop", help="RBAC backend to use."),
cfg.BoolOpt(
"sync_remote_groups",
default=False,
help="True to synchronize remote groups returned by the auth backed for each "
"StackStorm user with local StackStorm roles based on the group to role "
"mapping definition files.",
),
cfg.BoolOpt(
"permission_isolation",
default=False,
help="Isolate resources by user. For now, these resources only include rules and "
"executions. All resources can only be viewed or executed by the owning user "
"except the admin and system_user who can view or run everything.",
),
]
do_register_opts(rbac_opts, "rbac", ignore_errors)
system_user_opts = [
cfg.StrOpt("user", default="stanley", help="Default system user."),
cfg.StrOpt(
"ssh_key_file",
default="/home/stanley/.ssh/stanley_rsa",
help="SSH private key for the system user.",
),
]
do_register_opts(system_user_opts, "system_user", ignore_errors)
schema_opts = [
cfg.IntOpt("version", default=4, help="Version of JSON schema to use."),
cfg.StrOpt(
"draft",
default="http://json-schema.org/draft-04/schema#",
help="URL to the JSON schema draft.",
),
]
do_register_opts(schema_opts, "schema", ignore_errors)
system_opts = [
cfg.BoolOpt("debug", default=False, help="Enable debug mode."),
cfg.StrOpt(
"base_path",
default="/opt/stackstorm",
help="Base path to all st2 artifacts.",
),
cfg.BoolOpt(
"validate_trigger_parameters",
default=True,
help="True to validate parameters for non-system trigger types when creating"
"a rule. By default, only parameters for system triggers are validated.",
),
cfg.BoolOpt(
"validate_trigger_payload",
default=True,
help="True to validate payload for non-system trigger types when dispatching a trigger "
"inside the sensor. By default, only payload for system triggers is validated.",
),
cfg.BoolOpt(
"validate_output_schema",
default=False,
help="True to validate action and runner output against schema.",
),
]
do_register_opts(system_opts, "system", ignore_errors)
system_packs_base_path = os.path.join(cfg.CONF.system.base_path, "packs")
system_runners_base_path = os.path.join(cfg.CONF.system.base_path, "runners")
content_opts = [
cfg.StrOpt(
"pack_group",
default="st2packs",
help="User group that can write to packs directory.",
),
cfg.StrOpt(
"system_packs_base_path",
default=system_packs_base_path,
help="Path to the directory which contains system packs.",
),
cfg.StrOpt(
"system_runners_base_path",
default=system_runners_base_path,
help="Path to the directory which contains system runners. "
"NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0",
),
cfg.StrOpt(
"packs_base_paths",
default=None,
help="Paths which will be searched for integration packs.",
),
cfg.StrOpt(
"runners_base_paths",
default=None,
help="Paths which will be searched for runners. "
"NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0",
),
cfg.ListOpt(
"index_url",
default=["https://index.stackstorm.org/v1/index.json"],
help="A URL pointing to the pack index. StackStorm Exchange is used by "
"default. Use a comma-separated list for multiple indexes if you "
'want to get other packs discovered with "st2 pack search".',
),
]
do_register_opts(content_opts, "content", ignore_errors)
webui_opts = [
cfg.StrOpt(
"webui_base_url",
default="https://%s" % socket.getfqdn(),
help="Base https URL to access st2 Web UI. This is used to construct history URLs "
"that are sent out when chatops is used to kick off executions.",
)
]
do_register_opts(webui_opts, "webui", ignore_errors)
db_opts = [
cfg.StrOpt("host", default="127.0.0.1", help="host of db server"),
cfg.IntOpt("port", default=27017, help="port of db server"),
cfg.StrOpt("db_name", default="st2", help="name of database"),
cfg.StrOpt("username", help="username for db login"),
cfg.StrOpt("password", help="password for db login"),
cfg.IntOpt(
"connection_retry_max_delay_m",
default=3,
help="Connection retry total time (minutes).",
),
cfg.IntOpt(
"connection_retry_backoff_max_s",
default=10,
help="Connection retry backoff max (seconds).",
),
cfg.IntOpt(
"connection_retry_backoff_mul",
default=1,
help="Backoff multiplier (seconds).",
),
cfg.BoolOpt(
"ssl", default=False, help="Create the connection to mongodb using SSL"
),
cfg.StrOpt(
"ssl_keyfile",
default=None,
help="Private keyfile used to identify the local connection against MongoDB.",
),
cfg.StrOpt(
"ssl_certfile",
default=None,
help="Certificate file used to identify the localconnection",
),
cfg.StrOpt(
"ssl_cert_reqs",
default=None,
choices="none, optional, required",
help="Specifies whether a certificate is required from the other side of the "
"connection, and whether it will be validated if provided",
),
cfg.StrOpt(
"ssl_ca_certs",
default=None,
help="ca_certs file contains a set of concatenated CA certificates, which are "
"used to validate certificates passed from MongoDB.",
),
cfg.BoolOpt(
"ssl_match_hostname",
default=True,
help="If True and `ssl_cert_reqs` is not None, enables hostname verification",
),
cfg.StrOpt(
"authentication_mechanism",
default=None,
help="Specifies database authentication mechanisms. "
"By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, "
"MONGODB-CR (MongoDB Challenge Response protocol) for older servers.",
),
]
do_register_opts(db_opts, "database", ignore_errors)
messaging_opts = [
# It would be nice to be able to deprecate url and completely switch to using
# url. However, this will be a breaking change and will have impact so allowing both.
cfg.StrOpt(
"url",
default="amqp://guest:guest@127.0.0.1:5672//",
help="URL of the messaging server.",
),
cfg.ListOpt(
"cluster_urls",
default=[],
help="URL of all the nodes in a messaging service cluster.",
),
cfg.IntOpt(
"connection_retries",
default=10,
help="How many times should we retry connection before failing.",
),
cfg.IntOpt(
"connection_retry_wait",
default=10000,
help="How long should we wait between connection retries.",
),
cfg.BoolOpt(
"ssl",
default=False,
help="Use SSL / TLS to connect to the messaging server. Same as "
'appending "?ssl=true" at the end of the connection URL string.',
),
cfg.StrOpt(
"ssl_keyfile",
default=None,
help="Private keyfile used to identify the local connection against RabbitMQ.",
),
cfg.StrOpt(
"ssl_certfile",
default=None,
help="Certificate file used to identify the local connection (client).",
),
cfg.StrOpt(
"ssl_cert_reqs",
default=None,
choices="none, optional, required",
help="Specifies whether a certificate is required from the other side of the "
"connection, and whether it will be validated if provided.",
),
cfg.StrOpt(
"ssl_ca_certs",
default=None,
help="ca_certs file contains a set of concatenated CA certificates, which are "
"used to validate certificates passed from RabbitMQ.",
),
cfg.StrOpt(
"login_method",
default=None,
help="Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).",
),
]
do_register_opts(messaging_opts, "messaging", ignore_errors)
syslog_opts = [
cfg.StrOpt("host", default="127.0.0.1", help="Host for the syslog server."),
cfg.IntOpt("port", default=514, help="Port for the syslog server."),
cfg.StrOpt("facility", default="local7", help="Syslog facility level."),
cfg.StrOpt(
"protocol", default="udp", help="Transport protocol to use (udp / tcp)."
),
]
do_register_opts(syslog_opts, "syslog", ignore_errors)
log_opts = [
cfg.ListOpt("excludes", default="", help="Exclusion list of loggers to omit."),
cfg.BoolOpt(
"redirect_stderr",
default=False,
help="Controls if stderr should be redirected to the logs.",
),
cfg.BoolOpt(
"mask_secrets", default=True, help="True to mask secrets in the log files."
),
cfg.ListOpt(
"mask_secrets_blacklist",
default=[],
help="Blacklist of additional attribute names to mask in the log messages.",
),
]
do_register_opts(log_opts, "log", ignore_errors)
# Common API options
api_opts = [
cfg.StrOpt("host", default="127.0.0.1", help="StackStorm API server host"),
cfg.IntOpt("port", default=9101, help="StackStorm API server port"),
cfg.ListOpt(
"allow_origin",
default=["http://127.0.0.1:3000"],
help="List of origins allowed for api, auth and stream",
),
cfg.BoolOpt(
"mask_secrets",
default=True,
help="True to mask secrets in the API responses",
),
]
do_register_opts(api_opts, "api", ignore_errors)
# Key Value store options
keyvalue_opts = [
cfg.BoolOpt(
"enable_encryption",
default=True,
help='Allow encryption of values in key value stored qualified as "secret".',
),
cfg.StrOpt(
"encryption_key_path",
default="",
help="Location of the symmetric encryption key for encrypting values in kvstore. "
"This key should be in JSON and should've been generated using "
"st2-generate-symmetric-crypto-key tool.",
),
]
do_register_opts(keyvalue_opts, group="keyvalue")
# Common auth options
auth_opts = [
cfg.StrOpt(
"api_url",
default=None,
help="Base URL to the API endpoint excluding the version",
),
cfg.BoolOpt("enable", default=True, help="Enable authentication middleware."),
cfg.IntOpt(
"token_ttl", default=(24 * 60 * 60), help="Access token ttl in seconds."
),
# This TTL is used for tokens which belong to StackStorm services
cfg.IntOpt(
"service_token_ttl",
default=(24 * 60 * 60),
help="Service token ttl in seconds.",
),
]
do_register_opts(auth_opts, "auth", ignore_errors)
# Runner options
default_python_bin_path = sys.executable
default_python3_bin_path = find_executable("python3")
base_dir = os.path.dirname(os.path.realpath(default_python_bin_path))
default_virtualenv_bin_path = os.path.join(base_dir, "virtualenv")
action_runner_opts = [
# Common runner options
cfg.StrOpt(
"logging",
default="/etc/st2/logging.actionrunner.conf",
help="location of the logging.conf file",
),
# Python runner options
cfg.StrOpt(
"python_binary",
default=default_python_bin_path,
help="Python binary which will be used by Python actions.",
),
cfg.StrOpt(
"python3_binary",
default=default_python3_bin_path,
help="Python 3 binary which will be used by Python actions for packs which "
"use Python 3 virtual environment.",
),
cfg.StrOpt(
"python3_prefix",
default=None,
help="Prefix for Python 3 installation (e.g. /opt/python3.6). If not specified, it "
"tries to find Python 3 libraries in /usr/lib and /usr/local/lib.",
),
cfg.StrOpt(
"virtualenv_binary",
default=default_virtualenv_bin_path,
help="Virtualenv binary which should be used to create pack virtualenvs.",
),
cfg.StrOpt(
"python_runner_log_level",
default=PYTHON_RUNNER_DEFAULT_LOG_LEVEL,
help="Default log level to use for Python runner actions. Can be overriden on "
'invocation basis using "log_level" runner parameter.',
),
cfg.ListOpt(
"virtualenv_opts",
default=["--system-site-packages"],
help='List of virtualenv options to be passsed to "virtualenv" command that '
"creates pack virtualenv.",
),
cfg.BoolOpt(
"stream_output",
default=True,
help="True to store and stream action output (stdout and stderr) in real-time.",
),
]
do_register_opts(action_runner_opts, group="actionrunner")
dispatcher_pool_opts = [
cfg.IntOpt(
"workflows_pool_size",
default=40,
help="Internal pool size for dispatcher used by workflow actions.",
),
cfg.IntOpt(
"actions_pool_size",
default=60,
help="Internal pool size for dispatcher used by regular actions.",
),
]
do_register_opts(dispatcher_pool_opts, group="actionrunner")
ssh_runner_opts = [
cfg.StrOpt(
"remote_dir",
default="/tmp",
help="Location of the script on the remote filesystem.",
),
cfg.BoolOpt(
"allow_partial_failure",
default=False,
help="How partial success of actions run on multiple nodes should be treated.",
),
cfg.IntOpt(
"max_parallel_actions",
default=50,
help="Max number of parallel remote SSH actions that should be run. "
"Works only with Paramiko SSH runner.",
),
cfg.BoolOpt(
"use_ssh_config",
default=False,
help="Use the .ssh/config file. Useful to override ports etc.",
),
cfg.StrOpt(
"ssh_config_file_path",
default="~/.ssh/config",
help="Path to the ssh config file.",
),
]
do_register_opts(ssh_runner_opts, group="ssh_runner")
# Common options (used by action runner and sensor container)
action_sensor_opts = [
cfg.BoolOpt(
"enable",
default=True,
help="Whether to enable or disable the ability to post a trigger on action.",
),
cfg.ListOpt(
"emit_when",
default=LIVEACTION_COMPLETED_STATES,
help="List of execution statuses for which a trigger will be emitted. ",
),
]
do_register_opts(action_sensor_opts, group="action_sensor")
# Common options for content
pack_lib_opts = [
cfg.BoolOpt(
"enable_common_libs",
default=False,
help="Enable/Disable support for pack common libs. "
"Setting this config to ``True`` would allow you to "
"place common library code for sensors and actions in lib/ folder "
"in packs and use them in python sensors and actions. "
"See https://docs.stackstorm.com/reference/"
"sharing_code_sensors_actions.html "
"for details.",
)
]
do_register_opts(pack_lib_opts, group="packs")
# Coordination options
coord_opts = [
cfg.StrOpt("url", default=None, help="Endpoint for the coordination server."),
cfg.IntOpt(
"lock_timeout", default=60, help="TTL for the lock if backend suports it."
),
cfg.BoolOpt(
"service_registry",
default=False,
help="True to register StackStorm services in a service registry.",
),
]
do_register_opts(coord_opts, "coordination", ignore_errors)
# Mistral options
mistral_opts = [
cfg.StrOpt(
"v2_base_url",
default="http://127.0.0.1:8989/v2",
help="v2 API root endpoint.",
),
cfg.IntOpt(
"retry_exp_msec",
default=1000,
help="Multiplier for the exponential backoff.",
),
cfg.IntOpt(
"retry_exp_max_msec",
default=300000,
help="Max time for each set of backoff.",
),
cfg.IntOpt(
"retry_stop_max_msec", default=600000, help="Max time to stop retrying."
),
cfg.StrOpt(
"keystone_username", default=None, help="Username for authentication."
),
cfg.StrOpt(
"keystone_password", default=None, help="Password for authentication."
),
cfg.StrOpt(
"keystone_project_name", default=None, help="OpenStack project scope."
),
cfg.StrOpt(
"keystone_auth_url", default=None, help="Auth endpoint for Keystone."
),
cfg.StrOpt(
"cacert", default=None, help="Optional certificate to validate endpoint."
),
cfg.BoolOpt(
"insecure", default=False, help="Allow insecure communication with Mistral."
),
cfg.BoolOpt(
"enable_polling",
default=False,
help="Enable results tracking and disable callbacks.",
),
cfg.FloatOpt(
"jitter_interval",
default=0.1,
help="Jitter interval to smooth out HTTP requests "
"to mistral tasks and executions API.",
),
cfg.StrOpt(
"api_url",
default=None,
help="URL Mistral uses to talk back to the API."
"If not provided it defaults to public API URL. "
"Note: This needs to be a base URL without API "
"version (e.g. http://127.0.0.1:9101)",
),
]
do_register_opts(mistral_opts, group="mistral", ignore_errors=ignore_errors)
# Results Tracker query module options
# Note that these are currently used only by mistral query module.
query_opts = [
cfg.IntOpt(
"thread_pool_size",
default=10,
help="Number of threads to use to query external workflow systems.",
),
cfg.FloatOpt(
"query_interval",
default=5,
help="Time interval between queries to external workflow system.",
),
cfg.FloatOpt(
"empty_q_sleep_time",
default=1,
help="Sleep delay in between queries when query queue is empty.",
),
cfg.FloatOpt(
"no_workers_sleep_time",
default=1,
help="Sleep delay for query when there is no more worker in pool.",
),
]
do_register_opts(query_opts, group="resultstracker", ignore_errors=ignore_errors)
# XXX: This is required for us to support deprecated config group results_tracker
query_opts = [
cfg.IntOpt(
"thread_pool_size",
help="Number of threads to use to query external workflow systems.",
),
cfg.FloatOpt(
"query_interval",
help="Time interval between subsequent queries for a context "
"to external workflow system.",
),
]
do_register_opts(query_opts, group="results_tracker", ignore_errors=ignore_errors)
# Common stream options
stream_opts = [
cfg.IntOpt(
"heartbeat",
default=25,
help="Send empty message every N seconds to keep connection open",
)
]
do_register_opts(stream_opts, group="stream", ignore_errors=ignore_errors)
# Common CLI options
cli_opts = [
cfg.BoolOpt(
"debug",
default=False,
help="Enable debug mode. By default this will set all log levels to DEBUG.",
),
cfg.BoolOpt(
"profile",
default=False,
help="Enable profile mode. In the profile mode all the MongoDB queries and "
"related profile data are logged.",
),
cfg.BoolOpt(
"use-debugger",
default=True,
help="Enables debugger. Note that using this option changes how the "
"eventlet library is used to support async IO. This could result in "
"failures that do not occur under normal operation.",
),
]
do_register_cli_opts(cli_opts, ignore_errors=ignore_errors)
# Metrics Options stream options
metrics_opts = [
cfg.StrOpt(
"driver", default="noop", help="Driver type for metrics collection."
),
cfg.StrOpt(
"host",
default="127.0.0.1",
help="Destination server to connect to if driver requires connection.",
),
cfg.IntOpt(
"port",
default=8125,
help="Destination port to connect to if driver requires connection.",
),
cfg.StrOpt(
"prefix",
default=None,
help="Optional prefix which is prepended to all the metric names. Comes handy when "
"you want to submit metrics from various environment to the same metric "
"backend instance.",
),
cfg.FloatOpt(
"sample_rate",
default=1,
help="Randomly sample and only send metrics for X% of metric operations to the "
"backend. Default value of 1 means no sampling is done and all the metrics are "
"sent to the backend. E.g. 0.1 would mean 10% of operations are sampled.",
),
]
do_register_opts(metrics_opts, group="metrics", ignore_errors=ignore_errors)
# Common timers engine options
timer_logging_opts = [
cfg.StrOpt(
"logging",
default=None,
help="Location of the logging configuration file. "
"NOTE: Deprecated in favor of timersengine.logging",
),
]
timers_engine_logging_opts = [
cfg.StrOpt(
"logging",
default="/etc/st2/logging.timersengine.conf",
help="Location of the logging configuration file.",
)
]
do_register_opts(timer_logging_opts, group="timer", ignore_errors=ignore_errors)
do_register_opts(
timers_engine_logging_opts, group="timersengine", ignore_errors=ignore_errors
)
# NOTE: We default old style deprecated "timer" options to None so our code
# works correclty and "timersengine" has precedence over "timers"
# NOTE: "timer" section will be removed in v3.1
timer_opts = [
cfg.StrOpt(
"local_timezone",
default=None,
help="Timezone pertaining to the location where st2 is run. "
"NOTE: Deprecated in favor of timersengine.local_timezone",
),
cfg.BoolOpt(
"enable",
default=None,
help="Specify to enable timer service. "
"NOTE: Deprecated in favor of timersengine.enable",
),
]
timers_engine_opts = [
cfg.StrOpt(
"local_timezone",
default="America/Los_Angeles",
help="Timezone pertaining to the location where st2 is run.",
),
cfg.BoolOpt("enable", default=True, help="Specify to enable timer service."),
]
do_register_opts(timer_opts, group="timer", ignore_errors=ignore_errors)
do_register_opts(
timers_engine_opts, group="timersengine", ignore_errors=ignore_errors
)
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def __init__(
self,
collection_interval=DEFAULT_COLLECTION_INTERVAL,
sleep_delay=DEFAULT_SLEEP_DELAY,
):
"""
:param collection_interval: How often to check database for old data and perform garbage
collection.
:type collection_interval: ``int``
:param sleep_delay: How long to sleep (in seconds) between collection of different object
types.
:type sleep_delay: ``int``
"""
self._collection_interval = collection_interval
self._action_executions_ttl = cfg.CONF.garbagecollector.action_executions_ttl
self._action_executions_output_ttl = (
cfg.CONF.garbagecollector.action_executions_output_ttl
)
self._trigger_instances_ttl = cfg.CONF.garbagecollector.trigger_instances_ttl
self._purge_inquiries = cfg.CONF.garbagecollector.purge_inquiries
self._workflow_execution_max_idle = cfg.CONF.workflow_engine.gc_max_idle_sec
self._validate_ttl_values()
self._sleep_delay = sleep_delay
|
def __init__(
self,
collection_interval=DEFAULT_COLLECTION_INTERVAL,
sleep_delay=DEFAULT_SLEEP_DELAY,
):
"""
:param collection_interval: How often to check database for old data and perform garbage
collection.
:type collection_interval: ``int``
:param sleep_delay: How long to sleep (in seconds) between collection of different object
types.
:type sleep_delay: ``int``
"""
self._collection_interval = collection_interval
self._action_executions_ttl = cfg.CONF.garbagecollector.action_executions_ttl
self._action_executions_output_ttl = (
cfg.CONF.garbagecollector.action_executions_output_ttl
)
self._trigger_instances_ttl = cfg.CONF.garbagecollector.trigger_instances_ttl
self._purge_inquiries = cfg.CONF.garbagecollector.purge_inquiries
self._validate_ttl_values()
self._sleep_delay = sleep_delay
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def _perform_garbage_collection(self):
LOG.info("Performing garbage collection...")
proc_message = "Performing garbage collection for %s."
skip_message = "Skipping garbage collection for %s since it's not configured."
# Note: We sleep for a bit between garbage collection of each object type to prevent busy
# waiting
obj_type = "action executions"
if self._action_executions_ttl and self._action_executions_ttl >= MINIMUM_TTL_DAYS:
LOG.info(proc_message, obj_type)
self._purge_action_executions()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(skip_message, obj_type)
obj_type = "action executions output"
if (
self._action_executions_output_ttl
and self._action_executions_output_ttl >= MINIMUM_TTL_DAYS_EXECUTION_OUTPUT
):
LOG.info(proc_message, obj_type)
self._purge_action_executions_output()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(skip_message, obj_type)
obj_type = "trigger instances"
if self._trigger_instances_ttl and self._trigger_instances_ttl >= MINIMUM_TTL_DAYS:
LOG.info(proc_message, obj_type)
self._purge_trigger_instances()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(skip_message, obj_type)
obj_type = "inquiries"
if self._purge_inquiries:
LOG.info(proc_message, obj_type)
self._timeout_inquiries()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(skip_message, obj_type)
obj_type = "orphaned workflow executions"
if self._workflow_execution_max_idle > 0:
LOG.info(proc_message, obj_type)
self._purge_orphaned_workflow_executions()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(skip_message, obj_type)
|
def _perform_garbage_collection(self):
LOG.info("Performing garbage collection...")
# Note: We sleep for a bit between garbage collection of each object type to prevent busy
# waiting
if self._action_executions_ttl and self._action_executions_ttl >= MINIMUM_TTL_DAYS:
self._purge_action_executions()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(
"Skipping garbage collection for action executions since it's not "
"configured"
)
if (
self._action_executions_output_ttl
and self._action_executions_output_ttl >= MINIMUM_TTL_DAYS_EXECUTION_OUTPUT
):
self._purge_action_executions_output()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(
"Skipping garbage collection for action executions output since it's not "
"configured"
)
if self._trigger_instances_ttl and self._trigger_instances_ttl >= MINIMUM_TTL_DAYS:
self._purge_trigger_instances()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug(
"Skipping garbage collection for trigger instances since it's not "
"configured"
)
if self._purge_inquiries:
self._timeout_inquiries()
eventlet.sleep(self._sleep_delay)
else:
LOG.debug("Skipping garbage collection for Inquiries since it's not configured")
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def _purge_action_executions(self):
"""
Purge action executions and corresponding live action, stdout and stderr object which match
the criteria defined in the config.
"""
utc_now = get_datetime_utc_now()
timestamp = utc_now - datetime.timedelta(days=self._action_executions_ttl)
# Another sanity check to make sure we don't delete new executions
if timestamp > (utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS)):
raise ValueError(
"Calculated timestamp would violate the minimum TTL constraint"
)
timestamp_str = isotime.format(dt=timestamp)
LOG.info("Deleting action executions older than: %s" % (timestamp_str))
assert timestamp < utc_now
try:
purge_executions(logger=LOG, timestamp=timestamp)
except Exception as e:
LOG.exception("Failed to delete executions: %s" % (six.text_type(e)))
return True
|
def _purge_action_executions(self):
"""
Purge action executions and corresponding live action, stdout and stderr object which match
the criteria defined in the config.
"""
LOG.info("Performing garbage collection for action executions and related objects")
utc_now = get_datetime_utc_now()
timestamp = utc_now - datetime.timedelta(days=self._action_executions_ttl)
# Another sanity check to make sure we don't delete new executions
if timestamp > (utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS)):
raise ValueError(
"Calculated timestamp would violate the minimum TTL constraint"
)
timestamp_str = isotime.format(dt=timestamp)
LOG.info("Deleting action executions older than: %s" % (timestamp_str))
assert timestamp < utc_now
try:
purge_executions(logger=LOG, timestamp=timestamp)
except Exception as e:
LOG.exception("Failed to delete executions: %s" % (six.text_type(e)))
return True
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def _purge_action_executions_output(self):
utc_now = get_datetime_utc_now()
timestamp = utc_now - datetime.timedelta(days=self._action_executions_output_ttl)
# Another sanity check to make sure we don't delete new objects
if timestamp > (
utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS_EXECUTION_OUTPUT)
):
raise ValueError(
"Calculated timestamp would violate the minimum TTL constraint"
)
timestamp_str = isotime.format(dt=timestamp)
LOG.info(
"Deleting action executions output objects older than: %s" % (timestamp_str)
)
assert timestamp < utc_now
try:
purge_execution_output_objects(logger=LOG, timestamp=timestamp)
except Exception as e:
LOG.exception(
"Failed to delete execution output objects: %s" % (six.text_type(e))
)
return True
|
def _purge_action_executions_output(self):
LOG.info("Performing garbage collection for action executions output objects")
utc_now = get_datetime_utc_now()
timestamp = utc_now - datetime.timedelta(days=self._action_executions_output_ttl)
# Another sanity check to make sure we don't delete new objects
if timestamp > (
utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS_EXECUTION_OUTPUT)
):
raise ValueError(
"Calculated timestamp would violate the minimum TTL constraint"
)
timestamp_str = isotime.format(dt=timestamp)
LOG.info(
"Deleting action executions output objects older than: %s" % (timestamp_str)
)
assert timestamp < utc_now
try:
purge_execution_output_objects(logger=LOG, timestamp=timestamp)
except Exception as e:
LOG.exception(
"Failed to delete execution output objects: %s" % (six.text_type(e))
)
return True
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def _purge_trigger_instances(self):
"""
Purge trigger instances which match the criteria defined in the config.
"""
utc_now = get_datetime_utc_now()
timestamp = utc_now - datetime.timedelta(days=self._trigger_instances_ttl)
# Another sanity check to make sure we don't delete new executions
if timestamp > (utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS)):
raise ValueError(
"Calculated timestamp would violate the minimum TTL constraint"
)
timestamp_str = isotime.format(dt=timestamp)
LOG.info("Deleting trigger instances older than: %s" % (timestamp_str))
assert timestamp < utc_now
try:
purge_trigger_instances(logger=LOG, timestamp=timestamp)
except Exception as e:
LOG.exception("Failed to trigger instances: %s" % (six.text_type(e)))
return True
|
def _purge_trigger_instances(self):
"""
Purge trigger instances which match the criteria defined in the config.
"""
LOG.info("Performing garbage collection for trigger instances")
utc_now = get_datetime_utc_now()
timestamp = utc_now - datetime.timedelta(days=self._trigger_instances_ttl)
# Another sanity check to make sure we don't delete new executions
if timestamp > (utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS)):
raise ValueError(
"Calculated timestamp would violate the minimum TTL constraint"
)
timestamp_str = isotime.format(dt=timestamp)
LOG.info("Deleting trigger instances older than: %s" % (timestamp_str))
assert timestamp < utc_now
try:
purge_trigger_instances(logger=LOG, timestamp=timestamp)
except Exception as e:
LOG.exception("Failed to trigger instances: %s" % (six.text_type(e)))
return True
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def _timeout_inquiries(self):
"""Mark Inquiries as "timeout" that have exceeded their TTL"""
try:
purge_inquiries(logger=LOG)
except Exception as e:
LOG.exception("Failed to purge inquiries: %s" % (six.text_type(e)))
return True
|
def _timeout_inquiries(self):
"""Mark Inquiries as "timeout" that have exceeded their TTL"""
LOG.info("Performing garbage collection for Inquiries")
try:
purge_inquiries(logger=LOG)
except Exception as e:
LOG.exception("Failed to purge inquiries: %s" % (six.text_type(e)))
return True
|
https://github.com/StackStorm/st2/issues/4704
|
2019-06-04 17:08:05,405 139749901439856 ERROR consumers [-] VariableMessageQueueConsumer failed to process message: ActionExecutionDB(action={u'notify': {}, u'description': u'This executes a task in the net-task library', u'runner_type': u'python-script', u'tags': [], u'enabled': True, u'name': u'net-task', u'entry_point': u'python/net-task.py', u'metadata_file': u'actions/net-task.yaml', u'output_schema': {}, u'uid': u'action:shigo:net-task', u'parameters': {u'args': {u'position': 1, u'required': False, u'type': u'array', u'description': u'Args to be passed to the task'}, u'task_name': {u'position': 0, u'required': True, u'type': u'string', u'description': u'The name of the task'}, u'kwargs': {u'position': 2, u'required': False, u'type': u'object', u'description': u'Kwargs to be passed to the task'}}, u'ref': u'shigo.net-task', u'id': u'5c1af2cedb95700082de9860', u'pack': u'shigo'}, children=[], context={u'orquesta': {u'workflow_execution_id': u'5cf6a55f5f85230022d4bf06', u'task_id': u'get_nettask_version', u'task_execution_id': u'5cf6a563d0fa0000229108c0', u'task_name': u'get_nettask_version', u'task_route': 0}, u'user': u'st2admin', u'parent': {u'user': u'st2admin', u'execution_id': u'5cf6a55ea036ab0033dc934c', u'pack': u'shigo'}, u'pack': u'shigo'}, delay=None, end_timestamp="2019-06-04 17:08:05.182673+00:00", id=5cf6a563d0fa0000229108c2, liveaction={u'callback': {}, u'runner_info': {u'hostname': u'4051aa3ae5dd', u'pid': 34}, u'parameters': {u'task_name': u'net_task.version'}, u'action': u'shigo.net-task', u'action_is_workflow': False, u'id': u'5cf6a563d0fa0000229108c1'}, log=[{u'status': u'requested', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 47, 469000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'scheduled', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 199000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'running', u'timestamp': datetime.datetime(2019, 6, 4, 17, 7, 48, 332000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}, {u'status': u'succeeded', u'timestamp': datetime.datetime(2019, 6, 4, 17, 8, 5, 217000, tzinfo=<bson.tz_util.FixedOffset object at 0x7f1a0f8212d0>)}], parameters={u'task_name': u'net_task.version'}, parent="5cf6a55ea036ab0033dc934c", result={u'result': {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'optical-tech': u'0.6.4', u'network-state-service': u'0.4.3', u'net-devices2': u'1.4.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'device_grouping': u'0.0.2', u'starlab': u'0.1.60', u'wan-tech': u'0.7.7', u'icm-connector': u'0.8.12', u'azconfigure-tasks': u'1.0.13', u'celery': u'4.2.1', u'quality-checks': u'0.1.22', u'atom': u'0.9.44', u'net-task-contrib': u'0.4.72', u'net-assist': u'0.1.10', u'fabric-tools': u'0.1.6', u'ask-mode-service': u'0.3.2', u'safety-check-service': u'0.2.8', u'apache-airflow': u'2.0.0dev0+3.incubating', u'net-task': u'0.13.0'}}, u'exit_code': 0, u'stderr': u"st2.actions.python.NetTaskWrapper: DEBUG json data: {}\nst2.actions.python.NetTaskWrapper: INFO NetTask UUID: 8726afab-915d-4f97-8205-5c7f73ffa4ae\nst2.actions.python.NetTaskWrapper: INFO task net_task.version completed, result is {u'task-id': u'8726afab-915d-4f97-8205-5c7f73ffa4ae', u'state': u'SUCCESS', u'result': {u'net-task': u'0.13.0', u'device_grouping': u'0.0.2', u'kusto-proxy': u'0.1.1', u'phynet-credentials': u'0.8.2', u'quality-checks': u'0.1.22', u'azconfigure-tasks': u'1.0.13', u'starlab': u'0.1.60', u'ask-mode-service': u'0.3.2', u'icm-connector': u'0.8.12', u'apache-airflow': u'2.0.0dev0+3.incubating', u'celery': u'4.2.1', u'net-task-contrib': u'0.4.72', u'network-state-service': u'0.4.3', u'wan-tech': u'0.7.7', u'net-assist': u'0.1.10', u'atom': u'0.9.44', u'safety-check-service': u'0.2.8', u'fabric-tools': u'0.1.6', u'net-devices2': u'1.4.2', u'optical-tech': u'0.6.4'}}\nst2.actions.python.NetTaskWrapper: WARNING task did not return data in (status, result) format, as expected by stackstorm\n", u'stdout': u''}, rule={}, runner={u'runner_module': u'python_runner', u'uid': u'runner_type:python-script', u'runner_package': u'python_runner', u'enabled': True, u'name': u'python-script', u'output_key': u'result', u'output_schema': {u'exit_code': {u'required': True, u'type': u'integer'}, u'result': {u'anyOf': [{u'type': u'object'}, {u'type': u'string'}, {u'type': u'integer'}, {u'type': u'number'}, {u'type': u'boolean'}, {u'type': u'array'}, {u'type': u'null'}]}, u'stderr': {u'required': True, u'type': u'string'}, u'stdout': {u'required': True, u'type': u'string'}}, u'runner_parameters': {u'debug': {u'default': False, u'required': False, u'type': u'boolean', u'description': u'Enable runner debug mode.'}, u'content_version': {u'required': False, u'type': u'string', u'description': u'Git revision of the pack content to use for this action execution (git commit sha / tag / branch). Only applies to packs which are git repositories.'}, u'log_level': {u'default': u'DEBUG', u'enum': [u'AUDIT', u'CRITICAL', u'ERROR', u'WARNING', u'INFO', u'DEBUG'], u'type': u'string', u'description': u'Default log level for Python runner actions.'}, u'timeout': {u'default': 600, u'type': u'integer', u'description': u"Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds."}, u'env': {u'type': u'object', u'description': u'Environment variables which will be available to the script.'}}, u'id': u'5c1af28f26395d0024fc2741', u'description': u'A runner for launching python actions.'}, start_timestamp="2019-06-04 17:07:47.443217+00:00", status="succeeded", task_execution="5cf6a563d0fa0000229108c0", trigger={}, trigger_instance={}, trigger_type={}, web_url="https://befdd34ce4a4/#/history/5cf6a563d0fa0000229108c2/general", workflow_execution="5cf6a55f5f85230022d4bf06")
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/consumers.py", line 72, in _process_message
self._handler.process(body)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 83, in process
handler_function(message)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 61, in handle_action_execution_with_instrumentation
return self.handle_action_execution(ac_ex_db=ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/workflows/workflows.py", line 134, in handle_action_execution
wf_svc.handle_action_execution_completion(ac_ex_db)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/services/workflows.py", line 733, in handle_action_execution_completion
with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/locking.py", line 52, in __enter__
acquired = self.acquire(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 93, in acquire
return acquired
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/drivers/redis.py", line 51, in _translate_failures
cause=e)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/tooz/utils.py", line 225, in raise_with_cause
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause
six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause'))
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/six.py", line 737, in raise_from
raise value
ToozConnectionError: Error while reading from socket: (u'Connection closed by server.',)
|
ToozConnectionError
|
def create_or_update_trigger_db(trigger, log_not_unique_error_as_debug=False):
"""
Create a new TriggerDB model if one doesn't exist yet or update existing
one.
:param trigger: Trigger info.
:type trigger: ``dict``
"""
assert isinstance(trigger, dict)
existing_trigger_db = _get_trigger_db(trigger)
if existing_trigger_db:
is_update = True
else:
is_update = False
trigger_api = TriggerAPI(**trigger)
trigger_api.validate()
trigger_db = TriggerAPI.to_model(trigger_api)
if is_update:
trigger_db.id = existing_trigger_db.id
trigger_db = Trigger.add_or_update(
trigger_db, log_not_unique_error_as_debug=log_not_unique_error_as_debug
)
extra = {"trigger_db": trigger_db}
if is_update:
LOG.audit("Trigger updated. Trigger.id=%s" % (trigger_db.id), extra=extra)
else:
LOG.audit("Trigger created. Trigger.id=%s" % (trigger_db.id), extra=extra)
return trigger_db
|
def create_or_update_trigger_db(trigger):
"""
Create a new TriggerDB model if one doesn't exist yet or update existing
one.
:param trigger: Trigger info.
:type trigger: ``dict``
"""
assert isinstance(trigger, dict)
existing_trigger_db = _get_trigger_db(trigger)
if existing_trigger_db:
is_update = True
else:
is_update = False
trigger_api = TriggerAPI(**trigger)
trigger_api.validate()
trigger_db = TriggerAPI.to_model(trigger_api)
if is_update:
trigger_db.id = existing_trigger_db.id
trigger_db = Trigger.add_or_update(trigger_db)
extra = {"trigger_db": trigger_db}
if is_update:
LOG.audit("Trigger updated. Trigger.id=%s" % (trigger_db.id), extra=extra)
else:
LOG.audit("Trigger created. Trigger.id=%s" % (trigger_db.id), extra=extra)
return trigger_db
|
https://github.com/StackStorm/st2/issues/3933
|
Registering content...[flags = --config-file /etc/st2/st2.conf --register-all]
2017-11-10 16:04:14,401 INFO [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "stackstorm".
2017-11-10 16:04:20,355 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.update" })
2017-11-10 16:04:20,432 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_d_b index: uid_1 dup key: { : "trigger:core:st2.key_value_pair.value_change:99914b932bd37a50b983c5e7c90ae93b" })
2017-11-10 16:04:20,451 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.delete" })
2017-11-10 16:04:20,567 INFO [-] =========================================================
2017-11-10 16:04:20,568 INFO [-] ############## Registering triggers #####################
2017-11-10 16:04:20,568 INFO [-] =========================================================
2017-11-10 16:04:20,673 INFO [-] Registered 0 triggers.
|
NotUniqueError
|
def create_trigger_type_db(trigger_type, log_not_unique_error_as_debug=False):
"""
Creates a trigger type db object in the db given trigger_type definition as dict.
:param trigger_type: Trigger type model.
:type trigger_type: ``dict``
:param log_not_unique_error_as_debug: True to lot NotUnique errors under debug instead of
error log level. This is to be used in scenarios where
failure is non-fatal (e.g. when services register
internal trigger types which is an idempotent
operation).
:type log_not_unique_error_as_debug: ``bool``
:rtype: ``object``
"""
trigger_type_api = TriggerTypeAPI(**trigger_type)
trigger_type_api.validate()
ref = ResourceReference.to_string_reference(
name=trigger_type_api.name, pack=trigger_type_api.pack
)
trigger_type_db = get_trigger_type_db(ref)
if not trigger_type_db:
trigger_type_db = TriggerTypeAPI.to_model(trigger_type_api)
LOG.debug("verified trigger and formulated TriggerDB=%s", trigger_type_db)
trigger_type_db = TriggerType.add_or_update(
trigger_type_db, log_not_unique_error_as_debug=log_not_unique_error_as_debug
)
return trigger_type_db
|
def create_trigger_type_db(trigger_type):
"""
Creates a trigger type db object in the db given trigger_type definition as dict.
:param trigger_type: Trigger type model.
:type trigger_type: ``dict``
:rtype: ``object``
"""
trigger_type_api = TriggerTypeAPI(**trigger_type)
trigger_type_api.validate()
ref = ResourceReference.to_string_reference(
name=trigger_type_api.name, pack=trigger_type_api.pack
)
trigger_type_db = get_trigger_type_db(ref)
if not trigger_type_db:
trigger_type_db = TriggerTypeAPI.to_model(trigger_type_api)
LOG.debug("verified trigger and formulated TriggerDB=%s", trigger_type_db)
trigger_type_db = TriggerType.add_or_update(trigger_type_db)
return trigger_type_db
|
https://github.com/StackStorm/st2/issues/3933
|
Registering content...[flags = --config-file /etc/st2/st2.conf --register-all]
2017-11-10 16:04:14,401 INFO [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "stackstorm".
2017-11-10 16:04:20,355 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.update" })
2017-11-10 16:04:20,432 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_d_b index: uid_1 dup key: { : "trigger:core:st2.key_value_pair.value_change:99914b932bd37a50b983c5e7c90ae93b" })
2017-11-10 16:04:20,451 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.delete" })
2017-11-10 16:04:20,567 INFO [-] =========================================================
2017-11-10 16:04:20,568 INFO [-] ############## Registering triggers #####################
2017-11-10 16:04:20,568 INFO [-] =========================================================
2017-11-10 16:04:20,673 INFO [-] Registered 0 triggers.
|
NotUniqueError
|
def create_shadow_trigger(trigger_type_db, log_not_unique_error_as_debug=False):
"""
Create a shadow trigger for TriggerType with no parameters.
:param log_not_unique_error_as_debug: True to lot NotUnique errors under debug instead of
error log level. This is to be used in scenarios where
failure is non-fatal (e.g. when services register
internal trigger types which is an idempotent
operation).
:type log_not_unique_error_as_debug: ``bool``
"""
trigger_type_ref = trigger_type_db.get_reference().ref
if trigger_type_db.parameters_schema:
LOG.debug(
"Skip shadow trigger for TriggerType with parameters %s.", trigger_type_ref
)
return None
trigger = {
"name": trigger_type_db.name,
"pack": trigger_type_db.pack,
"type": trigger_type_ref,
"parameters": {},
}
return create_or_update_trigger_db(
trigger, log_not_unique_error_as_debug=log_not_unique_error_as_debug
)
|
def create_shadow_trigger(trigger_type_db):
"""
Create a shadow trigger for TriggerType with no parameters.
"""
trigger_type_ref = trigger_type_db.get_reference().ref
if trigger_type_db.parameters_schema:
LOG.debug(
"Skip shadow trigger for TriggerType with parameters %s.", trigger_type_ref
)
return None
trigger = {
"name": trigger_type_db.name,
"pack": trigger_type_db.pack,
"type": trigger_type_ref,
"parameters": {},
}
return create_or_update_trigger_db(trigger)
|
https://github.com/StackStorm/st2/issues/3933
|
Registering content...[flags = --config-file /etc/st2/st2.conf --register-all]
2017-11-10 16:04:14,401 INFO [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "stackstorm".
2017-11-10 16:04:20,355 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.update" })
2017-11-10 16:04:20,432 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_d_b index: uid_1 dup key: { : "trigger:core:st2.key_value_pair.value_change:99914b932bd37a50b983c5e7c90ae93b" })
2017-11-10 16:04:20,451 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.delete" })
2017-11-10 16:04:20,567 INFO [-] =========================================================
2017-11-10 16:04:20,568 INFO [-] ############## Registering triggers #####################
2017-11-10 16:04:20,568 INFO [-] =========================================================
2017-11-10 16:04:20,673 INFO [-] Registered 0 triggers.
|
NotUniqueError
|
def create_or_update_trigger_type_db(trigger_type, log_not_unique_error_as_debug=False):
"""
Create or update a trigger type db object in the db given trigger_type definition as dict.
:param trigger_type: Trigger type model.
:type trigger_type: ``dict``
:param log_not_unique_error_as_debug: True to lot NotUnique errors under debug instead of
error log level. This is to be used in scenarios where
failure is non-fatal (e.g. when services register
internal trigger types which is an idempotent
operation).
:type log_not_unique_error_as_debug: ``bool``
:rtype: ``object``
"""
assert isinstance(trigger_type, dict)
trigger_type_api = TriggerTypeAPI(**trigger_type)
trigger_type_api.validate()
trigger_type_api = TriggerTypeAPI.to_model(trigger_type_api)
ref = ResourceReference.to_string_reference(
name=trigger_type_api.name, pack=trigger_type_api.pack
)
existing_trigger_type_db = get_trigger_type_db(ref)
if existing_trigger_type_db:
is_update = True
else:
is_update = False
if is_update:
trigger_type_api.id = existing_trigger_type_db.id
try:
trigger_type_db = TriggerType.add_or_update(
trigger_type_api,
log_not_unique_error_as_debug=log_not_unique_error_as_debug,
)
except StackStormDBObjectConflictError:
# Operation is idempotent and trigger could have already been created by
# another process. Ignore object already exists because it simply means
# there was a race and object is already in the database.
trigger_type_db = get_trigger_type_db(ref)
is_update = True
extra = {"trigger_type_db": trigger_type_db}
if is_update:
LOG.audit(
"TriggerType updated. TriggerType.id=%s" % (trigger_type_db.id), extra=extra
)
else:
LOG.audit(
"TriggerType created. TriggerType.id=%s" % (trigger_type_db.id), extra=extra
)
return trigger_type_db
|
def create_or_update_trigger_type_db(trigger_type):
"""
Create or update a trigger type db object in the db given trigger_type definition as dict.
:param trigger_type: Trigger type model.
:type trigger_type: ``dict``
:rtype: ``object``
"""
assert isinstance(trigger_type, dict)
trigger_type_api = TriggerTypeAPI(**trigger_type)
trigger_type_api.validate()
trigger_type_api = TriggerTypeAPI.to_model(trigger_type_api)
ref = ResourceReference.to_string_reference(
name=trigger_type_api.name, pack=trigger_type_api.pack
)
existing_trigger_type_db = get_trigger_type_db(ref)
if existing_trigger_type_db:
is_update = True
else:
is_update = False
if is_update:
trigger_type_api.id = existing_trigger_type_db.id
try:
trigger_type_db = TriggerType.add_or_update(trigger_type_api)
except StackStormDBObjectConflictError:
# Operation is idempotent and trigger could have already been created by
# another process. Ignore object already exists because it simply means
# there was a race and object is already in the database.
trigger_type_db = get_trigger_type_db(ref)
is_update = True
extra = {"trigger_type_db": trigger_type_db}
if is_update:
LOG.audit(
"TriggerType updated. TriggerType.id=%s" % (trigger_type_db.id), extra=extra
)
else:
LOG.audit(
"TriggerType created. TriggerType.id=%s" % (trigger_type_db.id), extra=extra
)
return trigger_type_db
|
https://github.com/StackStorm/st2/issues/3933
|
Registering content...[flags = --config-file /etc/st2/st2.conf --register-all]
2017-11-10 16:04:14,401 INFO [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "stackstorm".
2017-11-10 16:04:20,355 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.update" })
2017-11-10 16:04:20,432 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_d_b index: uid_1 dup key: { : "trigger:core:st2.key_value_pair.value_change:99914b932bd37a50b983c5e7c90ae93b" })
2017-11-10 16:04:20,451 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.delete" })
2017-11-10 16:04:20,567 INFO [-] =========================================================
2017-11-10 16:04:20,568 INFO [-] ############## Registering triggers #####################
2017-11-10 16:04:20,568 INFO [-] =========================================================
2017-11-10 16:04:20,673 INFO [-] Registered 0 triggers.
|
NotUniqueError
|
def _register_internal_trigger_type(trigger_definition):
try:
trigger_type_db = create_trigger_type_db(
trigger_type=trigger_definition, log_not_unique_error_as_debug=True
)
except (NotUniqueError, StackStormDBObjectConflictError):
# We ignore conflict error since this operation is idempotent and race is not an issue
LOG.debug(
'Internal trigger type "%s" already exists, ignoring error...'
% (trigger_definition["name"])
)
ref = ResourceReference.to_string_reference(
name=trigger_definition["name"], pack=trigger_definition["pack"]
)
trigger_type_db = get_trigger_type_db(ref)
if trigger_type_db:
LOG.debug("Registered internal trigger: %s.", trigger_definition["name"])
# trigger types with parameters do no require a shadow trigger.
if trigger_type_db and not trigger_type_db.parameters_schema:
try:
trigger_db = create_shadow_trigger(
trigger_type_db, log_not_unique_error_as_debug=True
)
extra = {"trigger_db": trigger_db}
LOG.audit(
"Trigger created for parameter-less internal TriggerType. Trigger.id=%s"
% (trigger_db.id),
extra=extra,
)
except (NotUniqueError, StackStormDBObjectConflictError):
LOG.debug(
'Shadow trigger "%s" already exists. Ignoring.',
trigger_type_db.get_reference().ref,
exc_info=True,
)
except (ValidationError, ValueError):
LOG.exception(
"Validation failed in shadow trigger. TriggerType=%s.",
trigger_type_db.get_reference().ref,
)
raise
return trigger_type_db
|
def _register_internal_trigger_type(trigger_definition):
try:
trigger_type_db = create_trigger_type_db(trigger_type=trigger_definition)
except (NotUniqueError, StackStormDBObjectConflictError):
# We ignore conflict error since this operation is idempotent and race is not an issue
LOG.debug(
'Internal trigger type "%s" already exists, ignoring...'
% (trigger_definition["name"]),
exc_info=True,
)
ref = ResourceReference.to_string_reference(
name=trigger_definition["name"], pack=trigger_definition["pack"]
)
trigger_type_db = get_trigger_type_db(ref)
if trigger_type_db:
LOG.debug("Registered internal trigger: %s.", trigger_definition["name"])
# trigger types with parameters do no require a shadow trigger.
if trigger_type_db and not trigger_type_db.parameters_schema:
try:
trigger_db = create_shadow_trigger(trigger_type_db)
extra = {"trigger_db": trigger_db}
LOG.audit(
"Trigger created for parameter-less internal TriggerType. Trigger.id=%s"
% (trigger_db.id),
extra=extra,
)
except StackStormDBObjectConflictError:
LOG.debug(
'Shadow trigger "%s" already exists. Ignoring.',
trigger_type_db.get_reference().ref,
exc_info=True,
)
except (ValidationError, ValueError):
LOG.exception(
"Validation failed in shadow trigger. TriggerType=%s.",
trigger_type_db.get_reference().ref,
)
raise
return trigger_type_db
|
https://github.com/StackStorm/st2/issues/3933
|
Registering content...[flags = --config-file /etc/st2/st2.conf --register-all]
2017-11-10 16:04:14,401 INFO [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "stackstorm".
2017-11-10 16:04:20,355 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.update" })
2017-11-10 16:04:20,432 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_d_b index: uid_1 dup key: { : "trigger:core:st2.key_value_pair.value_change:99914b932bd37a50b983c5e7c90ae93b" })
2017-11-10 16:04:20,451 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.delete" })
2017-11-10 16:04:20,567 INFO [-] =========================================================
2017-11-10 16:04:20,568 INFO [-] ############## Registering triggers #####################
2017-11-10 16:04:20,568 INFO [-] =========================================================
2017-11-10 16:04:20,673 INFO [-] Registered 0 triggers.
|
NotUniqueError
|
def register_internal_trigger_types():
"""
Register internal trigger types.
NOTE 1: This method blocks until all the trigger types have been registered.
NOTE 2: We log "NotUniqueError" errors under debug and not error. Those errors are not fatal
because this operation is idempotent and NotUniqueError simply means internal trigger type
has already been registered by some other service.
"""
action_sensor_enabled = cfg.CONF.action_sensor.enable
registered_trigger_types_db = []
for _, trigger_definitions in six.iteritems(INTERNAL_TRIGGER_TYPES):
for trigger_definition in trigger_definitions:
LOG.debug("Registering internal trigger: %s", trigger_definition["name"])
is_action_trigger = (
trigger_definition["name"] == ACTION_SENSOR_TRIGGER["name"]
)
if is_action_trigger and not action_sensor_enabled:
continue
try:
trigger_type_db = _register_internal_trigger_type(
trigger_definition=trigger_definition
)
except Exception:
LOG.exception(
"Failed registering internal trigger: %s.", trigger_definition
)
raise
else:
registered_trigger_types_db.append(trigger_type_db)
return registered_trigger_types_db
|
def register_internal_trigger_types():
"""
Register internal trigger types.
Note: This method blocks until all the trigger types have been registered.
"""
action_sensor_enabled = cfg.CONF.action_sensor.enable
registered_trigger_types_db = []
for _, trigger_definitions in six.iteritems(INTERNAL_TRIGGER_TYPES):
for trigger_definition in trigger_definitions:
LOG.debug("Registering internal trigger: %s", trigger_definition["name"])
is_action_trigger = (
trigger_definition["name"] == ACTION_SENSOR_TRIGGER["name"]
)
if is_action_trigger and not action_sensor_enabled:
continue
try:
trigger_type_db = _register_internal_trigger_type(
trigger_definition=trigger_definition
)
except Exception:
LOG.exception(
"Failed registering internal trigger: %s.", trigger_definition
)
raise
else:
registered_trigger_types_db.append(trigger_type_db)
return registered_trigger_types_db
|
https://github.com/StackStorm/st2/issues/3933
|
Registering content...[flags = --config-file /etc/st2/st2.conf --register-all]
2017-11-10 16:04:14,401 INFO [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "stackstorm".
2017-11-10 16:04:20,355 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.update" })
2017-11-10 16:04:20,432 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_d_b index: uid_1 dup key: { : "trigger:core:st2.key_value_pair.value_change:99914b932bd37a50b983c5e7c90ae93b" })
2017-11-10 16:04:20,451 ERROR [-] Conflict while trying to save in DB.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/persistence/base.py", line 173, in add_or_update
model_object = cls._get_impl().add_or_update(model_object)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/db/__init__.py", line 336, in add_or_update
instance.save()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/mongoengine/document.py", line 364, in save
raise NotUniqueError(message % six.text_type(err))
NotUniqueError: Tried to save duplicate unique keys (E11000 duplicate key error collection: st2.trigger_type_d_b index: uid_1 dup key: { : "trigger_type:core:st2.key_value_pair.delete" })
2017-11-10 16:04:20,567 INFO [-] =========================================================
2017-11-10 16:04:20,568 INFO [-] ############## Registering triggers #####################
2017-11-10 16:04:20,568 INFO [-] =========================================================
2017-11-10 16:04:20,673 INFO [-] Registered 0 triggers.
|
NotUniqueError
|
def main():
try:
_setup()
return _run_scheduler()
except SystemExit as exit_code:
sys.exit(exit_code)
except:
LOG.exception("(PID=%s) Scheduler quit due to exception.", os.getpid())
return 1
finally:
_teardown()
|
def main():
try:
_setup()
return _run_queuer()
except SystemExit as exit_code:
sys.exit(exit_code)
except:
LOG.exception("(PID=%s) Scheduler quit due to exception.", os.getpid())
return 1
finally:
_teardown()
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def _register_service_opts():
scheduler_opts = [
cfg.StrOpt(
"logging",
default="/etc/st2/logging.scheduler.conf",
help="Location of the logging configuration file.",
),
cfg.IntOpt(
"pool_size",
default=10,
help="The size of the pool used by the scheduler for scheduling executions.",
),
cfg.FloatOpt(
"sleep_interval",
default=0.10,
help="How long (in seconds) to sleep between each action scheduler main loop run "
"interval.",
),
cfg.FloatOpt(
"gc_interval",
default=10,
help="How often (in seconds) to look for zombie execution requests before rescheduling "
"them.",
),
cfg.IntOpt(
"retry_max_attempt",
default=10,
help="The maximum number of attempts that the scheduler retries on error.",
),
cfg.IntOpt(
"retry_wait_msec",
default=3000,
help="The number of milliseconds to wait in between retries.",
),
]
cfg.CONF.register_opts(scheduler_opts, group="scheduler")
|
def _register_service_opts():
scheduler_opts = [
cfg.StrOpt(
"logging",
default="/etc/st2/logging.scheduler.conf",
help="Location of the logging configuration file.",
),
cfg.IntOpt(
"pool_size",
default=10,
help="The size of the pool used by the scheduler for scheduling executions.",
),
cfg.FloatOpt(
"sleep_interval",
default=0.10,
help="How long (in seconds) to sleep between each action scheduler main loop run "
"interval.",
),
cfg.FloatOpt(
"gc_interval",
default=10,
help="How often (in seconds) to look for zombie execution requests before rescheduling "
"them.",
),
]
cfg.CONF.register_opts(scheduler_opts, group="scheduler")
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def __init__(self):
self.message_type = LiveActionDB
self._shutdown = False
self._pool = eventlet.GreenPool(size=cfg.CONF.scheduler.pool_size)
self._coordinator = coordination_service.get_coordinator()
self._main_thread = None
self._cleanup_thread = None
|
def __init__(self):
self.message_type = LiveActionDB
self._shutdown = False
self._pool = eventlet.GreenPool(size=cfg.CONF.scheduler.pool_size)
self._coordinator = coordination_service.get_coordinator()
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def run(self):
LOG.debug("Starting scheduler handler...")
while not self._shutdown:
eventlet.greenthread.sleep(cfg.CONF.scheduler.sleep_interval)
self.process()
|
def run(self):
LOG.debug("Entering scheduler loop")
while not self._shutdown:
eventlet.greenthread.sleep(cfg.CONF.scheduler.sleep_interval)
execution_queue_item_db = self._get_next_execution()
if execution_queue_item_db:
self._pool.spawn(self._handle_execution, execution_queue_item_db)
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def cleanup(self):
LOG.debug("Starting scheduler garbage collection...")
while not self._shutdown:
eventlet.greenthread.sleep(cfg.CONF.scheduler.gc_interval)
self._handle_garbage_collection()
|
def cleanup(self):
LOG.debug("Starting scheduler garbage collection")
while not self._shutdown:
eventlet.greenthread.sleep(cfg.CONF.scheduler.gc_interval)
self._handle_garbage_collection()
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def start(self):
self._shutdown = False
# Spawn the worker threads.
self._main_thread = eventlet.spawn(self.run)
self._cleanup_thread = eventlet.spawn(self.cleanup)
# Link the threads to the shutdown function. If either of the threads exited with error,
# then initiate shutdown which will allow the waits below to throw exception to the
# main process.
self._main_thread.link(self.shutdown)
self._cleanup_thread.link(self.shutdown)
|
def start(self):
self._shutdown = False
eventlet.spawn(self.run)
eventlet.spawn(self.cleanup)
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def shutdown(self, *args, **kwargs):
if not self._shutdown:
self._shutdown = True
|
def shutdown(self):
self._shutdown = True
|
https://github.com/StackStorm/st2/issues/4539
|
2019-02-04 14:48:45,229 140516293681552 INFO scheduler [-] (PID=8536) Scheduler started.
2019-02-04 14:48:45,418 140516293681552 INFO consumers [-] Starting SchedulerEntrypoint...
2019-02-04 14:48:45,429 140516109106224 INFO mixins [-] Connected to amqp://xxx:**@127.0.0.1:5672//
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] Traceback (most recent call last):
2019-02-04 14:51:05,599 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 109, in wait
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] listener.cb(fileno)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/eventlet/greenthread.py", line 219, in main
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] result = function(*args, **kwargs)
2019-02-04 14:51:05,600 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 67, in run
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = self._get_next_execution()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2actions/scheduler/handler.py", line 125, in _get_next_execution
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()
2019-02-04 14:51:05,601 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 294, in first
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] result = queryset[0]
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/mongoengine/queryset/base.py", line 194, in __getitem__
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] queryset._cursor[key],
2019-02-04 14:51:05,602 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 610, in __getitem__
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] for doc in clone:
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1189, in next
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] if len(self.__data) or self._refresh():
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 1104, in _refresh
2019-02-04 14:51:05,603 140516198248688 ERROR traceback [-] self.__send_message(q)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/cursor.py", line 982, in __send_message
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] helpers._check_command_response(first)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] File "/opt/stackstorm/st2/lib/python2.7/site-packages/pymongo/helpers.py", line 132, in _check_command_response
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] raise NotMasterError(errmsg, response)
2019-02-04 14:51:05,604 140516198248688 ERROR traceback [-] NotMasterError: interrupted at shutdown
2019-02-04 14:51:05,605 140516198248688 ERROR hub [-] Removing descriptor: 9
2019-02-05 03:28:30,413 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.log" with mode "a"
2019-02-05 03:28:30,422 140516198248688 INFO misc [-] Re-opening log file "/var/log/st2/st2scheduler.audit.log" with mode "a"
|
NotMasterError
|
def __init__(self, endpoint, cacert=None, debug=False):
self._url = httpclient.get_url_without_trailing_slash(endpoint) + "/stream"
self.debug = debug
self.cacert = cacert
|
def __init__(self, endpoint, cacert, debug):
self._url = httpclient.get_url_without_trailing_slash(endpoint) + "/stream"
self.debug = debug
self.cacert = cacert
|
https://github.com/StackStorm/st2/issues/4361
|
2018-09-25 09:01:02,475 DEBUG - Using cached token from file "/home/ubuntu/.st2/token-st2admin"
Traceback (most recent call last):
File "/home/ubuntu/st2/st2client/st2client/shell.py", line 402, in run
func(args)
File "/home/ubuntu/st2/st2client/st2client/commands/resource.py", line 47, in decorate
return func(*args, **kwargs)
File "/home/ubuntu/st2/st2client/st2client/commands/pack.py", line 252, in run_and_print
instance = super(PackInstallCommand, self).run_and_print(args, **kwargs)
File "/home/ubuntu/st2/st2client/st2client/commands/resource.py", line 47, in decorate
return func(*args, **kwargs)
File "/home/ubuntu/st2/st2client/st2client/commands/pack.py", line 119, in run_and_print
for event in stream_mgr.listen(events, **kwargs):
File "/home/ubuntu/st2/st2client/st2client/models/core.py", line 614, in listen
for message in SSEClient(url):
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/sseclient.py", line 39, in __init__
self._connect()
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/sseclient.py", line 47, in _connect
self.resp = requester.get(self.url, stream=True, **self.requests_kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/api.py", line 72, in get
return request('get', url, params=params, **kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/api.py", line 58, in request
return session.request(method=method, url=url, **kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/sessions.py", line 518, in request
resp = self.send(prep, **send_kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/sessions.py", line 639, in send
r = adapter.send(request, **kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/adapters.py", line 512, in send
raise SSLError(e, request=request)
SSLError: ("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')],)",)
# -------- begin 139990567429072 request ----------
curl -X POST -H 'Connection: keep-alive' -H 'Accept-Encoding: gzip, deflate' -H 'Accept: */*' -H 'User-Agent: python-requests/2.14.2' -H 'content-type: application/json' -H 'X-Auth-Token: 73ea3504cf774a33ad9ac0cd128d3d87' -H 'Content-Length: 18' --data-binary '{"pack": "zabbix"}' http://127.0.0.1:9101/v1/packs/index/search
# -------- begin 139990567429072 response ----------
{
"name": "zabbix",
"author": "Hiroyasu OHYAMA",
"content": {
"tests": {
"count": 12,
"resources": [
"zabbix_base_action_test_case.py",
"test_host_get_status.py",
"test_tool_register_st2_config_to_zabbix.py",
"test_host_delete.py",
"test_host_update_status.py",
"test_maintenance_create_or_update.py",
"test_maintenance_delete.py",
"test_test_credentials.py",
"test_action_base.py",
"test_host_get_id.py",
"test_tool_st2_dispatch.py",
"test_host_get_multiple_ids.py"
]
},
"actions": {
"count": 10,
"resources": [
"host_update_status",
"maintenance_delete",
"host_delete_by_id",
"test_credentials",
"host_delete",
"host_get_status",
"maintenance_create_or_update",
"host_get_multiple_ids",
"ack_event",
"host_get_id"
]
},
"triggers": {
"count": 1,
"resources": [
"event_handler"
]
}
},
"version": "0.1.7",
"repo_url": "https://github.com/StackStorm-Exchange/stackstorm-zabbix",
"keywords": [
"zabbix",
"monitoring"
],
"ref": "zabbix",
"email": "user.localhost2000@gmail.com",
"description": "StackStorm pack that contains Zabbix integrations"
}
# -------- end 139990567429072 response ------------
For the "zabbix" pack, the following content will be registered:
rules | 0
sensors | 0
triggers | 1
actions | 10
aliases | 0
Installation may take a while for packs with many items.
# -------- begin 139990567429072 request ----------
curl -X POST -H 'Connection: keep-alive' -H 'Accept-Encoding: gzip, deflate' -H 'Accept: */*' -H 'User-Agent: python-requests/2.14.2' -H 'content-type: application/json' -H 'X-Auth-Token: 73ea3504cf774a33ad9ac0cd128d3d87' -H 'Content-Length: 55' --data-binary '{"force": false, "python3": false, "packs": ["zabbix"]}' http://127.0.0.1:9101/v1/packs/install
# -------- begin 139990567429072 response ----------
{
"execution_id": "5ba9f94e987b611744cd4ebf"
}
# -------- end 139990567429072 response ------------
ERROR: ("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')],)",)
CLI settings:
----------------
Config file path: /home/ubuntu/.st2/config
Client settings:
----------------
ST2_BASE_URL: http://127.0.0.1
ST2_AUTH_URL: http://127.0.0.1:9100
ST2_API_URL: http://127.0.0.1:9101/v1
ST2_STREAM_URL: https://ubuntu0/stream/v1
ST2_AUTH_TOKEN: 73ea3504cf774a33ad9ac0cd128d3d87
Proxy settings:
---------------
HTTP_PROXY:
HTTPS_PROXY:
|
SSLError
|
def listen(self, events=None, **kwargs):
# Late import to avoid very expensive in-direct import (~1 second) when this function is
# not called / used
from sseclient import SSEClient
url = self._url
query_params = {}
request_params = {}
if events and isinstance(events, six.string_types):
events = [events]
if "token" in kwargs:
query_params["x-auth-token"] = kwargs.get("token")
if "api_key" in kwargs:
query_params["st2-api-key"] = kwargs.get("api_key")
if events:
query_params["events"] = ",".join(events)
if self.cacert is not None:
request_params["verify"] = self.cacert
query_string = "?" + urllib.parse.urlencode(query_params)
url = url + query_string
for message in SSEClient(url, **request_params):
# If the execution on the API server takes too long, the message
# can be empty. In this case, rerun the query.
if not message.data:
continue
yield json.loads(message.data)
|
def listen(self, events=None, **kwargs):
# Late import to avoid very expensive in-direct import (~1 second) when this function is
# not called / used
from sseclient import SSEClient
url = self._url
query_params = {}
if events and isinstance(events, six.string_types):
events = [events]
if "token" in kwargs:
query_params["x-auth-token"] = kwargs.get("token")
if "api_key" in kwargs:
query_params["st2-api-key"] = kwargs.get("api_key")
if events:
query_params["events"] = ",".join(events)
query_string = "?" + urllib.parse.urlencode(query_params)
url = url + query_string
for message in SSEClient(url):
# If the execution on the API server takes too long, the message
# can be empty. In this case, rerun the query.
if not message.data:
continue
yield json.loads(message.data)
|
https://github.com/StackStorm/st2/issues/4361
|
2018-09-25 09:01:02,475 DEBUG - Using cached token from file "/home/ubuntu/.st2/token-st2admin"
Traceback (most recent call last):
File "/home/ubuntu/st2/st2client/st2client/shell.py", line 402, in run
func(args)
File "/home/ubuntu/st2/st2client/st2client/commands/resource.py", line 47, in decorate
return func(*args, **kwargs)
File "/home/ubuntu/st2/st2client/st2client/commands/pack.py", line 252, in run_and_print
instance = super(PackInstallCommand, self).run_and_print(args, **kwargs)
File "/home/ubuntu/st2/st2client/st2client/commands/resource.py", line 47, in decorate
return func(*args, **kwargs)
File "/home/ubuntu/st2/st2client/st2client/commands/pack.py", line 119, in run_and_print
for event in stream_mgr.listen(events, **kwargs):
File "/home/ubuntu/st2/st2client/st2client/models/core.py", line 614, in listen
for message in SSEClient(url):
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/sseclient.py", line 39, in __init__
self._connect()
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/sseclient.py", line 47, in _connect
self.resp = requester.get(self.url, stream=True, **self.requests_kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/api.py", line 72, in get
return request('get', url, params=params, **kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/api.py", line 58, in request
return session.request(method=method, url=url, **kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/sessions.py", line 518, in request
resp = self.send(prep, **send_kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/sessions.py", line 639, in send
r = adapter.send(request, **kwargs)
File "/home/ubuntu/st2/virtualenv/local/lib/python2.7/site-packages/requests-2.14.2-py2.7.egg/requests/adapters.py", line 512, in send
raise SSLError(e, request=request)
SSLError: ("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')],)",)
# -------- begin 139990567429072 request ----------
curl -X POST -H 'Connection: keep-alive' -H 'Accept-Encoding: gzip, deflate' -H 'Accept: */*' -H 'User-Agent: python-requests/2.14.2' -H 'content-type: application/json' -H 'X-Auth-Token: 73ea3504cf774a33ad9ac0cd128d3d87' -H 'Content-Length: 18' --data-binary '{"pack": "zabbix"}' http://127.0.0.1:9101/v1/packs/index/search
# -------- begin 139990567429072 response ----------
{
"name": "zabbix",
"author": "Hiroyasu OHYAMA",
"content": {
"tests": {
"count": 12,
"resources": [
"zabbix_base_action_test_case.py",
"test_host_get_status.py",
"test_tool_register_st2_config_to_zabbix.py",
"test_host_delete.py",
"test_host_update_status.py",
"test_maintenance_create_or_update.py",
"test_maintenance_delete.py",
"test_test_credentials.py",
"test_action_base.py",
"test_host_get_id.py",
"test_tool_st2_dispatch.py",
"test_host_get_multiple_ids.py"
]
},
"actions": {
"count": 10,
"resources": [
"host_update_status",
"maintenance_delete",
"host_delete_by_id",
"test_credentials",
"host_delete",
"host_get_status",
"maintenance_create_or_update",
"host_get_multiple_ids",
"ack_event",
"host_get_id"
]
},
"triggers": {
"count": 1,
"resources": [
"event_handler"
]
}
},
"version": "0.1.7",
"repo_url": "https://github.com/StackStorm-Exchange/stackstorm-zabbix",
"keywords": [
"zabbix",
"monitoring"
],
"ref": "zabbix",
"email": "user.localhost2000@gmail.com",
"description": "StackStorm pack that contains Zabbix integrations"
}
# -------- end 139990567429072 response ------------
For the "zabbix" pack, the following content will be registered:
rules | 0
sensors | 0
triggers | 1
actions | 10
aliases | 0
Installation may take a while for packs with many items.
# -------- begin 139990567429072 request ----------
curl -X POST -H 'Connection: keep-alive' -H 'Accept-Encoding: gzip, deflate' -H 'Accept: */*' -H 'User-Agent: python-requests/2.14.2' -H 'content-type: application/json' -H 'X-Auth-Token: 73ea3504cf774a33ad9ac0cd128d3d87' -H 'Content-Length: 55' --data-binary '{"force": false, "python3": false, "packs": ["zabbix"]}' http://127.0.0.1:9101/v1/packs/install
# -------- begin 139990567429072 response ----------
{
"execution_id": "5ba9f94e987b611744cd4ebf"
}
# -------- end 139990567429072 response ------------
ERROR: ("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')],)",)
CLI settings:
----------------
Config file path: /home/ubuntu/.st2/config
Client settings:
----------------
ST2_BASE_URL: http://127.0.0.1
ST2_AUTH_URL: http://127.0.0.1:9100
ST2_API_URL: http://127.0.0.1:9101/v1
ST2_STREAM_URL: https://ubuntu0/stream/v1
ST2_AUTH_TOKEN: 73ea3504cf774a33ad9ac0cd128d3d87
Proxy settings:
---------------
HTTP_PROXY:
HTTPS_PROXY:
|
SSLError
|
def setup_app(config={}):
LOG.info("Creating st2api: %s as OpenAPI app.", VERSION_STRING)
is_gunicorn = config.get("is_gunicorn", False)
if is_gunicorn:
# Note: We need to perform monkey patching in the worker. If we do it in
# the master process (gunicorn_config.py), it breaks tons of things
# including shutdown
monkey_patch()
st2api_config.register_opts()
# This should be called in gunicorn case because we only want
# workers to connect to db, rabbbitmq etc. In standalone HTTP
# server case, this setup would have already occurred.
common_setup(
service="api",
config=st2api_config,
setup_db=True,
register_mq_exchanges=True,
register_signal_handlers=True,
register_internal_trigger_types=True,
run_migrations=True,
config_args=config.get("config_args", None),
)
# Additional pre-run time checks
validate_rbac_is_correctly_configured()
router = Router(
debug=cfg.CONF.api.debug, auth=cfg.CONF.auth.enable, is_gunicorn=is_gunicorn
)
spec = spec_loader.load_spec("st2common", "openapi.yaml.j2")
transforms = {
"^/api/v1/": ["/", "/v1/", "/v1"],
"^/api/v1/executions": ["/actionexecutions", "/v1/actionexecutions"],
"^/api/exp/": ["/exp/"],
}
router.add_spec(spec, transforms=transforms)
app = router.as_wsgi
# Order is important. Check middleware for detailed explanation.
app = StreamingMiddleware(app, path_whitelist=["/v1/executions/*/output*"])
app = ErrorHandlingMiddleware(app)
app = CorsMiddleware(app)
app = LoggingMiddleware(app, router)
app = RequestIDMiddleware(app)
return app
|
def setup_app(config={}):
LOG.info("Creating st2api: %s as OpenAPI app.", VERSION_STRING)
is_gunicorn = config.get("is_gunicorn", False)
if is_gunicorn:
# Note: We need to perform monkey patching in the worker. If we do it in
# the master process (gunicorn_config.py), it breaks tons of things
# including shutdown
monkey_patch()
st2api_config.register_opts()
# This should be called in gunicorn case because we only want
# workers to connect to db, rabbbitmq etc. In standalone HTTP
# server case, this setup would have already occurred.
common_setup(
service="api",
config=st2api_config,
setup_db=True,
register_mq_exchanges=True,
register_signal_handlers=True,
register_internal_trigger_types=True,
run_migrations=True,
config_args=config.get("config_args", None),
)
# Additional pre-run time checks
validate_rbac_is_correctly_configured()
router = Router(debug=cfg.CONF.api.debug, auth=cfg.CONF.auth.enable)
spec = spec_loader.load_spec("st2common", "openapi.yaml.j2")
transforms = {
"^/api/v1/": ["/", "/v1/", "/v1"],
"^/api/v1/executions": ["/actionexecutions", "/v1/actionexecutions"],
"^/api/exp/": ["/exp/"],
}
router.add_spec(spec, transforms=transforms)
app = router.as_wsgi
# Order is important. Check middleware for detailed explanation.
app = StreamingMiddleware(app, path_whitelist=["/v1/executions/*/output*"])
app = ErrorHandlingMiddleware(app)
app = CorsMiddleware(app)
app = LoggingMiddleware(app, router)
app = RequestIDMiddleware(app)
return app
|
https://github.com/StackStorm/st2/issues/4254
|
2018-07-19 18:31:46,668 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - POST /tokens with query={} (remote_addr='127.0.0.1',method='POST',request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',query={},path='/tokens')
2018-07-19 18:31:46,669 DEBUG [-] Recieved call with WebOb: POST /tokens HTTP/1.0
Accept: */*
Authorization: Basic c3QyYWRtaW46Q2hAbmdlTWU=
Content-Type: text/plain
Host: 127.0.0.1:9100
User-Agent: curl/7.47.0
X-Request-Id: f590ed84-3e54-4634-89f2-120eb3f956e5
2018-07-19 18:31:46,669 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,670 DEBUG [-] Parsed endpoint: {'x-parameters': [{'in': 'environ', 'type': 'string', 'description': 'source of the request', 'name': 'remote_addr'}, {'in': 'environ', 'type': 'string', 'description': 'set externally to indicate user identity in case of proxy auth', 'name': 'remote_user'}], 'responses': {'201': {'headers': {'x-api-url': {'type': 'string'}}, 'schema': {'$ref': '#/definitions/Token'}, 'examples': {'application/json': {'user': 'st2admin', 'token': '5e86421776f946e98faea36c29e5a7c7', 'expiry': '2016-05-28T12:39:28.650231Z', 'id': '574840001878c10d0b6e8fbf', 'metadata': {}}}, 'description': 'New token has been created'}, 'default': {'description': 'Unexpected error', 'schema': {'$ref': '#/definitions/Error'}}, '401': {'schema': {'$ref': '#/definitions/Error'}, 'examples': {'application/json': {'faultstring': 'Invalid or missing credentials'}}, 'description': 'Invalid or missing credentials has been provided'}}, 'parameters': [{'in': 'header', 'type': 'string', 'description': 'base64 encoded string containing login and password', 'name': 'authorization'}, {'in': 'header', 'type': 'string', 'description': 'set externally to indicate real source of the request', 'name': 'x-forwarded-for'}, {'in': 'body', 'description': 'Lifespan of the token', 'name': 'request', 'schema': {'$ref': '#/definitions/TokenRequest'}}], 'x-log-result': False, 'security': [], 'description': 'Authenticates a user with `Authorization` header and returns a `token`\nobject.\n', 'operationId': 'st2auth.controllers.v1.auth:token_controller.post'}
2018-07-19 18:31:46,670 DEBUG [-] Parsed path_vars: {}
2018-07-19 18:31:46,700 DEBUG [-] API call failed: Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate
raise error
ValidationError: '{}' is not of type 'object'
Failed validating 'type' in schema:
{'properties': {'ttl': {'minimum': 1, 'type': ['integer', 'null']}},
'type': 'object'}
On instance:
'{}'
(exception_message="'{}' is not of type 'object'",exception_data={'comment': 'Traceback (most recent call last):\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__\n CustomValidator(schema, resolver=self.spec_resolver).validate(data)\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate\n raise error\nValidationError: \'{}\' is not of type \'object\'\n\nFailed validating \'type\' in schema:\n {\'properties\': {\'ttl\': {\'minimum\': 1, \'type\': [\'integer\', \'null\']}},\n \'type\': \'object\'}\n\nOn instance:\n \'{}\'\n', '_headers': None, 'conditional_response': False, 'detail': "'{}' is not of type 'object'", '_app_iter': [''], '_status': '400 Bad Request', '_headerlist': [('Content-Type', 'text/html; charset=UTF-8'), ('Content-Length', '0')]},exception_class='HTTPBadRequest')
2018-07-19 18:31:46,701 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,701 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - 400 53 33.332ms (content_length=53,request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',runtime=33.332,remote_addr='127.0.0.1',status=400,method='POST',path='/tokens')
|
ValidationError
|
def setup_app(config={}):
LOG.info("Creating st2auth: %s as OpenAPI app.", VERSION_STRING)
is_gunicorn = config.get("is_gunicorn", False)
if is_gunicorn:
# Note: We need to perform monkey patching in the worker. If we do it in
# the master process (gunicorn_config.py), it breaks tons of things
# including shutdown
monkey_patch()
# This should be called in gunicorn case because we only want
# workers to connect to db, rabbbitmq etc. In standalone HTTP
# server case, this setup would have already occurred.
st2auth_config.register_opts()
common_setup(
service="auth",
config=st2auth_config,
setup_db=True,
register_mq_exchanges=False,
register_signal_handlers=True,
register_internal_trigger_types=False,
run_migrations=False,
config_args=config.get("config_args", None),
)
# Additional pre-run time checks
validate_auth_backend_is_correctly_configured()
router = Router(debug=cfg.CONF.auth.debug, is_gunicorn=is_gunicorn)
spec = spec_loader.load_spec("st2common", "openapi.yaml.j2")
transforms = {"^/auth/v1/": ["/", "/v1/"]}
router.add_spec(spec, transforms=transforms)
app = router.as_wsgi
# Order is important. Check middleware for detailed explanation.
app = ErrorHandlingMiddleware(app)
app = CorsMiddleware(app)
app = LoggingMiddleware(app, router)
app = RequestIDMiddleware(app)
return app
|
def setup_app(config={}):
LOG.info("Creating st2auth: %s as OpenAPI app.", VERSION_STRING)
is_gunicorn = config.get("is_gunicorn", False)
if is_gunicorn:
# Note: We need to perform monkey patching in the worker. If we do it in
# the master process (gunicorn_config.py), it breaks tons of things
# including shutdown
monkey_patch()
# This should be called in gunicorn case because we only want
# workers to connect to db, rabbbitmq etc. In standalone HTTP
# server case, this setup would have already occurred.
st2auth_config.register_opts()
common_setup(
service="auth",
config=st2auth_config,
setup_db=True,
register_mq_exchanges=False,
register_signal_handlers=True,
register_internal_trigger_types=False,
run_migrations=False,
config_args=config.get("config_args", None),
)
# Additional pre-run time checks
validate_auth_backend_is_correctly_configured()
router = Router(debug=cfg.CONF.auth.debug)
spec = spec_loader.load_spec("st2common", "openapi.yaml.j2")
transforms = {"^/auth/v1/": ["/", "/v1/"]}
router.add_spec(spec, transforms=transforms)
app = router.as_wsgi
# Order is important. Check middleware for detailed explanation.
app = ErrorHandlingMiddleware(app)
app = CorsMiddleware(app)
app = LoggingMiddleware(app, router)
app = RequestIDMiddleware(app)
return app
|
https://github.com/StackStorm/st2/issues/4254
|
2018-07-19 18:31:46,668 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - POST /tokens with query={} (remote_addr='127.0.0.1',method='POST',request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',query={},path='/tokens')
2018-07-19 18:31:46,669 DEBUG [-] Recieved call with WebOb: POST /tokens HTTP/1.0
Accept: */*
Authorization: Basic c3QyYWRtaW46Q2hAbmdlTWU=
Content-Type: text/plain
Host: 127.0.0.1:9100
User-Agent: curl/7.47.0
X-Request-Id: f590ed84-3e54-4634-89f2-120eb3f956e5
2018-07-19 18:31:46,669 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,670 DEBUG [-] Parsed endpoint: {'x-parameters': [{'in': 'environ', 'type': 'string', 'description': 'source of the request', 'name': 'remote_addr'}, {'in': 'environ', 'type': 'string', 'description': 'set externally to indicate user identity in case of proxy auth', 'name': 'remote_user'}], 'responses': {'201': {'headers': {'x-api-url': {'type': 'string'}}, 'schema': {'$ref': '#/definitions/Token'}, 'examples': {'application/json': {'user': 'st2admin', 'token': '5e86421776f946e98faea36c29e5a7c7', 'expiry': '2016-05-28T12:39:28.650231Z', 'id': '574840001878c10d0b6e8fbf', 'metadata': {}}}, 'description': 'New token has been created'}, 'default': {'description': 'Unexpected error', 'schema': {'$ref': '#/definitions/Error'}}, '401': {'schema': {'$ref': '#/definitions/Error'}, 'examples': {'application/json': {'faultstring': 'Invalid or missing credentials'}}, 'description': 'Invalid or missing credentials has been provided'}}, 'parameters': [{'in': 'header', 'type': 'string', 'description': 'base64 encoded string containing login and password', 'name': 'authorization'}, {'in': 'header', 'type': 'string', 'description': 'set externally to indicate real source of the request', 'name': 'x-forwarded-for'}, {'in': 'body', 'description': 'Lifespan of the token', 'name': 'request', 'schema': {'$ref': '#/definitions/TokenRequest'}}], 'x-log-result': False, 'security': [], 'description': 'Authenticates a user with `Authorization` header and returns a `token`\nobject.\n', 'operationId': 'st2auth.controllers.v1.auth:token_controller.post'}
2018-07-19 18:31:46,670 DEBUG [-] Parsed path_vars: {}
2018-07-19 18:31:46,700 DEBUG [-] API call failed: Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate
raise error
ValidationError: '{}' is not of type 'object'
Failed validating 'type' in schema:
{'properties': {'ttl': {'minimum': 1, 'type': ['integer', 'null']}},
'type': 'object'}
On instance:
'{}'
(exception_message="'{}' is not of type 'object'",exception_data={'comment': 'Traceback (most recent call last):\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__\n CustomValidator(schema, resolver=self.spec_resolver).validate(data)\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate\n raise error\nValidationError: \'{}\' is not of type \'object\'\n\nFailed validating \'type\' in schema:\n {\'properties\': {\'ttl\': {\'minimum\': 1, \'type\': [\'integer\', \'null\']}},\n \'type\': \'object\'}\n\nOn instance:\n \'{}\'\n', '_headers': None, 'conditional_response': False, 'detail': "'{}' is not of type 'object'", '_app_iter': [''], '_status': '400 Bad Request', '_headerlist': [('Content-Type', 'text/html; charset=UTF-8'), ('Content-Length', '0')]},exception_class='HTTPBadRequest')
2018-07-19 18:31:46,701 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,701 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - 400 53 33.332ms (content_length=53,request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',runtime=33.332,remote_addr='127.0.0.1',status=400,method='POST',path='/tokens')
|
ValidationError
|
def __call__(self, req):
"""
The method is invoked on every request and shows the lifecycle of the request received from
the middleware.
Although some middleware may use parts of the API spec, it is safe to assume that if you're
looking for the particular spec property handler, it's most likely a part of this method.
At the time of writing, the only property being utilized by middleware was `x-log-result`.
"""
LOG.debug("Received call with WebOb: %s", req)
endpoint, path_vars = self.match(req)
LOG.debug("Parsed endpoint: %s", endpoint)
LOG.debug("Parsed path_vars: %s", path_vars)
context = copy.copy(getattr(self, "mock_context", {}))
cookie_token = None
# Handle security
if "security" in endpoint:
security = endpoint.get("security")
else:
security = self.spec.get("security", [])
if self.auth and security:
try:
security_definitions = self.spec.get("securityDefinitions", {})
for statement in security:
declaration, options = statement.copy().popitem()
definition = security_definitions[declaration]
if definition["type"] == "apiKey":
if definition["in"] == "header":
token = req.headers.get(definition["name"])
elif definition["in"] == "query":
token = req.GET.get(definition["name"])
elif definition["in"] == "cookie":
token = req.cookies.get(definition["name"])
else:
token = None
if token:
auth_func = op_resolver(definition["x-operationId"])
auth_resp = auth_func(token)
# Include information on how user authenticated inside the context
if "auth-token" in definition["name"].lower():
auth_method = "authentication token"
elif "api-key" in definition["name"].lower():
auth_method = "API key"
context["user"] = User.get_by_name(auth_resp.user)
context["auth_info"] = {
"method": auth_method,
"location": definition["in"],
}
# Also include token expiration time when authenticated via auth token
if "auth-token" in definition["name"].lower():
context["auth_info"]["token_expire"] = auth_resp.expiry
if "x-set-cookie" in definition:
max_age = (
auth_resp.expiry - date_utils.get_datetime_utc_now()
)
cookie_token = cookies.make_cookie(
definition["x-set-cookie"],
token,
max_age=max_age,
httponly=True,
)
break
if "user" not in context:
raise auth_exc.NoAuthSourceProvidedError(
"One of Token or API key required."
)
except (
auth_exc.NoAuthSourceProvidedError,
auth_exc.MultipleAuthSourcesError,
) as e:
LOG.error(str(e))
return abort_unauthorized(str(e))
except auth_exc.TokenNotProvidedError as e:
LOG.exception("Token is not provided.")
return abort_unauthorized(str(e))
except auth_exc.TokenNotFoundError as e:
LOG.exception("Token is not found.")
return abort_unauthorized(str(e))
except auth_exc.TokenExpiredError as e:
LOG.exception("Token has expired.")
return abort_unauthorized(str(e))
except auth_exc.ApiKeyNotProvidedError as e:
LOG.exception("API key is not provided.")
return abort_unauthorized(str(e))
except auth_exc.ApiKeyNotFoundError as e:
LOG.exception("API key is not found.")
return abort_unauthorized(str(e))
except auth_exc.ApiKeyDisabledError as e:
LOG.exception("API key is disabled.")
return abort_unauthorized(str(e))
if cfg.CONF.rbac.enable:
user_db = context["user"]
permission_type = endpoint.get("x-permissions", None)
if permission_type:
resolver = resolvers.get_resolver_for_permission_type(permission_type)
has_permission = resolver.user_has_permission(user_db, permission_type)
if not has_permission:
raise rbac_exc.ResourceTypeAccessDeniedError(
user_db, permission_type
)
# Collect parameters
kw = {}
for param in endpoint.get("parameters", []) + endpoint.get("x-parameters", []):
name = param["name"]
argument_name = param.get("x-as", None) or name
source = param["in"]
default = param.get("default", None)
# Collecting params from different sources
if source == "query":
kw[argument_name] = req.GET.get(name, default)
elif source == "path":
kw[argument_name] = path_vars[name]
elif source == "header":
kw[argument_name] = req.headers.get(name, default)
elif source == "formData":
kw[argument_name] = req.POST.get(name, default)
elif source == "environ":
kw[argument_name] = req.environ.get(name.upper(), default)
elif source == "context":
kw[argument_name] = context.get(name, default)
elif source == "request":
kw[argument_name] = getattr(req, name)
elif source == "body":
content_type = req.headers.get("Content-Type", "application/json")
content_type = parse_content_type_header(content_type=content_type)[0]
schema = param["schema"]
# NOTE: HACK: Workaround for eventlet wsgi server which sets Content-Type to
# text/plain if Content-Type is not provided in the request.
# All ouf our API endpoints except /exp/validation/mistral expect application/json
# so we explicitly set it to that if not provided (set to text/plain by the base
# http server) and if it's not /exp/validation/mistral API endpoint
if not self.is_gunicorn and content_type == "text/plain":
operation_id = endpoint["operationId"]
if "mistral_validation_controller" not in operation_id:
content_type = "application/json"
# Note: We also want to perform validation if no body is explicitly provided - in a
# lot of POST, PUT scenarios, body is mandatory
if not req.body and content_type == "application/json":
req.body = b"{}"
try:
if content_type == "application/json":
data = req.json
elif content_type == "text/plain":
data = req.body
elif content_type in [
"application/x-www-form-urlencoded",
"multipart/form-data",
]:
data = urlparse.parse_qs(req.body)
else:
raise ValueError('Unsupported Content-Type: "%s"' % (content_type))
except Exception as e:
detail = "Failed to parse request body: %s" % str(e)
raise exc.HTTPBadRequest(detail=detail)
# Special case for Python 3
if (
six.PY3
and content_type == "text/plain"
and isinstance(data, six.binary_type)
):
# Convert bytes to text type (string / unicode)
data = data.decode("utf-8")
try:
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=e.message, comment=traceback.format_exc()
)
if content_type == "text/plain":
kw[argument_name] = data
else:
class Body(object):
def __init__(self, **entries):
self.__dict__.update(entries)
ref = schema.get("$ref", None)
if ref:
with self.spec_resolver.resolving(ref) as resolved:
schema = resolved
if "x-api-model" in schema:
input_type = schema.get("type", [])
Model = op_resolver(schema["x-api-model"])
if input_type and not isinstance(input_type, (list, tuple)):
input_type = [input_type]
# root attribute is not an object, we need to use wrapper attribute to
# make it work with **kwarg expansion
if input_type and "array" in input_type:
data = {"data": data}
instance = self._get_model_instance(model_cls=Model, data=data)
# Call validate on the API model - note we should eventually move all
# those model schema definitions into openapi.yaml
try:
instance = instance.validate()
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=e.message, comment=traceback.format_exc()
)
else:
LOG.debug(
"Missing x-api-model definition for %s, using generic Body "
"model." % (endpoint["operationId"])
)
model = Body
instance = self._get_model_instance(model_cls=model, data=data)
kw[argument_name] = instance
# Making sure all required params are present
required = param.get("required", False)
if required and kw[argument_name] is None:
detail = 'Required parameter "%s" is missing' % name
raise exc.HTTPBadRequest(detail=detail)
# Validating and casting param types
param_type = param.get("type", None)
if kw[argument_name] is not None:
if param_type == "boolean":
positive = ("true", "1", "yes", "y")
negative = ("false", "0", "no", "n")
if str(kw[argument_name]).lower() not in positive + negative:
detail = 'Parameter "%s" is not of type boolean' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = str(kw[argument_name]).lower() in positive
elif param_type == "integer":
regex = r"^-?[0-9]+$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type integer' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = int(kw[argument_name])
elif param_type == "number":
regex = r"^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type float' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = float(kw[argument_name])
elif (
param_type == "array"
and param.get("items", {}).get("type", None) == "string"
):
if kw[argument_name] is None:
kw[argument_name] = []
elif isinstance(kw[argument_name], (list, tuple)):
# argument is already an array
pass
else:
kw[argument_name] = kw[argument_name].split(",")
# Call the controller
try:
func = op_resolver(endpoint["operationId"])
except Exception as e:
LOG.exception(
'Failed to load controller for operation "%s": %s'
% (endpoint["operationId"], str(e))
)
raise e
try:
resp = func(**kw)
except Exception as e:
LOG.exception(
'Failed to call controller function "%s" for operation "%s": %s'
% (func.__name__, endpoint["operationId"], str(e))
)
raise e
# Handle response
if resp is None:
resp = Response()
if not hasattr(resp, "__call__"):
resp = Response(json=resp)
responses = endpoint.get("responses", {})
response_spec = responses.get(str(resp.status_code), None)
default_response_spec = responses.get("default", None)
if not response_spec and default_response_spec:
LOG.debug(
'No custom response spec found for endpoint "%s", using a default one'
% (endpoint["operationId"])
)
response_spec_name = "default"
else:
response_spec_name = str(resp.status_code)
response_spec = response_spec or default_response_spec
if response_spec and "schema" in response_spec:
LOG.debug(
'Using response spec "%s" for endpoint %s and status code %s'
% (response_spec_name, endpoint["operationId"], resp.status_code)
)
try:
validator = CustomValidator(
response_spec["schema"], resolver=self.spec_resolver
)
validator.validate(resp.json)
except (jsonschema.ValidationError, ValueError):
LOG.exception("Response validation failed.")
resp.headers.add("Warning", '199 OpenAPI "Response validation failed"')
else:
LOG.debug(
'No response spec found for endpoint "%s"' % (endpoint["operationId"])
)
if cookie_token:
resp.headerlist.append(("Set-Cookie", cookie_token))
return resp
|
def __call__(self, req):
"""
The method is invoked on every request and shows the lifecycle of the request received from
the middleware.
Although some middleware may use parts of the API spec, it is safe to assume that if you're
looking for the particular spec property handler, it's most likely a part of this method.
At the time of writing, the only property being utilized by middleware was `x-log-result`.
"""
LOG.debug("Received call with WebOb: %s", req)
endpoint, path_vars = self.match(req)
LOG.debug("Parsed endpoint: %s", endpoint)
LOG.debug("Parsed path_vars: %s", path_vars)
context = copy.copy(getattr(self, "mock_context", {}))
cookie_token = None
# Handle security
if "security" in endpoint:
security = endpoint.get("security")
else:
security = self.spec.get("security", [])
if self.auth and security:
try:
security_definitions = self.spec.get("securityDefinitions", {})
for statement in security:
declaration, options = statement.copy().popitem()
definition = security_definitions[declaration]
if definition["type"] == "apiKey":
if definition["in"] == "header":
token = req.headers.get(definition["name"])
elif definition["in"] == "query":
token = req.GET.get(definition["name"])
elif definition["in"] == "cookie":
token = req.cookies.get(definition["name"])
else:
token = None
if token:
auth_func = op_resolver(definition["x-operationId"])
auth_resp = auth_func(token)
# Include information on how user authenticated inside the context
if "auth-token" in definition["name"].lower():
auth_method = "authentication token"
elif "api-key" in definition["name"].lower():
auth_method = "API key"
context["user"] = User.get_by_name(auth_resp.user)
context["auth_info"] = {
"method": auth_method,
"location": definition["in"],
}
# Also include token expiration time when authenticated via auth token
if "auth-token" in definition["name"].lower():
context["auth_info"]["token_expire"] = auth_resp.expiry
if "x-set-cookie" in definition:
max_age = (
auth_resp.expiry - date_utils.get_datetime_utc_now()
)
cookie_token = cookies.make_cookie(
definition["x-set-cookie"],
token,
max_age=max_age,
httponly=True,
)
break
if "user" not in context:
raise auth_exc.NoAuthSourceProvidedError(
"One of Token or API key required."
)
except (
auth_exc.NoAuthSourceProvidedError,
auth_exc.MultipleAuthSourcesError,
) as e:
LOG.error(str(e))
return abort_unauthorized(str(e))
except auth_exc.TokenNotProvidedError as e:
LOG.exception("Token is not provided.")
return abort_unauthorized(str(e))
except auth_exc.TokenNotFoundError as e:
LOG.exception("Token is not found.")
return abort_unauthorized(str(e))
except auth_exc.TokenExpiredError as e:
LOG.exception("Token has expired.")
return abort_unauthorized(str(e))
except auth_exc.ApiKeyNotProvidedError as e:
LOG.exception("API key is not provided.")
return abort_unauthorized(str(e))
except auth_exc.ApiKeyNotFoundError as e:
LOG.exception("API key is not found.")
return abort_unauthorized(str(e))
except auth_exc.ApiKeyDisabledError as e:
LOG.exception("API key is disabled.")
return abort_unauthorized(str(e))
if cfg.CONF.rbac.enable:
user_db = context["user"]
permission_type = endpoint.get("x-permissions", None)
if permission_type:
resolver = resolvers.get_resolver_for_permission_type(permission_type)
has_permission = resolver.user_has_permission(user_db, permission_type)
if not has_permission:
raise rbac_exc.ResourceTypeAccessDeniedError(
user_db, permission_type
)
# Collect parameters
kw = {}
for param in endpoint.get("parameters", []) + endpoint.get("x-parameters", []):
name = param["name"]
argument_name = param.get("x-as", None) or name
source = param["in"]
default = param.get("default", None)
# Collecting params from different sources
if source == "query":
kw[argument_name] = req.GET.get(name, default)
elif source == "path":
kw[argument_name] = path_vars[name]
elif source == "header":
kw[argument_name] = req.headers.get(name, default)
elif source == "formData":
kw[argument_name] = req.POST.get(name, default)
elif source == "environ":
kw[argument_name] = req.environ.get(name.upper(), default)
elif source == "context":
kw[argument_name] = context.get(name, default)
elif source == "request":
kw[argument_name] = getattr(req, name)
elif source == "body":
content_type = req.headers.get("Content-Type", "application/json")
content_type = parse_content_type_header(content_type=content_type)[0]
schema = param["schema"]
# Note: We also want to perform validation if no body is explicitly provided - in a
# lot of POST, PUT scenarios, body is mandatory
if not req.body and content_type == "application/json":
req.body = b"{}"
try:
if content_type == "application/json":
data = req.json
elif content_type == "text/plain":
data = req.body
elif content_type in [
"application/x-www-form-urlencoded",
"multipart/form-data",
]:
data = urlparse.parse_qs(req.body)
else:
raise ValueError('Unsupported Content-Type: "%s"' % (content_type))
except Exception as e:
detail = "Failed to parse request body: %s" % str(e)
raise exc.HTTPBadRequest(detail=detail)
# Special case for Python 3
if (
six.PY3
and content_type == "text/plain"
and isinstance(data, six.binary_type)
):
# Convert bytes to text type (string / unicode)
data = data.decode("utf-8")
try:
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=e.message, comment=traceback.format_exc()
)
if content_type == "text/plain":
kw[argument_name] = data
else:
class Body(object):
def __init__(self, **entries):
self.__dict__.update(entries)
ref = schema.get("$ref", None)
if ref:
with self.spec_resolver.resolving(ref) as resolved:
schema = resolved
if "x-api-model" in schema:
input_type = schema.get("type", [])
Model = op_resolver(schema["x-api-model"])
if input_type and not isinstance(input_type, (list, tuple)):
input_type = [input_type]
# root attribute is not an object, we need to use wrapper attribute to
# make it work with **kwarg expansion
if input_type and "array" in input_type:
data = {"data": data}
instance = self._get_model_instance(model_cls=Model, data=data)
# Call validate on the API model - note we should eventually move all
# those model schema definitions into openapi.yaml
try:
instance = instance.validate()
except (jsonschema.ValidationError, ValueError) as e:
raise exc.HTTPBadRequest(
detail=e.message, comment=traceback.format_exc()
)
else:
LOG.debug(
"Missing x-api-model definition for %s, using generic Body "
"model." % (endpoint["operationId"])
)
model = Body
instance = self._get_model_instance(model_cls=model, data=data)
kw[argument_name] = instance
# Making sure all required params are present
required = param.get("required", False)
if required and kw[argument_name] is None:
detail = 'Required parameter "%s" is missing' % name
raise exc.HTTPBadRequest(detail=detail)
# Validating and casting param types
param_type = param.get("type", None)
if kw[argument_name] is not None:
if param_type == "boolean":
positive = ("true", "1", "yes", "y")
negative = ("false", "0", "no", "n")
if str(kw[argument_name]).lower() not in positive + negative:
detail = 'Parameter "%s" is not of type boolean' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = str(kw[argument_name]).lower() in positive
elif param_type == "integer":
regex = r"^-?[0-9]+$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type integer' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = int(kw[argument_name])
elif param_type == "number":
regex = r"^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$"
if not re.search(regex, str(kw[argument_name])):
detail = 'Parameter "%s" is not of type float' % argument_name
raise exc.HTTPBadRequest(detail=detail)
kw[argument_name] = float(kw[argument_name])
elif (
param_type == "array"
and param.get("items", {}).get("type", None) == "string"
):
if kw[argument_name] is None:
kw[argument_name] = []
elif isinstance(kw[argument_name], (list, tuple)):
# argument is already an array
pass
else:
kw[argument_name] = kw[argument_name].split(",")
# Call the controller
try:
func = op_resolver(endpoint["operationId"])
except Exception as e:
LOG.exception(
'Failed to load controller for operation "%s": %s'
% (endpoint["operationId"], str(e))
)
raise e
try:
resp = func(**kw)
except Exception as e:
LOG.exception(
'Failed to call controller function "%s" for operation "%s": %s'
% (func.__name__, endpoint["operationId"], str(e))
)
raise e
# Handle response
if resp is None:
resp = Response()
if not hasattr(resp, "__call__"):
resp = Response(json=resp)
responses = endpoint.get("responses", {})
response_spec = responses.get(str(resp.status_code), None)
default_response_spec = responses.get("default", None)
if not response_spec and default_response_spec:
LOG.debug(
'No custom response spec found for endpoint "%s", using a default one'
% (endpoint["operationId"])
)
response_spec_name = "default"
else:
response_spec_name = str(resp.status_code)
response_spec = response_spec or default_response_spec
if response_spec and "schema" in response_spec:
LOG.debug(
'Using response spec "%s" for endpoint %s and status code %s'
% (response_spec_name, endpoint["operationId"], resp.status_code)
)
try:
validator = CustomValidator(
response_spec["schema"], resolver=self.spec_resolver
)
validator.validate(resp.json)
except (jsonschema.ValidationError, ValueError):
LOG.exception("Response validation failed.")
resp.headers.add("Warning", '199 OpenAPI "Response validation failed"')
else:
LOG.debug(
'No response spec found for endpoint "%s"' % (endpoint["operationId"])
)
if cookie_token:
resp.headerlist.append(("Set-Cookie", cookie_token))
return resp
|
https://github.com/StackStorm/st2/issues/4254
|
2018-07-19 18:31:46,668 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - POST /tokens with query={} (remote_addr='127.0.0.1',method='POST',request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',query={},path='/tokens')
2018-07-19 18:31:46,669 DEBUG [-] Recieved call with WebOb: POST /tokens HTTP/1.0
Accept: */*
Authorization: Basic c3QyYWRtaW46Q2hAbmdlTWU=
Content-Type: text/plain
Host: 127.0.0.1:9100
User-Agent: curl/7.47.0
X-Request-Id: f590ed84-3e54-4634-89f2-120eb3f956e5
2018-07-19 18:31:46,669 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,670 DEBUG [-] Parsed endpoint: {'x-parameters': [{'in': 'environ', 'type': 'string', 'description': 'source of the request', 'name': 'remote_addr'}, {'in': 'environ', 'type': 'string', 'description': 'set externally to indicate user identity in case of proxy auth', 'name': 'remote_user'}], 'responses': {'201': {'headers': {'x-api-url': {'type': 'string'}}, 'schema': {'$ref': '#/definitions/Token'}, 'examples': {'application/json': {'user': 'st2admin', 'token': '5e86421776f946e98faea36c29e5a7c7', 'expiry': '2016-05-28T12:39:28.650231Z', 'id': '574840001878c10d0b6e8fbf', 'metadata': {}}}, 'description': 'New token has been created'}, 'default': {'description': 'Unexpected error', 'schema': {'$ref': '#/definitions/Error'}}, '401': {'schema': {'$ref': '#/definitions/Error'}, 'examples': {'application/json': {'faultstring': 'Invalid or missing credentials'}}, 'description': 'Invalid or missing credentials has been provided'}}, 'parameters': [{'in': 'header', 'type': 'string', 'description': 'base64 encoded string containing login and password', 'name': 'authorization'}, {'in': 'header', 'type': 'string', 'description': 'set externally to indicate real source of the request', 'name': 'x-forwarded-for'}, {'in': 'body', 'description': 'Lifespan of the token', 'name': 'request', 'schema': {'$ref': '#/definitions/TokenRequest'}}], 'x-log-result': False, 'security': [], 'description': 'Authenticates a user with `Authorization` header and returns a `token`\nobject.\n', 'operationId': 'st2auth.controllers.v1.auth:token_controller.post'}
2018-07-19 18:31:46,670 DEBUG [-] Parsed path_vars: {}
2018-07-19 18:31:46,700 DEBUG [-] API call failed: Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate
raise error
ValidationError: '{}' is not of type 'object'
Failed validating 'type' in schema:
{'properties': {'ttl': {'minimum': 1, 'type': ['integer', 'null']}},
'type': 'object'}
On instance:
'{}'
(exception_message="'{}' is not of type 'object'",exception_data={'comment': 'Traceback (most recent call last):\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__\n CustomValidator(schema, resolver=self.spec_resolver).validate(data)\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate\n raise error\nValidationError: \'{}\' is not of type \'object\'\n\nFailed validating \'type\' in schema:\n {\'properties\': {\'ttl\': {\'minimum\': 1, \'type\': [\'integer\', \'null\']}},\n \'type\': \'object\'}\n\nOn instance:\n \'{}\'\n', '_headers': None, 'conditional_response': False, 'detail': "'{}' is not of type 'object'", '_app_iter': [''], '_status': '400 Bad Request', '_headerlist': [('Content-Type', 'text/html; charset=UTF-8'), ('Content-Length', '0')]},exception_class='HTTPBadRequest')
2018-07-19 18:31:46,701 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,701 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - 400 53 33.332ms (content_length=53,request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',runtime=33.332,remote_addr='127.0.0.1',status=400,method='POST',path='/tokens')
|
ValidationError
|
def setup_app(config={}):
LOG.info("Creating st2stream: %s as OpenAPI app.", VERSION_STRING)
is_gunicorn = config.get("is_gunicorn", False)
if is_gunicorn:
# Note: We need to perform monkey patching in the worker. If we do it in
# the master process (gunicorn_config.py), it breaks tons of things
# including shutdown
monkey_patch()
st2stream_config.register_opts()
# This should be called in gunicorn case because we only want
# workers to connect to db, rabbbitmq etc. In standalone HTTP
# server case, this setup would have already occurred.
common_setup(
service="stream",
config=st2stream_config,
setup_db=True,
register_mq_exchanges=True,
register_signal_handlers=True,
register_internal_trigger_types=False,
run_migrations=False,
config_args=config.get("config_args", None),
)
router = Router(
debug=cfg.CONF.stream.debug, auth=cfg.CONF.auth.enable, is_gunicorn=is_gunicorn
)
spec = spec_loader.load_spec("st2common", "openapi.yaml.j2")
transforms = {"^/stream/v1/": ["/", "/v1/"]}
router.add_spec(spec, transforms=transforms)
app = router.as_wsgi
# Order is important. Check middleware for detailed explanation.
app = StreamingMiddleware(app)
app = ErrorHandlingMiddleware(app)
app = CorsMiddleware(app)
app = LoggingMiddleware(app, router)
app = RequestIDMiddleware(app)
return app
|
def setup_app(config={}):
LOG.info("Creating st2stream: %s as OpenAPI app.", VERSION_STRING)
is_gunicorn = config.get("is_gunicorn", False)
if is_gunicorn:
# Note: We need to perform monkey patching in the worker. If we do it in
# the master process (gunicorn_config.py), it breaks tons of things
# including shutdown
monkey_patch()
st2stream_config.register_opts()
# This should be called in gunicorn case because we only want
# workers to connect to db, rabbbitmq etc. In standalone HTTP
# server case, this setup would have already occurred.
common_setup(
service="stream",
config=st2stream_config,
setup_db=True,
register_mq_exchanges=True,
register_signal_handlers=True,
register_internal_trigger_types=False,
run_migrations=False,
config_args=config.get("config_args", None),
)
router = Router(debug=cfg.CONF.stream.debug, auth=cfg.CONF.auth.enable)
spec = spec_loader.load_spec("st2common", "openapi.yaml.j2")
transforms = {"^/stream/v1/": ["/", "/v1/"]}
router.add_spec(spec, transforms=transforms)
app = router.as_wsgi
# Order is important. Check middleware for detailed explanation.
app = StreamingMiddleware(app)
app = ErrorHandlingMiddleware(app)
app = CorsMiddleware(app)
app = LoggingMiddleware(app, router)
app = RequestIDMiddleware(app)
return app
|
https://github.com/StackStorm/st2/issues/4254
|
2018-07-19 18:31:46,668 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - POST /tokens with query={} (remote_addr='127.0.0.1',method='POST',request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',query={},path='/tokens')
2018-07-19 18:31:46,669 DEBUG [-] Recieved call with WebOb: POST /tokens HTTP/1.0
Accept: */*
Authorization: Basic c3QyYWRtaW46Q2hAbmdlTWU=
Content-Type: text/plain
Host: 127.0.0.1:9100
User-Agent: curl/7.47.0
X-Request-Id: f590ed84-3e54-4634-89f2-120eb3f956e5
2018-07-19 18:31:46,669 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,670 DEBUG [-] Parsed endpoint: {'x-parameters': [{'in': 'environ', 'type': 'string', 'description': 'source of the request', 'name': 'remote_addr'}, {'in': 'environ', 'type': 'string', 'description': 'set externally to indicate user identity in case of proxy auth', 'name': 'remote_user'}], 'responses': {'201': {'headers': {'x-api-url': {'type': 'string'}}, 'schema': {'$ref': '#/definitions/Token'}, 'examples': {'application/json': {'user': 'st2admin', 'token': '5e86421776f946e98faea36c29e5a7c7', 'expiry': '2016-05-28T12:39:28.650231Z', 'id': '574840001878c10d0b6e8fbf', 'metadata': {}}}, 'description': 'New token has been created'}, 'default': {'description': 'Unexpected error', 'schema': {'$ref': '#/definitions/Error'}}, '401': {'schema': {'$ref': '#/definitions/Error'}, 'examples': {'application/json': {'faultstring': 'Invalid or missing credentials'}}, 'description': 'Invalid or missing credentials has been provided'}}, 'parameters': [{'in': 'header', 'type': 'string', 'description': 'base64 encoded string containing login and password', 'name': 'authorization'}, {'in': 'header', 'type': 'string', 'description': 'set externally to indicate real source of the request', 'name': 'x-forwarded-for'}, {'in': 'body', 'description': 'Lifespan of the token', 'name': 'request', 'schema': {'$ref': '#/definitions/TokenRequest'}}], 'x-log-result': False, 'security': [], 'description': 'Authenticates a user with `Authorization` header and returns a `token`\nobject.\n', 'operationId': 'st2auth.controllers.v1.auth:token_controller.post'}
2018-07-19 18:31:46,670 DEBUG [-] Parsed path_vars: {}
2018-07-19 18:31:46,700 DEBUG [-] API call failed: Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__
CustomValidator(schema, resolver=self.spec_resolver).validate(data)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate
raise error
ValidationError: '{}' is not of type 'object'
Failed validating 'type' in schema:
{'properties': {'ttl': {'minimum': 1, 'type': ['integer', 'null']}},
'type': 'object'}
On instance:
'{}'
(exception_message="'{}' is not of type 'object'",exception_data={'comment': 'Traceback (most recent call last):\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 372, in __call__\n CustomValidator(schema, resolver=self.spec_resolver).validate(data)\n File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/jsonschema/validators.py", line 130, in validate\n raise error\nValidationError: \'{}\' is not of type \'object\'\n\nFailed validating \'type\' in schema:\n {\'properties\': {\'ttl\': {\'minimum\': 1, \'type\': [\'integer\', \'null\']}},\n \'type\': \'object\'}\n\nOn instance:\n \'{}\'\n', '_headers': None, 'conditional_response': False, 'detail': "'{}' is not of type 'object'", '_app_iter': [''], '_status': '400 Bad Request', '_headerlist': [('Content-Type', 'text/html; charset=UTF-8'), ('Content-Length', '0')]},exception_class='HTTPBadRequest')
2018-07-19 18:31:46,701 DEBUG [-] Match path: /tokens
2018-07-19 18:31:46,701 INFO [-] f590ed84-3e54-4634-89f2-120eb3f956e5 - 400 53 33.332ms (content_length=53,request_id='f590ed84-3e54-4634-89f2-120eb3f956e5',runtime=33.332,remote_addr='127.0.0.1',status=400,method='POST',path='/tokens')
|
ValidationError
|
def validate_config_against_schema(
config_schema, config_object, config_path, pack_name=None
):
"""
Validate provided config dictionary against the provided config schema
dictionary.
"""
# NOTE: Lazy improt to avoid performance overhead of importing this module when it's not used
import jsonschema
pack_name = pack_name or "unknown"
schema = util_schema.get_schema_for_resource_parameters(
parameters_schema=config_schema, allow_additional_properties=True
)
instance = config_object
try:
cleaned = util_schema.validate(
instance=instance,
schema=schema,
cls=util_schema.CustomValidator,
use_default=True,
allow_default_none=True,
)
except jsonschema.ValidationError as e:
attribute = getattr(e, "path", [])
if isinstance(attribute, (tuple, list, collections.Iterable)):
attribute = [str(item) for item in attribute]
attribute = ".".join(attribute)
else:
attribute = str(attribute)
msg = 'Failed validating attribute "%s" in config for pack "%s" (%s): %s' % (
attribute,
pack_name,
config_path,
str(e),
)
raise jsonschema.ValidationError(msg)
return cleaned
|
def validate_config_against_schema(
config_schema, config_object, config_path, pack_name=None
):
"""
Validate provided config dictionary against the provided config schema
dictionary.
"""
# NOTE: Lazy improt to avoid performance overhead of importing this module when it's not used
import jsonschema
pack_name = pack_name or "unknown"
schema = util_schema.get_schema_for_resource_parameters(
parameters_schema=config_schema, allow_additional_properties=True
)
instance = config_object
try:
cleaned = util_schema.validate(
instance=instance,
schema=schema,
cls=util_schema.CustomValidator,
use_default=True,
allow_default_none=True,
)
except jsonschema.ValidationError as e:
attribute = getattr(e, "path", [])
attribute = ".".join(attribute)
msg = 'Failed validating attribute "%s" in config for pack "%s" (%s): %s' % (
attribute,
pack_name,
config_path,
str(e),
)
raise jsonschema.ValidationError(msg)
return cleaned
|
https://github.com/StackStorm/st2/issues/4166
|
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 472, in __call__
resp = func(**kw)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2api/controllers/v1/pack_configs.py", line 107, in put
config_api.validate(validate_against_schema=True)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/pack.py", line 274, in validate
cleaned_values = self._validate_config_values_against_schema()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/pack.py", line 296, in _validate_config_values_against_schema
pack_name=self.pack)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/util/pack.py", line 111, in validate_config_against_schema
attribute = '.'.join(attribute)
TypeError: sequence item 1: expected string or Unicode, int found
2018-06-06 14:24:35,673 139995931735056 ERROR error_handling [-] API call failed: sequence item 1: expected string or Unicode, int found
Traceback (most recent call last):
|
TypeError
|
def post(self, api_key_api, requester_user):
"""
Create a new entry.
"""
permission_type = PermissionType.API_KEY_CREATE
rbac_utils.assert_user_has_resource_api_permission(
user_db=requester_user,
resource_api=api_key_api,
permission_type=permission_type,
)
api_key_db = None
api_key = None
try:
if not getattr(api_key_api, "user", None):
if requester_user:
api_key_api.user = requester_user.name
else:
api_key_api.user = cfg.CONF.system_user.user
try:
User.get_by_name(api_key_api.user)
except StackStormDBObjectNotFoundError:
user_db = UserDB(name=api_key_api.user)
User.add_or_update(user_db)
extra = {"username": api_key_api.user, "user": user_db}
LOG.audit('Registered new user "%s".' % (api_key_api.user), extra=extra)
# If key_hash is provided use that and do not create a new key. The assumption
# is user already has the original api-key
if not getattr(api_key_api, "key_hash", None):
api_key, api_key_hash = auth_util.generate_api_key_and_hash()
# store key_hash in DB
api_key_api.key_hash = api_key_hash
api_key_db = ApiKey.add_or_update(ApiKeyAPI.to_model(api_key_api))
except (ValidationError, ValueError) as e:
LOG.exception("Validation failed for api_key data=%s.", api_key_api)
abort(http_client.BAD_REQUEST, str(e))
extra = {"api_key_db": api_key_db}
LOG.audit("ApiKey created. ApiKey.id=%s" % (api_key_db.id), extra=extra)
api_key_create_response_api = ApiKeyCreateResponseAPI.from_model(api_key_db)
# Return real api_key back to user. A one-way hash of the api_key is stored in the DB
# only the real value only returned at create time. Also, no masking of key here since
# the user needs to see this value atleast once.
api_key_create_response_api.key = api_key
return Response(json=api_key_create_response_api, status=http_client.CREATED)
|
def post(self, api_key_api, requester_user):
"""
Create a new entry.
"""
permission_type = PermissionType.API_KEY_CREATE
rbac_utils.assert_user_has_resource_api_permission(
user_db=requester_user,
resource_api=api_key_api,
permission_type=permission_type,
)
api_key_db = None
api_key = None
try:
if not getattr(api_key_api, "user", None):
api_key_api.user = requester_user.name or cfg.CONF.system_user.user
try:
User.get_by_name(api_key_api.user)
except StackStormDBObjectNotFoundError:
user_db = UserDB(name=api_key_api.user)
User.add_or_update(user_db)
extra = {"username": api_key_api.user, "user": user_db}
LOG.audit('Registered new user "%s".' % (api_key_api.user), extra=extra)
# If key_hash is provided use that and do not create a new key. The assumption
# is user already has the original api-key
if not getattr(api_key_api, "key_hash", None):
api_key, api_key_hash = auth_util.generate_api_key_and_hash()
# store key_hash in DB
api_key_api.key_hash = api_key_hash
api_key_db = ApiKey.add_or_update(ApiKeyAPI.to_model(api_key_api))
except (ValidationError, ValueError) as e:
LOG.exception("Validation failed for api_key data=%s.", api_key_api)
abort(http_client.BAD_REQUEST, str(e))
extra = {"api_key_db": api_key_db}
LOG.audit("ApiKey created. ApiKey.id=%s" % (api_key_db.id), extra=extra)
api_key_create_response_api = ApiKeyCreateResponseAPI.from_model(api_key_db)
# Return real api_key back to user. A one-way hash of the api_key is stored in the DB
# only the real value only returned at create time. Also, no masking of key here since
# the user needs to see this value atleast once.
api_key_create_response_api.key = api_key
return Response(json=api_key_create_response_api, status=http_client.CREATED)
|
https://github.com/StackStorm/st2/issues/3578
|
2017-07-18 05:36:03,099 140399285519696 ERROR router [-] Failed to call controller function "post" for operation "st2api.controllers.v1.auth:api_key_controller.post": 'NoneType' object has no attribute 'name'
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 414, in __call__
resp = func(**kw)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2api/controllers/v1/auth.py", line 140, in post
api_key_api.user = requester_user.name or cfg.CONF.system_user.user
AttributeError: 'NoneType' object has no attribute 'name'
2017-07-18 05:36:03,103 140399285519696 ERROR error_handling [-] API call failed: 'NoneType' object has no attribute 'name'
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/middleware/error_handling.py", line 46, in __call__
return self.app(environ, start_response)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 445, in as_wsgi
resp = self(req)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/router.py", line 418, in __call__
raise e
AttributeError: 'NoneType' object has no attribute 'name' (_exception_data={},_exception_class='AttributeError',_exception_message="'NoneType' object has no attribute 'name'")
2017-07-18 05:36:03,105 140399285519696 INFO logging [-] 07293032-81fe-41ea-8d4d-cdd4e0e222c6 - 500 46 7.573ms (content_length=46,request_id='07293032-81fe-41ea-8d4d-cdd4e0e222c6',runtime=7.573,remote_addr='127.0.0.1',status=500,method='POST',path='/v1/apikeys')
|
AttributeError
|
def _eval_repo_url(repo_url):
"""Allow passing short GitHub style URLs"""
if not repo_url:
raise Exception("No valid repo_url provided or could be inferred.")
if repo_url.startswith("file://"):
return repo_url
else:
if len(repo_url.split("/")) == 2 and "git@" not in repo_url:
url = "https://github.com/{}".format(repo_url)
else:
url = repo_url
return url
|
def _eval_repo_url(repo_url):
"""Allow passing short GitHub style URLs"""
if not repo_url:
raise Exception("No valid repo_url provided or could be inferred.")
if repo_url.startswith("file://"):
return repo_url
else:
if len(repo_url.split("/")) == 2 and "git@" not in repo_url:
url = "https://github.com/{}".format(repo_url)
else:
url = repo_url
return url if url.endswith(".git") else "{}.git".format(url)
|
https://github.com/StackStorm/st2/issues/3534
|
ubuntu@st2-local:~$ st2 pack install ssh://<user@host>/AutomationStackStorm
[ failed ] download pack
id: 595525a6a5fb1d0755645a1e
action.ref: packs.install
parameters:
packs:
- ssh://<user@host>/AutomationStackStorm
status: failed
error: Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/runners/python_action_wrapper.py", line 259, in <module>
obj.run()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/runners/python_action_wrapper.py", line 155, in run
output = action.run(**self._parameters)
File "/opt/stackstorm/packs/packs/actions/pack_mgmt/download.py", line 75, in run
verifyssl=verifyssl, ref=pack_version)
File "/opt/stackstorm/packs/packs/actions/pack_mgmt/download.py", line 102, in _clone_repo
repo = Repo.clone_from(repo_url, temp_dir)
File "/opt/stackstorm/st2/lib/python2.7/site-packages/git/repo/base.py", line 942, in clone_from
return cls._clone(git, url, to_path, GitCmdObjectDB, progress, **kwargs)
File "/opt/stackstorm/st2/lib/python2.7/site-packages/git/repo/base.py", line 897, in _clone
finalize_process(proc, stderr=stderr)
File "/opt/stackstorm/st2/lib/python2.7/site-packages/git/util.py", line 341, in finalize_process
proc.wait(**kwargs)
File "/opt/stackstorm/st2/lib/python2.7/site-packages/git/cmd.py", line 292, in wait
raise GitCommandError(self.args, status, errstr)
git.exc.GitCommandError: Cmd('git') failed due to: exit code(128)
cmdline: git clone -v ssh://<user@host>/AutomationStackStorm.git /root/<hash>
stderr: 'Cloning into '/root/<hash>'...
Your Git command did not succeed.
Details:
TF401019: The Git repository with name or identifier AutomationStackStorm.git does not exist or you do not have permissions for the operation you are attempting.
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists.
'
traceback: None
failed_on: download pack
start_timestamp: 2017-06-29T16:07:02.112809Z
end_timestamp: 2017-06-29T16:07:08.565932Z
+--------------------------+---------------------+---------------+----------------+-----------------+
| id | status | task | action | start_timestamp |
+--------------------------+---------------------+---------------+----------------+-----------------+
| 595525a6a5fb1d0559d30ab4 | failed (6s elapsed) | download pack | packs.download | Thu, 29 Jun |
| | | | | 2017 16:07:02 |
| | | | | UTC |
+--------------------------+---------------------+---------------+----------------+-----------------+
|
git.exc.GitCommandError
|
def post(self, pack_search_request):
if hasattr(pack_search_request, "query"):
packs = packs_service.search_pack_index(
pack_search_request.query, case_sensitive=False
)
return [PackAPI(**pack) for pack in packs]
else:
pack = packs_service.get_pack_from_index(pack_search_request.pack)
return PackAPI(**pack) if pack else []
|
def post(self, pack_search_request):
if hasattr(pack_search_request, "query"):
packs = packs_service.search_pack_index(
pack_search_request.query, case_sensitive=False
)
return [PackAPI(**pack) for pack in packs]
else:
pack = packs_service.get_pack_from_index(pack_search_request.pack)
return PackAPI(**pack) if pack else None
|
https://github.com/StackStorm/st2/issues/3377
|
(virtualenv)vagrant@st2dev /m/s/s/st2 ❯❯❯ st2 --debug pack show core ⏎ master ✭ ◼
# -------- begin 139705409745296 request ----------
curl -X POST -H 'Connection: keep-alive' -H 'Accept-Encoding: gzip, deflate' -H 'Accept: */*' -H 'User-Agent: python-requests/2.13.0' -H 'content-type: application/json' -H 'Content-Length: 16' --data-binary '{"pack": "core"}' http://127.0.0.1:9101/v1/packs/index/search
# -------- begin 139705409745296 response ----------
# -------- end 139705409745296 response ------------
ERROR: Expecting value: line 1 column 1 (char 0)
CLI settings:
----------------
Config file path: /home/vagrant/.st2/config
Client settings:
----------------
ST2_BASE_URL: http://127.0.0.1
ST2_AUTH_URL: http://127.0.0.1:9100
ST2_API_URL: http://127.0.0.1:9101/v1
ST2_AUTH_TOKEN: None
Proxy settings:
---------------
HTTP_PROXY:
HTTPS_PROXY:
Traceback (most recent call last):
File "/mnt/src/storm/st2/st2client/st2client/shell.py", line 289, in run
args.func(args)
File "/mnt/src/storm/st2/st2client/st2client/commands/pack.py", line 85, in run_and_print
raise OperationFailureException(message)
OperationFailureException: Expecting value: line 1 column 1 (char 0)
(virtualenv)vagrant@st2dev /m/s/s/st2 ❯❯❯
|
OperationFailureException
|
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables * 1.0 # use atleast float dtype
else:
if any([np.asarray(x).shape != (2, 2) for x in tables]):
m = "If `tables` is a list, all of its elements should be 2x2"
raise ValueError(m)
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = {}
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
|
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
if any([np.asarray(x).shape != (2, 2) for x in tables]):
m = "If `tables` is a list, all of its elements should be 2x2"
raise ValueError(m)
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = {}
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
|
https://github.com/statsmodels/statsmodels/issues/6670
|
st.summary()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-49-bf722b7cb8e1> in <module>
----> 1 st.summary()
...\statsmodels\stats\contingency_tables.py in summary(self, alpha, float_format, method)
1270 headers = ["Statistic", "P-value", ""]
1271 stubs = ["Test of OR=1", "Test constant OR"]
-> 1272 rslt1 = self.test_null_odds()
1273 rslt2 = self.test_equal_odds()
1274 data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
...\statsmodels\stats\contingency_tables.py in test_null_odds(self, correction)
1045 statistic = statistic**2
1046 denom = self._apb * self._apc * self._bpd * self._cpd
-> 1047 denom /= (self._n**2 * (self._n - 1))
1048 denom = np.sum(denom)
1049 statistic /= denom
TypeError: No loop matching the specified signature and casting
was found for ufunc true_divide
|
TypeError
|
def __init__(
self,
endog,
exog,
offset=None,
exposure=None,
missing="none",
check_rank=True,
**kwargs,
):
super().__init__(
endog,
exog,
check_rank,
missing=missing,
offset=offset,
exposure=exposure,
**kwargs,
)
if exposure is not None:
self.exposure = np.asarray(self.exposure)
self.exposure = np.log(self.exposure)
if offset is not None:
self.offset = np.asarray(self.offset)
self._check_inputs(self.offset, self.exposure, self.endog)
if offset is None:
delattr(self, "offset")
if exposure is None:
delattr(self, "exposure")
# promote dtype to float64 if needed
dt = np.promote_types(self.endog.dtype, np.float64)
self.endog = np.asarray(self.endog, dt)
dt = np.promote_types(self.exog.dtype, np.float64)
self.exog = np.asarray(self.exog, dt)
|
def __init__(
self,
endog,
exog,
offset=None,
exposure=None,
missing="none",
check_rank=True,
**kwargs,
):
super().__init__(
endog,
exog,
check_rank,
missing=missing,
offset=offset,
exposure=exposure,
**kwargs,
)
if exposure is not None:
self.exposure = np.log(self.exposure)
self._check_inputs(self.offset, self.exposure, self.endog)
if offset is None:
delattr(self, "offset")
if exposure is None:
delattr(self, "exposure")
# promote dtype to float64 if needed
dt = np.promote_types(self.endog.dtype, np.float64)
self.endog = np.asarray(self.endog, dt)
dt = np.promote_types(self.exog.dtype, np.float64)
self.exog = np.asarray(self.exog, dt)
|
https://github.com/statsmodels/statsmodels/issues/7015
|
ValueError Traceback (most recent call last)
<ipython-input-8-623bd13d22ad> in <module>
----> 1 Pmodel2 = sm.Poisson(endog=y_mat, exog=X_mat, exposure=data['PERSONS']).fit() # this gives an error
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/discrete/discrete_model.py in fit(self, start_params, method, maxiter, full_output, disp, callback, **kwargs)
1104 disp=disp,
1105 callback=callback,
-> 1106 **kwargs)
1107
1108 if 'cov_type' in kwargs:
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/discrete/discrete_model.py in fit(self, start_params, method, maxiter, full_output, disp, callback, **kwargs)
231 disp=disp,
232 callback=callback,
--> 233 **kwargs)
234
235 return mlefit # It is up to subclasses to wrap results
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/base/model.py in fit(self, start_params, method, maxiter, full_output, disp, fargs, callback, retall, skip_hessian, **kwargs)
525 callback=callback,
526 retall=retall,
--> 527 full_output=full_output)
528
529 # NOTE: this is for fit_regularized and should be generalized
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/base/optimizer.py in _fit(self, objective, gradient, start_params, fargs, kwargs, hessian, method, maxiter, full_output, disp, callback, retall)
216 disp=disp, maxiter=maxiter, callback=callback,
217 retall=retall, full_output=full_output,
--> 218 hess=hessian)
219
220 optim_settings = {'optimizer': method, 'start_params': start_params,
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/base/optimizer.py in _fit_newton(f, score, start_params, fargs, kwargs, disp, maxiter, callback, retall, full_output, hess, ridge_factor)
314 while (iterations < maxiter and np.any(np.abs(newparams -
315 oldparams) > tol)):
--> 316 H = np.asarray(hess(newparams))
317 # regularize Hessian, not clear what ridge factor should be
318 # keyword option with absolute default 1e-10, see #1847
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/base/model.py in hess(params, *args)
507
508 def hess(params, *args):
--> 509 return self.hessian(params, *args) / nobs
510 else:
511 def score(params, *args):
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/statsmodels/discrete/discrete_model.py in hessian(self, params)
1317 X = self.exog
1318 L = np.exp(np.dot(X,params) + exposure + offset)
-> 1319 return -np.dot(L*X.T, X)
1320
1321 def hessian_factor(self, params):
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/pandas/core/ops/common.py in new_method(self, other)
63 other = item_from_zerodim(other)
64
---> 65 return method(self, other)
66
67 return new_method
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/pandas/core/ops/__init__.py in wrapper(left, right)
343 result = arithmetic_op(lvalues, rvalues, op)
344
--> 345 return left._construct_result(result, name=res_name)
346
347 wrapper.__name__ = op_name
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/pandas/core/series.py in _construct_result(self, result, name)
2755 # We do not pass dtype to ensure that the Series constructor
2756 # does inference in the case where `result` has object-dtype.
-> 2757 out = self._constructor(result, index=self.index)
2758 out = out.__finalize__(self)
2759
~/miniconda3/envs/stdsci/lib/python3.6/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 if len(index) != len(data):
313 raise ValueError(
--> 314 f"Length of passed values is {len(data)}, "
315 f"index implies {len(index)}."
316 )
ValueError: Length of passed values is 2, index implies 250.
|
ValueError
|
def _wrap_data(self, data, start_idx, end_idx, names=None):
# TODO: check if this is reasonable for statespace
# squeezing data: data may be:
# - m x n: m dates, n simulations -> squeeze does nothing
# - m x 1: m dates, 1 simulation -> squeeze removes last dimension
# - 1 x n: don't squeeze, already fine
# - 1 x 1: squeeze only second axis
if data.ndim > 1 and data.shape[1] == 1:
data = np.squeeze(data, axis=1)
if self.use_pandas:
_, _, _, index = self._get_prediction_index(start_idx, end_idx)
if data.ndim < 2:
data = pd.Series(data, index=index, name=names)
else:
data = pd.DataFrame(data, index=index, columns=names)
return data
|
def _wrap_data(self, data, start_idx, end_idx, names=None):
# TODO: check if this is reasonable for statespace
# squeezing data: data may be:
# - m x n: m dates, n simulations -> squeeze does nothing
# - m x 1: m dates, 1 simulation -> squeeze removes last dimension
# - 1 x n: don't squeeze, already fine
# - 1 x 1: squeeze only second axis
if data.ndim > 1 and data.shape[1] == 1:
data = np.squeeze(data, axis=1)
data = np.squeeze(data)
if self.use_pandas:
_, _, _, index = self._get_prediction_index(start_idx, end_idx)
if data.ndim < 2:
data = pd.Series(data, index=index, name=names)
else:
data = pd.DataFrame(data, index=index, columns=names)
return data
|
https://github.com/statsmodels/statsmodels/issues/7175
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
austourists_data = [
30.05251300, 19.14849600, 25.31769200, 27.59143700,
32.07645600, 23.48796100, 28.47594000, 35.12375300,
36.83848500, 25.00701700, 30.72223000, 28.69375900,
36.64098600, 23.82460900, 29.31168300, 31.77030900,
35.17787700, 19.77524400, 29.60175000, 34.53884200,
41.27359900, 26.65586200, 28.27985900, 35.19115300,
42.20566386, 24.64917133, 32.66733514, 37.25735401,
45.24246027, 29.35048127, 36.34420728, 41.78208136,
49.27659843, 31.27540139, 37.85062549, 38.83704413,
51.23690034, 31.83855162, 41.32342126, 42.79900337,
55.70835836, 33.40714492, 42.31663797, 45.15712257,
59.57607996, 34.83733016, 44.84168072, 46.97124960,
60.01903094, 38.37117851, 46.97586413, 50.73379646,
61.64687319, 39.29956937, 52.67120908, 54.33231689,
66.83435838, 40.87118847, 51.82853579, 57.49190993,
65.25146985, 43.06120822, 54.76075713, 59.83447494,
73.25702747, 47.69662373, 61.09776802, 66.05576122,
]
austourists = pd.Series(austourists_data)
model = ETSModel(austourists, error="mul", trend="mul", seasonal="mul",seasonal_periods=12)
fit = model.fit()
pred = fit.get_prediction(start=len(austourists), end=len(austourists))
df = pred.summary_frame(alpha=0.05)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
30
31
---> 32 pred = fit.get_prediction(start=len(austourists), end=len(austourists))
33 df = pred.summary_frame(alpha=0.05)
34 df
5665 # GH#29069
5666 if not is_hashable(name):
-> 5667 raise TypeError(f"{cls.__name__}.name must be a hashable type")
5668
5669 return name
TypeError: Series.name must be a hashable type
|
TypeError
|
def _autolag(
mod,
endog,
exog,
startlag,
maxlag,
method,
modargs=(),
fitargs=(),
regresults=False,
):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array_like
nobs array containing endogenous variable
exog : array_like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {"aic", "bic", "t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
# TODO: can tcol be replaced by maxlag + 2?
# TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in results.items())
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in results.items())
elif method == "t-stat":
# stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
# Default values to ensure that always set
bestlag = startlag + maxlag
icbest = 0.0
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
bestlag = lag
if np.abs(icbest) >= stop:
# Break for first lag with a significant t-stat
break
else:
raise ValueError(f"Information Criterion {method} not understood.")
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
|
def _autolag(
mod,
endog,
exog,
startlag,
maxlag,
method,
modargs=(),
fitargs=(),
regresults=False,
):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array_like
nobs array containing endogenous variable
exog : array_like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {"aic", "bic", "t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
# TODO: can tcol be replaced by maxlag + 2?
# TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in results.items())
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in results.items())
elif method == "t-stat":
# stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
|
https://github.com/statsmodels/statsmodels/issues/7014
|
coint(
arr[:sdp, i, ch],
arr[:sdp, j, ch],
trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=False)
Traceback (most recent call last):
File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 961, in <module>
main()
File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 883, in main
OnSegment(dataObj, filename, textData, recurseLevel=0)
File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 352, in OnSegment
coint_res = coint(
File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 1392, in coint
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 267, in adfuller
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 115, in _autolag
return icbest, bestlag
nboundLocalError: local variable 'bestlag' referenced before assignment
|
nboundLocalError
|
def pacf(x, nlags=None, method="ywadjusted", alpha=None):
"""
Partial autocorrelation estimate.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int
The largest lag for which the pacf is returned. The default
is currently 40, but will change to
min(int(10 * np.log10(nobs)), nobs // 2 - 1) in the future
method : str, default "ywunbiased"
Specifies which method for the calculations to use.
- "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in
denominator for acovf. Default.
- "ywm" or "ywmle" : Yule-Walker without adjustment.
- "ols" : regression of time series on lags of it and on constant.
- "ols-inefficient" : regression of time series on lags using a single
common sample to estimate all pacf coefficients.
- "ols-adjusted" : regression of time series on lags with a bias
adjustment.
- "ld" or "ldadjusted" : Levinson-Durbin recursion with bias
correction.
- "ldb" or "ldbiased" : Levinson-Durbin recursion without bias
correction.
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
pacf : ndarray
Partial autocorrelations, nlags elements, including lag zero.
confint : ndarray, optional
Confidence intervals for the PACF. Returned if confint is not None.
See Also
--------
statsmodels.tsa.stattools.acf
Estimate the autocorrelation function.
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg"s method.
Notes
-----
Based on simulation evidence across a range of low-order ARMA models,
the best methods based on root MSE are Yule-Walker (MLW), Levinson-Durbin
(MLE) and Burg, respectively. The estimators with the lowest bias included
included these three in addition to OLS and OLS-adjusted.
Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed
consistently worse than the other options.
"""
nlags = int_like(nlags, "nlags", optional=True)
renames = {
"ydu": "yda",
"ywu": "ywa",
"ywunbiased": "ywadjusted",
"ldunbiased": "ldadjusted",
"ld_unbiased": "ld_adjusted",
"ldu": "lda",
"ols-unbiased": "ols-adjusted",
}
if method in renames:
warnings.warn(
f"{method} has been renamed {renames[method]}. After release 0.13, "
"using the old name will raise.",
FutureWarning,
)
method = renames[method]
methods = (
"ols",
"ols-inefficient",
"ols-adjusted",
"yw",
"ywa",
"ld",
"ywadjusted",
"yw_adjusted",
"ywm",
"ywmle",
"yw_mle",
"lda",
"ldadjusted",
"ld_adjusted",
"ldb",
"ldbiased",
"ld_biased",
)
x = array_like(x, "x", maxdim=2)
method = string_like(method, "method", options=methods)
alpha = float_like(alpha, "alpha", optional=True)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs // 2 - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
if nlags >= x.shape[0] // 2:
raise ValueError(
"Can only compute partial correlations for lags up to 50% of the "
f"sample size. The requested nlags {nlags} must be < "
f"{x.shape[0] // 2}."
)
if method in ("ols", "ols-inefficient", "ols-adjusted"):
efficient = "inefficient" not in method
adjusted = "adjusted" in method
ret = pacf_ols(x, nlags=nlags, efficient=efficient, adjusted=adjusted)
elif method in ("yw", "ywa", "ywadjusted", "yw_adjusted"):
ret = pacf_yw(x, nlags=nlags, method="adjusted")
elif method in ("ywm", "ywmle", "yw_mle"):
ret = pacf_yw(x, nlags=nlags, method="mle")
elif method in ("ld", "lda", "ldadjusted", "ld_adjusted"):
acv = acovf(x, adjusted=True, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
# inconsistent naming with ywmle
else: # method in ("ldb", "ldbiased", "ld_biased")
acv = acovf(x, adjusted=False, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
if alpha is not None:
varacf = 1.0 / len(x) # for all lags >=1
interval = stats.norm.ppf(1.0 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
|
def pacf(x, nlags=None, method="ywadjusted", alpha=None):
"""
Partial autocorrelation estimate.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int
The largest lag for which the pacf is returned. The default
is currently 40, but will change to
min(int(10 * np.log10(nobs)), nobs // 2 - 1) in the future
method : str, default "ywunbiased"
Specifies which method for the calculations to use.
- "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in
denominator for acovf. Default.
- "ywm" or "ywmle" : Yule-Walker without adjustment.
- "ols" : regression of time series on lags of it and on constant.
- "ols-inefficient" : regression of time series on lags using a single
common sample to estimate all pacf coefficients.
- "ols-adjusted" : regression of time series on lags with a bias
adjustment.
- "ld" or "ldadjusted" : Levinson-Durbin recursion with bias
correction.
- "ldb" or "ldbiased" : Levinson-Durbin recursion without bias
correction.
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
pacf : ndarray
Partial autocorrelations, nlags elements, including lag zero.
confint : ndarray, optional
Confidence intervals for the PACF. Returned if confint is not None.
See Also
--------
statsmodels.tsa.stattools.acf
Estimate the autocorrelation function.
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg"s method.
Notes
-----
Based on simulation evidence across a range of low-order ARMA models,
the best methods based on root MSE are Yule-Walker (MLW), Levinson-Durbin
(MLE) and Burg, respectively. The estimators with the lowest bias included
included these three in addition to OLS and OLS-adjusted.
Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed
consistently worse than the other options.
"""
nlags = int_like(nlags, "nlags", optional=True)
renames = {
"ydu": "yda",
"ywu": "ywa",
"ywunbiased": "ywadjusted",
"ldunbiased": "ldadjusted",
"ld_unbiased": "ld_adjusted",
"ldu": "lda",
"ols-unbiased": "ols-adjusted",
}
if method in renames:
warnings.warn(
f"{method} has been renamed {renames[method]}. After release 0.13, "
"using the old name will raise.",
FutureWarning,
)
method = renames[method]
methods = (
"ols",
"ols-inefficient",
"ols-adjusted",
"yw",
"ywa",
"ld",
"ywadjusted",
"yw_adjusted",
"ywm",
"ywmle",
"yw_mle",
"lda",
"ldadjusted",
"ld_adjusted",
"ldb",
"ldbiased",
"ld_biased",
)
x = array_like(x, "x", maxdim=2)
method = string_like(method, "method", options=methods)
alpha = float_like(alpha, "alpha", optional=True)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs // 2 - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
if nlags >= x.shape[0] // 2:
raise ValueError(
"Can only compute partial correlations for lags up to 50% of the "
f"sample size. The requested nlags {nlags} must be < "
f"{x.shape[0] // 2}."
)
if method in ("ols", "ols-inefficient", "ols-adjusted"):
efficient = "inefficient" not in method
adjusted = "adjusted" in method
ret = pacf_ols(x, nlags=nlags, efficient=efficient, adjusted=adjusted)
elif method in ("yw", "ywa", "ywadjusted", "yw_adjusted"):
ret = pacf_yw(x, nlags=nlags, method="adjusted")
elif method in ("ywm", "ywmle", "yw_mle"):
ret = pacf_yw(x, nlags=nlags, method="mle")
elif method in ("ld", "lda", "ldadjusted", "ld_adjusted"):
acv = acovf(x, adjusted=True, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
# inconsistent naming with ywmle
else: # method in ("ldb", "ldbiased", "ld_biased")
acv = acovf(x, adjusted=False, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
if alpha is not None:
varacf = 1.0 / len(x) # for all lags >=1
interval = stats.norm.ppf(1.0 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
|
https://github.com/statsmodels/statsmodels/issues/7014
|
coint(
arr[:sdp, i, ch],
arr[:sdp, j, ch],
trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=False)
Traceback (most recent call last):
File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 961, in <module>
main()
File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 883, in main
OnSegment(dataObj, filename, textData, recurseLevel=0)
File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 352, in OnSegment
coint_res = coint(
File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 1392, in coint
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 267, in adfuller
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 115, in _autolag
return icbest, bestlag
nboundLocalError: local variable 'bestlag' referenced before assignment
|
nboundLocalError
|
def plot_diagnostics(
self, variable=0, lags=10, fig=None, figsize=None, truncate_endog_names=24
):
"""
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : int, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : int, optional
Number of lags to include in the correlogram. Default is 10.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Returns
-------
Figure
Figure instance with diagnostic plots
See Also
--------
statsmodels.graphics.gofplots.qqplot
statsmodels.graphics.tsaplots.plot_acf
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
# Eliminate residuals associated with burned or diffuse likelihoods
d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
# If given a variable name, find the index
if isinstance(variable, str):
variable = self.model.endog_names.index(variable)
# Get residuals
if hasattr(self.data, "dates") and self.data.dates is not None:
ix = self.data.dates[d:]
else:
ix = np.arange(self.nobs - d)
resid = pd.Series(
self.filter_results.standardized_forecasts_error[variable, d:], index=ix
)
if resid.shape[0] < max(d, lags):
raise ValueError(
"Length of endogenous variable must be larger the the number "
"of lags used in the model and the number of observations "
"burned in the log-likelihood calculation."
)
# Top-left: residuals vs time
ax = fig.add_subplot(221)
resid.dropna().plot(ax=ax)
ax.hlines(0, ix[0], ix[-1], alpha=0.5)
ax.set_xlim(ix[0], ix[-1])
name = self.model.endog_names[variable]
if len(name) > truncate_endog_names:
name = name[: truncate_endog_names - 3] + "..."
ax.set_title(f'Standardized residual for "{name}"')
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
resid_nonmissing = resid.dropna()
ax = fig.add_subplot(222)
# gh5792: Remove except after support for matplotlib>2.1 required
try:
ax.hist(resid_nonmissing, density=True, label="Hist")
except AttributeError:
ax.hist(resid_nonmissing, normed=True, label="Hist")
from scipy.stats import gaussian_kde, norm
kde = gaussian_kde(resid_nonmissing)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label="KDE")
ax.plot(x, norm.pdf(x), label="N(0,1)")
ax.set_xlim(xlim)
ax.legend()
ax.set_title("Histogram plus estimated density")
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(resid_nonmissing, line="s", ax=ax)
ax.set_title("Normal Q-Q")
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title("Correlogram")
ax.set_ylim(-1, 1)
return fig
|
def plot_diagnostics(
self, variable=0, lags=10, fig=None, figsize=None, truncate_endog_names=24
):
"""
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : int, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : int, optional
Number of lags to include in the correlogram. Default is 10.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Returns
-------
Figure
Figure instance with diagnostic plots
See Also
--------
statsmodels.graphics.gofplots.qqplot
statsmodels.graphics.tsaplots.plot_acf
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
# Eliminate residuals associated with burned or diffuse likelihoods
d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
# If given a variable name, find the index
if isinstance(variable, str):
variable = self.model.endog_names.index(variable)
# Get residuals
if hasattr(self.data, "dates") and self.data.dates is not None:
ix = self.data.dates[d:]
else:
ix = np.arange(self.nobs - d)
resid = pd.Series(
self.filter_results.standardized_forecasts_error[variable, d:], index=ix
)
# Top-left: residuals vs time
ax = fig.add_subplot(221)
resid.dropna().plot(ax=ax)
ax.hlines(0, ix[0], ix[-1], alpha=0.5)
ax.set_xlim(ix[0], ix[-1])
name = self.model.endog_names[variable]
if len(name) > truncate_endog_names:
name = name[: truncate_endog_names - 3] + "..."
ax.set_title(f'Standardized residual for "{name}"')
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
resid_nonmissing = resid.dropna()
ax = fig.add_subplot(222)
# gh5792: Remove except after support for matplotlib>2.1 required
try:
ax.hist(resid_nonmissing, density=True, label="Hist")
except AttributeError:
ax.hist(resid_nonmissing, normed=True, label="Hist")
from scipy.stats import gaussian_kde, norm
kde = gaussian_kde(resid_nonmissing)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label="KDE")
ax.plot(x, norm.pdf(x), label="N(0,1)")
ax.set_xlim(xlim)
ax.legend()
ax.set_title("Histogram plus estimated density")
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(resid_nonmissing, line="s", ax=ax)
ax.set_title("Normal Q-Q")
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title("Correlogram")
ax.set_ylim(-1, 1)
return fig
|
https://github.com/statsmodels/statsmodels/issues/6173
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-312-be24090d9a69> in <module>
1 mod = sm.tsa.statespace.SARIMAX(np.random.normal(size=10), order=(10, 0, 0))
2 results = mod.fit()
----> 3 results.plot_diagnostics(figsize=(15, 5));
~/projects/statsmodels/statsmodels/tsa/statespace/mlemodel.py in plot_diagnostics(self, variable, lags, fig, figsize)
3286 ax = fig.add_subplot(224)
3287 from statsmodels.graphics.tsaplots import plot_acf
-> 3288 plot_acf(resid, ax=ax, lags=lags)
3289 ax.set_title('Correlogram')
3290
~/projects/statsmodels/statsmodels/graphics/tsaplots.py in plot_acf(x, ax, lags, alpha, use_vlines, unbiased, fft, title, zero, vlines_kwargs, **kwargs)
151 else:
152 acf_x, confint = acf(x, nlags=nlags, alpha=alpha, fft=fft,
--> 153 unbiased=unbiased)
154
155 _plot_corr(ax, title, acf_x, confint, lags, irregular, use_vlines,
~/projects/statsmodels/statsmodels/tsa/stattools.py in acf(x, unbiased, nlags, qstat, fft, alpha, missing)
583 varacf[0] = 0
584 varacf[1] = 1. / nobs
--> 585 varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
586 interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
587 confint = np.array(lzip(acf - interval, acf + interval))
ValueError: operands could not be broadcast together with shapes (9,) (8,) (9,)
|
ValueError
|
def acf(
x,
adjusted=False,
nlags=None,
qstat=False,
fft=None,
alpha=None,
missing="none",
):
"""
Calculate the autocorrelation function.
Parameters
----------
x : array_like
The time series data.
adjusted : bool, default False
If True, then denominators for autocovariance are n-k, otherwise n.
nlags : int, default 40
Number of lags to return autocorrelation for.
qstat : bool, default False
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, default None
If True, computes the ACF via FFT.
alpha : scalar, default None
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett"s formula.
missing : str, default "none"
A string in ["none", "raise", "conservative", "drop"] specifying how
the NaNs are to be treated. "none" performs no checks. "raise" raises
an exception if NaN values are found. "drop" removes the missing
observations and then estimates the autocovariances treating the
non-missing as contiguous. "conservative" computes the autocovariance
using nan-ops so that nans are removed when computing the mean
and cross-products that are used to estimate the autocovariance.
When using "conservative", n is set to the number of non-missing
observations.
Returns
-------
acf : ndarray
The autocorrelation function.
confint : ndarray, optional
Confidence intervals for the ACF. Returned if alpha is not None.
qstat : ndarray, optional
The Ljung-Box Q-Statistic. Returned if q_stat is True.
pvalues : ndarray, optional
The p-values associated with the Q-statistics. Returned if q_stat is
True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
For very long time series it is recommended to use fft convolution instead.
When fft is False uses a simple, direct estimator of the autocovariances
that only computes the first nlag + 1 values. This can be much faster when
the time series is long and only a small number of autocovariances are
needed.
If adjusted is true, the denominator for the autocovariance is adjusted
for the loss of data.
References
----------
.. [1] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
adjusted = bool_like(adjusted, "adjusted")
nlags = int_like(nlags, "nlags", optional=True)
qstat = bool_like(qstat, "qstat")
fft = bool_like(fft, "fft", optional=True)
alpha = float_like(alpha, "alpha", optional=True)
missing = string_like(
missing, "missing", options=("none", "raise", "conservative", "drop")
)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
if fft is None:
warnings.warn(
"fft=True will become the default after the release of the 0.12 "
"release of statsmodels. To suppress this warning, explicitly "
"set fft=False.",
FutureWarning,
)
fft = False
x = array_like(x, "x")
nobs = len(x) # TODO: should this shrink for missing="drop" and NaNs in x?
avf = acovf(x, adjusted=adjusted, demean=True, fft=fft, missing=missing)
acf = avf[: nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones_like(acf) / nobs
varacf[0] = 0
varacf[1] = 1.0 / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1] ** 2)
interval = stats.norm.ppf(1 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
|
def acf(
x,
adjusted=False,
nlags=None,
qstat=False,
fft=None,
alpha=None,
missing="none",
):
"""
Calculate the autocorrelation function.
Parameters
----------
x : array_like
The time series data.
adjusted : bool, default False
If True, then denominators for autocovariance are n-k, otherwise n.
nlags : int, default 40
Number of lags to return autocorrelation for.
qstat : bool, default False
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, default None
If True, computes the ACF via FFT.
alpha : scalar, default None
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett"s formula.
missing : str, default "none"
A string in ["none", "raise", "conservative", "drop"] specifying how
the NaNs are to be treated. "none" performs no checks. "raise" raises
an exception if NaN values are found. "drop" removes the missing
observations and then estimates the autocovariances treating the
non-missing as contiguous. "conservative" computes the autocovariance
using nan-ops so that nans are removed when computing the mean
and cross-products that are used to estimate the autocovariance.
When using "conservative", n is set to the number of non-missing
observations.
Returns
-------
acf : ndarray
The autocorrelation function.
confint : ndarray, optional
Confidence intervals for the ACF. Returned if alpha is not None.
qstat : ndarray, optional
The Ljung-Box Q-Statistic. Returned if q_stat is True.
pvalues : ndarray, optional
The p-values associated with the Q-statistics. Returned if q_stat is
True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
For very long time series it is recommended to use fft convolution instead.
When fft is False uses a simple, direct estimator of the autocovariances
that only computes the first nlag + 1 values. This can be much faster when
the time series is long and only a small number of autocovariances are
needed.
If adjusted is true, the denominator for the autocovariance is adjusted
for the loss of data.
References
----------
.. [1] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
adjusted = bool_like(adjusted, "adjusted")
nlags = int_like(nlags, "nlags", optional=True)
qstat = bool_like(qstat, "qstat")
fft = bool_like(fft, "fft", optional=True)
alpha = float_like(alpha, "alpha", optional=True)
missing = string_like(
missing, "missing", options=("none", "raise", "conservative", "drop")
)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
if fft is None:
warnings.warn(
"fft=True will become the default after the release of the 0.12 "
"release of statsmodels. To suppress this warning, explicitly "
"set fft=False.",
FutureWarning,
)
fft = False
x = array_like(x, "x")
nobs = len(x) # TODO: should this shrink for missing="drop" and NaNs in x?
avf = acovf(x, adjusted=adjusted, demean=True, fft=fft, missing=missing)
acf = avf[: nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1.0 / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1] ** 2)
interval = stats.norm.ppf(1 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
|
https://github.com/statsmodels/statsmodels/issues/6173
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-312-be24090d9a69> in <module>
1 mod = sm.tsa.statespace.SARIMAX(np.random.normal(size=10), order=(10, 0, 0))
2 results = mod.fit()
----> 3 results.plot_diagnostics(figsize=(15, 5));
~/projects/statsmodels/statsmodels/tsa/statespace/mlemodel.py in plot_diagnostics(self, variable, lags, fig, figsize)
3286 ax = fig.add_subplot(224)
3287 from statsmodels.graphics.tsaplots import plot_acf
-> 3288 plot_acf(resid, ax=ax, lags=lags)
3289 ax.set_title('Correlogram')
3290
~/projects/statsmodels/statsmodels/graphics/tsaplots.py in plot_acf(x, ax, lags, alpha, use_vlines, unbiased, fft, title, zero, vlines_kwargs, **kwargs)
151 else:
152 acf_x, confint = acf(x, nlags=nlags, alpha=alpha, fft=fft,
--> 153 unbiased=unbiased)
154
155 _plot_corr(ax, title, acf_x, confint, lags, irregular, use_vlines,
~/projects/statsmodels/statsmodels/tsa/stattools.py in acf(x, unbiased, nlags, qstat, fft, alpha, missing)
583 varacf[0] = 0
584 varacf[1] = 1. / nobs
--> 585 varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
586 interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
587 confint = np.array(lzip(acf - interval, acf + interval))
ValueError: operands could not be broadcast together with shapes (9,) (8,) (9,)
|
ValueError
|
def gls(
endog,
exog=None,
order=(0, 0, 0),
seasonal_order=(0, 0, 0, 0),
include_constant=None,
n_iter=None,
max_iter=50,
tolerance=1e-8,
arma_estimator="innovations_mle",
arma_estimator_kwargs=None,
):
"""
Estimate ARMAX parameters by GLS.
Parameters
----------
endog : array_like
Input time series array.
exog : array_like, optional
Array of exogenous regressors. If not included, then `include_constant`
must be True, and then `exog` will only include the constant column.
order : tuple, optional
The (p,d,q) order of the ARIMA model. Default is (0, 0, 0).
seasonal_order : tuple, optional
The (P,D,Q,s) order of the seasonal ARIMA model.
Default is (0, 0, 0, 0).
include_constant : bool, optional
Whether to add a constant term in `exog` if it's not already there.
The estimate of the constant will then appear as one of the `exog`
parameters. If `exog` is None, then the constant will represent the
mean of the process. Default is True if the specified model does not
include integration and False otherwise.
n_iter : int, optional
Optionally iterate feasible GSL a specific number of times. Default is
to iterate to convergence. If set, this argument overrides the
`max_iter` and `tolerance` arguments.
max_iter : int, optional
Maximum number of feasible GLS iterations. Default is 50. If `n_iter`
is set, it overrides this argument.
tolerance : float, optional
Tolerance for determining convergence of feasible GSL iterations. If
`iter` is set, this argument has no effect.
Default is 1e-8.
arma_estimator : str, optional
The estimator used for estimating the ARMA model. This option should
not generally be used, unless the default method is failing or is
otherwise unsuitable. Not all values will be valid, depending on the
specified model orders (`order` and `seasonal_order`). Possible values
are:
* 'innovations_mle' - can be used with any specification
* 'statespace' - can be used with any specification
* 'hannan_rissanen' - can be used with any ARMA non-seasonal model
* 'yule_walker' - only non-seasonal consecutive
autoregressive (AR) models
* 'burg' - only non-seasonal, consecutive autoregressive (AR) models
* 'innovations' - only non-seasonal, consecutive moving
average (MA) models.
The default is 'innovations_mle'.
arma_estimator_kwargs : dict, optional
Arguments to pass to the ARMA estimator.
Returns
-------
parameters : SARIMAXParams object
Contains the parameter estimates from the final iteration.
other_results : Bunch
Includes eight components: `spec`, `params`, `converged`,
`differences`, `iterations`, `arma_estimator`, 'arma_estimator_kwargs',
and `arma_results`.
Notes
-----
The primary reference is [1]_, section 6.6. In particular, the
implementation follows the iterative procedure described in section 6.6.2.
Construction of the transformed variables used to compute the GLS estimator
described in section 6.6.1 is done via an application of the innovations
algorithm (rather than explicit construction of the transformation matrix).
Note that if the specified model includes integration, both the `endog` and
`exog` series will be differenced prior to estimation and a warning will
be issued to alert the user.
References
----------
.. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
Introduction to Time Series and Forecasting. Springer.
"""
# Handle n_iter
if n_iter is not None:
max_iter = n_iter
tolerance = np.inf
# Default for include_constant is True if there is no integration and
# False otherwise
integrated = order[1] > 0 or seasonal_order[1] > 0
if include_constant is None:
include_constant = not integrated
elif include_constant and integrated:
raise ValueError("Cannot include a constant in an integrated model.")
# Handle including the constant (need to do it now so that the constant
# parameter can be included in the specification as part of `exog`.)
if include_constant:
exog = np.ones_like(endog) if exog is None else add_constant(exog)
# Create the SARIMAX specification
spec = SARIMAXSpecification(
endog, exog=exog, order=order, seasonal_order=seasonal_order
)
endog = spec.endog
exog = spec.exog
# Handle integration
if spec.is_integrated:
# TODO: this is the approach suggested by BD (see Remark 1 in
# section 6.6.2 and Example 6.6.3), but maybe there are some cases
# where we don't want to force this behavior on the user?
warnings.warn(
"Provided `endog` and `exog` series have been"
" differenced to eliminate integration prior to GLS"
" parameter estimation."
)
endog = diff(
endog,
k_diff=spec.diff,
k_seasonal_diff=spec.seasonal_diff,
seasonal_periods=spec.seasonal_periods,
)
exog = diff(
exog,
k_diff=spec.diff,
k_seasonal_diff=spec.seasonal_diff,
seasonal_periods=spec.seasonal_periods,
)
augmented = np.c_[endog, exog]
# Validate arma_estimator
spec.validate_estimator(arma_estimator)
if arma_estimator_kwargs is None:
arma_estimator_kwargs = {}
# Step 1: OLS
mod_ols = OLS(endog, exog)
res_ols = mod_ols.fit()
exog_params = res_ols.params
resid = res_ols.resid
# 0th iteration parameters
p = SARIMAXParams(spec=spec)
p.exog_params = exog_params
if spec.max_ar_order > 0:
p.ar_params = np.zeros(spec.k_ar_params)
if spec.max_seasonal_ar_order > 0:
p.seasonal_ar_params = np.zeros(spec.k_seasonal_ar_params)
if spec.max_ma_order > 0:
p.ma_params = np.zeros(spec.k_ma_params)
if spec.max_seasonal_ma_order > 0:
p.seasonal_ma_params = np.zeros(spec.k_seasonal_ma_params)
p.sigma2 = res_ols.scale
ar_params = p.ar_params
seasonal_ar_params = p.seasonal_ar_params
ma_params = p.ma_params
seasonal_ma_params = p.seasonal_ma_params
sigma2 = p.sigma2
# Step 2 - 4: iterate feasible GLS to convergence
arma_results = [None]
differences = [None]
parameters = [p]
converged = False if n_iter is None else None
i = 0
for i in range(1, max_iter + 1):
prev = exog_params
# Step 2: ARMA
# TODO: allow estimator-specific kwargs?
if arma_estimator == "yule_walker":
p_arma, res_arma = yule_walker(
resid, ar_order=spec.ar_order, demean=False, **arma_estimator_kwargs
)
elif arma_estimator == "burg":
p_arma, res_arma = burg(
resid, ar_order=spec.ar_order, demean=False, **arma_estimator_kwargs
)
elif arma_estimator == "innovations":
out, res_arma = innovations(
resid, ma_order=spec.ma_order, demean=False, **arma_estimator_kwargs
)
p_arma = out[-1]
elif arma_estimator == "hannan_rissanen":
p_arma, res_arma = hannan_rissanen(
resid,
ar_order=spec.ar_order,
ma_order=spec.ma_order,
demean=False,
**arma_estimator_kwargs,
)
else:
# For later iterations, use a "warm start" for parameter estimates
# (speeds up estimation and convergence)
start_params = (
None
if i == 1
else np.r_[
ar_params, ma_params, seasonal_ar_params, seasonal_ma_params, sigma2
]
)
# Note: in each case, we do not pass in the order of integration
# since we have already differenced the series
tmp_order = (spec.order[0], 0, spec.order[2])
tmp_seasonal_order = (
spec.seasonal_order[0],
0,
spec.seasonal_order[2],
spec.seasonal_order[3],
)
if arma_estimator == "innovations_mle":
p_arma, res_arma = innovations_mle(
resid,
order=tmp_order,
seasonal_order=tmp_seasonal_order,
demean=False,
start_params=start_params,
**arma_estimator_kwargs,
)
else:
p_arma, res_arma = statespace(
resid,
order=tmp_order,
seasonal_order=tmp_seasonal_order,
include_constant=False,
start_params=start_params,
**arma_estimator_kwargs,
)
ar_params = p_arma.ar_params
seasonal_ar_params = p_arma.seasonal_ar_params
ma_params = p_arma.ma_params
seasonal_ma_params = p_arma.seasonal_ma_params
sigma2 = p_arma.sigma2
arma_results.append(res_arma)
# Step 3: GLS
# Compute transformed variables that satisfy OLS assumptions
# Note: In section 6.1.1 of Brockwell and Davis (2016), these
# transformations are developed as computed by left multiplcation
# by a matrix T. However, explicitly constructing T and then
# performing the left-multiplications does not scale well when nobs is
# large. Instead, we can retrieve the transformed variables as the
# residuals of the innovations algorithm (the `normalize=True`
# argument applies a Prais-Winsten-type normalization to the first few
# observations to ensure homoskedasticity). Brockwell and Davis
# mention that they also take this approach in practice.
# GH-6540: AR must be stationary
if not p_arma.is_stationary:
raise ValueError(
"Roots of the autoregressive parameters indicate that data is"
"non-stationary. GLS cannot be used with non-stationary "
"parameters. You should consider differencing the model data"
"or applying a nonlinear transformation (e.g., natural log)."
)
tmp, _ = arma_innovations.arma_innovations(
augmented, ar_params=ar_params, ma_params=ma_params, normalize=True
)
u = tmp[:, 0]
x = tmp[:, 1:]
# OLS on transformed variables
mod_gls = OLS(u, x)
res_gls = mod_gls.fit()
exog_params = res_gls.params
resid = endog - np.dot(exog, exog_params)
# Construct the parameter vector for the iteration
p = SARIMAXParams(spec=spec)
p.exog_params = exog_params
if spec.max_ar_order > 0:
p.ar_params = ar_params
if spec.max_seasonal_ar_order > 0:
p.seasonal_ar_params = seasonal_ar_params
if spec.max_ma_order > 0:
p.ma_params = ma_params
if spec.max_seasonal_ma_order > 0:
p.seasonal_ma_params = seasonal_ma_params
p.sigma2 = sigma2
parameters.append(p)
# Check for convergence
difference = np.abs(exog_params - prev)
differences.append(difference)
if n_iter is None and np.all(difference < tolerance):
converged = True
break
else:
if n_iter is None:
warnings.warn(
"Feasible GLS failed to converge in %d iterations."
" Consider increasing the maximum number of"
" iterations using the `max_iter` argument or"
" reducing the required tolerance using the"
" `tolerance` argument." % max_iter
)
# Construct final results
p = parameters[-1]
other_results = Bunch(
{
"spec": spec,
"params": parameters,
"converged": converged,
"differences": differences,
"iterations": i,
"arma_estimator": arma_estimator,
"arma_estimator_kwargs": arma_estimator_kwargs,
"arma_results": arma_results,
}
)
return p, other_results
|
def gls(
endog,
exog=None,
order=(0, 0, 0),
seasonal_order=(0, 0, 0, 0),
include_constant=None,
n_iter=None,
max_iter=50,
tolerance=1e-8,
arma_estimator="innovations_mle",
arma_estimator_kwargs=None,
):
"""
Estimate ARMAX parameters by GLS.
Parameters
----------
endog : array_like
Input time series array.
exog : array_like, optional
Array of exogenous regressors. If not included, then `include_constant`
must be True, and then `exog` will only include the constant column.
order : tuple, optional
The (p,d,q) order of the ARIMA model. Default is (0, 0, 0).
seasonal_order : tuple, optional
The (P,D,Q,s) order of the seasonal ARIMA model.
Default is (0, 0, 0, 0).
include_constant : bool, optional
Whether to add a constant term in `exog` if it's not already there.
The estimate of the constant will then appear as one of the `exog`
parameters. If `exog` is None, then the constant will represent the
mean of the process. Default is True if the specified model does not
include integration and False otherwise.
n_iter : int, optional
Optionally iterate feasible GSL a specific number of times. Default is
to iterate to convergence. If set, this argument overrides the
`max_iter` and `tolerance` arguments.
max_iter : int, optional
Maximum number of feasible GLS iterations. Default is 50. If `n_iter`
is set, it overrides this argument.
tolerance : float, optional
Tolerance for determining convergence of feasible GSL iterations. If
`iter` is set, this argument has no effect.
Default is 1e-8.
arma_estimator : str, optional
The estimator used for estimating the ARMA model. This option should
not generally be used, unless the default method is failing or is
otherwise unsuitable. Not all values will be valid, depending on the
specified model orders (`order` and `seasonal_order`). Possible values
are:
* 'innovations_mle' - can be used with any specification
* 'statespace' - can be used with any specification
* 'hannan_rissanen' - can be used with any ARMA non-seasonal model
* 'yule_walker' - only non-seasonal consecutive
autoregressive (AR) models
* 'burg' - only non-seasonal, consecutive autoregressive (AR) models
* 'innovations' - only non-seasonal, consecutive moving
average (MA) models.
The default is 'innovations_mle'.
arma_estimator_kwargs : dict, optional
Arguments to pass to the ARMA estimator.
Returns
-------
parameters : SARIMAXParams object
Contains the parameter estimates from the final iteration.
other_results : Bunch
Includes eight components: `spec`, `params`, `converged`,
`differences`, `iterations`, `arma_estimator`, 'arma_estimator_kwargs',
and `arma_results`.
Notes
-----
The primary reference is [1]_, section 6.6. In particular, the
implementation follows the iterative procedure described in section 6.6.2.
Construction of the transformed variables used to compute the GLS estimator
described in section 6.6.1 is done via an application of the innovations
algorithm (rather than explicit construction of the transformation matrix).
Note that if the specified model includes integration, both the `endog` and
`exog` series will be differenced prior to estimation and a warning will
be issued to alert the user.
References
----------
.. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
Introduction to Time Series and Forecasting. Springer.
"""
# Handle n_iter
if n_iter is not None:
max_iter = n_iter
tolerance = np.inf
# Default for include_constant is True if there is no integration and
# False otherwise
integrated = order[1] > 0 or seasonal_order[1] > 0
if include_constant is None:
include_constant = not integrated
elif include_constant and integrated:
raise ValueError("Cannot include a constant in an integrated model.")
# Handle including the constant (need to do it now so that the constant
# parameter can be included in the specification as part of `exog`.)
if include_constant:
exog = np.ones_like(endog) if exog is None else add_constant(exog)
# Create the SARIMAX specification
spec = SARIMAXSpecification(
endog, exog=exog, order=order, seasonal_order=seasonal_order
)
endog = spec.endog
exog = spec.exog
# Handle integration
if spec.is_integrated:
# TODO: this is the approach suggested by BD (see Remark 1 in
# section 6.6.2 and Example 6.6.3), but maybe there are some cases
# where we don't want to force this behavior on the user?
warnings.warn(
"Provided `endog` and `exog` series have been"
" differenced to eliminate integration prior to GLS"
" parameter estimation."
)
endog = diff(
endog,
k_diff=spec.diff,
k_seasonal_diff=spec.seasonal_diff,
seasonal_periods=spec.seasonal_periods,
)
exog = diff(
exog,
k_diff=spec.diff,
k_seasonal_diff=spec.seasonal_diff,
seasonal_periods=spec.seasonal_periods,
)
augmented = np.c_[endog, exog]
# Validate arma_estimator
spec.validate_estimator(arma_estimator)
if arma_estimator_kwargs is None:
arma_estimator_kwargs = {}
# Step 1: OLS
mod_ols = OLS(endog, exog)
res_ols = mod_ols.fit()
exog_params = res_ols.params
resid = res_ols.resid
# 0th iteration parameters
p = SARIMAXParams(spec=spec)
p.exog_params = exog_params
if spec.max_ar_order > 0:
p.ar_params = np.zeros(spec.k_ar_params)
if spec.max_seasonal_ar_order > 0:
p.seasonal_ar_params = np.zeros(spec.k_seasonal_ar_params)
if spec.max_ma_order > 0:
p.ma_params = np.zeros(spec.k_ma_params)
if spec.max_seasonal_ma_order > 0:
p.seasonal_ma_params = np.zeros(spec.k_seasonal_ma_params)
p.sigma2 = res_ols.scale
ar_params = p.ar_params
seasonal_ar_params = p.seasonal_ar_params
ma_params = p.ma_params
seasonal_ma_params = p.seasonal_ma_params
sigma2 = p.sigma2
# Step 2 - 4: iterate feasible GLS to convergence
arma_results = [None]
differences = [None]
parameters = [p]
converged = False if n_iter is None else None
i = 0
for i in range(1, max_iter + 1):
prev = exog_params
# Step 2: ARMA
# TODO: allow estimator-specific kwargs?
if arma_estimator == "yule_walker":
p_arma, res_arma = yule_walker(
resid, ar_order=spec.ar_order, demean=False, **arma_estimator_kwargs
)
elif arma_estimator == "burg":
p_arma, res_arma = burg(
resid, ar_order=spec.ar_order, demean=False, **arma_estimator_kwargs
)
elif arma_estimator == "innovations":
out, res_arma = innovations(
resid, ma_order=spec.ma_order, demean=False, **arma_estimator_kwargs
)
p_arma = out[-1]
elif arma_estimator == "hannan_rissanen":
p_arma, res_arma = hannan_rissanen(
resid,
ar_order=spec.ar_order,
ma_order=spec.ma_order,
demean=False,
**arma_estimator_kwargs,
)
else:
# For later iterations, use a "warm start" for parameter estimates
# (speeds up estimation and convergence)
start_params = (
None
if i == 1
else np.r_[
ar_params, ma_params, seasonal_ar_params, seasonal_ma_params, sigma2
]
)
# Note: in each case, we do not pass in the order of integration
# since we have already differenced the series
tmp_order = (spec.order[0], 0, spec.order[2])
tmp_seasonal_order = (
spec.seasonal_order[0],
0,
spec.seasonal_order[2],
spec.seasonal_order[3],
)
if arma_estimator == "innovations_mle":
p_arma, res_arma = innovations_mle(
resid,
order=tmp_order,
seasonal_order=tmp_seasonal_order,
demean=False,
start_params=start_params,
**arma_estimator_kwargs,
)
else:
p_arma, res_arma = statespace(
resid,
order=tmp_order,
seasonal_order=tmp_seasonal_order,
include_constant=False,
start_params=start_params,
**arma_estimator_kwargs,
)
ar_params = p_arma.ar_params
seasonal_ar_params = p_arma.seasonal_ar_params
ma_params = p_arma.ma_params
seasonal_ma_params = p_arma.seasonal_ma_params
sigma2 = p_arma.sigma2
arma_results.append(res_arma)
# Step 3: GLS
# Compute transformed variables that satisfy OLS assumptions
# Note: In section 6.1.1 of Brockwell and Davis (2016), these
# transformations are developed as computed by left multiplcation
# by a matrix T. However, explicitly constructing T and then
# performing the left-multiplications does not scale well when nobs is
# large. Instead, we can retrieve the transformed variables as the
# residuals of the innovations algorithm (the `normalize=True`
# argument applies a Prais-Winsten-type normalization to the first few
# observations to ensure homoskedasticity). Brockwell and Davis
# mention that they also take this approach in practice.
tmp, _ = arma_innovations.arma_innovations(
augmented, ar_params=ar_params, ma_params=ma_params, normalize=True
)
u = tmp[:, 0]
x = tmp[:, 1:]
# OLS on transformed variables
mod_gls = OLS(u, x)
res_gls = mod_gls.fit()
exog_params = res_gls.params
resid = endog - np.dot(exog, exog_params)
# Construct the parameter vector for the iteration
p = SARIMAXParams(spec=spec)
p.exog_params = exog_params
if spec.max_ar_order > 0:
p.ar_params = ar_params
if spec.max_seasonal_ar_order > 0:
p.seasonal_ar_params = seasonal_ar_params
if spec.max_ma_order > 0:
p.ma_params = ma_params
if spec.max_seasonal_ma_order > 0:
p.seasonal_ma_params = seasonal_ma_params
p.sigma2 = sigma2
parameters.append(p)
# Check for convergence
difference = np.abs(exog_params - prev)
differences.append(difference)
if n_iter is None and np.all(difference < tolerance):
converged = True
break
else:
if n_iter is None:
warnings.warn(
"Feasible GLS failed to converge in %d iterations."
" Consider increasing the maximum number of"
" iterations using the `max_iter` argument or"
" reducing the required tolerance using the"
" `tolerance` argument." % max_iter
)
# Construct final results
p = parameters[-1]
other_results = Bunch(
{
"spec": spec,
"params": parameters,
"converged": converged,
"differences": differences,
"iterations": i,
"arma_estimator": arma_estimator,
"arma_estimator_kwargs": arma_estimator_kwargs,
"arma_results": arma_results,
}
)
return p, other_results
|
https://github.com/statsmodels/statsmodels/issues/6540
|
/my_path/test.py
/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/innovations/arma_innovations.py:83: RuntimeWarning: invalid value encountered in sqrt
v05 = v**0.5
Traceback (most recent call last):
File "/my_path/test.py", line 19, in <module>
cov_type="none")
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/arima/model.py", line 307, in fit
**gls_kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/arima/estimators/gls.py", line 251, in gls
mod_gls = OLS(u, x)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 858, in __init__
hasconst=hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 701, in __init__
weights=weights, hasconst=hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 189, in __init__
super(RegressionModel, self).__init__(endog, exog, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 236, in __init__
super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 77, in __init__
**kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 100, in _handle_data
data = handle_data(endog, exog, missing, hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 667, in handle_data
**kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 83, in __init__
self._handle_constant(hasconst)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 129, in _handle_constant
raise MissingDataError('exog contains inf or nans')
statsmodels.tools.sm_exceptions.MissingDataError: exog contains inf or nans
|
statsmodels.tools.sm_exceptions.MissingDataError
|
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
"""
Theoretical autocovariances of stationary ARMA processes
Parameters
----------
ar : array_like, 1d
The coefficients for autoregressive lag polynomial, including zero lag.
ma : array_like, 1d
The coefficients for moving-average lag polynomial, including zero lag.
nobs : int
The number of terms (lags plus zero lag) to include in returned acovf.
sigma2 : float
Variance of the innovation term.
Returns
-------
ndarray
The autocovariance of ARMA process given by ar, ma.
See Also
--------
arma_acf : Autocorrelation function for ARMA processes.
acovf : Sample autocovariance estimation.
References
----------
.. [*] Brockwell, Peter J., and Richard A. Davis. 2009. Time Series:
Theory and Methods. 2nd ed. 1991. New York, NY: Springer.
"""
if dtype is None:
dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))
p = len(ar) - 1
q = len(ma) - 1
m = max(p, q) + 1
if sigma2.real < 0:
raise ValueError("Must have positive innovation variance.")
# Short-circuit for trivial corner-case
if p == q == 0:
out = np.zeros(nobs, dtype=dtype)
out[0] = sigma2
return out
elif p > 0 and np.max(np.abs(np.roots(ar))) >= 1:
raise ValueError(NONSTATIONARY_ERROR)
# Get the moving average representation coefficients that we need
ma_coeffs = arma2ma(ar, ma, lags=m)
# Solve for the first m autocovariances via the linear system
# described by (BD, eq. 3.3.8)
A = np.zeros((m, m), dtype=dtype)
b = np.zeros((m, 1), dtype=dtype)
# We need a zero-right-padded version of ar params
tmp_ar = np.zeros(m, dtype=dtype)
tmp_ar[: p + 1] = ar
for k in range(m):
A[k, : (k + 1)] = tmp_ar[: (k + 1)][::-1]
A[k, 1 : m - k] += tmp_ar[(k + 1) : m]
b[k] = sigma2 * np.dot(ma[k : q + 1], ma_coeffs[: max((q + 1 - k), 0)])
acovf = np.zeros(max(nobs, m), dtype=dtype)
try:
acovf[:m] = np.linalg.solve(A, b)[:, 0]
except np.linalg.LinAlgError:
raise ValueError(NONSTATIONARY_ERROR)
# Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
if nobs > m:
zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype), zi=zi)[0]
return acovf[:nobs]
|
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
"""
Theoretical autocovariance function of ARMA process.
Parameters
----------
ar : array_like, 1d
The coefficients for autoregressive lag polynomial, including zero lag.
ma : array_like, 1d
The coefficients for moving-average lag polynomial, including zero lag.
nobs : int
The number of terms (lags plus zero lag) to include in returned acovf.
sigma2 : float
Variance of the innovation term.
Returns
-------
ndarray
The autocovariance of ARMA process given by ar, ma.
See Also
--------
arma_acf : Autocorrelation function for ARMA processes.
acovf : Sample autocovariance estimation.
References
----------
.. [*] Brockwell, Peter J., and Richard A. Davis. 2009. Time Series:
Theory and Methods. 2nd ed. 1991. New York, NY: Springer.
"""
if dtype is None:
dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))
p = len(ar) - 1
q = len(ma) - 1
m = max(p, q) + 1
if sigma2.real < 0:
raise ValueError("Must have positive innovation variance.")
# Short-circuit for trivial corner-case
if p == q == 0:
out = np.zeros(nobs, dtype=dtype)
out[0] = sigma2
return out
# Get the moving average representation coefficients that we need
ma_coeffs = arma2ma(ar, ma, lags=m)
# Solve for the first m autocovariances via the linear system
# described by (BD, eq. 3.3.8)
A = np.zeros((m, m), dtype=dtype)
b = np.zeros((m, 1), dtype=dtype)
# We need a zero-right-padded version of ar params
tmp_ar = np.zeros(m, dtype=dtype)
tmp_ar[: p + 1] = ar
for k in range(m):
A[k, : (k + 1)] = tmp_ar[: (k + 1)][::-1]
A[k, 1 : m - k] += tmp_ar[(k + 1) : m]
b[k] = sigma2 * np.dot(ma[k : q + 1], ma_coeffs[: max((q + 1 - k), 0)])
acovf = np.zeros(max(nobs, m), dtype=dtype)
acovf[:m] = np.linalg.solve(A, b)[:, 0]
# Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
if nobs > m:
zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype), zi=zi)[0]
return acovf[:nobs]
|
https://github.com/statsmodels/statsmodels/issues/6540
|
/my_path/test.py
/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/innovations/arma_innovations.py:83: RuntimeWarning: invalid value encountered in sqrt
v05 = v**0.5
Traceback (most recent call last):
File "/my_path/test.py", line 19, in <module>
cov_type="none")
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/arima/model.py", line 307, in fit
**gls_kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/arima/estimators/gls.py", line 251, in gls
mod_gls = OLS(u, x)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 858, in __init__
hasconst=hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 701, in __init__
weights=weights, hasconst=hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 189, in __init__
super(RegressionModel, self).__init__(endog, exog, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 236, in __init__
super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 77, in __init__
**kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 100, in _handle_data
data = handle_data(endog, exog, missing, hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 667, in handle_data
**kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 83, in __init__
self._handle_constant(hasconst)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 129, in _handle_constant
raise MissingDataError('exog contains inf or nans')
statsmodels.tools.sm_exceptions.MissingDataError: exog contains inf or nans
|
statsmodels.tools.sm_exceptions.MissingDataError
|
def arma_innovations(
endog, ar_params=None, ma_params=None, sigma2=1, normalize=False, prefix=None
):
"""
Compute innovations using a given ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process, may be univariate or multivariate.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
normalize : bool, optional
Whether or not to normalize the returned innovations. Default is False.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
innovations : ndarray
Innovations (one-step-ahead prediction errors) for the given `endog`
series with predictions based on the given ARMA process. If
`normalize=True`, then the returned innovations have been "whitened" by
dividing through by the square root of the mean square error.
innovations_mse : ndarray
Mean square error for the innovations.
"""
# Parameters
endog = np.array(endog)
squeezed = endog.ndim == 1
if squeezed:
endog = endog[:, None]
ar_params = np.atleast_1d([] if ar_params is None else ar_params)
ma_params = np.atleast_1d([] if ma_params is None else ma_params)
nobs, k_endog = endog.shape
ar = np.r_[1, -ar_params]
ma = np.r_[1, ma_params]
# Get BLAS prefix
if prefix is None:
prefix, dtype, _ = find_best_blas_type(
[endog, ar_params, ma_params, np.array(sigma2)]
)
dtype = prefix_dtype_map[prefix]
# Make arrays contiguous for BLAS calls
endog = np.asfortranarray(endog, dtype=dtype)
ar_params = np.asfortranarray(ar_params, dtype=dtype)
ma_params = np.asfortranarray(ma_params, dtype=dtype)
sigma2 = dtype(sigma2).item()
# Get the appropriate functions
arma_transformed_acovf_fast = getattr(
_arma_innovations, prefix + "arma_transformed_acovf_fast"
)
arma_innovations_algo_fast = getattr(
_arma_innovations, prefix + "arma_innovations_algo_fast"
)
arma_innovations_filter = getattr(
_arma_innovations, prefix + "arma_innovations_filter"
)
# Run the innovations algorithm for ARMA coefficients
arma_acovf = arima_process.arma_acovf(ar, ma, sigma2=sigma2, nobs=nobs) / sigma2
acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params, acovf, acovf2)
v = np.array(v)
if np.any(v < 0) or not np.isfinite(theta).all() or not np.isfinite(v).all():
# This is defensive code that is hard to hit
raise ValueError(NON_STATIONARY_ERROR)
# Run the innovations filter across each series
u = []
for i in range(k_endog):
u_i = np.array(
arma_innovations_filter(endog[:, i], ar_params, ma_params, theta)
)
u.append(u_i)
u = np.vstack(u).T
if normalize:
u /= v[:, None] ** 0.5
# Post-processing
if squeezed:
u = u.squeeze()
return u, v
|
def arma_innovations(
endog, ar_params=None, ma_params=None, sigma2=1, normalize=False, prefix=None
):
"""
Compute innovations using a given ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process, may be univariate or multivariate.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
normalize : bool, optional
Whether or not to normalize the returned innovations. Default is False.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
innovations : ndarray
Innovations (one-step-ahead prediction errors) for the given `endog`
series with predictions based on the given ARMA process. If
`normalize=True`, then the returned innovations have been "whitened" by
dividing through by the square root of the mean square error.
innovations_mse : ndarray
Mean square error for the innovations.
"""
# Parameters
endog = np.array(endog)
squeezed = endog.ndim == 1
if squeezed:
endog = endog[:, None]
ar_params = np.atleast_1d([] if ar_params is None else ar_params)
ma_params = np.atleast_1d([] if ma_params is None else ma_params)
nobs, k_endog = endog.shape
ar = np.r_[1, -ar_params]
ma = np.r_[1, ma_params]
# Get BLAS prefix
if prefix is None:
prefix, dtype, _ = find_best_blas_type(
[endog, ar_params, ma_params, np.array(sigma2)]
)
dtype = prefix_dtype_map[prefix]
# Make arrays contiguous for BLAS calls
endog = np.asfortranarray(endog, dtype=dtype)
ar_params = np.asfortranarray(ar_params, dtype=dtype)
ma_params = np.asfortranarray(ma_params, dtype=dtype)
sigma2 = dtype(sigma2).item()
# Get the appropriate functions
arma_transformed_acovf_fast = getattr(
_arma_innovations, prefix + "arma_transformed_acovf_fast"
)
arma_innovations_algo_fast = getattr(
_arma_innovations, prefix + "arma_innovations_algo_fast"
)
arma_innovations_filter = getattr(
_arma_innovations, prefix + "arma_innovations_filter"
)
# Run the innovations algorithm for ARMA coefficients
arma_acovf = arima_process.arma_acovf(ar, ma, sigma2=sigma2, nobs=nobs) / sigma2
acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params, acovf, acovf2)
v = np.array(v)
if normalize:
v05 = v**0.5
# Run the innovations filter across each series
u = []
for i in range(k_endog):
u_i = np.array(
arma_innovations_filter(endog[:, i], ar_params, ma_params, theta)
)
u.append(u_i / v05 if normalize else u_i)
u = np.vstack(u).T
# Post-processing
if squeezed:
u = u.squeeze()
return u, v
|
https://github.com/statsmodels/statsmodels/issues/6540
|
/my_path/test.py
/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/innovations/arma_innovations.py:83: RuntimeWarning: invalid value encountered in sqrt
v05 = v**0.5
Traceback (most recent call last):
File "/my_path/test.py", line 19, in <module>
cov_type="none")
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/arima/model.py", line 307, in fit
**gls_kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/arima/estimators/gls.py", line 251, in gls
mod_gls = OLS(u, x)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 858, in __init__
hasconst=hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 701, in __init__
weights=weights, hasconst=hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/regression/linear_model.py", line 189, in __init__
super(RegressionModel, self).__init__(endog, exog, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 236, in __init__
super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 77, in __init__
**kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/model.py", line 100, in _handle_data
data = handle_data(endog, exog, missing, hasconst, **kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 667, in handle_data
**kwargs)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 83, in __init__
self._handle_constant(hasconst)
File "/my_path2/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/base/data.py", line 129, in _handle_constant
raise MissingDataError('exog contains inf or nans')
statsmodels.tools.sm_exceptions.MissingDataError: exog contains inf or nans
|
statsmodels.tools.sm_exceptions.MissingDataError
|
def _select_sigma(x, percentile=25):
"""
Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.
References
----------
Silverman (1986) p.47
"""
# normalize = norm.ppf(.75) - norm.ppf(.25)
normalize = 1.349
IQR = (scoreatpercentile(x, 75) - scoreatpercentile(x, 25)) / normalize
std_dev = np.std(x, axis=0, ddof=1)
if IQR > 0:
return np.minimum(std_dev, IQR)
else:
return std_dev
|
def _select_sigma(X):
"""
Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.
References
----------
Silverman (1986) p.47
"""
# normalize = norm.ppf(.75) - norm.ppf(.25)
normalize = 1.349
# IQR = np.subtract.reduce(percentile(X, [75,25],
# axis=axis), axis=axis)/normalize
IQR = (sap(X, 75) - sap(X, 25)) / normalize
return np.minimum(np.std(X, axis=0, ddof=1), IQR)
|
https://github.com/statsmodels/statsmodels/issues/6679
|
Numpy: 1.18.3
Statsmodels: 0.11.1
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py", line 451, in kdensityfft
bw = float(bw)
ValueError: could not convert string to float: 'normal_reference'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "repr.py", line 11, in <module>
kde.fit()
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py", line 140, in fit
clip=clip, cut=cut)
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py", line 453, in kdensityfft
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/bandwidths.py", line 174, in select_bandwidth
raise RuntimeError(err)
RuntimeError: Selected KDE bandwidth is 0. Cannot estimate density.
|
ValueError
|
def select_bandwidth(x, bw, kernel):
"""
Selects bandwidth for a selection rule bw
this is a wrapper around existing bandwidth selection rules
Parameters
----------
x : array_like
Array for which to get the bandwidth
bw : str
name of bandwidth selection rule, currently supported are:
%s
kernel : not used yet
Returns
-------
bw : float
The estimate of the bandwidth
"""
bw = bw.lower()
if bw not in bandwidth_funcs:
raise ValueError("Bandwidth %s not understood" % bw)
bandwidth = bandwidth_funcs[bw](x, kernel)
if np.any(bandwidth == 0):
# eventually this can fall back on another selection criterion.
err = (
"Selected KDE bandwidth is 0. Cannot estimate density. "
"Either provide the bandwidth during initialization or use "
"an alternative method."
)
raise RuntimeError(err)
else:
return bandwidth
|
def select_bandwidth(x, bw, kernel):
"""
Selects bandwidth for a selection rule bw
this is a wrapper around existing bandwidth selection rules
Parameters
----------
x : array_like
Array for which to get the bandwidth
bw : str
name of bandwidth selection rule, currently supported are:
%s
kernel : not used yet
Returns
-------
bw : float
The estimate of the bandwidth
"""
bw = bw.lower()
if bw not in bandwidth_funcs:
raise ValueError("Bandwidth %s not understood" % bw)
bandwidth = bandwidth_funcs[bw](x, kernel)
if np.any(bandwidth == 0):
# eventually this can fall back on another selection criterion.
err = "Selected KDE bandwidth is 0. Cannot estimate density."
raise RuntimeError(err)
else:
return bandwidth
|
https://github.com/statsmodels/statsmodels/issues/6679
|
Numpy: 1.18.3
Statsmodels: 0.11.1
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py", line 451, in kdensityfft
bw = float(bw)
ValueError: could not convert string to float: 'normal_reference'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "repr.py", line 11, in <module>
kde.fit()
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py", line 140, in fit
clip=clip, cut=cut)
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py", line 453, in kdensityfft
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
File "/usr/local/lib/python3.7/site-packages/statsmodels/nonparametric/bandwidths.py", line 174, in select_bandwidth
raise RuntimeError(err)
RuntimeError: Selected KDE bandwidth is 0. Cannot estimate density.
|
ValueError
|
def __init__(self, endog):
self.endog = array_like(endog, "endog", ndim=1, contiguous=True)
|
def __init__(self, endog):
self.endog = np.asarray(endog)
|
https://github.com/statsmodels/statsmodels/issues/1915
|
Traceback (most recent call last):
File "./statsmodels_01.py", line 40, in <module>
kde_.fit()
File "/home/kian/.local/lib/python2.7/site-packages/statsmodels/nonparametric/kde.py", line 142, in fit
clip=clip, cut=cut)
File "/home/kian/.local/lib/python2.7/site-packages/statsmodels/nonparametric/kde.py", line 484, in kdensityfft
binned = fast_linbin(X,a,b,gridsize)/(delta*nobs)
File "linbin.pyx", line 17, in statsmodels.nonparametric.linbin.fast_linbin (statsmodels/nonparametric/linbin.c:1246)
ValueError: Buffer dtype mismatch, expected 'DOUBLE' but got 'long'
|
ValueError
|
def __init__(self, x, kernel=None):
x = array_like(x, "x", maxdim=2, contiguous=True)
if x.ndim == 1:
x = x[:, None]
nobs, n_series = x.shape
if kernel is None:
kernel = kernels.Gaussian() # no meaningful bandwidth yet
if n_series > 1:
if isinstance(kernel, kernels.CustomKernel):
kernel = kernels.NdKernel(n_series, kernels=kernel)
self.kernel = kernel
self.n = n_series # TODO change attribute
self.x = x
|
def __init__(self, x, kernel=None):
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
nobs, n_series = x.shape
if kernel is None:
kernel = kernels.Gaussian() # no meaningful bandwidth yet
if n_series > 1:
if isinstance(kernel, kernels.CustomKernel):
kernel = kernels.NdKernel(n_series, kernels=kernel)
self.kernel = kernel
self.n = n_series # TODO change attribute
self.x = x
|
https://github.com/statsmodels/statsmodels/issues/1915
|
Traceback (most recent call last):
File "./statsmodels_01.py", line 40, in <module>
kde_.fit()
File "/home/kian/.local/lib/python2.7/site-packages/statsmodels/nonparametric/kde.py", line 142, in fit
clip=clip, cut=cut)
File "/home/kian/.local/lib/python2.7/site-packages/statsmodels/nonparametric/kde.py", line 484, in kdensityfft
binned = fast_linbin(X,a,b,gridsize)/(delta*nobs)
File "linbin.pyx", line 17, in statsmodels.nonparametric.linbin.fast_linbin (statsmodels/nonparametric/linbin.c:1246)
ValueError: Buffer dtype mismatch, expected 'DOUBLE' but got 'long'
|
ValueError
|
def conf_int(self, method="endpoint", alpha=0.05, **kwds):
# TODO: this performs metadata wrapping, and that should be handled
# by attach_* methods. However, they do not currently support
# this use case.
conf_int = super(PredictionResults, self).conf_int(method, alpha, **kwds)
# Create a dataframe
if self.row_labels is not None:
conf_int = pd.DataFrame(conf_int, index=self.row_labels)
# Attach the endog names
ynames = self.model.data.ynames
if not type(ynames) == list:
ynames = [ynames]
names = ["lower {0}".format(name) for name in ynames] + [
"upper {0}".format(name) for name in ynames
]
conf_int.columns = names
return conf_int
|
def conf_int(self, method="endpoint", alpha=0.05, **kwds):
# TODO: this performs metadata wrapping, and that should be handled
# by attach_* methods. However, they do not currently support
# this use case.
conf_int = super(PredictionResults, self).conf_int(method, alpha, **kwds)
# Create a dataframe
if self.row_labels is not None:
conf_int = pd.DataFrame(conf_int, index=self.row_labels)
# Attach the endog names
ynames = self.model.data.ynames
if not type(ynames) == list:
ynames = [ynames]
names = ["lower %s" % name for name in ynames] + [
"upper %s" % name for name in ynames
]
conf_int.columns = names
return conf_int
|
https://github.com/statsmodels/statsmodels/issues/6296
|
Traceback (most recent call last):
File "/home/user/Projects/subfolder/my_project/test1.py", line 42, in <module>
pred_ci = pred_res.conf_int(alpha=0.01)
File "/home/user/Programs/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 3707, in conf_int
names = (['lower %s' % name for name in ynames] +
File "/home/user/Programs/anaconda3/envs/vision/lib/python3.7/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 3707, in <listcomp>
names = (['lower %s' % name for name in ynames] +
TypeError: not all arguments converted during string formatting
|
TypeError
|
def __init__(
self,
endog,
exog=None,
order=(0, 0, 0),
seasonal_order=(0, 0, 0, 0),
trend=None,
enforce_stationarity=True,
enforce_invertibility=True,
concentrate_scale=False,
dates=None,
freq=None,
missing="none",
):
# Default for trend
# 'c' if there is no integration and 'n' otherwise
# TODO: if trend='c', then we could alternatively use `demean=True` in
# the estimation methods rather than setting up `exog` and using GLS.
# Not sure if it's worth the trouble though.
integrated = order[1] > 0 or seasonal_order[1] > 0
if trend is None and not integrated:
trend = "c"
elif trend is None:
trend = "n"
# Construct the specification
# (don't pass specific values of enforce stationarity/invertibility,
# because we don't actually want to restrict the estimators based on
# this criteria. Instead, we'll just make sure that the parameter
# estimates from those methods satisfy the criteria.)
self._spec_arima = SARIMAXSpecification(
endog,
exog=exog,
order=order,
seasonal_order=seasonal_order,
trend=trend,
enforce_stationarity=None,
enforce_invertibility=None,
concentrate_scale=concentrate_scale,
dates=dates,
freq=freq,
missing=missing,
)
exog = self._spec_arima._model.data.orig_exog
# Initialize the base SARIMAX class
# Note: we don't pass in a trend value to the base class, since ARIMA
# standardizes the trend to always be part of exog, while the base
# SARIMAX class puts it in the transition equation.
super(ARIMA, self).__init__(
endog,
exog,
order=order,
seasonal_order=seasonal_order,
enforce_stationarity=enforce_stationarity,
enforce_invertibility=enforce_invertibility,
concentrate_scale=concentrate_scale,
dates=dates,
freq=freq,
missing=missing,
)
|
def __init__(
self,
endog,
exog=None,
order=(0, 0, 0),
seasonal_order=(0, 0, 0, 0),
trend=None,
enforce_stationarity=True,
enforce_invertibility=True,
concentrate_scale=False,
dates=None,
freq=None,
missing="none",
):
# Default for trend
# 'c' if there is no integration and 'n' otherwise
# TODO: if trend='c', then we could alternatively use `demean=True` in
# the estimation methods rather than setting up `exog` and using GLS.
# Not sure if it's worth the trouble though.
integrated = order[1] > 0 or seasonal_order[1] > 0
if trend is None and not integrated:
trend = "c"
elif trend is None:
trend = "n"
# Construct the specification
# (don't pass specific values of enforce stationarity/invertibility,
# because we don't actually want to restrict the estimators based on
# this criteria. Instead, we'll just make sure that the parameter
# estimates from those methods satisfy the criteria.)
self._spec = SARIMAXSpecification(
endog,
exog=exog,
order=order,
seasonal_order=seasonal_order,
trend=trend,
enforce_stationarity=None,
enforce_invertibility=None,
concentrate_scale=concentrate_scale,
dates=dates,
freq=freq,
missing=missing,
)
exog = self._spec._model.data.orig_exog
# Initialize the base SARIMAX class
super(ARIMA, self).__init__(
endog,
exog,
order=order,
seasonal_order=seasonal_order,
enforce_stationarity=enforce_stationarity,
enforce_invertibility=enforce_invertibility,
concentrate_scale=concentrate_scale,
dates=dates,
freq=freq,
missing=missing,
)
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
def fit(
self,
start_params=None,
transformed=True,
includes_fixed=False,
method=None,
method_kwargs=None,
gls=None,
gls_kwargs=None,
cov_type=None,
cov_kwds=None,
return_params=False,
low_memory=False,
):
"""
Fit (estimate) the parameters of the model.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `start_params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
method : str, optional
The method used for estimating the parameters of the model. Valid
options include 'statespace', 'innovations_mle', 'hannan_rissanen',
'burg', 'innovations', and 'yule_walker'. Not all options are
available for every specification (for example 'yule_walker' can
only be used with AR(p) models).
method_kwargs : dict, optional
Arguments to pass to the fit function for the parameter estimator
described by the `method` argument.
gls : bool, optional
Whether or not to use generalied least squares (GLS) to estimate
regression effects. The default is False if `method='statespace'`
and is True otherwise.
gls_kwargs : dict, optional
Arguments to pass to the GLS etimation fit method. Only applicable
if GLS estimation is used (see `gls` argument for details).
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'opg' unless memory conservation is used to avoid
computing the loglikelihood values for each observation, in which
case the default is 'oim'.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including smoothed results and in-sample
prediction), although out-of-sample forecasting is possible.
Default is False.
Returns
-------
ARIMAResults
Examples
--------
>>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))
>>> res = mod.fit()
>>> print(res.summary())
"""
# Determine which method to use
# 1. If method is specified, make sure it is valid
if method is not None:
self._spec_arima.validate_estimator(method)
# 2. Otherwise, use state space
# TODO: may want to consider using innovations (MLE) if possible here,
# (since in some cases it may be faster than state space), but it is
# less tested.
else:
method = "statespace"
# Can only use fixed parameters with method='statespace'
if self._has_fixed_params and method != "statespace":
raise ValueError(
"When parameters have been fixed, only the method"
' "statespace" can be used; got "%s".' % method
)
# Handle kwargs related to the fit method
if method_kwargs is None:
method_kwargs = {}
required_kwargs = []
if method == "statespace":
required_kwargs = [
"enforce_stationarity",
"enforce_invertibility",
"concentrate_scale",
]
elif method == "innovations_mle":
required_kwargs = ["enforce_invertibility"]
for name in required_kwargs:
if name in method_kwargs:
raise ValueError(
'Cannot override model level value for "%s"'
' when method="%s".' % (name, method)
)
method_kwargs[name] = getattr(self, name)
# Handle kwargs related to GLS estimation
if gls_kwargs is None:
gls_kwargs = {}
# Handle starting parameters
# TODO: maybe should have standard way of computing starting
# parameters in this class?
if start_params is not None:
if method not in ["statespace", "innovations_mle"]:
raise ValueError(
'Estimation method "%s" does not use starting'
" parameters, but `start_params` argument was"
" given." % method
)
method_kwargs["start_params"] = start_params
method_kwargs["transformed"] = transformed
method_kwargs["includes_fixed"] = includes_fixed
# Perform estimation, depending on whether we have exog or not
p = None
fit_details = None
has_exog = self._spec_arima.exog is not None
if has_exog or method == "statespace":
# Use GLS if it was explicitly requested (`gls = True`) or if it
# was left at the default (`gls = None`) and the ARMA estimator is
# anything but statespace.
# Note: both GLS and statespace are able to handle models with
# integration, so we don't need to difference endog or exog here.
if has_exog and (gls or (gls is None and method != "statespace")):
p, fit_details = estimate_gls(
self.endog,
exog=self.exog,
order=self.order,
seasonal_order=self.seasonal_order,
include_constant=False,
arma_estimator=method,
arma_estimator_kwargs=method_kwargs,
**gls_kwargs,
)
elif method != "statespace":
raise ValueError(
"If `exog` is given and GLS is disabled"
" (`gls=False`), then the only valid"
" method is 'statespace'. Got '%s'." % method
)
else:
method_kwargs.setdefault("disp", 0)
res = super(ARIMA, self).fit(
return_params=return_params, low_memory=low_memory, **method_kwargs
)
if not return_params:
res.fit_details = res.mlefit
else:
# Handle differencing if we have an integrated model
# (these methods do not support handling integration internally,
# so we need to manually do the differencing)
endog = self.endog
order = self._spec_arima.order
seasonal_order = self._spec_arima.seasonal_order
if self._spec_arima.is_integrated:
warnings.warn(
"Provided `endog` series has been differenced"
" to eliminate integration prior to parameter"
' estimation by method "%s".' % method
)
endog = diff(
endog,
k_diff=self._spec_arima.diff,
k_seasonal_diff=self._spec_arima.seasonal_diff,
seasonal_periods=self._spec_arima.seasonal_periods,
)
if order[1] > 0:
order = (order[0], 0, order[2])
if seasonal_order[1] > 0:
seasonal_order = (
seasonal_order[0],
0,
seasonal_order[2],
seasonal_order[3],
)
# Now, estimate parameters
if method == "yule_walker":
p, fit_details = yule_walker(
endog, ar_order=order[0], demean=False, **method_kwargs
)
elif method == "burg":
p, fit_details = burg(
endog, ar_order=order[0], demean=False, **method_kwargs
)
elif method == "hannan_rissanen":
p, fit_details = hannan_rissanen(
endog,
ar_order=order[0],
ma_order=order[2],
demean=False,
**method_kwargs,
)
elif method == "innovations":
p, fit_details = innovations(
endog, ma_order=order[2], demean=False, **method_kwargs
)
# innovations computes estimates through the given order, so
# we want to take the estimate associated with the given order
p = p[-1]
elif method == "innovations_mle":
p, fit_details = innovations_mle(
endog,
order=order,
seasonal_order=seasonal_order,
demean=False,
**method_kwargs,
)
# In all cases except method='statespace', we now need to extract the
# parameters and, optionally, create a new results object
if p is not None:
# Need to check that fitted parameters satisfy given restrictions
if (
self.enforce_stationarity
and self._spec_arima.max_reduced_ar_order > 0
and not p.is_stationary
):
raise ValueError(
"Non-stationary autoregressive parameters"
" found with `enforce_stationarity=True`."
" Consider setting it to False or using a"
" different estimation method, such as"
' method="statespace".'
)
if (
self.enforce_invertibility
and self._spec_arima.max_reduced_ma_order > 0
and not p.is_invertible
):
raise ValueError(
"Non-invertible moving average parameters"
" found with `enforce_invertibility=True`."
" Consider setting it to False or using a"
" different estimation method, such as"
' method="statespace".'
)
# Build the requested results
if return_params:
res = p.params
else:
# Handle memory conservation option
if low_memory:
conserve_memory = self.ssm.conserve_memory
self.ssm.set_conserve_memory(MEMORY_CONSERVE)
# Perform filtering / smoothing
if (
self.ssm.memory_no_predicted
or self.ssm.memory_no_gain
or self.ssm.memory_no_smoothing
):
func = self.filter
else:
func = self.smooth
res = func(
p.params,
transformed=True,
includes_fixed=True,
cov_type=cov_type,
cov_kwds=cov_kwds,
)
# Save any details from the fit method
res.fit_details = fit_details
# Reset memory conservation
if low_memory:
self.ssm.set_conserve_memory(conserve_memory)
return res
|
def fit(
self,
start_params=None,
transformed=True,
includes_fixed=False,
method=None,
method_kwargs=None,
gls=None,
gls_kwargs=None,
cov_type=None,
cov_kwds=None,
return_params=False,
low_memory=False,
):
"""
Fit (estimate) the parameters of the model.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `start_params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
method : str, optional
The method used for estimating the parameters of the model. Valid
options include 'statespace', 'innovations_mle', 'hannan_rissanen',
'burg', 'innovations', and 'yule_walker'. Not all options are
available for every specification (for example 'yule_walker' can
only be used with AR(p) models).
method_kwargs : dict, optional
Arguments to pass to the fit function for the parameter estimator
described by the `method` argument.
gls : bool, optional
Whether or not to use generalied least squares (GLS) to estimate
regression effects. The default is False if `method='statespace'`
and is True otherwise.
gls_kwargs : dict, optional
Arguments to pass to the GLS etimation fit method. Only applicable
if GLS estimation is used (see `gls` argument for details).
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'opg' unless memory conservation is used to avoid
computing the loglikelihood values for each observation, in which
case the default is 'oim'.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including smoothed results and in-sample
prediction), although out-of-sample forecasting is possible.
Default is False.
Returns
-------
ARIMAResults
Examples
--------
>>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))
>>> res = mod.fit()
>>> print(res.summary())
"""
# Determine which method to use
# 1. If method is specified, make sure it is valid
if method is not None:
self._spec.validate_estimator(method)
# 2. Otherwise, use state space
# TODO: may want to consider using innovations (MLE) if possible here,
# (since in some cases it may be faster than state space), but it is
# less tested.
else:
method = "statespace"
# Can only use fixed parameters with method='statespace'
if self._has_fixed_params and method != "statespace":
raise ValueError(
"When parameters have been fixed, only the method"
' "statespace" can be used; got "%s".' % method
)
# Handle kwargs related to the fit method
if method_kwargs is None:
method_kwargs = {}
required_kwargs = []
if method == "statespace":
required_kwargs = [
"enforce_stationarity",
"enforce_invertibility",
"concentrate_scale",
]
elif method == "innovations_mle":
required_kwargs = ["enforce_invertibility"]
for name in required_kwargs:
if name in method_kwargs:
raise ValueError(
'Cannot override model level value for "%s"'
' when method="%s".' % (name, method)
)
method_kwargs[name] = getattr(self, name)
# Handle kwargs related to GLS estimation
if gls_kwargs is None:
gls_kwargs = {}
# Handle starting parameters
# TODO: maybe should have standard way of computing starting
# parameters in this class?
if start_params is not None:
if method not in ["statespace", "innovations_mle"]:
raise ValueError(
'Estimation method "%s" does not use starting'
" parameters, but `start_params` argument was"
" given." % method
)
method_kwargs["start_params"] = start_params
method_kwargs["transformed"] = transformed
method_kwargs["includes_fixed"] = includes_fixed
# Perform estimation, depending on whether we have exog or not
p = None
fit_details = None
has_exog = self._spec.exog is not None
if has_exog or method == "statespace":
# Use GLS if it was explicitly requested (`gls = True`) or if it
# was left at the default (`gls = None`) and the ARMA estimator is
# anything but statespace.
# Note: both GLS and statespace are able to handle models with
# integration, so we don't need to difference endog or exog here.
if has_exog and (gls or (gls is None and method != "statespace")):
p, fit_details = estimate_gls(
self.endog,
exog=self.exog,
order=self.order,
seasonal_order=self.seasonal_order,
include_constant=False,
arma_estimator=method,
arma_estimator_kwargs=method_kwargs,
**gls_kwargs,
)
elif method != "statespace":
raise ValueError(
"If `exog` is given and GLS is disabled"
" (`gls=False`), then the only valid"
" method is 'statespace'. Got '%s'." % method
)
else:
method_kwargs.setdefault("disp", 0)
res = super(ARIMA, self).fit(
return_params=return_params, low_memory=low_memory, **method_kwargs
)
if not return_params:
res.fit_details = res.mlefit
else:
# Handle differencing if we have an integrated model
# (these methods do not support handling integration internally,
# so we need to manually do the differencing)
endog = self.endog
order = self._spec.order
seasonal_order = self._spec.seasonal_order
if self._spec.is_integrated:
warnings.warn(
"Provided `endog` series has been differenced"
" to eliminate integration prior to parameter"
' estimation by method "%s".' % method
)
endog = diff(
endog,
k_diff=self._spec.diff,
k_seasonal_diff=self._spec.seasonal_diff,
seasonal_periods=self._spec.seasonal_periods,
)
if order[1] > 0:
order = (order[0], 0, order[2])
if seasonal_order[1] > 0:
seasonal_order = (
seasonal_order[0],
0,
seasonal_order[2],
seasonal_order[3],
)
# Now, estimate parameters
if method == "yule_walker":
p, fit_details = yule_walker(
endog, ar_order=order[0], demean=False, **method_kwargs
)
elif method == "burg":
p, fit_details = burg(
endog, ar_order=order[0], demean=False, **method_kwargs
)
elif method == "hannan_rissanen":
p, fit_details = hannan_rissanen(
endog,
ar_order=order[0],
ma_order=order[2],
demean=False,
**method_kwargs,
)
elif method == "innovations":
p, fit_details = innovations(
endog, ma_order=order[2], demean=False, **method_kwargs
)
# innovations computes estimates through the given order, so
# we want to take the estimate associated with the given order
p = p[-1]
elif method == "innovations_mle":
p, fit_details = innovations_mle(
endog,
order=order,
seasonal_order=seasonal_order,
demean=False,
**method_kwargs,
)
# In all cases except method='statespace', we now need to extract the
# parameters and, optionally, create a new results object
if p is not None:
# Need to check that fitted parameters satisfy given restrictions
if (
self.enforce_stationarity
and self._spec.max_reduced_ar_order > 0
and not p.is_stationary
):
raise ValueError(
"Non-stationary autoregressive parameters"
" found with `enforce_stationarity=True`."
" Consider setting it to False or using a"
" different estimation method, such as"
' method="statespace".'
)
if (
self.enforce_invertibility
and self._spec.max_reduced_ma_order > 0
and not p.is_invertible
):
raise ValueError(
"Non-invertible moving average parameters"
" found with `enforce_invertibility=True`."
" Consider setting it to False or using a"
" different estimation method, such as"
' method="statespace".'
)
# Build the requested results
if return_params:
res = p.params
else:
# Handle memory conservation option
if low_memory:
conserve_memory = self.ssm.conserve_memory
self.ssm.set_conserve_memory(MEMORY_CONSERVE)
# Perform filtering / smoothing
if (
self.ssm.memory_no_predicted
or self.ssm.memory_no_gain
or self.ssm.memory_no_smoothing
):
func = self.filter
else:
func = self.smooth
res = func(
p.params,
transformed=True,
includes_fixed=True,
cov_type=cov_type,
cov_kwds=cov_kwds,
)
# Save any details from the fit method
res.fit_details = fit_details
# Reset memory conservation
if low_memory:
self.ssm.set_conserve_memory(conserve_memory)
return res
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
def __init__(
self,
endog=None,
exog=None,
order=None,
seasonal_order=None,
ar_order=None,
diff=None,
ma_order=None,
seasonal_ar_order=None,
seasonal_diff=None,
seasonal_ma_order=None,
seasonal_periods=None,
trend=None,
enforce_stationarity=None,
enforce_invertibility=None,
concentrate_scale=None,
dates=None,
freq=None,
missing="none",
):
# Basic parameters
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.concentrate_scale = concentrate_scale
# Validate that we were not given conflicting specifications
has_order = order is not None
has_specific_order = (
ar_order is not None or diff is not None or ma_order is not None
)
has_seasonal_order = seasonal_order is not None
has_specific_seasonal_order = (
seasonal_ar_order is not None
or seasonal_diff is not None
or seasonal_ma_order is not None
or seasonal_periods is not None
)
if has_order and has_specific_order:
raise ValueError(
"Cannot specify both `order` and either of `ar_order` or `ma_order`."
)
if has_seasonal_order and has_specific_seasonal_order:
raise ValueError(
"Cannot specify both `seasonal_order` and any of"
" `seasonal_ar_order`, `seasonal_ma_order`,"
" or `seasonal_periods`."
)
# Compute `order`
if has_specific_order:
ar_order = 0 if ar_order is None else ar_order
diff = 0 if diff is None else diff
ma_order = 0 if ma_order is None else ma_order
order = (ar_order, diff, ma_order)
elif not has_order:
order = (0, 0, 0)
# Compute `seasonal_order`
if has_specific_seasonal_order:
seasonal_ar_order = 0 if seasonal_ar_order is None else seasonal_ar_order
seasonal_diff = 0 if seasonal_diff is None else seasonal_diff
seasonal_ma_order = 0 if seasonal_ma_order is None else seasonal_ma_order
seasonal_periods = 0 if seasonal_periods is None else seasonal_periods
seasonal_order = (
seasonal_ar_order,
seasonal_diff,
seasonal_ma_order,
seasonal_periods,
)
elif not has_seasonal_order:
seasonal_order = (0, 0, 0, 0)
# Validate shapes of `order`, `seasonal_order`
if len(order) != 3:
raise ValueError("`order` argument must be an iterable with three elements.")
if len(seasonal_order) != 4:
raise ValueError(
"`seasonal_order` argument must be an iterable with four elements."
)
# Validate differencing parameters
if order[1] < 0:
raise ValueError("Cannot specify negative differencing.")
if order[1] != int(order[1]):
raise ValueError("Cannot specify fractional differencing.")
if seasonal_order[1] < 0:
raise ValueError("Cannot specify negative seasonal differencing.")
if seasonal_order[1] != int(seasonal_order[1]):
raise ValueError("Cannot specify fractional seasonal differencing.")
if seasonal_order[3] < 0:
raise ValueError("Cannot specify negative seasonal periodicity.")
# Standardize to integers or lists of integers
order = (
standardize_lag_order(order[0], "AR"),
int(order[1]),
standardize_lag_order(order[2], "MA"),
)
seasonal_order = (
standardize_lag_order(seasonal_order[0], "seasonal AR"),
int(seasonal_order[1]),
standardize_lag_order(seasonal_order[2], "seasonal MA"),
int(seasonal_order[3]),
)
# Validate seasonals
if seasonal_order[3] == 1:
raise ValueError("Seasonal periodicity must be greater than 1.")
if (
seasonal_order[0] != 0 or seasonal_order[1] != 0 or seasonal_order[2] != 0
) and seasonal_order[3] == 0:
raise ValueError(
"Must include nonzero seasonal periodicity if"
" including seasonal AR, MA, or differencing."
)
# Basic order
self.order = order
self.ar_order, self.diff, self.ma_order = order
self.seasonal_order = seasonal_order
(
self.seasonal_ar_order,
self.seasonal_diff,
self.seasonal_ma_order,
self.seasonal_periods,
) = seasonal_order
# Lists of included lags
if isinstance(self.ar_order, list):
self.ar_lags = self.ar_order
else:
self.ar_lags = np.arange(1, self.ar_order + 1).tolist()
if isinstance(self.ma_order, list):
self.ma_lags = self.ma_order
else:
self.ma_lags = np.arange(1, self.ma_order + 1).tolist()
if isinstance(self.seasonal_ar_order, list):
self.seasonal_ar_lags = self.seasonal_ar_order
else:
self.seasonal_ar_lags = np.arange(1, self.seasonal_ar_order + 1).tolist()
if isinstance(self.seasonal_ma_order, list):
self.seasonal_ma_lags = self.seasonal_ma_order
else:
self.seasonal_ma_lags = np.arange(1, self.seasonal_ma_order + 1).tolist()
# Maximum lag orders
self.max_ar_order = self.ar_lags[-1] if self.ar_lags else 0
self.max_ma_order = self.ma_lags[-1] if self.ma_lags else 0
self.max_seasonal_ar_order = (
self.seasonal_ar_lags[-1] if self.seasonal_ar_lags else 0
)
self.max_seasonal_ma_order = (
self.seasonal_ma_lags[-1] if self.seasonal_ma_lags else 0
)
self.max_reduced_ar_order = (
self.max_ar_order + self.max_seasonal_ar_order * self.seasonal_periods
)
self.max_reduced_ma_order = (
self.max_ma_order + self.max_seasonal_ma_order * self.seasonal_periods
)
# Check that we don't have duplicate AR or MA lags from the seasonal
# component
ar_lags = set(self.ar_lags)
seasonal_ar_lags = set(np.array(self.seasonal_ar_lags) * self.seasonal_periods)
duplicate_ar_lags = ar_lags.intersection(seasonal_ar_lags)
if len(duplicate_ar_lags) > 0:
raise ValueError(
"Invalid model: autoregressive lag(s) %s are"
" in both the seasonal and non-seasonal"
" autoregressive components." % duplicate_ar_lags
)
ma_lags = set(self.ma_lags)
seasonal_ma_lags = set(np.array(self.seasonal_ma_lags) * self.seasonal_periods)
duplicate_ma_lags = ma_lags.intersection(seasonal_ma_lags)
if len(duplicate_ma_lags) > 0:
raise ValueError(
"Invalid model: moving average lag(s) %s are"
" in both the seasonal and non-seasonal"
" moving average components." % duplicate_ma_lags
)
# Handle trend
self.trend_poly, _ = prepare_trend_spec(trend)
# This contains the included exponents of the trend polynomial,
# where e.g. the constant term has exponent 0, a linear trend has
# exponent 1, etc.
self.trend_terms = np.where(self.trend_poly == 1)[0]
# Trend order is either the degree of the trend polynomial, if all
# exponents are included, or a list of included exponents. Here we need
# to make a distinction between a degree zero polynomial (i.e. a
# constant) and the zero polynomial (i.e. not even a constant). The
# former has `trend_order = 0`, while the latter has
# `trend_order = None`.
if len(self.trend_terms) == 0:
self.trend_order = None
self.trend_degree = None
elif np.all(self.trend_terms == np.arange(len(self.trend_terms))):
self.trend_order = self.trend_terms[-1]
self.trend_degree = self.trend_terms[-1]
else:
self.trend_order = self.trend_terms
self.trend_degree = self.trend_terms[-1]
# Handle endog / exog
# Standardize exog
_, exog = prepare_exog(exog)
# Standardize endog (including creating a faux endog if necessary)
faux_endog = endog is None
if endog is None:
endog = [] if exog is None else np.zeros(len(exog)) * np.nan
# Add trend data into exog
nobs = len(endog) if exog is None else len(exog)
if self.trend_order is not None:
trend_data = self.construct_trend_data(nobs)
if exog is None:
exog = trend_data
elif _is_using_pandas(exog, None):
trend_data = pd.DataFrame(
trend_data, index=exog.index, columns=self.construct_trend_names()
)
exog = pd.concat([trend_data, exog], axis=1)
else:
exog = np.c_[trend_data, exog]
# Create an underlying time series model, to handle endog / exog,
# especially validating shapes, retrieving names, and potentially
# providing us with a time series index
self._model = TimeSeriesModel(
endog, exog=exog, dates=dates, freq=freq, missing=missing
)
self.endog = None if faux_endog else self._model.endog
self.exog = self._model.exog
# Validate endog shape
if not faux_endog and self.endog.ndim > 1 and self.endog.shape[1] > 1:
raise ValueError(
"SARIMAX models require univariate `endog`. Got"
" shape %s." % str(self.endog.shape)
)
self._has_missing = None if faux_endog else np.any(np.isnan(self.endog))
|
def __init__(
self,
endog=None,
exog=None,
order=None,
seasonal_order=None,
ar_order=None,
diff=None,
ma_order=None,
seasonal_ar_order=None,
seasonal_diff=None,
seasonal_ma_order=None,
seasonal_periods=None,
trend=None,
enforce_stationarity=None,
enforce_invertibility=None,
concentrate_scale=None,
dates=None,
freq=None,
missing="none",
):
# Basic parameters
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.concentrate_scale = concentrate_scale
# Validate that we were not given conflicting specifications
has_order = order is not None
has_specific_order = (
ar_order is not None or diff is not None or ma_order is not None
)
has_seasonal_order = seasonal_order is not None
has_specific_seasonal_order = (
seasonal_ar_order is not None
or seasonal_diff is not None
or seasonal_ma_order is not None
or seasonal_periods is not None
)
if has_order and has_specific_order:
raise ValueError(
"Cannot specify both `order` and either of `ar_order` or `ma_order`."
)
if has_seasonal_order and has_specific_seasonal_order:
raise ValueError(
"Cannot specify both `seasonal_order` and any of"
" `seasonal_ar_order`, `seasonal_ma_order`,"
" or `seasonal_periods`."
)
# Compute `order`
if has_specific_order:
ar_order = 0 if ar_order is None else ar_order
diff = 0 if diff is None else diff
ma_order = 0 if ma_order is None else ma_order
order = (ar_order, diff, ma_order)
elif not has_order:
order = (0, 0, 0)
# Compute `seasonal_order`
if has_specific_seasonal_order:
seasonal_ar_order = 0 if seasonal_ar_order is None else seasonal_ar_order
seasonal_diff = 0 if seasonal_diff is None else seasonal_diff
seasonal_ma_order = 0 if seasonal_ma_order is None else seasonal_ma_order
seasonal_periods = 0 if seasonal_periods is None else seasonal_periods
seasonal_order = (
seasonal_ar_order,
seasonal_diff,
seasonal_ma_order,
seasonal_periods,
)
elif not has_seasonal_order:
seasonal_order = (0, 0, 0, 0)
# Validate differencing parameters
if order[1] < 0:
raise ValueError("Cannot specify negative differencing.")
if order[1] != int(order[1]):
raise ValueError("Cannot specify fractional differencing.")
if seasonal_order[1] < 0:
raise ValueError("Cannot specify negative seasonal differencing.")
if seasonal_order[1] != int(seasonal_order[1]):
raise ValueError("Cannot specify fractional seasonal differencing.")
if seasonal_order[3] < 0:
raise ValueError("Cannot specify negative seasonal periodicity.")
# Standardize to integers or lists of integers
order = (
standardize_lag_order(order[0], "AR"),
int(order[1]),
standardize_lag_order(order[2], "MA"),
)
seasonal_order = (
standardize_lag_order(seasonal_order[0], "seasonal AR"),
int(seasonal_order[1]),
standardize_lag_order(seasonal_order[2], "seasonal MA"),
int(seasonal_order[3]),
)
# Validate seasonals
if (
seasonal_order[0] != 0 or seasonal_order[1] != 0 or seasonal_order[2] != 0
) and seasonal_order[3] == 0:
raise ValueError(
"Must include nonzero seasonal periodicity if"
" including seasonal AR, MA, or differencing."
)
# Basic order
self.order = order
self.ar_order, self.diff, self.ma_order = order
self.seasonal_order = seasonal_order
(
self.seasonal_ar_order,
self.seasonal_diff,
self.seasonal_ma_order,
self.seasonal_periods,
) = seasonal_order
# Lists of included lags
if isinstance(self.ar_order, list):
self.ar_lags = self.ar_order
else:
self.ar_lags = np.arange(1, self.ar_order + 1).tolist()
if isinstance(self.ma_order, list):
self.ma_lags = self.ma_order
else:
self.ma_lags = np.arange(1, self.ma_order + 1).tolist()
if isinstance(self.seasonal_ar_order, list):
self.seasonal_ar_lags = self.seasonal_ar_order
else:
self.seasonal_ar_lags = np.arange(1, self.seasonal_ar_order + 1).tolist()
if isinstance(self.seasonal_ma_order, list):
self.seasonal_ma_lags = self.seasonal_ma_order
else:
self.seasonal_ma_lags = np.arange(1, self.seasonal_ma_order + 1).tolist()
# Maximum lag orders
self.max_ar_order = self.ar_lags[-1] if self.ar_lags else 0
self.max_ma_order = self.ma_lags[-1] if self.ma_lags else 0
self.max_seasonal_ar_order = (
self.seasonal_ar_lags[-1] if self.seasonal_ar_lags else 0
)
self.max_seasonal_ma_order = (
self.seasonal_ma_lags[-1] if self.seasonal_ma_lags else 0
)
self.max_reduced_ar_order = (
self.max_ar_order + self.max_seasonal_ar_order * self.seasonal_periods
)
self.max_reduced_ma_order = (
self.max_ma_order + self.max_seasonal_ma_order * self.seasonal_periods
)
# Check that we don't have duplicate AR or MA lags from the seasonal
# component
ar_lags = set(self.ar_lags)
seasonal_ar_lags = set(np.array(self.seasonal_ar_lags) * self.seasonal_periods)
duplicate_ar_lags = ar_lags.intersection(seasonal_ar_lags)
if len(duplicate_ar_lags) > 0:
raise ValueError(
"Invalid model: autoregressive lag(s) %s are"
" in both the seasonal and non-seasonal"
" autoregressive components." % duplicate_ar_lags
)
ma_lags = set(self.ma_lags)
seasonal_ma_lags = set(np.array(self.seasonal_ma_lags) * self.seasonal_periods)
duplicate_ma_lags = ma_lags.intersection(seasonal_ma_lags)
if len(duplicate_ma_lags) > 0:
raise ValueError(
"Invalid model: moving average lag(s) %s are"
" in both the seasonal and non-seasonal"
" moving average components." % duplicate_ma_lags
)
# Handle trend
self.trend_poly, _ = prepare_trend_spec(trend)
# This contains the included exponents of the trend polynomial,
# where e.g. the constant term has exponent 0, a linear trend has
# exponent 1, etc.
self.trend_terms = np.where(self.trend_poly == 1)[0]
# Trend order is either the degree of the trend polynomial, if all
# exponents are included, or a list of included exponents. Here we need
# to make a distinction between a degree zero polynomial (i.e. a
# constant) and the zero polynomial (i.e. not even a constant). The
# former has `trend_order = 0`, while the latter has
# `trend_order = None`.
if len(self.trend_terms) == 0:
self.trend_order = None
self.trend_degree = None
elif np.all(self.trend_terms == np.arange(len(self.trend_terms))):
self.trend_order = self.trend_terms[-1]
self.trend_degree = self.trend_terms[-1]
else:
self.trend_order = self.trend_terms
self.trend_degree = self.trend_terms[-1]
# Handle endog / exog
# Standardize exog
_, exog = prepare_exog(exog)
# Standardize endog (including creating a faux endog if necessary)
faux_endog = endog is None
if endog is None:
endog = [] if exog is None else np.zeros(len(exog)) * np.nan
# Add trend data into exog
nobs = len(endog) if exog is None else len(exog)
if self.trend_order is not None:
trend_data = self.construct_trend_data(nobs)
if exog is None:
exog = trend_data
elif _is_using_pandas(exog, None):
trend_data = pd.DataFrame(
trend_data, index=exog.index, columns=self.construct_trend_names()
)
exog = pd.concat([trend_data, exog], axis=1)
else:
exog = np.c_[trend_data, exog]
# Create an underlying time series model, to handle endog / exog,
# especially validating shapes, retrieving names, and potentially
# providing us with a time series index
self._model = TimeSeriesModel(
endog, exog=exog, dates=dates, freq=freq, missing=missing
)
self.endog = None if faux_endog else self._model.endog
self.exog = self._model.exog
self._has_missing = None if faux_endog else np.any(np.isnan(self.endog))
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
def standardize_lag_order(order, title=None):
"""
Standardize lag order input.
Parameters
----------
order : int or array_like
Maximum lag order (if integer) or iterable of specific lag orders.
title : str, optional
Description of the order (e.g. "autoregressive") to use in error
messages.
Returns
-------
order : int or list of int
Maximum lag order if consecutive lag orders were specified, otherwise
a list of integer lag orders.
Notes
-----
It is ambiguous if order=[1] is meant to be a boolean list or
a list of lag orders to include, but this is irrelevant because either
interpretation gives the same result.
Order=[0] would be ambiguous, except that 0 is not a valid lag
order to include, so there is no harm in interpreting as a boolean
list, in which case it is the same as order=0, which seems like
reasonable behavior.
Examples
--------
>>> standardize_lag_order(3)
3
>>> standardize_lag_order(np.arange(1, 4))
3
>>> standardize_lag_order([1, 3])
[1, 3]
"""
order = np.array(order)
title = "order" if title is None else "%s order" % title
# Only integer orders are valid
if not np.all(order == order.astype(int)):
raise ValueError("Invalid %s. Non-integer order (%s) given." % (title, order))
order = order.astype(int)
# Only positive integers are valid
if np.any(order < 0):
raise ValueError("Terms in the %s cannot be negative." % title)
# Try to squeeze out an irrelevant trailing dimension
if order.ndim == 2 and order.shape[1] == 1:
order = order[:, 0]
elif order.ndim > 1:
raise ValueError(
"Invalid %s. Must be an integer or"
" 1-dimensional array-like object (e.g. list,"
" ndarray, etc.). Got %s." % (title, order)
)
# Option 1: the typical integer response (implies including all
# lags up through and including the value)
if order.ndim == 0:
order = order.item()
elif len(order) == 0:
order = 0
else:
# Option 2: boolean list
has_zeros = 0 in order
has_multiple_ones = np.sum(order == 1) > 1
has_gt_one = np.any(order > 1)
if has_zeros or has_multiple_ones:
if has_gt_one:
raise ValueError(
"Invalid %s. Appears to be a boolean list"
" (since it contains a 0 element and/or"
" multiple elements) but also contains"
" elements greater than 1 like a list of"
" lag orders." % title
)
order = np.where(order == 1)[0] + 1
# (Default) Option 3: list of lag orders to include
else:
order = np.sort(order)
# If we have an empty list, set order to zero
if len(order) == 0:
order = 0
# If we actually were given consecutive lag orders, just use integer
elif np.all(order == np.arange(1, len(order) + 1)):
order = order[-1]
# Otherwise, convert to list
else:
order = order.tolist()
# Check for duplicates
has_duplicate = isinstance(order, list) and np.any(np.diff(order) == 0)
if has_duplicate:
raise ValueError("Invalid %s. Cannot have duplicate elements." % title)
return order
|
def standardize_lag_order(order, title=None):
"""
Standardize lag order input.
Parameters
----------
order : int or array_like
Maximum lag order (if integer) or iterable of specific lag orders.
title : str, optional
Description of the order (e.g. "autoregressive") to use in error
messages.
Returns
-------
order : int or list of int
Maximum lag order if consecutive lag orders were specified, otherwise
a list of integer lag orders.
Notes
-----
It is ambiguous if order=[1] is meant to be a boolean list or
a list of lag orders to include, but this is irrelevant because either
interpretation gives the same result.
Order=[0] would be ambiguous, except that 0 is not a valid lag
order to include, so there is no harm in interpreting as a boolean
list, in which case it is the same as order=0, which seems like
reasonable behavior.
Examples
--------
>>> standardize_lag_order(3)
3
>>> standardize_lag_order(np.arange(1, 4))
3
>>> standardize_lag_order([1, 3])
[1, 3]
"""
order = np.array(order)
title = "order" if title is None else "%s order" % title
# Only integer orders are valid
if not np.all(order == order.astype(int)):
raise ValueError("Invalid %s. Non-integer order (%s) given." % (title, order))
order = order.astype(int)
# Only positive integers are valid
if np.any(order < 0):
raise ValueError("Terms in the %s cannot be negative.")
# Try to squeeze out an irrelevant trailing dimension
if order.ndim == 2 and order.shape[1] == 1:
order = order[:, 0]
elif order.ndim > 1:
raise ValueError(
"Invalid %s. Must be an integer or"
" 1-dimensional array-like object (e.g. list,"
" ndarray, etc.). Got %s." % (title, order)
)
# Option 1: the typical integer response (implies including all
# lags up through and including the value)
if order.ndim == 0:
order = order.item()
elif len(order) == 0:
order = 0
else:
# Option 2: boolean list
if 0 in order or np.sum(order == 1) > 1 and not np.any(order > 1):
order = np.where(order == 1)[0] + 1
# (Default) Option 3: list of lag orders to include
else:
order = np.sort(order)
# If we have an empty list, set order to zero
if len(order) == 0:
order = 0
# If we actually were given consecutive lag orders, just use integer
elif np.all(order == np.arange(1, len(order) + 1)):
order = order[-1]
# Otherwise, convert to list
else:
order = order.tolist()
return order
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
def __init__(
self,
endog,
exog=None,
order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0),
trend=None,
measurement_error=False,
time_varying_regression=False,
mle_regression=True,
simple_differencing=False,
enforce_stationarity=True,
enforce_invertibility=True,
hamilton_representation=False,
concentrate_scale=False,
trend_offset=1,
use_exact_diffuse=False,
dates=None,
freq=None,
missing="none",
**kwargs,
):
self._spec = SARIMAXSpecification(
endog,
exog=exog,
order=order,
seasonal_order=seasonal_order,
trend=trend,
enforce_stationarity=None,
enforce_invertibility=None,
concentrate_scale=concentrate_scale,
dates=dates,
freq=freq,
missing=missing,
)
self._params = SARIMAXParams(self._spec)
# Save given orders
order = self._spec.order
seasonal_order = self._spec.seasonal_order
self.order = order
self.seasonal_order = seasonal_order
# Model parameters
self.seasonal_periods = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
self.concentrate_scale = concentrate_scale
self.use_exact_diffuse = use_exact_diffuse
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError(
"Models with time-varying regression coefficients"
" must integrate the coefficients as part of the"
" state vector, so that `mle_regression` must"
" be set to False."
)
# Lag polynomials
self._params.ar_params = -1
self.polynomial_ar = self._params.ar_poly.coef
self._polynomial_ar = self.polynomial_ar.copy()
self._params.ma_params = 1
self.polynomial_ma = self._params.ma_poly.coef
self._polynomial_ma = self.polynomial_ma.copy()
self._params.seasonal_ar_params = -1
self.polynomial_seasonal_ar = self._params.seasonal_ar_poly.coef
self._polynomial_seasonal_ar = self.polynomial_seasonal_ar.copy()
self._params.seasonal_ma_params = 1
self.polynomial_seasonal_ma = self._params.seasonal_ma_poly.coef
self._polynomial_seasonal_ma = self.polynomial_seasonal_ma.copy()
# Deterministic trend polynomial
self.trend = trend
self.trend_offset = trend_offset
self.polynomial_trend, self.k_trend = prepare_trend_spec(self.trend)
self._polynomial_trend = self.polynomial_trend.copy()
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
# Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and
# q = k_ma_params = k_ma - 1, although this may not be true for models
# with arbitrary log polynomials.
self.k_ar = self._spec.max_ar_order
self.k_ar_params = self._spec.k_ar_params
self.k_diff = int(order[1])
self.k_ma = self._spec.max_ma_order
self.k_ma_params = self._spec.k_ma_params
self.k_seasonal_ar = self._spec.max_seasonal_ar_order * self._spec.seasonal_periods
self.k_seasonal_ar_params = self._spec.k_seasonal_ar_params
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = self._spec.max_seasonal_ma_order * self._spec.seasonal_periods
self.k_seasonal_ma_params = self._spec.k_seasonal_ma_params
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if self.hamilton_representation and not (
self.simple_differencing or self._k_diff == self._k_seasonal_diff == 0
):
raise ValueError(
"The Hamilton representation is only available"
" for models in which there is no differencing"
" integrated into the state vector. Set"
" `simple_differencing` to True or set"
" `hamilton_representation` to False"
)
# Model order
# (this is used internally in a number of locations)
self._k_order = max(
self.k_ar + self.k_seasonal_ar, self.k_ma + self.k_seasonal_ma + 1
)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
# Handle time-varying regression
if self.time_varying_regression:
self._k_order = 0
# Exogenous data
(self.k_exog, exog) = prepare_exog(exog)
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = self.mle_regression and exog is not None and self.k_exog > 0
# State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.seasonal_periods * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
# Diffuse initialization can be more sensistive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault("initial_variance", 1e10)
# Handle non-default loglikelihood burn
self._loglikelihood_burn = kwargs.get("loglikelihood_burn", None)
# Number of parameters
self.k_params = (
self.k_ar_params
+ self.k_ma_params
+ self.k_seasonal_ar_params
+ self.k_seasonal_ma_params
+ self.k_trend
+ self.measurement_error
+ int(not self.concentrate_scale)
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if self.simple_differencing and (self._k_diff > 0 or self._k_seasonal_diff > 0):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = self._k_diff + self.seasonal_periods * self._k_seasonal_diff
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Set the filter to concentrate out the scale if requested
if self.concentrate_scale:
self.ssm.filter_concentrated = True
# Set as time-varying model if we have time-trend or exog
if self.k_exog > 0 or len(self.polynomial_trend) > 1:
self.ssm._time_invariant = False
# Initialize the fixed components of the statespace model
self.ssm["design"] = self.initial_design
self.ssm["state_intercept"] = self.initial_state_intercept
self.ssm["transition"] = self.initial_transition
self.ssm["selection"] = self.initial_selection
if self.concentrate_scale:
self.ssm["state_cov", 0, 0] = 1.0
# update _init_keys attached by super
self._init_keys += [
"order",
"seasonal_order",
"trend",
"measurement_error",
"time_varying_regression",
"mle_regression",
"simple_differencing",
"enforce_stationarity",
"enforce_invertibility",
"hamilton_representation",
"concentrate_scale",
"trend_offset",
] + list(kwargs.keys())
# TODO: I think the kwargs or not attached, need to recover from ???
# Initialize the state
if self.ssm.initialization is None:
self.initialize_default()
|
def __init__(
self,
endog,
exog=None,
order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0),
trend=None,
measurement_error=False,
time_varying_regression=False,
mle_regression=True,
simple_differencing=False,
enforce_stationarity=True,
enforce_invertibility=True,
hamilton_representation=False,
concentrate_scale=False,
trend_offset=1,
use_exact_diffuse=False,
**kwargs,
):
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Validate orders
if len(self.order) != 3:
raise ValueError("`order` argument must be an iterable with three elements.")
if len(self.seasonal_order) != 4:
raise ValueError(
"`seasonal_order` argument must be an iterable with four elements."
)
# Model parameters
self.seasonal_periods = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
self.concentrate_scale = concentrate_scale
self.use_exact_diffuse = use_exact_diffuse
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError(
"Models with time-varying regression coefficients"
" must integrate the coefficients as part of the"
" state vector, so that `mle_regression` must"
" be set to False."
)
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], (int, np.integer)):
self.polynomial_ar = np.r_[1.0, np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1.0, order[0]]
self._polynomial_ar = self.polynomial_ar.copy()
if isinstance(order[2], (int, np.integer)):
self.polynomial_ma = np.r_[1.0, np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1.0, order[2]]
self._polynomial_ma = self.polynomial_ma.copy()
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], (int, np.integer)):
self.polynomial_seasonal_ar = np.r_[
1.0, # constant
([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[0],
]
else:
self.polynomial_seasonal_ar = np.r_[
1.0, [0] * self.seasonal_periods * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
tmp = (i + 1) * self.seasonal_periods
self.polynomial_seasonal_ar[tmp] = seasonal_order[0][i]
self._polynomial_seasonal_ar = self.polynomial_seasonal_ar.copy()
if isinstance(seasonal_order[2], (int, np.integer)):
self.polynomial_seasonal_ma = np.r_[
1.0, # constant
([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[2],
]
else:
self.polynomial_seasonal_ma = np.r_[
1.0, [0] * self.seasonal_periods * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
tmp = (i + 1) * self.seasonal_periods
self.polynomial_seasonal_ma[tmp] = seasonal_order[2][i]
self._polynomial_seasonal_ma = self.polynomial_seasonal_ma.copy()
# Deterministic trend polynomial
self.trend = trend
self.trend_offset = trend_offset
self.polynomial_trend, self.k_trend = prepare_trend_spec(self.trend)
self._polynomial_trend = self.polynomial_trend.copy()
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
# Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and
# q = k_ma_params = k_ma - 1, although this may not be true for models
# with arbitrary log polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = int(np.sum(self.polynomial_seasonal_ar) - 1)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = int(np.sum(self.polynomial_seasonal_ma) - 1)
# Make sure we don't have a seasonal specification without a valid
# seasonal period.
if self.seasonal_order[3] == 1:
raise ValueError("Seasonal period must be greater than 1.")
if self.seasonal_order[3] == 0 and (
self.k_seasonal_ar > 0 or self.k_seasonal_ma > 0
):
raise ValueError(
"Seasonal AR or MA components cannot be set when"
" the given seasonal period is equal to 0."
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if self.hamilton_representation and not (
self.simple_differencing or self._k_diff == self._k_seasonal_diff == 0
):
raise ValueError(
"The Hamilton representation is only available"
" for models in which there is no differencing"
" integrated into the state vector. Set"
" `simple_differencing` to True or set"
" `hamilton_representation` to False"
)
# Model order
# (this is used internally in a number of locations)
self._k_order = max(
self.k_ar + self.k_seasonal_ar, self.k_ma + self.k_seasonal_ma + 1
)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
# Handle time-varying regression
if self.time_varying_regression:
self._k_order = 0
# Exogenous data
(self.k_exog, exog) = prepare_exog(exog)
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = self.mle_regression and exog is not None and self.k_exog > 0
# State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.seasonal_periods * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
# Diffuse initialization can be more sensistive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault("initial_variance", 1e10)
# Handle non-default loglikelihood burn
self._loglikelihood_burn = kwargs.get("loglikelihood_burn", None)
# Number of parameters
self.k_params = (
self.k_ar_params
+ self.k_ma_params
+ self.k_seasonal_ar_params
+ self.k_seasonal_ma_params
+ self.k_trend
+ self.measurement_error
+ int(not self.concentrate_scale)
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if self.simple_differencing and (self._k_diff > 0 or self._k_seasonal_diff > 0):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = self._k_diff + self.seasonal_periods * self._k_seasonal_diff
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Set the filter to concentrate out the scale if requested
if self.concentrate_scale:
self.ssm.filter_concentrated = True
# Set as time-varying model if we have time-trend or exog
if self.k_exog > 0 or len(self.polynomial_trend) > 1:
self.ssm._time_invariant = False
# Initialize the fixed components of the statespace model
self.ssm["design"] = self.initial_design
self.ssm["state_intercept"] = self.initial_state_intercept
self.ssm["transition"] = self.initial_transition
self.ssm["selection"] = self.initial_selection
if self.concentrate_scale:
self.ssm["state_cov", 0, 0] = 1.0
# update _init_keys attached by super
self._init_keys += [
"order",
"seasonal_order",
"trend",
"measurement_error",
"time_varying_regression",
"mle_regression",
"simple_differencing",
"enforce_stationarity",
"enforce_invertibility",
"hamilton_representation",
"concentrate_scale",
"trend_offset",
] + list(kwargs.keys())
# TODO: I think the kwargs or not attached, need to recover from ???
# Initialize the state
if self.ssm.initialization is None:
self.initialize_default()
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.