code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _update_uncompressed(collection_name, upsert, multi, spec,
                         doc, safe, last_error_args, check_keys, opts):
    """Internal update message helper.

    Builds an OP_UPDATE wire-protocol message and returns a tuple
    ``(request_id, message_bytes, max_bson_size)``.
    """
    # Encode the update body; also reports the server's max BSON document size.
    op_update, max_bson_size = _update(
        collection_name, upsert, multi, spec, doc, check_keys, opts)
    # 2001 is the wire-protocol opcode for OP_UPDATE.
    rid, msg = __pack_message(2001, op_update)
    if safe:
        # Piggy-back a getLastError request so the write is acknowledged;
        # the returned request id is the GLE's, which the caller waits on.
        rid, gle, _ = __last_error(collection_name, last_error_args)
        return rid, msg + gle, max_bson_size
    return rid, msg, max_bson_size | Internal update message helper. | Below is the the instruction that describes the task:
### Input:
Internal update message helper.
### Response:
def _update_uncompressed(collection_name, upsert, multi, spec,
                         doc, safe, last_error_args, check_keys, opts):
    """Internal update message helper.

    Builds an OP_UPDATE wire-protocol message and returns a tuple
    ``(request_id, message_bytes, max_bson_size)``.
    """
    # Encode the update body; also reports the server's max BSON document size.
    op_update, max_bson_size = _update(
        collection_name, upsert, multi, spec, doc, check_keys, opts)
    # 2001 is the wire-protocol opcode for OP_UPDATE.
    rid, msg = __pack_message(2001, op_update)
    if safe:
        # Piggy-back a getLastError request so the write is acknowledged;
        # the returned request id is the GLE's, which the caller waits on.
        rid, gle, _ = __last_error(collection_name, last_error_args)
        return rid, msg + gle, max_bson_size
    return rid, msg, max_bson_size |
def should_trigger(self, dt):
    """
    Composes the two rules with a lazy composer.
    """
    # Pass the two sub-rules' ``should_trigger`` methods *uncalled* so the
    # composer can evaluate them lazily (e.g. short-circuit after the first).
    return self.composer(
        self.first.should_trigger,
        self.second.should_trigger,
        dt
    ) | Composes the two rules with a lazy composer. | Below is the the instruction that describes the task:
### Input:
Composes the two rules with a lazy composer.
### Response:
def should_trigger(self, dt):
    """
    Composes the two rules with a lazy composer.
    """
    # Pass the two sub-rules' ``should_trigger`` methods *uncalled* so the
    # composer can evaluate them lazily (e.g. short-circuit after the first).
    return self.composer(
        self.first.should_trigger,
        self.second.should_trigger,
        dt
    ) |
def matern_function(Xi, Xj, *args):
    r"""Matern covariance function of arbitrary dimension, for use with :py:class:`ArbitraryKernel`.
    The Matern kernel has the following hyperparameters, always referenced in
    the order listed:
    = ===== ====================================
    0 sigma prefactor
    1 nu order of kernel
    2 l1 length scale for the first dimension
    3 l2 ...and so on for all dimensions
    = ===== ====================================
    The kernel is defined as:
    .. math::
        k_M = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)}
        \left (\sqrt{2\nu \sum_i\left (\frac{\tau_i^2}{l_i^2}\right )}\right )^\nu
        K_\nu\left(\sqrt{2\nu \sum_i\left(\frac{\tau_i^2}{l_i^2}\right)}\right)
    Parameters
    ----------
    Xi, Xj : :py:class:`Array`, :py:class:`mpf`, tuple or scalar float
        Points to evaluate the covariance between. If they are :py:class:`Array`,
        :py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
        functions are used.
    *args
        Remaining arguments are the 2+num_dim hyperparameters as defined above.
    """
    num_dim = len(args) - 2  # args = (sigma, nu, l_1, ..., l_num_dim)
    nu = args[1]
    # Vectorized branch: array inputs go through scipy/numpy routines.
    # NOTE(review): scipy.ndarray/scipy.matrix/scipy.asarray/scipy.tile/etc.
    # are numpy re-exports that were removed from modern SciPy releases --
    # confirm the pinned SciPy version still provides them.
    if isinstance(Xi, scipy.ndarray):
        if isinstance(Xi, scipy.matrix):
            # Matrices break elementwise broadcasting; demote to plain arrays.
            Xi = scipy.asarray(Xi, dtype=float)
            Xj = scipy.asarray(Xj, dtype=float)
        tau = scipy.asarray(Xi - Xj, dtype=float)
        # One length scale per dimension, tiled across all evaluation points.
        l_mat = scipy.tile(args[-num_dim:], (tau.shape[0], 1))
        r2l2 = scipy.sum((tau / l_mat)**2, axis=1)  # sum_i tau_i^2 / l_i^2
        y = scipy.sqrt(2.0 * nu * r2l2)
        k = 2.0**(1 - nu) / scipy.special.gamma(nu) * y**nu * scipy.special.kv(nu, y)
        # kv(nu, 0) diverges, making k NaN at zero separation; the analytic
        # limit of the Matern kernel there is 1, so patch those entries.
        k[r2l2 == 0] = 1
    else:
        # Scalar/tuple branch: use arbitrary-precision mpmath routines.
        try:
            tau = [xi - xj for xi, xj in zip(Xi, Xj)]
        except TypeError:
            # Xi, Xj are scalars, not iterables.
            tau = Xi - Xj
        try:
            r2l2 = sum([(t / l)**2 for t, l in zip(tau, args[2:])])
        except TypeError:
            # Single-dimension scalar case.
            r2l2 = (tau / args[2])**2
        y = mpmath.sqrt(2.0 * nu * r2l2)
        # NOTE(review): unlike the array branch, r2l2 == 0 is not special-cased
        # here, so zero separation diverges -- confirm callers avoid it.
        k = 2.0**(1 - nu) / mpmath.gamma(nu) * y**nu * mpmath.besselk(nu, y)
    k *= args[0]**2.0  # apply the sigma^2 prefactor
    return k | r"""Matern covariance function of arbitrary dimension, for use with :py:class:`ArbitraryKernel`.
The Matern kernel has the following hyperparameters, always referenced in
the order listed:
= ===== ====================================
0 sigma prefactor
1 nu order of kernel
2 l1 length scale for the first dimension
3 l2 ...and so on for all dimensions
= ===== ====================================
The kernel is defined as:
.. math::
k_M = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)}
\left (\sqrt{2\nu \sum_i\left (\frac{\tau_i^2}{l_i^2}\right )}\right )^\nu
K_\nu\left(\sqrt{2\nu \sum_i\left(\frac{\tau_i^2}{l_i^2}\right)}\right)
Parameters
----------
Xi, Xj : :py:class:`Array`, :py:class:`mpf`, tuple or scalar float
Points to evaluate the covariance between. If they are :py:class:`Array`,
:py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
functions are used.
*args
        Remaining arguments are the 2+num_dim hyperparameters as defined above. | Below is the instruction that describes the task:
### Input:
r"""Matern covariance function of arbitrary dimension, for use with :py:class:`ArbitraryKernel`.
The Matern kernel has the following hyperparameters, always referenced in
the order listed:
= ===== ====================================
0 sigma prefactor
1 nu order of kernel
2 l1 length scale for the first dimension
3 l2 ...and so on for all dimensions
= ===== ====================================
The kernel is defined as:
.. math::
k_M = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)}
\left (\sqrt{2\nu \sum_i\left (\frac{\tau_i^2}{l_i^2}\right )}\right )^\nu
K_\nu\left(\sqrt{2\nu \sum_i\left(\frac{\tau_i^2}{l_i^2}\right)}\right)
Parameters
----------
Xi, Xj : :py:class:`Array`, :py:class:`mpf`, tuple or scalar float
Points to evaluate the covariance between. If they are :py:class:`Array`,
:py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
functions are used.
*args
Remaining arguments are the 2+num_dim hyperparameters as defined above.
### Response:
def matern_function(Xi, Xj, *args):
    r"""Matern covariance function of arbitrary dimension, for use with :py:class:`ArbitraryKernel`.
    The Matern kernel has the following hyperparameters, always referenced in
    the order listed:
    = ===== ====================================
    0 sigma prefactor
    1 nu order of kernel
    2 l1 length scale for the first dimension
    3 l2 ...and so on for all dimensions
    = ===== ====================================
    The kernel is defined as:
    .. math::
        k_M = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)}
        \left (\sqrt{2\nu \sum_i\left (\frac{\tau_i^2}{l_i^2}\right )}\right )^\nu
        K_\nu\left(\sqrt{2\nu \sum_i\left(\frac{\tau_i^2}{l_i^2}\right)}\right)
    Parameters
    ----------
    Xi, Xj : :py:class:`Array`, :py:class:`mpf`, tuple or scalar float
        Points to evaluate the covariance between. If they are :py:class:`Array`,
        :py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
        functions are used.
    *args
        Remaining arguments are the 2+num_dim hyperparameters as defined above.
    """
    num_dim = len(args) - 2  # args = (sigma, nu, l_1, ..., l_num_dim)
    nu = args[1]
    # Vectorized branch: array inputs go through scipy/numpy routines.
    # NOTE(review): scipy.ndarray/scipy.matrix/scipy.asarray/scipy.tile/etc.
    # are numpy re-exports that were removed from modern SciPy releases --
    # confirm the pinned SciPy version still provides them.
    if isinstance(Xi, scipy.ndarray):
        if isinstance(Xi, scipy.matrix):
            # Matrices break elementwise broadcasting; demote to plain arrays.
            Xi = scipy.asarray(Xi, dtype=float)
            Xj = scipy.asarray(Xj, dtype=float)
        tau = scipy.asarray(Xi - Xj, dtype=float)
        # One length scale per dimension, tiled across all evaluation points.
        l_mat = scipy.tile(args[-num_dim:], (tau.shape[0], 1))
        r2l2 = scipy.sum((tau / l_mat)**2, axis=1)  # sum_i tau_i^2 / l_i^2
        y = scipy.sqrt(2.0 * nu * r2l2)
        k = 2.0**(1 - nu) / scipy.special.gamma(nu) * y**nu * scipy.special.kv(nu, y)
        # kv(nu, 0) diverges, making k NaN at zero separation; the analytic
        # limit of the Matern kernel there is 1, so patch those entries.
        k[r2l2 == 0] = 1
    else:
        # Scalar/tuple branch: use arbitrary-precision mpmath routines.
        try:
            tau = [xi - xj for xi, xj in zip(Xi, Xj)]
        except TypeError:
            # Xi, Xj are scalars, not iterables.
            tau = Xi - Xj
        try:
            r2l2 = sum([(t / l)**2 for t, l in zip(tau, args[2:])])
        except TypeError:
            # Single-dimension scalar case.
            r2l2 = (tau / args[2])**2
        y = mpmath.sqrt(2.0 * nu * r2l2)
        # NOTE(review): unlike the array branch, r2l2 == 0 is not special-cased
        # here, so zero separation diverges -- confirm callers avoid it.
        k = 2.0**(1 - nu) / mpmath.gamma(nu) * y**nu * mpmath.besselk(nu, y)
    k *= args[0]**2.0  # apply the sigma^2 prefactor
    return k |
def estimate_augmented_markov_model(dtrajs, ftrajs, lag, m, sigmas,
                                    count_mode='sliding', connectivity='largest',
                                    dt_traj='1 step', maxiter=1000000, eps=0.05, maxcache=3000):
    r""" Estimates an Augmented Markov model from discrete trajectories and experimental data
    Returns a :class:`AugmentedMarkovModel` that
    contains the estimated transition matrix and allows to compute a
    large number of quantities related to Markov models.
    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
        discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    ftrajs : list of trajectories of microscopic observables. Has to have
        the same shape (number of trajectories and timesteps) as dtrajs.
        Each timestep in each trajectory should match the shape of m and sigma, k.
    lag : int
        lag time at which transitions are counted and the transition matrix is
        estimated.
    m : ndarray(k)
        Experimental averages.
    sigmas : ndarray(k)
        Standard error for each experimental observable.
    count_mode : str, optional, default='sliding'
        mode to obtain count matrices from discrete trajectories. Should be
        one of:
        * 'sliding' : A trajectory of length T will have :math:`T-\tau` counts
          at time indexes
          .. math::
             (0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)
        * 'effective' : Uses an estimate of the transition counts that are
          statistically uncorrelated. Recommended when used with a
          Bayesian MSM.
        * 'sample' : A trajectory of length T will have :math:`T/\tau` counts
          at time indexes
          .. math::
             (0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., (((T/\tau)-1) \tau \rightarrow T)
    connectivity : str, optional
        Connectivity mode. Three methods are intended (currently only
        'largest' is implemented)
        * 'largest' : The active set is the largest reversibly
          connected set. All estimation will be done on this subset
          and all quantities (transition matrix, stationary
          distribution, etc) are only defined on this subset and are
          correspondingly smaller than the full set of states
        * 'all' : The active set is the full set of states. Estimation
          will be conducted on each reversibly connected set
          separately. That means the transition matrix will decompose
          into disconnected submatrices, the stationary vector is only
          defined within subsets, etc. Currently not implemented.
        * 'none' : The active set is the full set of
          states. Estimation will be conducted on the full set of
          states without ensuring connectivity. This only permits
          nonreversible estimation. Currently not implemented.
    dt_traj : str, optional
        Description of the physical time corresponding to the lag. May
        be used by analysis algorithms such as plotting tools to
        pretty-print the axes. By default '1 step', i.e. there is no
        physical time unit. Specify by a number, whitespace and
        unit. Permitted units are (* is an arbitrary string):
        * 'fs', 'femtosecond*'
        * 'ps', 'picosecond*'
        * 'ns', 'nanosecond*'
        * 'us', 'microsecond*'
        * 'ms', 'millisecond*'
        * 's', 'second*'
    maxiter : int, optional
        Optional parameter which specifies the maximum number of
        updates for Lagrange multiplier estimation.
    eps : float, optional
        Additional convergence criterion used when some experimental data
        are outside the support of the simulation. The value of the eps
        parameter is the threshold of the relative change in the predicted
        observables as a function of fixed-point iteration:
        $$ \mathrm{eps} > \frac{\mid o_{\mathrm{pred}}^{(i+1)}-o_{\mathrm{pred}}^{(i)}\mid }{\sigma}. $$
    maxcache : int, optional
        Parameter which specifies the maximum size of cache used
        when performing estimation of AMM, in megabytes.
    Returns
    -------
    amm : :class:`AugmentedMarkovModel <pyemma.msm.AugmentedMarkovModel>`
        Estimator object containing the AMM and estimation information.
    See also
    --------
    AugmentedMarkovModel
        An AMM object that has been estimated from data
    .. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
        :members:
        :undoc-members:
        .. rubric:: Methods
        .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
            :methods:
        .. rubric:: Attributes
        .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
            :attributes:
    References
    ----------
    .. [1] Olsson S, Wu H, Paul F, Clementi C, Noe F "Combining experimental and simulation data
       of molecular processes via augmented Markov models" PNAS (2017), 114(31), pp. 8265-8270
       doi: 10.1073/pnas.1704803114
    """
    # check input
    if _np.all(sigmas>0):
        # Observable weights for the AMM likelihood: w_k = 1 / (2 sigma_k^2).
        _w = 1./(2*sigmas**2.)
    else:
        raise ValueError('Zero or negative standard errors supplied. Please revise input')
    if ftrajs[0].ndim < 2:
        raise ValueError("Supplied feature trajectories have inappropriate dimensions (%d) should be atleast 2."%ftrajs[0].ndim)
    if len(dtrajs) != len(ftrajs):
        raise ValueError("A different number of dtrajs and ftrajs were supplied as input. They must have exactly a one-to-one correspondence.")
    elif not _np.all([len(dt)==len(ft) for dt,ft in zip(dtrajs, ftrajs)]):
        raise ValueError("One or more supplied dtraj-ftraj pairs do not have the same length.")
    else:
        # MAKE E matrix
        # E[i, k] = mean of observable k over all frames assigned to the
        # i-th Markov state encountered in the concatenated trajectories.
        dta = _np.concatenate(dtrajs)
        fta = _np.concatenate(ftrajs)
        all_markov_states = set(dta)
        # NOTE(review): set iteration order is arbitrary, so the row order of
        # _E may not match sorted state indices -- verify it matches the
        # state ordering _ML_AMM expects.
        _E = _np.zeros((len(all_markov_states), fta.shape[1]))
        for i, s in enumerate(all_markov_states):
            _E[i, :] = fta[_np.where(dta == s)].mean(axis = 0)
        # transition matrix estimator
        # NOTE(review): the documented `eps` parameter is accepted but not
        # forwarded to _ML_AMM here -- confirm whether it should be.
        mlamm = _ML_AMM(lag=lag, count_mode=count_mode,
                        connectivity=connectivity,
                        dt_traj=dt_traj, maxiter=maxiter, max_cache=maxcache,
                        E=_E, w=_w, m=m)
        # estimate and return
        return mlamm.estimate(dtrajs) | r""" Estimates an Augmented Markov model from discrete trajectories and experimental data
Returns a :class:`AugmentedMarkovModel` that
contains the estimated transition matrix and allows to compute a
large number of quantities related to Markov models.
Parameters
----------
dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
discrete trajectories, stored as integer ndarrays (arbitrary size)
or a single ndarray for only one trajectory.
ftrajs : list of trajectories of microscopic observables. Has to have
the same shape (number of trajectories and timesteps) as dtrajs.
Each timestep in each trajectory should match the shape of m and sigma, k.
lag : int
lag time at which transitions are counted and the transition matrix is
estimated.
m : ndarray(k)
Experimental averages.
sigmas : ndarray(k)
Standard error for each experimental observable.
count_mode : str, optional, default='sliding'
mode to obtain count matrices from discrete trajectories. Should be
one of:
* 'sliding' : A trajectory of length T will have :math:`T-\tau` counts
at time indexes
.. math::
(0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)
* 'effective' : Uses an estimate of the transition counts that are
statistically uncorrelated. Recommended when used with a
Bayesian MSM.
* 'sample' : A trajectory of length T will have :math:`T/\tau` counts
at time indexes
.. math::
(0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., (((T/\tau)-1) \tau \rightarrow T)
connectivity : str, optional
Connectivity mode. Three methods are intended (currently only
'largest' is implemented)
* 'largest' : The active set is the largest reversibly
connected set. All estimation will be done on this subset
and all quantities (transition matrix, stationary
distribution, etc) are only defined on this subset and are
correspondingly smaller than the full set of states
* 'all' : The active set is the full set of states. Estimation
will be conducted on each reversibly connected set
separately. That means the transition matrix will decompose
into disconnected submatrices, the stationary vector is only
defined within subsets, etc. Currently not implemented.
* 'none' : The active set is the full set of
states. Estimation will be conducted on the full set of
states without ensuring connectivity. This only permits
nonreversible estimation. Currently not implemented.
dt_traj : str, optional
Description of the physical time corresponding to the lag. May
be used by analysis algorithms such as plotting tools to
pretty-print the axes. By default '1 step', i.e. there is no
physical time unit. Specify by a number, whitespace and
unit. Permitted units are (* is an arbitrary string):
* 'fs', 'femtosecond*'
* 'ps', 'picosecond*'
* 'ns', 'nanosecond*'
* 'us', 'microsecond*'
* 'ms', 'millisecond*'
* 's', 'second*'
maxiter : int, optional
        Optional parameter which specifies the maximum number of
updates for Lagrange multiplier estimation.
eps : float, optional
Additional convergence criterion used when some experimental data
are outside the support of the simulation. The value of the eps
parameter is the threshold of the relative change in the predicted
observables as a function of fixed-point iteration:
$$ \mathrm{eps} > \frac{\mid o_{\mathrm{pred}}^{(i+1)}-o_{\mathrm{pred}}^{(i)}\mid }{\sigma}. $$
maxcache : int, optional
Parameter which specifies the maximum size of cache used
when performing estimation of AMM, in megabytes.
Returns
-------
amm : :class:`AugmentedMarkovModel <pyemma.msm.AugmentedMarkovModel>`
Estimator object containing the AMM and estimation information.
See also
--------
AugmentedMarkovModel
An AMM object that has been estimated from data
.. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
:attributes:
References
----------
.. [1] Olsson S, Wu H, Paul F, Clementi C, Noe F "Combining experimental and simulation data
of molecular processes via augmented Markov models" PNAS (2017), 114(31), pp. 8265-8270
       doi: 10.1073/pnas.1704803114 | Below is the instruction that describes the task:
### Input:
r""" Estimates an Augmented Markov model from discrete trajectories and experimental data
Returns a :class:`AugmentedMarkovModel` that
contains the estimated transition matrix and allows to compute a
large number of quantities related to Markov models.
Parameters
----------
dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
discrete trajectories, stored as integer ndarrays (arbitrary size)
or a single ndarray for only one trajectory.
ftrajs : list of trajectories of microscopic observables. Has to have
the same shape (number of trajectories and timesteps) as dtrajs.
Each timestep in each trajectory should match the shape of m and sigma, k.
lag : int
lag time at which transitions are counted and the transition matrix is
estimated.
m : ndarray(k)
Experimental averages.
sigmas : ndarray(k)
Standard error for each experimental observable.
count_mode : str, optional, default='sliding'
mode to obtain count matrices from discrete trajectories. Should be
one of:
* 'sliding' : A trajectory of length T will have :math:`T-\tau` counts
at time indexes
.. math::
(0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)
* 'effective' : Uses an estimate of the transition counts that are
statistically uncorrelated. Recommended when used with a
Bayesian MSM.
* 'sample' : A trajectory of length T will have :math:`T/\tau` counts
at time indexes
.. math::
(0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., (((T/\tau)-1) \tau \rightarrow T)
connectivity : str, optional
Connectivity mode. Three methods are intended (currently only
'largest' is implemented)
* 'largest' : The active set is the largest reversibly
connected set. All estimation will be done on this subset
and all quantities (transition matrix, stationary
distribution, etc) are only defined on this subset and are
correspondingly smaller than the full set of states
* 'all' : The active set is the full set of states. Estimation
will be conducted on each reversibly connected set
separately. That means the transition matrix will decompose
into disconnected submatrices, the stationary vector is only
defined within subsets, etc. Currently not implemented.
* 'none' : The active set is the full set of
states. Estimation will be conducted on the full set of
states without ensuring connectivity. This only permits
nonreversible estimation. Currently not implemented.
dt_traj : str, optional
Description of the physical time corresponding to the lag. May
be used by analysis algorithms such as plotting tools to
pretty-print the axes. By default '1 step', i.e. there is no
physical time unit. Specify by a number, whitespace and
unit. Permitted units are (* is an arbitrary string):
* 'fs', 'femtosecond*'
* 'ps', 'picosecond*'
* 'ns', 'nanosecond*'
* 'us', 'microsecond*'
* 'ms', 'millisecond*'
* 's', 'second*'
maxiter : int, optional
        Optional parameter which specifies the maximum number of
updates for Lagrange multiplier estimation.
eps : float, optional
Additional convergence criterion used when some experimental data
are outside the support of the simulation. The value of the eps
parameter is the threshold of the relative change in the predicted
observables as a function of fixed-point iteration:
$$ \mathrm{eps} > \frac{\mid o_{\mathrm{pred}}^{(i+1)}-o_{\mathrm{pred}}^{(i)}\mid }{\sigma}. $$
maxcache : int, optional
Parameter which specifies the maximum size of cache used
when performing estimation of AMM, in megabytes.
Returns
-------
amm : :class:`AugmentedMarkovModel <pyemma.msm.AugmentedMarkovModel>`
Estimator object containing the AMM and estimation information.
See also
--------
AugmentedMarkovModel
An AMM object that has been estimated from data
.. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
:attributes:
References
----------
.. [1] Olsson S, Wu H, Paul F, Clementi C, Noe F "Combining experimental and simulation data
of molecular processes via augmented Markov models" PNAS (2017), 114(31), pp. 8265-8270
doi: 10.1073/pnas.1704803114
### Response:
def estimate_augmented_markov_model(dtrajs, ftrajs, lag, m, sigmas,
                                    count_mode='sliding', connectivity='largest',
                                    dt_traj='1 step', maxiter=1000000, eps=0.05, maxcache=3000):
    r""" Estimates an Augmented Markov model from discrete trajectories and experimental data
    Returns a :class:`AugmentedMarkovModel` that
    contains the estimated transition matrix and allows to compute a
    large number of quantities related to Markov models.
    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
        discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    ftrajs : list of trajectories of microscopic observables. Has to have
        the same shape (number of trajectories and timesteps) as dtrajs.
        Each timestep in each trajectory should match the shape of m and sigma, k.
    lag : int
        lag time at which transitions are counted and the transition matrix is
        estimated.
    m : ndarray(k)
        Experimental averages.
    sigmas : ndarray(k)
        Standard error for each experimental observable.
    count_mode : str, optional, default='sliding'
        mode to obtain count matrices from discrete trajectories. Should be
        one of:
        * 'sliding' : A trajectory of length T will have :math:`T-\tau` counts
          at time indexes
          .. math::
             (0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)
        * 'effective' : Uses an estimate of the transition counts that are
          statistically uncorrelated. Recommended when used with a
          Bayesian MSM.
        * 'sample' : A trajectory of length T will have :math:`T/\tau` counts
          at time indexes
          .. math::
             (0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., (((T/\tau)-1) \tau \rightarrow T)
    connectivity : str, optional
        Connectivity mode. Three methods are intended (currently only
        'largest' is implemented)
        * 'largest' : The active set is the largest reversibly
          connected set. All estimation will be done on this subset
          and all quantities (transition matrix, stationary
          distribution, etc) are only defined on this subset and are
          correspondingly smaller than the full set of states
        * 'all' : The active set is the full set of states. Estimation
          will be conducted on each reversibly connected set
          separately. That means the transition matrix will decompose
          into disconnected submatrices, the stationary vector is only
          defined within subsets, etc. Currently not implemented.
        * 'none' : The active set is the full set of
          states. Estimation will be conducted on the full set of
          states without ensuring connectivity. This only permits
          nonreversible estimation. Currently not implemented.
    dt_traj : str, optional
        Description of the physical time corresponding to the lag. May
        be used by analysis algorithms such as plotting tools to
        pretty-print the axes. By default '1 step', i.e. there is no
        physical time unit. Specify by a number, whitespace and
        unit. Permitted units are (* is an arbitrary string):
        * 'fs', 'femtosecond*'
        * 'ps', 'picosecond*'
        * 'ns', 'nanosecond*'
        * 'us', 'microsecond*'
        * 'ms', 'millisecond*'
        * 's', 'second*'
    maxiter : int, optional
        Optional parameter which specifies the maximum number of
        updates for Lagrange multiplier estimation.
    eps : float, optional
        Additional convergence criterion used when some experimental data
        are outside the support of the simulation. The value of the eps
        parameter is the threshold of the relative change in the predicted
        observables as a function of fixed-point iteration:
        $$ \mathrm{eps} > \frac{\mid o_{\mathrm{pred}}^{(i+1)}-o_{\mathrm{pred}}^{(i)}\mid }{\sigma}. $$
    maxcache : int, optional
        Parameter which specifies the maximum size of cache used
        when performing estimation of AMM, in megabytes.
    Returns
    -------
    amm : :class:`AugmentedMarkovModel <pyemma.msm.AugmentedMarkovModel>`
        Estimator object containing the AMM and estimation information.
    See also
    --------
    AugmentedMarkovModel
        An AMM object that has been estimated from data
    .. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
        :members:
        :undoc-members:
        .. rubric:: Methods
        .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
            :methods:
        .. rubric:: Attributes
        .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
            :attributes:
    References
    ----------
    .. [1] Olsson S, Wu H, Paul F, Clementi C, Noe F "Combining experimental and simulation data
       of molecular processes via augmented Markov models" PNAS (2017), 114(31), pp. 8265-8270
       doi: 10.1073/pnas.1704803114
    """
    # check input
    if _np.all(sigmas>0):
        # Observable weights for the AMM likelihood: w_k = 1 / (2 sigma_k^2).
        _w = 1./(2*sigmas**2.)
    else:
        raise ValueError('Zero or negative standard errors supplied. Please revise input')
    if ftrajs[0].ndim < 2:
        raise ValueError("Supplied feature trajectories have inappropriate dimensions (%d) should be atleast 2."%ftrajs[0].ndim)
    if len(dtrajs) != len(ftrajs):
        raise ValueError("A different number of dtrajs and ftrajs were supplied as input. They must have exactly a one-to-one correspondence.")
    elif not _np.all([len(dt)==len(ft) for dt,ft in zip(dtrajs, ftrajs)]):
        raise ValueError("One or more supplied dtraj-ftraj pairs do not have the same length.")
    else:
        # MAKE E matrix
        # E[i, k] = mean of observable k over all frames assigned to the
        # i-th Markov state encountered in the concatenated trajectories.
        dta = _np.concatenate(dtrajs)
        fta = _np.concatenate(ftrajs)
        all_markov_states = set(dta)
        # NOTE(review): set iteration order is arbitrary, so the row order of
        # _E may not match sorted state indices -- verify it matches the
        # state ordering _ML_AMM expects.
        _E = _np.zeros((len(all_markov_states), fta.shape[1]))
        for i, s in enumerate(all_markov_states):
            _E[i, :] = fta[_np.where(dta == s)].mean(axis = 0)
        # transition matrix estimator
        # NOTE(review): the documented `eps` parameter is accepted but not
        # forwarded to _ML_AMM here -- confirm whether it should be.
        mlamm = _ML_AMM(lag=lag, count_mode=count_mode,
                        connectivity=connectivity,
                        dt_traj=dt_traj, maxiter=maxiter, max_cache=maxcache,
                        E=_E, w=_w, m=m)
        # estimate and return
        return mlamm.estimate(dtrajs) |
def init(self):
    """Init the connection to the Cassandra server.

    Returns the ``(cluster, session)`` pair on success, or ``None`` when
    the Cassandra export is disabled in the configuration.
    """
    if not self.export_enable:
        return None
    # if username and/or password are not set the connection will try to connect with no auth
    # NOTE(review): PlainTextAuthProvider is built even when the credentials
    # are unset -- confirm the driver accepts None username/password for an
    # unauthenticated cluster.
    auth_provider = PlainTextAuthProvider(
        username=self.username, password=self.password)
    # Cluster
    try:
        cluster = Cluster([self.host],
                          port=int(self.port),
                          protocol_version=int(self.protocol_version),
                          auth_provider=auth_provider)
        session = cluster.connect()
    except Exception as e:
        # NOTE(review): exiting the whole process from an exporter init is
        # drastic -- confirm this is the intended failure mode.
        logger.critical("Cannot connect to Cassandra cluster '%s:%s' (%s)" % (self.host, self.port, e))
        sys.exit(2)
    # Keyspace
    try:
        session.set_keyspace(self.keyspace)
    except InvalidRequest as e:
        # Keyspace does not exist yet: create it, then switch to it.
        logger.info("Create keyspace {} on the Cassandra cluster".format(self.keyspace))
        c = "CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }" % (self.keyspace, self.replication_factor)
        session.execute(c)
        session.set_keyspace(self.keyspace)
    logger.info(
        "Stats will be exported to Cassandra cluster {} ({}) in keyspace {}".format(
            cluster.metadata.cluster_name, cluster.metadata.all_hosts(), self.keyspace))
    # Table
    try:
        # CQL identifier comes from local configuration (self.table), not
        # untrusted input, so %s interpolation is tolerated here.
        session.execute("CREATE TABLE %s (plugin text, time timeuuid, stat map<text,float>, PRIMARY KEY (plugin, time)) WITH CLUSTERING ORDER BY (time DESC)" % self.table)
    except Exception:
        # Creation raises (AlreadyExists) when the table is already present.
        logger.debug("Cassandra table %s already exist" % self.table)
    return cluster, session | Init the connection to the Cassandra server. | Below is the the instruction that describes the task:
### Input:
Init the connection to the Cassandra server.
### Response:
def init(self):
    """Init the connection to the Cassandra server.

    Returns the ``(cluster, session)`` pair on success, or ``None`` when
    the Cassandra export is disabled in the configuration.
    """
    if not self.export_enable:
        return None
    # if username and/or password are not set the connection will try to connect with no auth
    # NOTE(review): PlainTextAuthProvider is built even when the credentials
    # are unset -- confirm the driver accepts None username/password for an
    # unauthenticated cluster.
    auth_provider = PlainTextAuthProvider(
        username=self.username, password=self.password)
    # Cluster
    try:
        cluster = Cluster([self.host],
                          port=int(self.port),
                          protocol_version=int(self.protocol_version),
                          auth_provider=auth_provider)
        session = cluster.connect()
    except Exception as e:
        # NOTE(review): exiting the whole process from an exporter init is
        # drastic -- confirm this is the intended failure mode.
        logger.critical("Cannot connect to Cassandra cluster '%s:%s' (%s)" % (self.host, self.port, e))
        sys.exit(2)
    # Keyspace
    try:
        session.set_keyspace(self.keyspace)
    except InvalidRequest as e:
        # Keyspace does not exist yet: create it, then switch to it.
        logger.info("Create keyspace {} on the Cassandra cluster".format(self.keyspace))
        c = "CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }" % (self.keyspace, self.replication_factor)
        session.execute(c)
        session.set_keyspace(self.keyspace)
    logger.info(
        "Stats will be exported to Cassandra cluster {} ({}) in keyspace {}".format(
            cluster.metadata.cluster_name, cluster.metadata.all_hosts(), self.keyspace))
    # Table
    try:
        # CQL identifier comes from local configuration (self.table), not
        # untrusted input, so %s interpolation is tolerated here.
        session.execute("CREATE TABLE %s (plugin text, time timeuuid, stat map<text,float>, PRIMARY KEY (plugin, time)) WITH CLUSTERING ORDER BY (time DESC)" % self.table)
    except Exception:
        # Creation raises (AlreadyExists) when the table is already present.
        logger.debug("Cassandra table %s already exist" % self.table)
    return cluster, session |
def ParseBookmarkAnnotationRow(
    self, parser_mediator, query, row, **unused_kwargs):
    """Parses a bookmark annotation row.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    # _GetRowValue keys its column-lookup cache on the hash of the query.
    query_hash = hash(query)
    event_data = FirefoxPlacesBookmarkAnnotationEventData()
    event_data.content = self._GetRowValue(query_hash, row, 'content')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    # One event per available timestamp; both events share the same
    # event data. Timestamps are stored as POSIX time in microseconds and
    # zero/missing values produce no event.
    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
            timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    timestamp = self._GetRowValue(query_hash, row, 'lastModified')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
            timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a bookmark annotation row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
      row (sqlite3.Row): row. | Below is the instruction that describes the task:
### Input:
Parses a bookmark annotation row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
### Response:
def ParseBookmarkAnnotationRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark annotation row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = FirefoxPlacesBookmarkAnnotationEventData()
event_data.content = self._GetRowValue(query_hash, row, 'content')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def _route(self, action, method):
"""
Given an action method, generates a route for it.
"""
# First thing, determine the path for the method
# NOTE(review): _wsgi_path / _wsgi_methods / _wsgi_condition /
# _wsgi_keywords appear to be attributes attached to the method by
# routing decorators elsewhere -- confirm where they are set.
path = method._wsgi_path
methods = None
if path is None:
map_rule = self.wsgi_method_map.get(method.__name__)
if map_rule is None:
# Can't connect this method
LOG.warning("No path specified for action method %s() of "
"resource %s" % (method.__name__, self.wsgi_name))
return
# Compute the path and the method list
path = utils.norm_path(map_rule[0] % self.wsgi_name)
methods = map_rule[1]
# Compute route name
name = '%s_%s' % (self.wsgi_name, action)
# Set up path
path = getattr(self, 'wsgi_path_prefix', '') + path
# Build up the conditions
conditions = {}
# When the path came from wsgi_method_map, the map's explicit method
# list takes precedence over the function's _wsgi_methods attribute.
if hasattr(method, '_wsgi_methods'):
conditions['method'] = methods if methods else method._wsgi_methods
if hasattr(method, '_wsgi_condition'):
conditions['function'] = method._wsgi_condition
# Create the route
self.wsgi_mapper.connect(name, path,
controller=self,
action=action,
conditions=conditions,
**getattr(method, '_wsgi_keywords', {})) | Given an action method, generates a route for it. | Below is the the instruction that describes the task:
### Input:
Given an action method, generates a route for it.
### Response:
def _route(self, action, method):
"""
Given an action method, generates a route for it.
"""
# First thing, determine the path for the method
path = method._wsgi_path
methods = None
if path is None:
map_rule = self.wsgi_method_map.get(method.__name__)
if map_rule is None:
# Can't connect this method
LOG.warning("No path specified for action method %s() of "
"resource %s" % (method.__name__, self.wsgi_name))
return
# Compute the path and the method list
path = utils.norm_path(map_rule[0] % self.wsgi_name)
methods = map_rule[1]
# Compute route name
name = '%s_%s' % (self.wsgi_name, action)
# Set up path
path = getattr(self, 'wsgi_path_prefix', '') + path
# Build up the conditions
conditions = {}
if hasattr(method, '_wsgi_methods'):
conditions['method'] = methods if methods else method._wsgi_methods
if hasattr(method, '_wsgi_condition'):
conditions['function'] = method._wsgi_condition
# Create the route
self.wsgi_mapper.connect(name, path,
controller=self,
action=action,
conditions=conditions,
**getattr(method, '_wsgi_keywords', {})) |
def register_tool(self, tool, name=None):
"""Check tool and see if it is installed in the local cpp toolchain.
All cpp tasks should request their tools using this method. Tools are validated
and cached for quick lookup.
:param string tool: Name or path of program tool, eg 'g++'
:param string name: Logical name of tool, eg 'compiler'. If not supplied defaults to basename
of `tool`
"""
name = name or os.path.basename(tool)
# Memoized per logical name: repeat calls return the cached path.
if name in self._validated_tools:
return self._validated_tools[name]
# Minimal re-implementation of shutil.which(): an argument with a
# directory component is checked directly, otherwise PATH is searched.
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
# Strip surrounding quotes (presumably for quoted
# Windows-style PATH entries).
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
tool_path = which(tool)
if tool_path is None:
raise self.Error('Failed to locate {0}. Please install.'.format(tool))
self._validated_tools[name] = tool_path
return tool_path | Check tool and see if it is installed in the local cpp toolchain.
All cpp tasks should request their tools using this method. Tools are validated
and cached for quick lookup.
:param string tool: Name or path of program tool, eg 'g++'
:param string name: Logical name of tool, eg 'compiler'. If not supplied defaults to basename
of `tool` | Below is the instruction that describes the task:
### Input:
Check tool and see if it is installed in the local cpp toolchain.
All cpp tasks should request their tools using this method. Tools are validated
and cached for quick lookup.
:param string tool: Name or path of program tool, eg 'g++'
:param string name: Logical name of tool, eg 'compiler'. If not supplied defaults to basename
of `tool`
### Response:
def register_tool(self, tool, name=None):
"""Check tool and see if it is installed in the local cpp toolchain.
All cpp tasks should request their tools using this method. Tools are validated
and cached for quick lookup.
:param string tool: Name or path of program tool, eg 'g++'
:param string name: Logical name of tool, eg 'compiler'. If not supplied defaults to basename
of `tool`
"""
name = name or os.path.basename(tool)
if name in self._validated_tools:
return self._validated_tools[name]
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
tool_path = which(tool)
if tool_path is None:
raise self.Error('Failed to locate {0}. Please install.'.format(tool))
self._validated_tools[name] = tool_path
return tool_path |
def setExpertLevel(self):
"""
Set expert level
"""
g = get_root(self).globals
level = g.cpars['expert_level']
# first define which buttons are visible
# Level 0: simple two-button layout; levels 1/2: full detailed layout.
if level == 0:
# simple layout
# grid_forget() hides a Tk widget without destroying it.
for button in self.all_buttons:
button.grid_forget()
# then re-grid the two simple ones
self.powerOn.grid(row=0, column=0)
self.powerOff.grid(row=0, column=1)
elif level == 1 or level == 2:
# first remove all possible buttons
for button in self.all_buttons:
button.grid_forget()
# restore detailed layout
self.cldcOn.grid(row=0, column=1)
self.cldcOff.grid(row=1, column=1)
self.seqStart.grid(row=2, column=1)
self.seqStop.grid(row=3, column=1)
self.ngcOnline.grid(row=0, column=0)
self.ngcOff.grid(row=1, column=0)
self.ngcStandby.grid(row=2, column=0)
self.ngcReset.grid(row=3, column=0)
# now set whether buttons are permanently enabled or not
# Only level 2 ("expert") keeps every button permanently enabled.
if level == 0 or level == 1:
for button in self.all_buttons:
button.setNonExpert()
elif level == 2:
for button in self.all_buttons:
button.setExpert() | Set expert level | Below is the the instruction that describes the task:
### Input:
Set expert level
### Response:
def setExpertLevel(self):
"""
Set expert level
"""
g = get_root(self).globals
level = g.cpars['expert_level']
# first define which buttons are visible
if level == 0:
# simple layout
for button in self.all_buttons:
button.grid_forget()
# then re-grid the two simple ones
self.powerOn.grid(row=0, column=0)
self.powerOff.grid(row=0, column=1)
elif level == 1 or level == 2:
# first remove all possible buttons
for button in self.all_buttons:
button.grid_forget()
# restore detailed layout
self.cldcOn.grid(row=0, column=1)
self.cldcOff.grid(row=1, column=1)
self.seqStart.grid(row=2, column=1)
self.seqStop.grid(row=3, column=1)
self.ngcOnline.grid(row=0, column=0)
self.ngcOff.grid(row=1, column=0)
self.ngcStandby.grid(row=2, column=0)
self.ngcReset.grid(row=3, column=0)
# now set whether buttons are permanently enabled or not
if level == 0 or level == 1:
for button in self.all_buttons:
button.setNonExpert()
elif level == 2:
for button in self.all_buttons:
button.setExpert() |
def find_contexts(self, in_request=None, in_resolve=None):
"""Find contexts in the suite based on search criteria.
Args:
in_request (str): Match contexts that contain the given package in
their request.
in_resolve (str or `Requirement`): Match contexts that contain the
given package in their resolve. You can also supply a conflict
requirement - '!foo' will match any contexts whos resolve does
not contain any version of package 'foo'.
Returns:
List of context names that match the search criteria.
"""
names = self.context_names
if in_request:
# Keep only contexts whose (recursive) request mentions the package.
def _in_request(name):
context = self.context(name)
packages = set(x.name for x in context.requested_packages(True))
return (in_request in packages)
names = [x for x in names if _in_request(x)]
if in_resolve:
# NOTE(review): `basestring` is Python 2 only -- this module targets
# Py2 or aliases basestring elsewhere; confirm.
if isinstance(in_resolve, basestring):
in_resolve = PackageRequest(in_resolve)
def _in_resolve(name):
context = self.context(name)
variant = context.get_resolved_package(in_resolve.name)
if variant:
# Normal request matches on version overlap; a conflict
# request ('!foo') matches only when there is no overlap.
overlap = (variant.version in in_resolve.range)
return ((in_resolve.conflict and not overlap)
or (overlap and not in_resolve.conflict))
else:
# Package absent from resolve: matches conflict requests only.
return in_resolve.conflict
names = [x for x in names if _in_resolve(x)]
return names | Find contexts in the suite based on search criteria.
Args:
in_request (str): Match contexts that contain the given package in
their request.
in_resolve (str or `Requirement`): Match contexts that contain the
given package in their resolve. You can also supply a conflict
requirement - '!foo' will match any contexts whos resolve does
not contain any version of package 'foo'.
Returns:
List of context names that match the search criteria. | Below is the instruction that describes the task:
### Input:
Find contexts in the suite based on search criteria.
Args:
in_request (str): Match contexts that contain the given package in
their request.
in_resolve (str or `Requirement`): Match contexts that contain the
given package in their resolve. You can also supply a conflict
requirement - '!foo' will match any contexts whos resolve does
not contain any version of package 'foo'.
Returns:
List of context names that match the search criteria.
### Response:
def find_contexts(self, in_request=None, in_resolve=None):
"""Find contexts in the suite based on search criteria.
Args:
in_request (str): Match contexts that contain the given package in
their request.
in_resolve (str or `Requirement`): Match contexts that contain the
given package in their resolve. You can also supply a conflict
requirement - '!foo' will match any contexts whos resolve does
not contain any version of package 'foo'.
Returns:
List of context names that match the search criteria.
"""
names = self.context_names
if in_request:
def _in_request(name):
context = self.context(name)
packages = set(x.name for x in context.requested_packages(True))
return (in_request in packages)
names = [x for x in names if _in_request(x)]
if in_resolve:
if isinstance(in_resolve, basestring):
in_resolve = PackageRequest(in_resolve)
def _in_resolve(name):
context = self.context(name)
variant = context.get_resolved_package(in_resolve.name)
if variant:
overlap = (variant.version in in_resolve.range)
return ((in_resolve.conflict and not overlap)
or (overlap and not in_resolve.conflict))
else:
return in_resolve.conflict
names = [x for x in names if _in_resolve(x)]
return names |
def clear_children(parent_to_parse, element_path=None):
"""
Clears only children (not text or attributes) from the parsed parent
or named element.
"""
element = get_element(parent_to_parse, element_path)
if element is None:
return parent_to_parse
else:
# Element.clear() wipes text and attributes as well as children, so
# both are saved and restored around the call -- only child nodes
# are actually dropped.
elem_txt = element.text
elem_atr = element.attrib
element.clear()
element.text = elem_txt
element.attrib = elem_atr
return element | Clears only children (not text or attributes) from the parsed parent
or named element. | Below is the instruction that describes the task:
### Input:
Clears only children (not text or attributes) from the parsed parent
or named element.
### Response:
def clear_children(parent_to_parse, element_path=None):
"""
Clears only children (not text or attributes) from the parsed parent
or named element.
"""
element = get_element(parent_to_parse, element_path)
if element is None:
return parent_to_parse
else:
elem_txt = element.text
elem_atr = element.attrib
element.clear()
element.text = elem_txt
element.attrib = elem_atr
return element |
def build(self, client,
nobuild=False,
usecache=True,
pull=False):
"""
Drives the build of the final image - get the list of steps and execute them.
Args:
client (docker.Client): docker client object that will build the image
nobuild (bool): just create dockerfiles, don't actually build the image
usecache (bool): use docker cache, or rebuild everything from scratch?
pull (bool): try to pull new versions of repository images?
"""
if not nobuild:
self.update_source_images(client,
usecache=usecache,
pull=pull)
# Banner output sized to the current terminal width.
width = utils.get_console_width()
cprint('\n' + '='*width,
color='white', attrs=['bold'])
line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % (
self.targetname, self.imagename, self.steps[-1].sourcefile)
cprint(_centered(line, width), color='blue', attrs=['bold'])
for istep, step in enumerate(self.steps):
print(colored('* Step','blue'),
colored('%d/%d' % (istep+1, len(self.steps)), 'blue', attrs=['bold']),
colored('for image', color='blue'),
colored(self.imagename, color='blue', attrs=['bold']))
if not nobuild:
# _rebuilt appears to be shared (module-level) state so that a
# cache-busting step is only force-rebuilt once per run --
# confirm where _rebuilt is defined.
if step.bust_cache:
stackkey = self._get_stack_key(istep)
if stackkey in _rebuilt:
step.bust_cache = False
step.build(client, usecache=usecache)
print(colored("* Created intermediate image", 'green'),
colored(step.buildname, 'green', attrs=['bold']),
end='\n\n')
if step.bust_cache:
_rebuilt.add(stackkey)
# Name of the image produced by the last executed step.
finalimage = step.buildname
if not nobuild:
self.finalizenames(client, finalimage)
line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)'%(
self.targetname, self.imagename, self.steps[-1].sourcefile)
cprint(_centered(line, width),
color='green', attrs=['bold'])
cprint('=' * width, color='white', attrs=['bold'], end='\n\n') | Drives the build of the final image - get the list of steps and execute them.
Args:
client (docker.Client): docker client object that will build the image
nobuild (bool): just create dockerfiles, don't actually build the image
usecache (bool): use docker cache, or rebuild everything from scratch?
pull (bool): try to pull new versions of repository images? | Below is the instruction that describes the task:
### Input:
Drives the build of the final image - get the list of steps and execute them.
Args:
client (docker.Client): docker client object that will build the image
nobuild (bool): just create dockerfiles, don't actually build the image
usecache (bool): use docker cache, or rebuild everything from scratch?
pull (bool): try to pull new versions of repository images?
### Response:
def build(self, client,
nobuild=False,
usecache=True,
pull=False):
"""
Drives the build of the final image - get the list of steps and execute them.
Args:
client (docker.Client): docker client object that will build the image
nobuild (bool): just create dockerfiles, don't actually build the image
usecache (bool): use docker cache, or rebuild everything from scratch?
pull (bool): try to pull new versions of repository images?
"""
if not nobuild:
self.update_source_images(client,
usecache=usecache,
pull=pull)
width = utils.get_console_width()
cprint('\n' + '='*width,
color='white', attrs=['bold'])
line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % (
self.targetname, self.imagename, self.steps[-1].sourcefile)
cprint(_centered(line, width), color='blue', attrs=['bold'])
for istep, step in enumerate(self.steps):
print(colored('* Step','blue'),
colored('%d/%d' % (istep+1, len(self.steps)), 'blue', attrs=['bold']),
colored('for image', color='blue'),
colored(self.imagename, color='blue', attrs=['bold']))
if not nobuild:
if step.bust_cache:
stackkey = self._get_stack_key(istep)
if stackkey in _rebuilt:
step.bust_cache = False
step.build(client, usecache=usecache)
print(colored("* Created intermediate image", 'green'),
colored(step.buildname, 'green', attrs=['bold']),
end='\n\n')
if step.bust_cache:
_rebuilt.add(stackkey)
finalimage = step.buildname
if not nobuild:
self.finalizenames(client, finalimage)
line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)'%(
self.targetname, self.imagename, self.steps[-1].sourcefile)
cprint(_centered(line, width),
color='green', attrs=['bold'])
cprint('=' * width, color='white', attrs=['bold'], end='\n\n') |
def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")):
"""Yields (is_ansi, text)"""
# Splitting on a capturing group makes re.split keep the ANSI escape
# sequences themselves in the result; empty fragments are dropped.
for part in _re.split(text):
if part:
yield (bool(_re.match(part)), part) | Yields (is_ansi, text) | Below is the the instruction that describes the task:
### Input:
Yields (is_ansi, text)
### Response:
def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")):
"""Yields (is_ansi, text)"""
for part in _re.split(text):
if part:
yield (bool(_re.match(part)), part) |
def _check_completion(task, mark_incomplete, clear, stats, visited, to_clear):
"""Core recursion function for :func:`check_completion`, see there for more documentation
Args:
task (luigi.Task): task instance
mark_incomplete (bool): see :func:`check_completion`
clear (bool): see :func:`check_completion`
stats (dict): task checking statistics (counts of incomplete, to be cleared, ...)
visited (dict): cache for visited tasks: key = task name + parameter string, value = completion status
to_clear (dict): dict of dicts of tasks to be cleared, key = task id,
value = {task: task instance object, required_by: set of task IDs that this task is required by}
"""
# NOTE(review): stats, visited and to_clear are mutated in place and
# shared across the whole recursion.
# is this task (recursively) complete?
task_complete = task.complete()
is_complete = task_complete
# task identification: task name plus parameters
task_id = get_task_name(task) + ' ' + get_task_param_string(task)
# check any requirements
for req in task.requires():
# task/parameter ID string to identify this task instance
req_id = get_task_name(req) + ' ' + get_task_param_string(req)
# skip recursion on already visited tasks, get completion status from cache
if req_id in visited:
req_complete = visited[req_id]
else:
req_complete, _ = _check_completion(task=req,
mark_incomplete=mark_incomplete,
clear=clear,
stats=stats,
visited=visited,
to_clear=to_clear)
visited[req_id] = req_complete
# add any incomplete requirements to the list of tasks to clear, noting the current task as parent (required by)
if clear and not req_complete:
clear_entry = to_clear.setdefault(req_id, dict(task=req, required_by=set()))
clear_entry['required_by'].add(task_id)
# A task is only (recursively) complete if all requirements are too.
is_complete &= req_complete
if not is_complete:
if task_complete:
config.logger.info("Task complete but requirements incomplete: " + task_id)
else:
config.logger.info("Task incomplete: " + task_id)
_increment_stats(stats, 'Incomplete tasks')
if mark_incomplete:
# Only ORMTask instances support persisted completion markers.
if isinstance(task, ORMTask):
task.mark_incomplete()
_increment_stats(stats, 'Marked incomplete')
config.logger.info("Marked task incomplete: " + task_id)
else:
config.logger.info('Cannot mark task incomplete, not an ORMTask: ' + task_id)
else:
_increment_stats(stats, 'Complete tasks')
config.logger.debug("Task complete: " + task_id)
# if we want to clear and the current task is not in the dict of tasks to clear,
# it is the root task, add it with no parent (required by) tasks
if clear and not is_complete and task_id not in to_clear:
to_clear[task_id] = dict(task=task, required_by=set())
return is_complete, stats | Core recursion function for :func:`check_completion`, see there for more documentation
Args:
task (luigi.Task): task instance
mark_incomplete (bool): see :func:`check_completion`
clear (bool): see :func:`check_completion`
stats (dict): task checking statistics (counts of incomplete, to be cleared, ...)
visited (dict): cache for visited tasks: key = task name + parameter string, value = completion status
to_clear (dict): dict of dicts of tasks to be cleared, key = task id,
value = {task: task instance object, required_by: set of task IDs that this task is required by} | Below is the instruction that describes the task:
### Input:
Core recursion function for :func:`check_completion`, see there for more documentation
Args:
task (luigi.Task): task instance
mark_incomplete (bool): see :func:`check_completion`
clear (bool): see :func:`check_completion`
stats (dict): task checking statistics (counts of incomplete, to be cleared, ...)
visited (dict): cache for visited tasks: key = task name + parameter string, value = completion status
to_clear (dict): dict of dicts of tasks to be cleared, key = task id,
value = {task: task instance object, required_by: set of task IDs that this task is required by}
### Response:
def _check_completion(task, mark_incomplete, clear, stats, visited, to_clear):
"""Core recursion function for :func:`check_completion`, see there for more documentation
Args:
task (luigi.Task): task instance
mark_incomplete (bool): see :func:`check_completion`
clear (bool): see :func:`check_completion`
stats (dict): task checking statistics (counts of incomplete, to be cleared, ...)
visited (dict): cache for visited tasks: key = task name + parameter string, value = completion status
to_clear (dict): dict of dicts of tasks to be cleared, key = task id,
value = {task: task instance object, required_by: set of task IDs that this task is required by}
"""
# is this task (recursively) complete?
task_complete = task.complete()
is_complete = task_complete
# task identification: task name plus parameters
task_id = get_task_name(task) + ' ' + get_task_param_string(task)
# check any requirements
for req in task.requires():
# task/parameter ID string to identify this task instance
req_id = get_task_name(req) + ' ' + get_task_param_string(req)
# skip recursion on already visited tasks, get completion status from cache
if req_id in visited:
req_complete = visited[req_id]
else:
req_complete, _ = _check_completion(task=req,
mark_incomplete=mark_incomplete,
clear=clear,
stats=stats,
visited=visited,
to_clear=to_clear)
visited[req_id] = req_complete
# add any incomplete requirements to the list of tasks to clear, noting the current task as parent (required by)
if clear and not req_complete:
clear_entry = to_clear.setdefault(req_id, dict(task=req, required_by=set()))
clear_entry['required_by'].add(task_id)
is_complete &= req_complete
if not is_complete:
if task_complete:
config.logger.info("Task complete but requirements incomplete: " + task_id)
else:
config.logger.info("Task incomplete: " + task_id)
_increment_stats(stats, 'Incomplete tasks')
if mark_incomplete:
if isinstance(task, ORMTask):
task.mark_incomplete()
_increment_stats(stats, 'Marked incomplete')
config.logger.info("Marked task incomplete: " + task_id)
else:
config.logger.info('Cannot mark task incomplete, not an ORMTask: ' + task_id)
else:
_increment_stats(stats, 'Complete tasks')
config.logger.debug("Task complete: " + task_id)
# if we want to clear and the current task is not in the dict of tasks to clear,
# it is the root task, add it with no parent (required by) tasks
if clear and not is_complete and task_id not in to_clear:
to_clear[task_id] = dict(task=task, required_by=set())
return is_complete, stats |
def get_profile(fqa):
""" Given a fully-qualified username (username.namespace)
get the data associated with that fqu.
Return cached entries, if possible.
"""
profile_expired_grace = False
# Names are case-normalized before lookup.
fqa = fqa.lower()
try:
try:
res = blockstack.lib.client.resolve_profile(
fqa, include_name_record=True, hostport=blockstack_indexer_url)
except ValueError:
# invalid name
res = {'error': 'Invalid name', 'status_code': 400}
if 'error' in res:
log.error('Error from profile.get_profile: {}'.format(res['error']))
# Map known resolver error strings to a 404 status.
if "no user record hash defined" in res['error']:
res['status_code'] = 404
if "Failed to load user profile" in res['error']:
res['status_code'] = 404
if res.get('http_status'):
# pass along
res['status_code'] = res['http_status']
del res['http_status']
return res
log.warn(json.dumps(res['name_record']))
profile = res['profile']
zonefile = res['zonefile']
public_key = res.get('public_key', None)
address = res['name_record']['address']
if 'expired' in res['name_record'] and res['name_record']['expired']:
profile_expired_grace = True
except Exception as e:
log.exception(e)
# NOTE(review): abort() presumably raises (Flask/Bottle style), so
# the 500 path does not fall through -- confirm which framework
# supplies abort().
abort(500, json.dumps({'error': 'Server error fetching profile'}))
if profile is None or 'error' in zonefile:
log.error("{}".format(zonefile))
abort(404)
prof_data = {'response' : profile}
data = format_profile(prof_data['response'], fqa, zonefile, address, public_key)
if profile_expired_grace:
data['expired'] = (
'This name has expired! It is still in the renewal grace period, ' +
'but must be renewed or it will eventually expire and be available' +
' for others to register.')
return data | Given a fully-qualified username (username.namespace)
get the data associated with that fqu.
Return cached entries, if possible. | Below is the instruction that describes the task:
### Input:
Given a fully-qualified username (username.namespace)
get the data associated with that fqu.
Return cached entries, if possible.
### Response:
def get_profile(fqa):
""" Given a fully-qualified username (username.namespace)
get the data associated with that fqu.
Return cached entries, if possible.
"""
profile_expired_grace = False
fqa = fqa.lower()
try:
try:
res = blockstack.lib.client.resolve_profile(
fqa, include_name_record=True, hostport=blockstack_indexer_url)
except ValueError:
# invalid name
res = {'error': 'Invalid name', 'status_code': 400}
if 'error' in res:
log.error('Error from profile.get_profile: {}'.format(res['error']))
if "no user record hash defined" in res['error']:
res['status_code'] = 404
if "Failed to load user profile" in res['error']:
res['status_code'] = 404
if res.get('http_status'):
# pass along
res['status_code'] = res['http_status']
del res['http_status']
return res
log.warn(json.dumps(res['name_record']))
profile = res['profile']
zonefile = res['zonefile']
public_key = res.get('public_key', None)
address = res['name_record']['address']
if 'expired' in res['name_record'] and res['name_record']['expired']:
profile_expired_grace = True
except Exception as e:
log.exception(e)
abort(500, json.dumps({'error': 'Server error fetching profile'}))
if profile is None or 'error' in zonefile:
log.error("{}".format(zonefile))
abort(404)
prof_data = {'response' : profile}
data = format_profile(prof_data['response'], fqa, zonefile, address, public_key)
if profile_expired_grace:
data['expired'] = (
'This name has expired! It is still in the renewal grace period, ' +
'but must be renewed or it will eventually expire and be available' +
' for others to register.')
return data |
def frequency2fractional(frequency, mean_frequency=-1):
""" Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
Data array of fractional frequency
"""
# -1 is a sentinel meaning "use the data mean"; a literal nominal
# frequency of -1 Hz therefore cannot be passed explicitly.
if mean_frequency == -1:
mu = np.mean(frequency)
else:
mu = mean_frequency
# Fractional frequency: deviation from nominal, normalized by nominal.
y = [(x-mu)/mu for x in frequency]
return y | Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
Data array of fractional frequency | Below is the instruction that describes the task:
### Input:
Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
Data array of fractional frequency
### Response:
def frequency2fractional(frequency, mean_frequency=-1):
""" Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
Data array of fractional frequency
"""
if mean_frequency == -1:
mu = np.mean(frequency)
else:
mu = mean_frequency
y = [(x-mu)/mu for x in frequency]
return y |
def get_default_assets_zip_provider():
"""Opens stock TensorBoard web assets collection.
Returns:
Returns function that returns a newly opened file handle to zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server.
"""
# sys._getframe(1) resolves the *caller's* frame, so webfiles.zip is
# looked up next to whichever module invokes this helper.
path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))),
'webfiles.zip')
if not os.path.exists(path):
logger.warning('webfiles.zip static assets not found: %s', path)
return None
# Return a factory rather than an open handle; each call opens a fresh
# file object the caller is responsible for closing.
return lambda: open(path, 'rb') | Opens stock TensorBoard web assets collection.
Returns:
Returns function that returns a newly opened file handle to zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server. | Below is the instruction that describes the task:
### Input:
Opens stock TensorBoard web assets collection.
Returns:
Returns function that returns a newly opened file handle to zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server.
### Response:
def get_default_assets_zip_provider():
"""Opens stock TensorBoard web assets collection.
Returns:
Returns function that returns a newly opened file handle to zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server.
"""
path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))),
'webfiles.zip')
if not os.path.exists(path):
logger.warning('webfiles.zip static assets not found: %s', path)
return None
return lambda: open(path, 'rb') |
def calculate(self, T, P, zs, ws, method):
    r'''Compute the molar volume of a gas mixture at temperature `T`,
    pressure `P`, mole fractions `zs` and weight fractions `ws` with the
    requested method.

    No exception handling is performed here; see `mixture_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate the property, [K]
    P : float
        Pressure at which to calculate the property, [Pa]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    method : str
        Name of the method to use

    Returns
    -------
    Vm : float
        Molar volume of the gas mixture at the given conditions, [m^3/mol]
    '''
    if method == SIMPLE:
        # Mole-fraction-weighted average of the pure-component gas volumes.
        return mixing_simple(zs, [pure(T, P) for pure in self.VolumeGases])
    if method == IDEAL:
        return ideal_gas(T, P)
    if method == EOS:
        # Re-solve the stored equation of state at the new conditions, then
        # read off its gas-phase volume.
        self.eos[0] = self.eos[0].to_TP_zs(T=T, P=P, zs=zs)
        return self.eos[0].V_g
    raise Exception('Method not valid')
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
Vm : float
Molar volume of the gas mixture at the given conditions, [m^3/mol] | Below is the instruction that describes the task:
### Input:
r'''Method to calculate molar volume of a gas mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
Vm : float
Molar volume of the gas mixture at the given conditions, [m^3/mol]
### Response:
def calculate(self, T, P, zs, ws, method):
r'''Method to calculate molar volume of a gas mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
Vm : float
Molar volume of the gas mixture at the given conditions, [m^3/mol]
'''
if method == SIMPLE:
Vms = [i(T, P) for i in self.VolumeGases]
return mixing_simple(zs, Vms)
elif method == IDEAL:
return ideal_gas(T, P)
elif method == EOS:
self.eos[0] = self.eos[0].to_TP_zs(T=T, P=P, zs=zs)
return self.eos[0].V_g
else:
raise Exception('Method not valid') |
def set_void_declension(self, number_type, case_type):
    """
    Reset ``self.declension`` to an all-empty table: one row per grammatical
    number, one empty-string cell per grammatical case.

    >>> decl = DeclinableOneGender("armr", Gender.masculine)
    >>> decl.declension
    []
    >>> decl.set_void_declension(Number, Case)
    >>> decl.declension
    [['', '', '', ''], ['', '', '', '']]

    :param number_type: iterable of grammatical numbers (one row each)
    :param case_type: iterable of grammatical cases (one cell each)
    :return: None; ``self.declension`` is rebuilt in place
    """
    self.declension = [["" for _ in case_type] for _ in number_type]
>>> decl.declension
[]
>>> decl.set_void_declension(Number, Case)
>>> decl.declension
[['', '', '', ''], ['', '', '', '']]
:param number_type:
:param case_type:
:return: | Below is the instruction that describes the task:
### Input:
>>> decl = DeclinableOneGender("armr", Gender.masculine)
>>> decl.declension
[]
>>> decl.set_void_declension(Number, Case)
>>> decl.declension
[['', '', '', ''], ['', '', '', '']]
:param number_type:
:param case_type:
:return:
### Response:
def set_void_declension(self, number_type, case_type):
"""
>>> decl = DeclinableOneGender("armr", Gender.masculine)
>>> decl.declension
[]
>>> decl.set_void_declension(Number, Case)
>>> decl.declension
[['', '', '', ''], ['', '', '', '']]
:param number_type:
:param case_type:
:return:
"""
self.declension = []
for i, a_number in enumerate(number_type):
self.declension.append([])
for _ in case_type:
self.declension[i].append("") |
def _log_enabled_protocols(self, flags, protocols):
"""Given a list of single character strings of 1's and 0's and a list
of protocol names. Log the status of each protocol where ``"1"`` is
enabled and ``"0"`` is disabled. The order of the lists here is
important as they need to be zipped together to create the mapping.
Then return a tuple of two lists containing the names of the enabled
and disabled protocols.
:param character: A list of single character strings of 1's and 0's
:type character: list
:param protocols: A list of protocol names.
:type protocols: list
:return: Tuple containing two lists which contain n strings.
:rtype: tuple
"""
enabled, disabled = [], []
for procol, flag in sorted(zip(protocols, flags)):
if flag == '1':
enabled.append(procol)
status = 'Enabled'
else:
disabled.append(procol)
status = 'Disabled'
message = "{0:21}: {1}".format(procol, status)
self.log.info(message)
return enabled, disabled | Given a list of single character strings of 1's and 0's and a list
of protocol names. Log the status of each protocol where ``"1"`` is
enabled and ``"0"`` is disabled. The order of the lists here is
important as they need to be zipped together to create the mapping.
Then return a tuple of two lists containing the names of the enabled
and disabled protocols.
:param character: A list of single character strings of 1's and 0's
:type character: list
:param protocols: A list of protocol names.
:type protocols: list
:return: Tuple containing two lists which contain n strings.
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Given a list of single character strings of 1's and 0's and a list
of protocol names. Log the status of each protocol where ``"1"`` is
enabled and ``"0"`` is disabled. The order of the lists here is
important as they need to be zipped together to create the mapping.
Then return a tuple of two lists containing the names of the enabled
and disabled protocols.
:param character: A list of single character strings of 1's and 0's
:type character: list
:param protocols: A list of protocol names.
:type protocols: list
:return: Tuple containing two lists which contain n strings.
:rtype: tuple
### Response:
def _log_enabled_protocols(self, flags, protocols):
"""Given a list of single character strings of 1's and 0's and a list
of protocol names. Log the status of each protocol where ``"1"`` is
enabled and ``"0"`` is disabled. The order of the lists here is
important as they need to be zipped together to create the mapping.
Then return a tuple of two lists containing the names of the enabled
and disabled protocols.
:param character: A list of single character strings of 1's and 0's
:type character: list
:param protocols: A list of protocol names.
:type protocols: list
:return: Tuple containing two lists which contain n strings.
:rtype: tuple
"""
enabled, disabled = [], []
for procol, flag in sorted(zip(protocols, flags)):
if flag == '1':
enabled.append(procol)
status = 'Enabled'
else:
disabled.append(procol)
status = 'Disabled'
message = "{0:21}: {1}".format(procol, status)
self.log.info(message)
return enabled, disabled |
def use_comparative_bin_view(self):
    """Pass through to provider ResourceBinSession.use_comparative_bin_view"""
    self._bin_view = COMPARATIVE
    # Propagate the comparative view to every tracked provider session;
    # sessions that do not expose the method are simply skipped.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_bin_view()
        except AttributeError:
            pass
### Input:
Pass through to provider ResourceBinSession.use_comparative_bin_view
### Response:
def use_comparative_bin_view(self):
"""Pass through to provider ResourceBinSession.use_comparative_bin_view"""
self._bin_view = COMPARATIVE
# self._get_provider_session('resource_bin_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_bin_view()
except AttributeError:
pass |
def _contents(self):
    """Pick a random story file and return its full text.

    Call chain: transmit() -> _what() -> create_information() -> _contents().
    """
    story_files = (
        "ghosts.md",
        "cricket.md",
        "moochi.md",
        "outwit.md",
        "raid.md",
        "species.md",
        "tennis.md",
        "vagabond.md",
    )
    chosen = random.choice(story_files)
    with open("static/stimuli/{}".format(chosen), "r") as handle:
        return handle.read()
transmit() -> _what() -> create_information() -> _contents(). | Below is the instruction that describes the task:
### Input:
Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
### Response:
def _contents(self):
"""Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
"""
stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
"tennis.md",
"vagabond.md"
]
story = random.choice(stories)
with open("static/stimuli/{}".format(story), "r") as f:
return f.read() |
def check_file(self, filename):
    # type: (str) -> bool
    """
    Overrides :py:meth:`.Config.check_file`

    On top of the base readability check, reject files whose mode grants
    group or world read access (anything looser than 600).
    """
    if not super(SecuredConfig, self).check_file(filename):
        return False
    mode = get_stat(filename).st_mode
    # Any group/other read bit makes the file too permissive to trust.
    if mode & (stat.S_IRGRP | stat.S_IROTH):
        msg = "File %r is not secure enough. Change it's mode to 600"
        self._log.warning(msg, filename)
        return False
    return True
### Input:
Overrides :py:meth:`.Config.check_file`
### Response:
def check_file(self, filename):
# type: (str) -> bool
"""
Overrides :py:meth:`.Config.check_file`
"""
can_read = super(SecuredConfig, self).check_file(filename)
if not can_read:
return False
mode = get_stat(filename).st_mode
if (mode & stat.S_IRGRP) or (mode & stat.S_IROTH):
msg = "File %r is not secure enough. Change it's mode to 600"
self._log.warning(msg, filename)
return False
return True |
def from_hdf(cls, fp, template_hash, root=None, load_to_memory=True,
             load_now=False):
    """Load a compressed waveform from the given hdf file handler.

    The waveform is retrieved from
    ``fp['[{root}/]compressed_waveforms/{template_hash}/{param}']``,
    where ``param`` is the ``sample_points``, ``amplitude``, and ``phase``.

    Parameters
    ----------
    fp : h5py.File
        An open hdf file to read the compressed waveform from.
    template_hash : {hash, int, str}
        The id of the waveform.
    root : {None, str}
        Retrieve the `compressed_waveforms` group from the given string.
        If `None`, `compressed_waveforms` will be assumed to be in the
        top level.
    load_to_memory : {True, bool}
        Set the `load_to_memory` attribute to the given value in the
        returned instance.
    load_now : {False, bool}
        Immediately load the `sample_points`/`amplitude`/`phase` to memory.

    Returns
    -------
    CompressedWaveform
        An instance of this class with parameters loaded from the hdf file.
    """
    prefix = '' if root is None else '%s/' % (root,)
    fp_group = fp['%scompressed_waveforms/%s' % (prefix, str(template_hash))]
    sample_points = fp_group['sample_points']
    amp = fp_group['amplitude']
    phase = fp_group['phase']
    if load_now:
        # Slice with [:] to pull the lazy hdf datasets fully into memory now.
        sample_points = sample_points[:]
        amp = amp[:]
        phase = phase[:]
    return cls(sample_points, amp, phase,
               interpolation=fp_group.attrs['interpolation'],
               tolerance=fp_group.attrs['tolerance'],
               mismatch=fp_group.attrs['mismatch'],
               precision=fp_group.attrs['precision'],
               load_to_memory=load_to_memory)
The waveform is retrieved from:
`fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`,
where `param` is the `sample_points`, `amplitude`, and `phase`.
Parameters
----------
fp : h5py.File
An open hdf file to write the compressed waveform to.
template_hash : {hash, int, str}
The id of the waveform.
root : {None, str}
Retrieve the `compressed_waveforms` group from the given string.
If `None`, `compressed_waveforms` will be assumed to be in the
top level.
load_to_memory : {True, bool}
Set the `load_to_memory` attribute to the given value in the
returned instance.
load_now : {False, bool}
Immediately load the `sample_points`/`amplitude`/`phase` to memory.
Returns
-------
CompressedWaveform
An instance of this class with parameters loaded from the hdf file. | Below is the instruction that describes the task:
### Input:
Load a compressed waveform from the given hdf file handler.
The waveform is retrieved from:
`fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`,
where `param` is the `sample_points`, `amplitude`, and `phase`.
Parameters
----------
fp : h5py.File
An open hdf file to write the compressed waveform to.
template_hash : {hash, int, str}
The id of the waveform.
root : {None, str}
Retrieve the `compressed_waveforms` group from the given string.
If `None`, `compressed_waveforms` will be assumed to be in the
top level.
load_to_memory : {True, bool}
Set the `load_to_memory` attribute to the given value in the
returned instance.
load_now : {False, bool}
Immediately load the `sample_points`/`amplitude`/`phase` to memory.
Returns
-------
CompressedWaveform
An instance of this class with parameters loaded from the hdf file.
### Response:
def from_hdf(cls, fp, template_hash, root=None, load_to_memory=True,
load_now=False):
"""Load a compressed waveform from the given hdf file handler.
The waveform is retrieved from:
`fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`,
where `param` is the `sample_points`, `amplitude`, and `phase`.
Parameters
----------
fp : h5py.File
An open hdf file to write the compressed waveform to.
template_hash : {hash, int, str}
The id of the waveform.
root : {None, str}
Retrieve the `compressed_waveforms` group from the given string.
If `None`, `compressed_waveforms` will be assumed to be in the
top level.
load_to_memory : {True, bool}
Set the `load_to_memory` attribute to the given value in the
returned instance.
load_now : {False, bool}
Immediately load the `sample_points`/`amplitude`/`phase` to memory.
Returns
-------
CompressedWaveform
An instance of this class with parameters loaded from the hdf file.
"""
if root is None:
root = ''
else:
root = '%s/'%(root)
group = '%scompressed_waveforms/%s' %(root, str(template_hash))
fp_group = fp[group]
sample_points = fp_group['sample_points']
amp = fp_group['amplitude']
phase = fp_group['phase']
if load_now:
sample_points = sample_points[:]
amp = amp[:]
phase = phase[:]
return cls(sample_points, amp, phase,
interpolation=fp_group.attrs['interpolation'],
tolerance=fp_group.attrs['tolerance'],
mismatch=fp_group.attrs['mismatch'],
precision=fp_group.attrs['precision'],
load_to_memory=load_to_memory) |
def get_point(self, *position):
    """Return the noise value of a specific position.

    Example usage: value = noise.getPoint(x, y, z)

    Args:
        position (Tuple[float, ...]): The point to sample at.

    Returns:
        float: The noise value at position.
            This will be a floating point in the 0.0-1.0 range.
    """
    coords = _ffi.new(self._arrayType, position)
    # The underlying C noise functions return values in [-1, 1];
    # rescale the result into the documented [0, 1] range.
    if self._useOctaves:
        return (self._noiseFunc(self._noise, coords, self._octaves) + 1) * 0.5
    return (self._noiseFunc(self._noise, coords) + 1) * 0.5
Example usage: value = noise.getPoint(x, y, z)
Args:
position (Tuple[float, ...]): The point to sample at.
Returns:
float: The noise value at position.
This will be a floating point in the 0.0-1.0 range. | Below is the instruction that describes the task:
### Input:
Return the noise value of a specific position.
Example usage: value = noise.getPoint(x, y, z)
Args:
position (Tuple[float, ...]): The point to sample at.
Returns:
float: The noise value at position.
This will be a floating point in the 0.0-1.0 range.
### Response:
def get_point(self, *position):
"""Return the noise value of a specific position.
Example usage: value = noise.getPoint(x, y, z)
Args:
position (Tuple[float, ...]): The point to sample at.
Returns:
float: The noise value at position.
This will be a floating point in the 0.0-1.0 range.
"""
#array = self._array
#for d, pos in enumerate(position):
# array[d] = pos
#array = self._cFloatArray(*position)
array = _ffi.new(self._arrayType, position)
if self._useOctaves:
return (self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5
return (self._noiseFunc(self._noise, array) + 1) * 0.5 |
def _stop_processes(paths):
    """ Scans process list trying to terminate processes matching paths
    specified. Uses checksums to identify processes that are duplicates of
    those specified to terminate.

    `paths`
        List of full paths to executables for processes to terminate.
    """
    def cache_checksum(path):
        """ Checksum provided path, cache, and return value.
        """
        if not path:
            return None
        if path not in _process_checksums:
            # Hash each binary once; subsequent scans reuse the cached digest.
            _process_checksums[path] = _get_checksum(path)
        return _process_checksums[path]

    if not paths:
        return

    # Set of digests identifying the executables we want to stop.
    target_checksums = {cache_checksum(p) for p in paths}
    if not target_checksums:
        return

    for proc, path in _get_user_processes():
        # path's checksum matches targets, attempt to terminate
        if cache_checksum(path) in target_checksums:
            try:
                proc.terminate()
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                # Process already gone or owned by someone else; best effort.
                pass
specified. Uses checksums to identify processes that are duplicates of
those specified to terminate.
`paths`
List of full paths to executables for processes to terminate. | Below is the instruction that describes the task:
### Input:
Scans process list trying to terminate processes matching paths
specified. Uses checksums to identify processes that are duplicates of
those specified to terminate.
`paths`
List of full paths to executables for processes to terminate.
### Response:
def _stop_processes(paths):
""" Scans process list trying to terminate processes matching paths
specified. Uses checksums to identify processes that are duplicates of
those specified to terminate.
`paths`
List of full paths to executables for processes to terminate.
"""
def cache_checksum(path):
""" Checksum provided path, cache, and return value.
"""
if not path:
return None
if not path in _process_checksums:
checksum = _get_checksum(path)
_process_checksums[path] = checksum
return _process_checksums[path]
if not paths:
return
target_checksums = dict((cache_checksum(p), 1) for p in paths)
if not target_checksums:
return
for proc, path in _get_user_processes():
# path's checksum matches targets, attempt to terminate
if cache_checksum(path) in target_checksums:
try:
proc.terminate()
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass |
def set_coords(self, names, inplace=None):
    """Given names of one or more variables, set them as coordinates

    Parameters
    ----------
    names : str or list of str
        Name(s) of variables in this dataset to convert into coordinates.
    inplace : bool, optional
        If True, modify this dataset inplace. Otherwise, create a new
        object.

    Returns
    -------
    Dataset

    See also
    --------
    Dataset.swap_dims
    """
    # TODO: allow inserting new coordinates with this method, like
    # DataFrame.set_index?
    # nb. check in self._variables, not self.data_vars to insure that the
    # operation is idempotent
    inplace = _check_inplace(inplace)
    names = [names] if isinstance(names, str) else names
    self._assert_all_in_dataset(names)
    target = self if inplace else self.copy()
    target._coord_names.update(names)
    return target
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
See also
--------
Dataset.swap_dims | Below is the instruction that describes the task:
### Input:
Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
See also
--------
Dataset.swap_dims
### Response:
def set_coords(self, names, inplace=None):
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
See also
--------
Dataset.swap_dims
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
# nb. check in self._variables, not self.data_vars to insure that the
# operation is idempotent
inplace = _check_inplace(inplace)
if isinstance(names, str):
names = [names]
self._assert_all_in_dataset(names)
obj = self if inplace else self.copy()
obj._coord_names.update(names)
return obj |
def get_delimited_message_bytes(byte_stream, nr=4):
    ''' Parse a delimited protobuf message. This is done by first getting a protobuf varint from
    the stream that represents the length of the message, then reading that amount of
    from the message and then parse it.
    Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
    The decoder returns the value and the position where the value was found, so we need
    to rewind the buffer to the position, because the remaining bytes belong to the message
    after.
    '''
    length, pos = decoder._DecodeVarint32(byte_stream.read(nr), 0)
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("Delimited message length (pos %d): %d" % (pos, length))
    # Any bytes read past the varint belong to the payload; push them back.
    byte_stream.rewind(nr - pos)
    message_bytes = byte_stream.read(length)
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
    return (length + pos, message_bytes)
the stream that represents the length of the message, then reading that amount of
from the message and then parse it.
Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
The decoder returns the value and the position where the value was found, so we need
to rewind the buffer to the position, because the remaining bytes belong to the message
after. | Below is the instruction that describes the task:
### Input:
Parse a delimited protobuf message. This is done by first getting a protobuf varint from
the stream that represents the length of the message, then reading that amount of
from the message and then parse it.
Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
The decoder returns the value and the position where the value was found, so we need
to rewind the buffer to the position, because the remaining bytes belong to the message
after.
### Response:
def get_delimited_message_bytes(byte_stream, nr=4):
''' Parse a delimited protobuf message. This is done by first getting a protobuf varint from
the stream that represents the length of the message, then reading that amount of
from the message and then parse it.
Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
The decoder returns the value and the position where the value was found, so we need
to rewind the buffer to the position, because the remaining bytes belong to the message
after.
'''
(length, pos) = decoder._DecodeVarint32(byte_stream.read(nr), 0)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message length (pos %d): %d" % (pos, length))
delimiter_bytes = nr - pos
byte_stream.rewind(delimiter_bytes)
message_bytes = byte_stream.read(length)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
total_len = length + pos
return (total_len, message_bytes) |
def EncodeMultipartFormData(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
      fields: A sequence of (name, value) elements for regular form fields.
      files: A sequence of (name, filename, value) elements for data to be
        uploaded as files.

    Returns:
      (content_type, body) ready for httplib.HTTP instance.

    Source:
      http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
    """
    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    CRLF = '\r\n'
    parts = []
    for key, value in fields:
        typecheck(key, str)
        typecheck(value, str)
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            value,
        ])
    for key, filename, value in files:
        typecheck(key, str)
        typecheck(filename, str)
        typecheck(value, str)
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename),
            'Content-Type: %s' % GetContentType(filename),
            '',
            value,
        ])
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    body = CRLF.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306 | Below is the instruction that describes the task:
### Input:
Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
### Response:
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
typecheck(key, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
typecheck(key, str)
typecheck(filename, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body |
def verify_checksum(self, progress_callback=None, chunk_size=None,
                    throws=True, checksum_kwargs=None, **kwargs):
    """Verify checksum of file instance.

    :param bool throws: If `True`, exceptions raised during checksum
        calculation will be re-raised after logging. If set to `False`, and
        an exception occurs, the `last_check` field is set to `None`
        (`last_check_at` of course is updated), since no check actually was
        performed.
    :param dict checksum_kwargs: Passed as ``**kwargs`` to
        ``storage().checksum``.
    """
    real_checksum = None
    try:
        real_checksum = self.storage(**kwargs).checksum(
            progress_callback=progress_callback, chunk_size=chunk_size,
            **(checksum_kwargs or {}))
    except Exception as exc:
        current_app.logger.exception(str(exc))
        if throws:
            raise
    with db.session.begin_nested():
        if real_checksum is None:
            # Checksum computation failed: record that no check happened.
            self.last_check = None
        else:
            self.last_check = (self.checksum == real_checksum)
        self.last_check_at = datetime.utcnow()
    return self.last_check
:param bool throws: If `True`, exceptions raised during checksum
calculation will be re-raised after logging. If set to `False`, and
an exception occurs, the `last_check` field is set to `None`
(`last_check_at` of course is updated), since no check actually was
performed.
:param dict checksum_kwargs: Passed as `**kwargs`` to
``storage().checksum``. | Below is the instruction that describes the task:
### Input:
Verify checksum of file instance.
:param bool throws: If `True`, exceptions raised during checksum
calculation will be re-raised after logging. If set to `False`, and
an exception occurs, the `last_check` field is set to `None`
(`last_check_at` of course is updated), since no check actually was
performed.
:param dict checksum_kwargs: Passed as `**kwargs`` to
``storage().checksum``.
### Response:
def verify_checksum(self, progress_callback=None, chunk_size=None,
throws=True, checksum_kwargs=None, **kwargs):
"""Verify checksum of file instance.
:param bool throws: If `True`, exceptions raised during checksum
calculation will be re-raised after logging. If set to `False`, and
an exception occurs, the `last_check` field is set to `None`
(`last_check_at` of course is updated), since no check actually was
performed.
:param dict checksum_kwargs: Passed as `**kwargs`` to
``storage().checksum``.
"""
try:
real_checksum = self.storage(**kwargs).checksum(
progress_callback=progress_callback, chunk_size=chunk_size,
**(checksum_kwargs or {}))
except Exception as exc:
current_app.logger.exception(str(exc))
if throws:
raise
real_checksum = None
with db.session.begin_nested():
self.last_check = (None if real_checksum is None
else (self.checksum == real_checksum))
self.last_check_at = datetime.utcnow()
return self.last_check |
def _start_lock_renewer(self):
    """
    Starts the lock refresher thread.

    :raises AlreadyStarted: if a renewal thread is already running.
    """
    if self._lock_renewal_thread is not None:
        raise AlreadyStarted("Lock refresh thread already started")
    logger.debug(
        "Starting thread to refresh lock every %s seconds",
        self._lock_renewal_interval
    )
    self._lock_renewal_stop = threading.Event()
    self._lock_renewal_thread = threading.Thread(
        group=None,
        target=self._lock_renewer,
        kwargs={'lockref': weakref.ref(self),
                'interval': self._lock_renewal_interval,
                'stop': self._lock_renewal_stop}
    )
    # Daemonize so a live renewal thread never blocks interpreter exit.
    # Assigning .daemon replaces the deprecated Thread.setDaemon(True).
    self._lock_renewal_thread.daemon = True
    self._lock_renewal_thread.start()
### Input:
Starts the lock refresher thread.
### Response:
def _start_lock_renewer(self):
"""
Starts the lock refresher thread.
"""
if self._lock_renewal_thread is not None:
raise AlreadyStarted("Lock refresh thread already started")
logger.debug(
"Starting thread to refresh lock every %s seconds",
self._lock_renewal_interval
)
self._lock_renewal_stop = threading.Event()
self._lock_renewal_thread = threading.Thread(
group=None,
target=self._lock_renewer,
kwargs={'lockref': weakref.ref(self),
'interval': self._lock_renewal_interval,
'stop': self._lock_renewal_stop}
)
self._lock_renewal_thread.setDaemon(True)
self._lock_renewal_thread.start() |
def render(self, context):
    """Render markdown."""
    # Imported lazily so the markdown dependency is only required when
    # this renderer is actually used.
    import markdown
    return markdown.markdown(self.get_content_from_context(context))
### Input:
Render markdown.
### Response:
def render(self, context):
"""Render markdown."""
import markdown
content = self.get_content_from_context(context)
return markdown.markdown(content) |
def user_agent(self):
"""
its a user agent string!
"""
version = ""
project_root = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(project_root, 'VERSION')) as version_file:
version = version_file.read().strip()
return "Python Snow Api Client (Version %s)" % version | its a user agent string! | Below is the the instruction that describes the task:
### Input:
its a user agent string!
### Response:
def user_agent(self):
"""
its a user agent string!
"""
version = ""
project_root = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(project_root, 'VERSION')) as version_file:
version = version_file.read().strip()
return "Python Snow Api Client (Version %s)" % version |
def save_models(self, models_file):
"""
Saves model parameters at each iteration of the optimization
:param models_file: name of the file or a file buffer, in which the results are saved.
"""
if self.model_parameters_iterations is None:
raise ValueError("No iterations have been carried out yet and hence no iterations of the BO can be saved")
iterations = np.array(range(1,self.model_parameters_iterations.shape[0]+1))[:,None]
results = np.hstack((iterations,self.model_parameters_iterations))
header = ['Iteration'] + self.model.get_model_parameters_names()
data = [header] + results.tolist()
self._write_csv(models_file, data) | Saves model parameters at each iteration of the optimization
:param models_file: name of the file or a file buffer, in which the results are saved. | Below is the the instruction that describes the task:
### Input:
Saves model parameters at each iteration of the optimization
:param models_file: name of the file or a file buffer, in which the results are saved.
### Response:
def save_models(self, models_file):
"""
Saves model parameters at each iteration of the optimization
:param models_file: name of the file or a file buffer, in which the results are saved.
"""
if self.model_parameters_iterations is None:
raise ValueError("No iterations have been carried out yet and hence no iterations of the BO can be saved")
iterations = np.array(range(1,self.model_parameters_iterations.shape[0]+1))[:,None]
results = np.hstack((iterations,self.model_parameters_iterations))
header = ['Iteration'] + self.model.get_model_parameters_names()
data = [header] + results.tolist()
self._write_csv(models_file, data) |
def output_clusters(labels, cluster_centers_indices):
"""Write in tab-separated files the vectors of cluster identities and
of indices of cluster centers.
"""
here = os.getcwd()
try:
output_directory = os.path.join(here, 'concurrent_AP_output')
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print("ERROR: concurrent_AP: output_clusters: cannot create a directory "
"for storage of the results of Affinity Propagation clustering "
"in your current working directory")
sys.exit(1)
if any(np.isnan(labels)):
fmt = '%.1f'
else:
fmt = '%d'
with open(os.path.join(output_directory, 'labels.tsv'), 'w') as fh:
np.savetxt(fh, labels, fmt = fmt, delimiter = '\t')
if cluster_centers_indices is not None:
with open(os.path.join(output_directory, 'cluster_centers_indices.tsv'), 'w') as fh:
np.savetxt(fh, cluster_centers_indices, fmt = '%.1f',
delimiter = '\t') | Write in tab-separated files the vectors of cluster identities and
of indices of cluster centers. | Below is the the instruction that describes the task:
### Input:
Write in tab-separated files the vectors of cluster identities and
of indices of cluster centers.
### Response:
def output_clusters(labels, cluster_centers_indices):
"""Write in tab-separated files the vectors of cluster identities and
of indices of cluster centers.
"""
here = os.getcwd()
try:
output_directory = os.path.join(here, 'concurrent_AP_output')
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print("ERROR: concurrent_AP: output_clusters: cannot create a directory "
"for storage of the results of Affinity Propagation clustering "
"in your current working directory")
sys.exit(1)
if any(np.isnan(labels)):
fmt = '%.1f'
else:
fmt = '%d'
with open(os.path.join(output_directory, 'labels.tsv'), 'w') as fh:
np.savetxt(fh, labels, fmt = fmt, delimiter = '\t')
if cluster_centers_indices is not None:
with open(os.path.join(output_directory, 'cluster_centers_indices.tsv'), 'w') as fh:
np.savetxt(fh, cluster_centers_indices, fmt = '%.1f',
delimiter = '\t') |
def data(self):
"""Get the data, after performing post-processing if necessary."""
data = super(DynamicListSerializer, self).data
processed_data = ReturnDict(
SideloadingProcessor(self, data).data,
serializer=self
) if self.child.envelope else ReturnList(
data,
serializer=self
)
processed_data = post_process(processed_data)
return processed_data | Get the data, after performing post-processing if necessary. | Below is the the instruction that describes the task:
### Input:
Get the data, after performing post-processing if necessary.
### Response:
def data(self):
"""Get the data, after performing post-processing if necessary."""
data = super(DynamicListSerializer, self).data
processed_data = ReturnDict(
SideloadingProcessor(self, data).data,
serializer=self
) if self.child.envelope else ReturnList(
data,
serializer=self
)
processed_data = post_process(processed_data)
return processed_data |
def _get_path_pattern_tornado45(self, router=None):
"""Return the path pattern used when routing a request. (Tornado>=4.5)
:param tornado.routing.Router router: (Optional) The router to scan.
Defaults to the application's router.
:rtype: str
"""
if router is None:
router = self.application.default_router
for rule in router.rules:
if rule.matcher.match(self.request) is not None:
if isinstance(rule.matcher, routing.PathMatches):
return rule.matcher.regex.pattern
elif isinstance(rule.target, routing.Router):
return self._get_path_pattern_tornado45(rule.target) | Return the path pattern used when routing a request. (Tornado>=4.5)
:param tornado.routing.Router router: (Optional) The router to scan.
Defaults to the application's router.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Return the path pattern used when routing a request. (Tornado>=4.5)
:param tornado.routing.Router router: (Optional) The router to scan.
Defaults to the application's router.
:rtype: str
### Response:
def _get_path_pattern_tornado45(self, router=None):
"""Return the path pattern used when routing a request. (Tornado>=4.5)
:param tornado.routing.Router router: (Optional) The router to scan.
Defaults to the application's router.
:rtype: str
"""
if router is None:
router = self.application.default_router
for rule in router.rules:
if rule.matcher.match(self.request) is not None:
if isinstance(rule.matcher, routing.PathMatches):
return rule.matcher.regex.pattern
elif isinstance(rule.target, routing.Router):
return self._get_path_pattern_tornado45(rule.target) |
def _update_advertised(self, advertised):
"""Called when advertisement data is received."""
# Advertisement data was received, pull out advertised service UUIDs and
# name from advertisement data.
if 'kCBAdvDataServiceUUIDs' in advertised:
self._advertised = self._advertised + map(cbuuid_to_uuid, advertised['kCBAdvDataServiceUUIDs']) | Called when advertisement data is received. | Below is the the instruction that describes the task:
### Input:
Called when advertisement data is received.
### Response:
def _update_advertised(self, advertised):
"""Called when advertisement data is received."""
# Advertisement data was received, pull out advertised service UUIDs and
# name from advertisement data.
if 'kCBAdvDataServiceUUIDs' in advertised:
self._advertised = self._advertised + map(cbuuid_to_uuid, advertised['kCBAdvDataServiceUUIDs']) |
def get_tokens(max_value):
"""Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary.
"""
vocab = [str(i) for i in range(max_value)]
vocab = set(vocab)
vocab.update(CodeOp.LITERALS)
vocab.update(CodeOp.KEYWORDS)
vocab |= set("".join(vocab))
return sorted(vocab) | Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary. | Below is the the instruction that describes the task:
### Input:
Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary.
### Response:
def get_tokens(max_value):
"""Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary.
"""
vocab = [str(i) for i in range(max_value)]
vocab = set(vocab)
vocab.update(CodeOp.LITERALS)
vocab.update(CodeOp.KEYWORDS)
vocab |= set("".join(vocab))
return sorted(vocab) |
def readACTIONRECORD(self):
""" Read a SWFActionRecord """
action = None
actionCode = self.readUI8()
if actionCode != 0:
actionLength = self.readUI16() if actionCode >= 0x80 else 0
#print "0x%x"%actionCode, actionLength
action = SWFActionFactory.create(actionCode, actionLength)
action.parse(self)
return action | Read a SWFActionRecord | Below is the the instruction that describes the task:
### Input:
Read a SWFActionRecord
### Response:
def readACTIONRECORD(self):
""" Read a SWFActionRecord """
action = None
actionCode = self.readUI8()
if actionCode != 0:
actionLength = self.readUI16() if actionCode >= 0x80 else 0
#print "0x%x"%actionCode, actionLength
action = SWFActionFactory.create(actionCode, actionLength)
action.parse(self)
return action |
def update_index(entries):
"""find the last 10 entries in the database and create the main
page.
Each entry in has an doc_id, so we only get the last 10 doc_ids.
This method also updates the ATOM feed.
"""
context = GLOBAL_TEMPLATE_CONTEXT.copy()
context['entries'] = entries
context['last_build'] = datetime.datetime.now().strftime(
"%Y-%m-%dT%H:%M:%SZ")
list(map(lambda x: _render(context, x[0],
os.path.join(CONFIG['output_to'], x[1])),
(('entry_index.html', 'index.html'), ('atom.xml', 'atom.xml')))) | find the last 10 entries in the database and create the main
page.
Each entry in has an doc_id, so we only get the last 10 doc_ids.
This method also updates the ATOM feed. | Below is the the instruction that describes the task:
### Input:
find the last 10 entries in the database and create the main
page.
Each entry in has an doc_id, so we only get the last 10 doc_ids.
This method also updates the ATOM feed.
### Response:
def update_index(entries):
"""find the last 10 entries in the database and create the main
page.
Each entry in has an doc_id, so we only get the last 10 doc_ids.
This method also updates the ATOM feed.
"""
context = GLOBAL_TEMPLATE_CONTEXT.copy()
context['entries'] = entries
context['last_build'] = datetime.datetime.now().strftime(
"%Y-%m-%dT%H:%M:%SZ")
list(map(lambda x: _render(context, x[0],
os.path.join(CONFIG['output_to'], x[1])),
(('entry_index.html', 'index.html'), ('atom.xml', 'atom.xml')))) |
def func_accepts_var_args(func):
"""
Return True if function 'func' accepts positional arguments *args.
"""
if six.PY2:
return inspect.getargspec(func)[1] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_POSITIONAL
) | Return True if function 'func' accepts positional arguments *args. | Below is the the instruction that describes the task:
### Input:
Return True if function 'func' accepts positional arguments *args.
### Response:
def func_accepts_var_args(func):
"""
Return True if function 'func' accepts positional arguments *args.
"""
if six.PY2:
return inspect.getargspec(func)[1] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_POSITIONAL
) |
def GetOutputDir(self, base_dir, config_filename):
"""Add the repack config filename onto the base output directory.
This allows us to repack lots of different configs to the same installer
name and still be able to distinguish them.
Args:
base_dir: output directory string
config_filename: the secondary config filename string
Returns:
String to be used as output directory for this repack.
"""
return os.path.join(base_dir,
os.path.basename(config_filename.replace(".yaml", ""))) | Add the repack config filename onto the base output directory.
This allows us to repack lots of different configs to the same installer
name and still be able to distinguish them.
Args:
base_dir: output directory string
config_filename: the secondary config filename string
Returns:
String to be used as output directory for this repack. | Below is the the instruction that describes the task:
### Input:
Add the repack config filename onto the base output directory.
This allows us to repack lots of different configs to the same installer
name and still be able to distinguish them.
Args:
base_dir: output directory string
config_filename: the secondary config filename string
Returns:
String to be used as output directory for this repack.
### Response:
def GetOutputDir(self, base_dir, config_filename):
"""Add the repack config filename onto the base output directory.
This allows us to repack lots of different configs to the same installer
name and still be able to distinguish them.
Args:
base_dir: output directory string
config_filename: the secondary config filename string
Returns:
String to be used as output directory for this repack.
"""
return os.path.join(base_dir,
os.path.basename(config_filename.replace(".yaml", ""))) |
def getBeamline(self, beamlineKw):
""" get beamline definition from all_elements, return as a list
:param beamlineKw: keyword of beamline
"""
lattice_string = list(self.all_elements.get(beamlineKw.upper()).values())[0].get('lattice')
return lattice_string[1:-1].split() | get beamline definition from all_elements, return as a list
:param beamlineKw: keyword of beamline | Below is the the instruction that describes the task:
### Input:
get beamline definition from all_elements, return as a list
:param beamlineKw: keyword of beamline
### Response:
def getBeamline(self, beamlineKw):
""" get beamline definition from all_elements, return as a list
:param beamlineKw: keyword of beamline
"""
lattice_string = list(self.all_elements.get(beamlineKw.upper()).values())[0].get('lattice')
return lattice_string[1:-1].split() |
def semester_feature(catalog, soup):
"""The year and semester information that this xml file hold courses for.
"""
raw = soup.coursedb['semesternumber']
catalog.year = int(raw[:4])
month_mapping = {1: 'Spring', 5: 'Summer', 9: 'Fall'}
catalog.month = int(raw[4:])
catalog.semester = month_mapping[catalog.month]
catalog.name = soup.coursedb['semesterdesc']
logger.info('Catalog type: %s' % catalog.name) | The year and semester information that this xml file hold courses for. | Below is the the instruction that describes the task:
### Input:
The year and semester information that this xml file hold courses for.
### Response:
def semester_feature(catalog, soup):
"""The year and semester information that this xml file hold courses for.
"""
raw = soup.coursedb['semesternumber']
catalog.year = int(raw[:4])
month_mapping = {1: 'Spring', 5: 'Summer', 9: 'Fall'}
catalog.month = int(raw[4:])
catalog.semester = month_mapping[catalog.month]
catalog.name = soup.coursedb['semesterdesc']
logger.info('Catalog type: %s' % catalog.name) |
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url | Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment' | Below is the the instruction that describes the task:
### Input:
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
### Response:
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url |
def run(args):
"""Process command line arguments and walk inputs."""
raw_arguments = get_arguments(args[1:])
process_arguments(raw_arguments)
walk.run()
return True | Process command line arguments and walk inputs. | Below is the the instruction that describes the task:
### Input:
Process command line arguments and walk inputs.
### Response:
def run(args):
"""Process command line arguments and walk inputs."""
raw_arguments = get_arguments(args[1:])
process_arguments(raw_arguments)
walk.run()
return True |
def split_comp_info(self, catalog_name, split_ver, split_key):
""" Return the info for a particular split key """
return self._split_comp_info_dicts["%s_%s" % (catalog_name, split_ver)][split_key] | Return the info for a particular split key | Below is the the instruction that describes the task:
### Input:
Return the info for a particular split key
### Response:
def split_comp_info(self, catalog_name, split_ver, split_key):
""" Return the info for a particular split key """
return self._split_comp_info_dicts["%s_%s" % (catalog_name, split_ver)][split_key] |
def _normalize_name(name):
"""Returns a normalized element tag or attribute name. A tag name or attribute name can be given
either as a string, or as a 2-tuple. If a 2-tuple, it is interpreted as (namespace, name) and is
returned joined by a colon ":"
:name: a string or a 2-tuple of strings
"""
if isinstance(name, tuple):
ns, nm = name
return "{}:{}".format(ns, nm)
else:
return name | Returns a normalized element tag or attribute name. A tag name or attribute name can be given
either as a string, or as a 2-tuple. If a 2-tuple, it is interpreted as (namespace, name) and is
returned joined by a colon ":"
:name: a string or a 2-tuple of strings | Below is the the instruction that describes the task:
### Input:
Returns a normalized element tag or attribute name. A tag name or attribute name can be given
either as a string, or as a 2-tuple. If a 2-tuple, it is interpreted as (namespace, name) and is
returned joined by a colon ":"
:name: a string or a 2-tuple of strings
### Response:
def _normalize_name(name):
"""Returns a normalized element tag or attribute name. A tag name or attribute name can be given
either as a string, or as a 2-tuple. If a 2-tuple, it is interpreted as (namespace, name) and is
returned joined by a colon ":"
:name: a string or a 2-tuple of strings
"""
if isinstance(name, tuple):
ns, nm = name
return "{}:{}".format(ns, nm)
else:
return name |
def file_close(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /file-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Files#API-method%3A-%2Ffile-xxxx%2Fclose
"""
return DXHTTPRequest('/%s/close' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /file-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Files#API-method%3A-%2Ffile-xxxx%2Fclose | Below is the the instruction that describes the task:
### Input:
Invokes the /file-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Files#API-method%3A-%2Ffile-xxxx%2Fclose
### Response:
def file_close(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /file-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Files#API-method%3A-%2Ffile-xxxx%2Fclose
"""
return DXHTTPRequest('/%s/close' % object_id, input_params, always_retry=always_retry, **kwargs) |
def optimise(args, **kwargs):
"""
Optimise the model parameters.
"""
expected_output_files = kwargs.pop("expected_output_files", None)
if not expected_output_files:
expected_output_files = ["optimised.pkl"]
if args.plotting:
expected_output_files.extend([
"projection-estimate.{}".format(args.plot_format),
"projection-optimised.{}".format(args.plot_format)
])
# Estimate the model parameters, unless they are already specified.
model = sick.models.Model(args.model)
initial_theta = model._configuration.get("initial_theta", {})
if len(set(model.parameters).difference(initial_theta)) == 0:
model, data, metadata = _pre_solving(args, expected_output_files)
else:
model, data, metadata, initial_theta = estimate(args,
expected_output_files=expected_output_files, __return_result=True)
try:
theta, chisq, dof, model_fluxes = model.optimise(data,
initial_theta=initial_theta, full_output=True, debug=args.debug)
except:
logger.exception("Failed to optimise model parameters")
raise
metadata["optimised"] = {
"theta": theta,
"chi_sq": chisq,
"dof": dof,
"r_chi_sq": chisq/dof
}
logger.info("Optimised model parameters are:")
_announce_theta(theta)
logger.info("With a chi-sq value of {0:.1f} (reduced {1:.1f}; DOF {2:.1f})"\
.format(chisq, chisq/dof, dof))
if args.plotting:
fig = sick.plot.spectrum(data, model_flux=model_fluxes)
filename = _prefix(args, "projection-optimised.{}".format(
args.plot_format))
fig.savefig(filename)
logger.info("Created figure {}".format(filename))
if kwargs.pop("__return_result", False):
return (model, data, metadata, theta)
# Write the results to file.
_write_output(_prefix(args, "optimised.pkl"), metadata)
return None | Optimise the model parameters. | Below is the the instruction that describes the task:
### Input:
Optimise the model parameters.
### Response:
def optimise(args, **kwargs):
"""
Optimise the model parameters.
"""
expected_output_files = kwargs.pop("expected_output_files", None)
if not expected_output_files:
expected_output_files = ["optimised.pkl"]
if args.plotting:
expected_output_files.extend([
"projection-estimate.{}".format(args.plot_format),
"projection-optimised.{}".format(args.plot_format)
])
# Estimate the model parameters, unless they are already specified.
model = sick.models.Model(args.model)
initial_theta = model._configuration.get("initial_theta", {})
if len(set(model.parameters).difference(initial_theta)) == 0:
model, data, metadata = _pre_solving(args, expected_output_files)
else:
model, data, metadata, initial_theta = estimate(args,
expected_output_files=expected_output_files, __return_result=True)
try:
theta, chisq, dof, model_fluxes = model.optimise(data,
initial_theta=initial_theta, full_output=True, debug=args.debug)
except:
logger.exception("Failed to optimise model parameters")
raise
metadata["optimised"] = {
"theta": theta,
"chi_sq": chisq,
"dof": dof,
"r_chi_sq": chisq/dof
}
logger.info("Optimised model parameters are:")
_announce_theta(theta)
logger.info("With a chi-sq value of {0:.1f} (reduced {1:.1f}; DOF {2:.1f})"\
.format(chisq, chisq/dof, dof))
if args.plotting:
fig = sick.plot.spectrum(data, model_flux=model_fluxes)
filename = _prefix(args, "projection-optimised.{}".format(
args.plot_format))
fig.savefig(filename)
logger.info("Created figure {}".format(filename))
if kwargs.pop("__return_result", False):
return (model, data, metadata, theta)
# Write the results to file.
_write_output(_prefix(args, "optimised.pkl"), metadata)
return None |
def to_cloudformation(self, **kwargs):
"""Returns the Lambda EventSourceMapping to which this pull event corresponds. Adds the appropriate managed
policy to the function's execution role, if such a role is provided.
:param dict kwargs: a dict containing the execution role generated for the function
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list
"""
function = kwargs.get('function')
if not function:
raise TypeError("Missing required keyword argument: function")
resources = []
lambda_eventsourcemapping = LambdaEventSourceMapping(self.logical_id)
resources.append(lambda_eventsourcemapping)
try:
# Name will not be available for Alias resources
function_name_or_arn = function.get_runtime_attr("name")
except NotImplementedError:
function_name_or_arn = function.get_runtime_attr("arn")
if not self.Stream and not self.Queue:
raise InvalidEventException(
self.relative_id, "No Queue (for SQS) or Stream (for Kinesis or DynamoDB) provided.")
if self.Stream and not self.StartingPosition:
raise InvalidEventException(
self.relative_id, "StartingPosition is required for Kinesis and DynamoDB.")
lambda_eventsourcemapping.FunctionName = function_name_or_arn
lambda_eventsourcemapping.EventSourceArn = self.Stream or self.Queue
lambda_eventsourcemapping.StartingPosition = self.StartingPosition
lambda_eventsourcemapping.BatchSize = self.BatchSize
lambda_eventsourcemapping.Enabled = self.Enabled
if 'Condition' in function.resource_attributes:
lambda_eventsourcemapping.set_resource_attribute('Condition', function.resource_attributes['Condition'])
if 'role' in kwargs:
self._link_policy(kwargs['role'])
return resources | Returns the Lambda EventSourceMapping to which this pull event corresponds. Adds the appropriate managed
policy to the function's execution role, if such a role is provided.
:param dict kwargs: a dict containing the execution role generated for the function
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list | Below is the the instruction that describes the task:
### Input:
Returns the Lambda EventSourceMapping to which this pull event corresponds. Adds the appropriate managed
policy to the function's execution role, if such a role is provided.
:param dict kwargs: a dict containing the execution role generated for the function
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list
### Response:
def to_cloudformation(self, **kwargs):
"""Returns the Lambda EventSourceMapping to which this pull event corresponds. Adds the appropriate managed
policy to the function's execution role, if such a role is provided.
:param dict kwargs: a dict containing the execution role generated for the function
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list
"""
function = kwargs.get('function')
if not function:
raise TypeError("Missing required keyword argument: function")
resources = []
lambda_eventsourcemapping = LambdaEventSourceMapping(self.logical_id)
resources.append(lambda_eventsourcemapping)
try:
# Name will not be available for Alias resources
function_name_or_arn = function.get_runtime_attr("name")
except NotImplementedError:
function_name_or_arn = function.get_runtime_attr("arn")
if not self.Stream and not self.Queue:
raise InvalidEventException(
self.relative_id, "No Queue (for SQS) or Stream (for Kinesis or DynamoDB) provided.")
if self.Stream and not self.StartingPosition:
raise InvalidEventException(
self.relative_id, "StartingPosition is required for Kinesis and DynamoDB.")
lambda_eventsourcemapping.FunctionName = function_name_or_arn
lambda_eventsourcemapping.EventSourceArn = self.Stream or self.Queue
lambda_eventsourcemapping.StartingPosition = self.StartingPosition
lambda_eventsourcemapping.BatchSize = self.BatchSize
lambda_eventsourcemapping.Enabled = self.Enabled
if 'Condition' in function.resource_attributes:
lambda_eventsourcemapping.set_resource_attribute('Condition', function.resource_attributes['Condition'])
if 'role' in kwargs:
self._link_policy(kwargs['role'])
return resources |
def _get_base_interface_info(interface):
'''
return base details about given interface
'''
blacklist = {
'tcpip': {
'name': [],
'type': [],
'additional_protocol': False
},
'disabled': {
'name': ['eth0'],
'type': ['gadget'],
'additional_protocol': False
},
'ethercat': {
'name': ['eth0'],
'type': ['gadget', 'usb', 'wlan'],
'additional_protocol': True
},
'_': {
'usb': 'sys',
'gadget': 'uevent',
'wlan': 'uevent'
}
}
return {
'label': interface.name,
'connectionid': interface.name,
'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist),
'adapter_mode': _get_adapter_mode_info(interface.name),
'up': interface.flags & IFF_RUNNING != 0,
'ipv4': {
'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'],
'requestmode': _get_request_mode_info(interface.name)
},
'hwaddr': interface.hwaddr[:-1]
} | return base details about given interface | Below is the the instruction that describes the task:
### Input:
return base details about given interface
### Response:
def _get_base_interface_info(interface):
'''
return base details about given interface
'''
blacklist = {
'tcpip': {
'name': [],
'type': [],
'additional_protocol': False
},
'disabled': {
'name': ['eth0'],
'type': ['gadget'],
'additional_protocol': False
},
'ethercat': {
'name': ['eth0'],
'type': ['gadget', 'usb', 'wlan'],
'additional_protocol': True
},
'_': {
'usb': 'sys',
'gadget': 'uevent',
'wlan': 'uevent'
}
}
return {
'label': interface.name,
'connectionid': interface.name,
'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist),
'adapter_mode': _get_adapter_mode_info(interface.name),
'up': interface.flags & IFF_RUNNING != 0,
'ipv4': {
'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'],
'requestmode': _get_request_mode_info(interface.name)
},
'hwaddr': interface.hwaddr[:-1]
} |
def append_fallback(model, fields):
"""
If translated field is encountered, add also all its fallback fields.
Returns tuple: (set_of_new_fields_to_use, set_of_translated_field_names)
"""
fields = set(fields)
trans = set()
from modeltranslation.translator import translator
opts = translator.get_options_for_model(model)
for key, _ in opts.fields.items():
if key in fields:
langs = resolution_order(get_language(), getattr(model, key).fallback_languages)
fields = fields.union(build_localized_fieldname(key, lang) for lang in langs)
fields.remove(key)
trans.add(key)
return fields, trans | If translated field is encountered, add also all its fallback fields.
Returns tuple: (set_of_new_fields_to_use, set_of_translated_field_names) | Below is the instruction that describes the task:
### Input:
If translated field is encountered, add also all its fallback fields.
Returns tuple: (set_of_new_fields_to_use, set_of_translated_field_names)
### Response:
def append_fallback(model, fields):
    """
    If a translated field is encountered, add also all its fallback fields.

    :param model: model class registered with modeltranslation
    :param fields: iterable of field names to expand
    :returns: tuple ``(set_of_new_fields_to_use, set_of_translated_field_names)``
    """
    fields = set(fields)
    trans = set()
    from modeltranslation.translator import translator
    opts = translator.get_options_for_model(model)
    # Iterate keys directly -- the option values were never used.
    for key in opts.fields:
        if key in fields:
            # Replace the translated field with its localized variants for
            # every language in the resolution order.
            langs = resolution_order(get_language(), getattr(model, key).fallback_languages)
            fields = fields.union(build_localized_fieldname(key, lang) for lang in langs)
            fields.remove(key)
            trans.add(key)
    return fields, trans
def insert_element_to_dict_of_dicts_of_list(dict_of_dict_of_list, first_key, second_key, parser):
"""
Utility method
:param dict_of_dict_of_list:
:param first_key:
:param second_key:
:param parser:
:return:
"""
list_to_insert = parser if isinstance(parser, list) else [parser]
if first_key not in dict_of_dict_of_list.keys():
dict_of_dict_of_list[first_key] = {second_key: list_to_insert}
else:
if second_key not in dict_of_dict_of_list[first_key].keys():
dict_of_dict_of_list[first_key][second_key] = list_to_insert
else:
dict_of_dict_of_list[first_key][second_key] += list_to_insert | Utility method
:param dict_of_dict_of_list:
:param first_key:
:param second_key:
:param parser:
:return: | Below is the instruction that describes the task:
### Input:
Utility method
:param dict_of_dict_of_list:
:param first_key:
:param second_key:
:param parser:
:return:
### Response:
def insert_element_to_dict_of_dicts_of_list(dict_of_dict_of_list, first_key, second_key, parser):
    """
    Append ``parser`` to the list stored at ``[first_key][second_key]``,
    creating the intermediate containers as needed.

    :param dict_of_dict_of_list: two-level dict of lists (mutated in place)
    :param first_key: outer dict key
    :param second_key: inner dict key
    :param parser: element to append, or a list of elements to extend with
    :return: None (the dict is updated in place)
    """
    list_to_insert = parser if isinstance(parser, list) else [parser]
    # setdefault creates the inner dict on first use, avoiding the explicit
    # "in .keys()" membership checks of the original.
    inner = dict_of_dict_of_list.setdefault(first_key, {})
    if second_key in inner:
        inner[second_key] += list_to_insert
    else:
        inner[second_key] = list_to_insert
def p_block(self, p):
""" block_decl : block_open declaration_list brace_close
"""
p[0] = Block(list(p)[1:-1], p.lineno(3))
self.scope.pop()
        self.scope.add_block(p[0]) | block_decl : block_open declaration_list brace_close | Below is the instruction that describes the task:
### Input:
block_decl : block_open declaration_list brace_close
### Response:
def p_block(self, p):
    """ block_decl : block_open declaration_list brace_close
    """
    # NOTE: the docstring above is a PLY grammar production -- PLY parses it
    # at runtime, so it must not be reworded.
    # list(p)[1:-1] keeps (block_open, declaration_list); the closing brace
    # token itself is excluded, but its line number is recorded on the node.
    p[0] = Block(list(p)[1:-1], p.lineno(3))
    # Leave the block's scope, then register the finished block in the
    # (now current) enclosing scope.
    self.scope.pop()
    self.scope.add_block(p[0])
def viewit(self, post_id):
'''
View the link.
'''
rec = MLink.get_by_uid(post_id)
if not rec:
kwd = {'info': '您要找的分类不存在。'}
self.render('misc/html/404.html', kwd=kwd)
return False
kwd = {
'pager': '',
'editable': self.editable(),
}
self.render('misc/link/link_view.html',
view=rec,
kwd=kwd,
userinfo=self.userinfo,
cfg=CMS_CFG, ) | View the link. | Below is the the instruction that describes the task:
### Input:
View the link.
### Response:
def viewit(self, post_id):
    '''
    Render the view page for a single link; fall back to a 404 page when
    the link does not exist.
    '''
    record = MLink.get_by_uid(post_id)
    if not record:
        self.render('misc/html/404.html',
                    kwd={'info': '您要找的分类不存在。'})
        return False
    self.render('misc/link/link_view.html',
                view=record,
                kwd={'pager': '', 'editable': self.editable()},
                userinfo=self.userinfo,
                cfg=CMS_CFG, )
def setParameter(self, parameterName, index, parameterValue):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
"""
if parameterName in self._spatialArgNames:
setattr(self._sfdr, parameterName, parameterValue)
elif parameterName == "logPathInput":
self.logPathInput = parameterValue
# Close any existing log file
if self._fpLogSPInput:
self._fpLogSPInput.close()
self._fpLogSPInput = None
# Open a new log file
if parameterValue:
self._fpLogSPInput = open(self.logPathInput, 'w')
elif parameterName == "logPathOutput":
self.logPathOutput = parameterValue
# Close any existing log file
if self._fpLogSP:
self._fpLogSP.close()
self._fpLogSP = None
# Open a new log file
if parameterValue:
self._fpLogSP = open(self.logPathOutput, 'w')
elif parameterName == "logPathOutputDense":
self.logPathOutputDense = parameterValue
# Close any existing log file
if self._fpLogSPDense:
self._fpLogSPDense.close()
self._fpLogSPDense = None
# Open a new log file
if parameterValue:
self._fpLogSPDense = open(self.logPathOutputDense, 'w')
elif hasattr(self, parameterName):
setattr(self, parameterName, parameterValue)
else:
raise Exception('Unknown parameter: ' + parameterName) | Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here. | Below is the the instruction that describes the task:
### Input:
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
### Response:
def setParameter(self, parameterName, index, parameterValue):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.

    Set the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter set mechanism. The ones that need
    special treatment -- spatial-pooler arguments and the three log-file
    paths -- are explicitly handled here.

    :param parameterName: name of the parameter to set
    :param index: unused here (required by the PyRegion interface)
    :param parameterValue: new value; for the log-path parameters a falsy
        value closes any open log file without opening a new one
    :raises Exception: if the parameter name is unknown
    """
    def _reopenLog(currentFile, path):
        # Close any existing log file, then open a new one if a path is
        # given.  Factors out the branch body repeated three times below.
        if currentFile:
            currentFile.close()
        return open(path, 'w') if path else None

    if parameterName in self._spatialArgNames:
        setattr(self._sfdr, parameterName, parameterValue)
    elif parameterName == "logPathInput":
        self.logPathInput = parameterValue
        self._fpLogSPInput = _reopenLog(self._fpLogSPInput, parameterValue)
    elif parameterName == "logPathOutput":
        self.logPathOutput = parameterValue
        self._fpLogSP = _reopenLog(self._fpLogSP, parameterValue)
    elif parameterName == "logPathOutputDense":
        self.logPathOutputDense = parameterValue
        self._fpLogSPDense = _reopenLog(self._fpLogSPDense, parameterValue)
    elif hasattr(self, parameterName):
        setattr(self, parameterName, parameterValue)
    else:
        raise Exception('Unknown parameter: ' + parameterName)
def sort(words, context="", strict=True, relative=True, service=YAHOO_SEARCH,
wait=10, asynchronous=False, cached=False):
"""Performs a Yahoo sort on the given list.
Sorts the items in the list according to
the result count Yahoo yields on an item.
Setting a context sorts the items according
to their relation to this context;
for example sorting [red, green, blue] by "love"
yields red as the highest results,
likely because red is the color commonly associated with love.
"""
results = []
for word in words:
q = word + " " + context
q.strip()
if strict: q = "\""+q+"\""
r = YahooSearch(q, 1, 1, service, context, wait, asynchronous, cached)
results.append(r)
results.sort(YahooResults.__cmp__)
results.reverse()
if relative and len(results) > 0:
sum = 0.000000000000000001
for r in results: sum += r.total
for r in results: r.total /= float(sum)
results = [(r.query, r.total) for r in results]
return results | Performs a Yahoo sort on the given list.
Sorts the items in the list according to
the result count Yahoo yields on an item.
Setting a context sorts the items according
to their relation to this context;
for example sorting [red, green, blue] by "love"
yields red as the highest results,
likely because red is the color commonly associated with love. | Below is the the instruction that describes the task:
### Input:
Performs a Yahoo sort on the given list.
Sorts the items in the list according to
the result count Yahoo yields on an item.
Setting a context sorts the items according
to their relation to this context;
for example sorting [red, green, blue] by "love"
yields red as the highest results,
likely because red is the color commonly associated with love.
### Response:
def sort(words, context="", strict=True, relative=True, service=YAHOO_SEARCH,
         wait=10, asynchronous=False, cached=False):
    """Performs a Yahoo sort on the given list.

    Sorts the items in the list according to
    the result count Yahoo yields on an item.

    Setting a context sorts the items according
    to their relation to this context;
    for example sorting [red, green, blue] by "love"
    yields red as the highest results,
    likely because red is the color commonly associated with love.
    """
    results = []
    for word in words:
        # BUG FIX: str.strip() returns a new string; the original discarded
        # the result, leaving a trailing space when context == "".
        q = (word + " " + context).strip()
        if strict:
            q = "\"" + q + "\""
        r = YahooSearch(q, 1, 1, service, context, wait, asynchronous, cached)
        results.append(r)
    results.sort(YahooResults.__cmp__)
    results.reverse()
    if relative and len(results) > 0:
        # Normalise totals so they sum to ~1; the tiny epsilon guards
        # against division by zero when every result count is 0.
        # (renamed from "sum", which shadowed the builtin)
        total = 0.000000000000000001
        for r in results:
            total += r.total
        for r in results:
            r.total /= float(total)
    results = [(r.query, r.total) for r in results]
    return results
def QA_util_add_months(dt, months):
"""
#返回dt隔months个月后的日期,months相当于步长
"""
dt = datetime.datetime.strptime(
dt, "%Y-%m-%d") + relativedelta(months=months)
return(dt) | #返回dt隔months个月后的日期,months相当于步长 | Below is the the instruction that describes the task:
### Input:
#返回dt隔months个月后的日期,months相当于步长
### Response:
def QA_util_add_months(dt, months):
    """
    Return the date obtained by shifting the "%Y-%m-%d" date string ``dt``
    forward by ``months`` months (``months`` may be negative).
    """
    parsed = datetime.datetime.strptime(dt, "%Y-%m-%d")
    return parsed + relativedelta(months=months)
def copy(self):
"""
Return a shallow copy of a pqdict.
"""
return self.__class__(self, key=self._keyfn, precedes=self._precedes) | Return a shallow copy of a pqdict. | Below is the the instruction that describes the task:
### Input:
Return a shallow copy of a pqdict.
### Response:
def copy(self):
    """
    Create and return a shallow copy of this pqdict, preserving its key
    function and ordering predicate.
    """
    cls = self.__class__
    return cls(self, key=self._keyfn, precedes=self._precedes)
def prettylist(list_):
"""
Filter out duplicate values while keeping order.
"""
if not list_:
return ''
values = set()
uniqueList = []
for entry in list_:
if not entry in values:
values.add(entry)
uniqueList.append(entry)
return uniqueList[0] if len(uniqueList) == 1 \
else '[' + '; '.join(uniqueList) + ']' | Filter out duplicate values while keeping order. | Below is the the instruction that describes the task:
### Input:
Filter out duplicate values while keeping order.
### Response:
def prettylist(list_):
    """
    Format a list for display, dropping duplicate values while keeping
    first-seen order.

    :param list_: iterable of strings (may be empty or None)
    :return: '' for a falsy input, the single value if only one unique
        value remains, otherwise '[v1; v2; ...]'
    """
    if not list_:
        return ''
    seen = set()
    unique = []
    for entry in list_:
        # idiomatic "not in" (original used "not entry in seen")
        if entry not in seen:
            seen.add(entry)
            unique.append(entry)
    if len(unique) == 1:
        return unique[0]
    return '[' + '; '.join(unique) + ']'
def get_settings(self):
"""
Returns current settings.
Only accessible if authenticated as the user.
"""
url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
return self._imgur._send_request(url) | Returns current settings.
Only accessible if authenticated as the user. | Below is the the instruction that describes the task:
### Input:
Returns current settings.
Only accessible if authenticated as the user.
### Response:
def get_settings(self):
    """
    Fetch the current account settings for this user.

    Only accessible when authenticated as the user.
    """
    url = "{0}/3/account/{1}/settings".format(self._imgur._base_url, self.name)
    return self._imgur._send_request(url)
def delete_container_instance_group(access_token, subscription_id, resource_group,
container_group_name):
'''Delete a container group from a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
return do_delete(endpoint, access_token) | Delete a container group from a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response. | Below is the the instruction that describes the task:
### Input:
Delete a container group from a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response.
### Response:
def delete_container_instance_group(access_token, subscription_id, resource_group,
                                    container_group_name):
    '''Delete a container group from a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.

    Returns:
        HTTP response.
    '''
    endpoint = (
        '{rm}/subscriptions/{sub}/resourcegroups/{rg}'
        '/providers/Microsoft.ContainerInstance/ContainerGroups/{name}'
        '?api-version={api}'
    ).format(rm=get_rm_endpoint(), sub=subscription_id, rg=resource_group,
             name=container_group_name, api=CONTAINER_API)
    return do_delete(endpoint, access_token)
def get_configuration_dict(self, secret_attrs=False):
"""Overrides superclass method and renames some properties"""
cd = super(TreeCollectionsShard, self).get_configuration_dict(secret_attrs=secret_attrs)
# "rename" some keys in the dict provided
cd['number of collections'] = cd.pop('number of documents')
cd['collections'] = cd.pop('documents')
return cd | Overrides superclass method and renames some properties | Below is the the instruction that describes the task:
### Input:
Overrides superclass method and renames some properties
### Response:
def get_configuration_dict(self, secret_attrs=False):
    """Overrides the superclass method, renaming the document-centric keys
    in the returned dict to their collection-centric equivalents."""
    config = super(TreeCollectionsShard, self).get_configuration_dict(secret_attrs=secret_attrs)
    # pop-and-reassign renames the keys while keeping their values
    config['number of collections'] = config.pop('number of documents')
    config['collections'] = config.pop('documents')
    return config
def fit(self, X, y, **kwargs):
"""
Fits the learning curve with the wrapped model to the specified data.
Draws training and test score curves and saves the scores to the
estimator.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
"""
self.cv_scores_ = cross_val_score(
self.estimator, X, y, cv=self.cv, scoring=self.scoring
)
self.cv_scores_mean_ = self.cv_scores_.mean()
self.draw()
return self | Fits the learning curve with the wrapped model to the specified data.
Draws training and test score curves and saves the scores to the
estimator.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance | Below is the instruction that describes the task:
### Input:
Fits the learning curve with the wrapped model to the specified data.
Draws training and test score curves and saves the scores to the
estimator.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
### Response:
def fit(self, X, y, **kwargs):
    """
    Run cross-validation with the wrapped model on the given data, store
    the per-fold scores and their mean on the estimator, then draw the
    visualization.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    Returns
    -------
    self : instance
    """
    scores = cross_val_score(self.estimator, X, y, cv=self.cv, scoring=self.scoring)
    self.cv_scores_ = scores
    self.cv_scores_mean_ = scores.mean()
    self.draw()
    return self
def transform(self, attrs):
"""Perform all actions on a given attribute dict."""
self.collect(attrs)
self.add_missing_implementations()
self.fill_attrs(attrs) | Perform all actions on a given attribute dict. | Below is the the instruction that describes the task:
### Input:
Perform all actions on a given attribute dict.
### Response:
def transform(self, attrs):
    """Perform all actions on a given attribute dict.

    Runs the three phases in order: collect from ``attrs``, add missing
    implementations, then fill ``attrs`` -- presumably mutating it in
    place, since nothing is returned; TODO confirm against fill_attrs.
    """
    self.collect(attrs)
    self.add_missing_implementations()
    self.fill_attrs(attrs)
def store_oui(self, port_uuid, oui_type, oui_data):
"""Function for storing the OUI.
param uuid: UUID of the vNIC
param oui_type: OUI ID
param oui_data: OUI Opaque Data
"""
self.oui_vif_map[port_uuid] = {'oui_id': oui_type,
'oui_data': oui_data} | Function for storing the OUI.
param uuid: UUID of the vNIC
param oui_type: OUI ID
param oui_data: OUI Opaque Data | Below is the the instruction that describes the task:
### Input:
Function for storing the OUI.
param uuid: UUID of the vNIC
param oui_type: OUI ID
param oui_data: OUI Opaque Data
### Response:
def store_oui(self, port_uuid, oui_type, oui_data):
    """Record the OUI information for a vNIC.

    param port_uuid: UUID of the vNIC
    param oui_type: OUI ID
    param oui_data: OUI Opaque Data
    """
    entry = {'oui_id': oui_type, 'oui_data': oui_data}
    self.oui_vif_map[port_uuid] = entry
def getAll(self):
"""Get all objects
Returns:
List: list of all objects
"""
objs = []
for obj in self.model.db:
objs.append(self._cast_model(obj))
return objs | Get all objects
Returns:
List: list of all objects | Below is the the instruction that describes the task:
### Input:
Get all objects
Returns:
List: list of all objects
### Response:
def getAll(self):
    """Return every object in the model's database, cast to the public model type.

    Returns:
        List: list of all objects
    """
    return [self._cast_model(record) for record in self.model.db]
def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if infer_range is not None:
warnings.warn('`infer_range` argument has been deprecated',
DeprecationWarning)
if ctx is None:
ctx = current_context()
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=False, dtype=dtype, ctx=str(ctx)) | Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32) | Below is the the instruction that describes the task:
### Input:
Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
### Response:
def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
    """Returns evenly spaced values within a given interval.

    Values are generated within the half-open interval [`start`, `stop`). In other
    words, the interval includes `start` but excludes `stop`. The function is
    similar to the built-in Python function `range` and to `numpy.arange`,
    but returns an `NDArray`.

    Parameters
    ----------
    start : number, optional
        Start of interval. The default start value is 0.
    stop : number
        End of interval.
    step : number, optional
        Spacing between values. The default step size is 1.
    repeat : int, optional
        Number of times to repeat each element. The default repeat count is 1.
    infer_range : boolean, optional
        Deprecated; a warning is emitted when it is supplied.
    ctx : Context, optional
        Device context. Default context is the current default context.
    dtype : str or numpy.dtype, optional
        The data type of the `NDArray`. The default datatype is `np.float32`.

    Returns
    -------
    NDArray
        `NDArray` of evenly spaced values in the specified range.

    Examples
    --------
    >>> mx.nd.arange(3).asnumpy()
    array([ 0.,  1.,  2.], dtype=float32)
    >>> mx.nd.arange(2, 6).asnumpy()
    array([ 2.,  3.,  4.,  5.], dtype=float32)
    >>> mx.nd.arange(2, 6, step=2).asnumpy()
    array([ 2.,  4.], dtype=float32)
    >>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
    array([ 2. ,  2. ,  3.5,  3.5,  5. ,  5. ], dtype=float32)
    >>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
    array([2, 2, 2, 4, 4, 4], dtype=int32)
    """
    if infer_range is not None:
        warnings.warn('`infer_range` argument has been deprecated',
                      DeprecationWarning)
    device = ctx if ctx is not None else current_context()
    return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
                             infer_range=False, dtype=dtype, ctx=str(device))
def __normalize(self,text):
"""
Substitute words in the string, according to the specified Normal,
e.g. "I'm" -> "I am"
:type str: str
:param str: The string to be mapped
:rtype: str
"""
return self._normalizer_regex.sub(lambda mo:
self._normalizer[mo.string[mo.start():mo.end()]],
text.lower()) | Substitute words in the string, according to the specified Normal,
e.g. "I'm" -> "I am"
:type str: str
:param str: The string to be mapped
:rtype: str | Below is the the instruction that describes the task:
### Input:
Substitute words in the string, according to the specified Normal,
e.g. "I'm" -> "I am"
:type str: str
:param str: The string to be mapped
:rtype: str
### Response:
def __normalize(self,text):
    """
    Replace words in the lower-cased *text* according to the normalizer
    mapping, e.g. "I'm" -> "I am".

    :type text: str
    :param text: the string to be mapped
    :rtype: str
    """
    lowered = text.lower()

    def _substitute(match):
        # Look up the exact matched span in the normalizer table.
        return self._normalizer[match.string[match.start():match.end()]]

    return self._normalizer_regex.sub(_substitute, lowered)
def _draw_rectangle(data, obj, draw_options):
"""Return the PGFPlots code for rectangles.
"""
# Objects with labels are plot objects (from bar charts, etc). Even those without
# labels explicitly set have a label of "_nolegend_". Everything else should be
# skipped because they likely correspong to axis/legend objects which are handled by
# PGFPlots
label = obj.get_label()
if label == "":
return data, []
# Get actual label, bar charts by default only give rectangles labels of
# "_nolegend_". See <https://stackoverflow.com/q/35881290/353337>.
handles, labels = obj.axes.get_legend_handles_labels()
labelsFound = [
label for h, label in zip(handles, labels) if obj in h.get_children()
]
if len(labelsFound) == 1:
label = labelsFound[0]
left_lower_x = obj.get_x()
left_lower_y = obj.get_y()
ff = data["float format"]
cont = (
"\\draw[{}] (axis cs:" + ff + "," + ff + ") "
"rectangle (axis cs:" + ff + "," + ff + ");\n"
).format(
",".join(draw_options),
left_lower_x,
left_lower_y,
left_lower_x + obj.get_width(),
left_lower_y + obj.get_height(),
)
if label != "_nolegend_" and label not in data["rectangle_legends"]:
data["rectangle_legends"].add(label)
cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n".format(
",".join(draw_options)
)
cont += "\\addlegendentry{{{}}}\n\n".format(label)
return data, cont | Return the PGFPlots code for rectangles. | Below is the the instruction that describes the task:
### Input:
Return the PGFPlots code for rectangles.
### Response:
def _draw_rectangle(data, obj, draw_options):
    """Return the PGFPlots code for rectangles.

    :param data: shared conversion-state dict; reads "float format" and
        reads/updates the "rectangle_legends" set
    :param obj: the matplotlib Rectangle patch to convert
    :param draw_options: list of PGF option strings joined into ``\\draw[...]``
    :returns: ``(data, cont)`` where ``cont`` is the generated TikZ string,
        or ``(data, [])`` for axis/legend rectangles that are skipped.
        NOTE(review): the skip path returns a list while the normal path
        returns a str -- callers apparently tolerate both; confirm.
    """
    # Objects with labels are plot objects (from bar charts, etc). Even those without
    # labels explicitly set have a label of "_nolegend_". Everything else should be
    # skipped because they likely correspong to axis/legend objects which are handled by
    # PGFPlots
    label = obj.get_label()
    if label == "":
        return data, []
    # Get actual label, bar charts by default only give rectangles labels of
    # "_nolegend_". See <https://stackoverflow.com/q/35881290/353337>.
    handles, labels = obj.axes.get_legend_handles_labels()
    labelsFound = [
        label for h, label in zip(handles, labels) if obj in h.get_children()
    ]
    if len(labelsFound) == 1:
        label = labelsFound[0]
    left_lower_x = obj.get_x()
    left_lower_y = obj.get_y()
    ff = data["float format"]
    cont = (
        "\\draw[{}] (axis cs:" + ff + "," + ff + ") "
        "rectangle (axis cs:" + ff + "," + ff + ");\n"
    ).format(
        ",".join(draw_options),
        left_lower_x,
        left_lower_y,
        left_lower_x + obj.get_width(),
        left_lower_y + obj.get_height(),
    )
    # Emit the legend image/entry only once per distinct label.
    if label != "_nolegend_" and label not in data["rectangle_legends"]:
        data["rectangle_legends"].add(label)
        cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n".format(
            ",".join(draw_options)
        )
        cont += "\\addlegendentry{{{}}}\n\n".format(label)
    return data, cont
def outer_left_join(self, join_streamlet, window_config, join_function):
"""Return a new Streamlet by left join_streamlet with this streamlet
"""
from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt
join_streamlet_result = JoinStreamlet(JoinBolt.OUTER_LEFT, window_config,
join_function, self, join_streamlet)
self._add_child(join_streamlet_result)
join_streamlet._add_child(join_streamlet_result)
return join_streamlet_result | Return a new Streamlet by left join_streamlet with this streamlet | Below is the the instruction that describes the task:
### Input:
Return a new Streamlet by left join_streamlet with this streamlet
### Response:
def outer_left_join(self, join_streamlet, window_config, join_function):
    """Return a new Streamlet that is the outer-left join of this streamlet
    with ``join_streamlet``, windowed by ``window_config`` and combined via
    ``join_function``.
    """
    from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt
    joined = JoinStreamlet(JoinBolt.OUTER_LEFT, window_config,
                           join_function, self, join_streamlet)
    # Both parents must register the join as a child in the topology graph.
    self._add_child(joined)
    join_streamlet._add_child(joined)
    return joined
def __reset_weights(self, target_phis, res_idxs, obs_idxs):
"""private method to reset weights based on target phi values
for each group. This method should not be called directly
Parameters
----------
target_phis : dict
target phi contribution for groups to reweight
res_idxs : dict
the index positions of each group of interest
in the res dataframe
obs_idxs : dict
the index positions of each group of interest
in the observation data dataframe
"""
for item in target_phis.keys():
assert item in res_idxs.keys(),\
"Pst.__reset_weights(): " + str(item) +\
" not in residual group indices"
assert item in obs_idxs.keys(), \
"Pst.__reset_weights(): " + str(item) +\
" not in observation group indices"
actual_phi = ((self.res.loc[res_idxs[item], "residual"] *
self.observation_data.loc
[obs_idxs[item], "weight"])**2).sum()
if actual_phi > 0.0:
weight_mult = np.sqrt(target_phis[item] / actual_phi)
self.observation_data.loc[obs_idxs[item], "weight"] *= weight_mult
else:
("Pst.__reset_weights() warning: phi group {0} has zero phi, skipping...".format(item)) | private method to reset weights based on target phi values
for each group. This method should not be called directly
Parameters
----------
target_phis : dict
target phi contribution for groups to reweight
res_idxs : dict
the index positions of each group of interest
in the res dataframe
obs_idxs : dict
the index positions of each group of interest
in the observation data dataframe | Below is the the instruction that describes the task:
### Input:
private method to reset weights based on target phi values
for each group. This method should not be called directly
Parameters
----------
target_phis : dict
target phi contribution for groups to reweight
res_idxs : dict
the index positions of each group of interest
in the res dataframe
obs_idxs : dict
the index positions of each group of interest
in the observation data dataframe
### Response:
def __reset_weights(self, target_phis, res_idxs, obs_idxs):
"""private method to reset weights based on target phi values
for each group. This method should not be called directly
Parameters
----------
target_phis : dict
target phi contribution for groups to reweight
res_idxs : dict
the index positions of each group of interest
in the res dataframe
obs_idxs : dict
the index positions of each group of interest
in the observation data dataframe
"""
for item in target_phis.keys():
assert item in res_idxs.keys(),\
"Pst.__reset_weights(): " + str(item) +\
" not in residual group indices"
assert item in obs_idxs.keys(), \
"Pst.__reset_weights(): " + str(item) +\
" not in observation group indices"
actual_phi = ((self.res.loc[res_idxs[item], "residual"] *
self.observation_data.loc
[obs_idxs[item], "weight"])**2).sum()
if actual_phi > 0.0:
weight_mult = np.sqrt(target_phis[item] / actual_phi)
self.observation_data.loc[obs_idxs[item], "weight"] *= weight_mult
else:
("Pst.__reset_weights() warning: phi group {0} has zero phi, skipping...".format(item)) |
def trailing_whitespace(physical_line):
r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
Okay: spam(1)\n#
W291: spam(1) \n#
W293: class Foo(object):\n \n bang = 12
"""
physical_line = physical_line.rstrip('\n') # chr(10), newline
physical_line = physical_line.rstrip('\r') # chr(13), carriage return
physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
stripped = physical_line.rstrip(' \t\v')
if physical_line != stripped:
if stripped:
return len(stripped), "W291 trailing whitespace"
else:
return 0, "W293 blank line contains whitespace" | r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
Okay: spam(1)\n#
W291: spam(1) \n#
W293: class Foo(object):\n \n bang = 12 | Below is the the instruction that describes the task:
### Input:
r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
Okay: spam(1)\n#
W291: spam(1) \n#
W293: class Foo(object):\n \n bang = 12
### Response:
def trailing_whitespace(physical_line):
r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
Okay: spam(1)\n#
W291: spam(1) \n#
W293: class Foo(object):\n \n bang = 12
"""
physical_line = physical_line.rstrip('\n') # chr(10), newline
physical_line = physical_line.rstrip('\r') # chr(13), carriage return
physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
stripped = physical_line.rstrip(' \t\v')
if physical_line != stripped:
if stripped:
return len(stripped), "W291 trailing whitespace"
else:
return 0, "W293 blank line contains whitespace" |
def do_format(self, format):
"""Apply format selection."""
infile = self.tmpfile
outfile = self.basename + '.out'
outfile_jp2 = self.basename + '.jp2'
# Now convert finished pnm file to output format
# simeon@ice ~>cat m3.pnm | pnmtojpeg > m4.jpg
# simeon@ice ~>cat m3.pnm | pnmtotiff > m4.jpg
# pnmtotiff: computing colormap...
# pnmtotiff: Too many colors - proceeding to write a 24-bit RGB file.
# pnmtotiff: If you want an 8-bit palette file, try doing a 'ppmquant 256'.
# simeon@ice ~>cat m3.pnm | pnmtopng > m4.png
fmt = ('png' if (format is None) else format)
if (fmt == 'png'):
# print "format: png"
if (self.shell_call(self.pnmtopng + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtopng.")
mime_type = "image/png"
elif (fmt == 'jpg'):
# print "format: jpg"
if (self.shell_call(self.pnmtojpeg + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtojpeg.")
mime_type = "image/jpeg"
elif (fmt == 'tiff' or fmt == 'jp2'):
# print "format: tiff/jp2"
if (self.shell_call(self.pnmtotiff + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtotiff.")
mime_type = "image/tiff"
if (fmt == 'jp2'):
# use djatoka after tiff
if (self.shell_call(DJATOKA_COMP + ' -i ' + outfile + ' -o ' + outfile_jp2)):
raise IIIFError(
text="Oops... got nonzero output from DJATOKA_COMP.")
mime_type = "image/jp2"
outfile = tmpfile_jp2
else:
raise IIIFError(code=415, parameter='format',
text="Unsupported output file format (%s), only png,jpg,tiff are supported." % (fmt))
self.outfile = outfile
self.output_format = fmt
self.mime_type = mime_type | Apply format selection. | Below is the the instruction that describes the task:
### Input:
Apply format selection.
### Response:
def do_format(self, format):
"""Apply format selection."""
infile = self.tmpfile
outfile = self.basename + '.out'
outfile_jp2 = self.basename + '.jp2'
# Now convert finished pnm file to output format
# simeon@ice ~>cat m3.pnm | pnmtojpeg > m4.jpg
# simeon@ice ~>cat m3.pnm | pnmtotiff > m4.jpg
# pnmtotiff: computing colormap...
# pnmtotiff: Too many colors - proceeding to write a 24-bit RGB file.
# pnmtotiff: If you want an 8-bit palette file, try doing a 'ppmquant 256'.
# simeon@ice ~>cat m3.pnm | pnmtopng > m4.png
fmt = ('png' if (format is None) else format)
if (fmt == 'png'):
# print "format: png"
if (self.shell_call(self.pnmtopng + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtopng.")
mime_type = "image/png"
elif (fmt == 'jpg'):
# print "format: jpg"
if (self.shell_call(self.pnmtojpeg + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtojpeg.")
mime_type = "image/jpeg"
elif (fmt == 'tiff' or fmt == 'jp2'):
# print "format: tiff/jp2"
if (self.shell_call(self.pnmtotiff + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtotiff.")
mime_type = "image/tiff"
if (fmt == 'jp2'):
# use djatoka after tiff
if (self.shell_call(DJATOKA_COMP + ' -i ' + outfile + ' -o ' + outfile_jp2)):
raise IIIFError(
text="Oops... got nonzero output from DJATOKA_COMP.")
mime_type = "image/jp2"
outfile = tmpfile_jp2
else:
raise IIIFError(code=415, parameter='format',
text="Unsupported output file format (%s), only png,jpg,tiff are supported." % (fmt))
self.outfile = outfile
self.output_format = fmt
self.mime_type = mime_type |
def get(tzid):
"""Return timezone data"""
ns = {}
path = os.path.join(DATA_DIR, tzid)
with open(path) as f:
raw_data = f.read()
exec(raw_data, ns, ns)
z = ZoneData()
z.types = [(delta(offset), delta(save), abbr)
for offset, save, abbr in ns['types']]
z.times = [(datetime(*time), i)
for time, i in ns['times']]
z.rules = ns['posix']
return z | Return timezone data | Below is the the instruction that describes the task:
### Input:
Return timezone data
### Response:
def get(tzid):
"""Return timezone data"""
ns = {}
path = os.path.join(DATA_DIR, tzid)
with open(path) as f:
raw_data = f.read()
exec(raw_data, ns, ns)
z = ZoneData()
z.types = [(delta(offset), delta(save), abbr)
for offset, save, abbr in ns['types']]
z.times = [(datetime(*time), i)
for time, i in ns['times']]
z.rules = ns['posix']
return z |
def read_unsigned_byte(self, cmd):
"""
Read an unsigned byte from the specified command register
"""
result = self.bus.read_byte_data(self.address, cmd)
self.log.debug(
"read_unsigned_byte: Read 0x%02X from command register 0x%02X" % (
result, cmd
)
)
return result | Read an unsigned byte from the specified command register | Below is the the instruction that describes the task:
### Input:
Read an unsigned byte from the specified command register
### Response:
def read_unsigned_byte(self, cmd):
"""
Read an unsigned byte from the specified command register
"""
result = self.bus.read_byte_data(self.address, cmd)
self.log.debug(
"read_unsigned_byte: Read 0x%02X from command register 0x%02X" % (
result, cmd
)
)
return result |
def _is_notation(ip, notation, _isnm):
"""Internally used to check if an IP/netmask is in the given notation."""
notation_orig = notation
notation = _get_notation(notation)
if notation not in _CHECK_FUNCT_KEYS:
raise ValueError('_is_notation: unkown notation: "%s"' % notation_orig)
return _CHECK_FUNCT[notation][_isnm](ip) | Internally used to check if an IP/netmask is in the given notation. | Below is the the instruction that describes the task:
### Input:
Internally used to check if an IP/netmask is in the given notation.
### Response:
def _is_notation(ip, notation, _isnm):
"""Internally used to check if an IP/netmask is in the given notation."""
notation_orig = notation
notation = _get_notation(notation)
if notation not in _CHECK_FUNCT_KEYS:
raise ValueError('_is_notation: unkown notation: "%s"' % notation_orig)
return _CHECK_FUNCT[notation][_isnm](ip) |
def parse_headerline(self, line):
""" Parses header lines
Header example:
Date 2012/11/15 User anonymous
Time 06:07:08PM Software version: 4.0
Example laboratory
Arizona
"""
if line.startswith('Date'):
splitted = self.splitLine(line)
if len(splitted) > 1:
self._header['Date'] = splitted[1]
if len(splitted) > 2 and splitted[2] == 'User':
self._header['Date'] = splitted[1]
self._header['User'] = splitted[3] \
if len(splitted) > 3 else ''
else:
self.warn("Unexpected header format", numline=self._numline)
else:
self.warn("Unexpected header format", numline=self._numline)
return 0
if line.startswith('Time'):
splitted = self.splitLine(line)
if len(splitted) > 1:
self._header['Time'] = splitted[1]
else:
self.warn("Unexpected header format", numline=self._numline)
return 0
if line.startswith('Sample/ctrl'):
# Sample/ctrl ID Pat/Ctr/cAl Test name Test type
if len(self._header) == 0:
self.warn("No header found", numline=self._numline)
return -1
#Grab column names
self._end_header = True
self._columns = self.splitLine(line)
return 1 | Parses header lines
Header example:
Date 2012/11/15 User anonymous
Time 06:07:08PM Software version: 4.0
Example laboratory
Arizona | Below is the the instruction that describes the task:
### Input:
Parses header lines
Header example:
Date 2012/11/15 User anonymous
Time 06:07:08PM Software version: 4.0
Example laboratory
Arizona
### Response:
def parse_headerline(self, line):
""" Parses header lines
Header example:
Date 2012/11/15 User anonymous
Time 06:07:08PM Software version: 4.0
Example laboratory
Arizona
"""
if line.startswith('Date'):
splitted = self.splitLine(line)
if len(splitted) > 1:
self._header['Date'] = splitted[1]
if len(splitted) > 2 and splitted[2] == 'User':
self._header['Date'] = splitted[1]
self._header['User'] = splitted[3] \
if len(splitted) > 3 else ''
else:
self.warn("Unexpected header format", numline=self._numline)
else:
self.warn("Unexpected header format", numline=self._numline)
return 0
if line.startswith('Time'):
splitted = self.splitLine(line)
if len(splitted) > 1:
self._header['Time'] = splitted[1]
else:
self.warn("Unexpected header format", numline=self._numline)
return 0
if line.startswith('Sample/ctrl'):
# Sample/ctrl ID Pat/Ctr/cAl Test name Test type
if len(self._header) == 0:
self.warn("No header found", numline=self._numline)
return -1
#Grab column names
self._end_header = True
self._columns = self.splitLine(line)
return 1 |
def list(self, search_from=None, search_to=None, limit=None):
"""List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions.
"""
l = []
for i in self._list():
l.append(i['data']['data'])
return l[0:limit] | List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions. | Below is the the instruction that describes the task:
### Input:
List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions.
### Response:
def list(self, search_from=None, search_to=None, limit=None):
"""List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions.
"""
l = []
for i in self._list():
l.append(i['data']['data'])
return l[0:limit] |
def handle(send, msg, args):
"""Implements several XKCD comics."""
output = textutils.gen_xkcd_sub(msg, True)
if output is None:
return
if args['type'] == 'action':
send("correction: * %s %s" % (args['nick'], output))
else:
send("%s actually meant: %s" % (args['nick'], output)) | Implements several XKCD comics. | Below is the the instruction that describes the task:
### Input:
Implements several XKCD comics.
### Response:
def handle(send, msg, args):
"""Implements several XKCD comics."""
output = textutils.gen_xkcd_sub(msg, True)
if output is None:
return
if args['type'] == 'action':
send("correction: * %s %s" % (args['nick'], output))
else:
send("%s actually meant: %s" % (args['nick'], output)) |
def deprecated_conditional(predicate,
removal_version,
entity_description,
hint_message=None,
stacklevel=4):
"""Marks a certain configuration as deprecated.
The predicate is used to determine if that configuration is deprecated. It is a function that
will be called, if true, then the deprecation warning will issue.
:param () -> bool predicate: A function that returns True if the deprecation warning should be on.
:param string removal_version: The pants version which will remove the deprecated functionality.
:param string entity_description: A description of the deprecated entity.
:param string hint_message: An optional hint pointing to alternatives to the deprecation.
:param int stacklevel: How far up in the stack do we go to find the calling fn to report
:raises DeprecationApplicationError if the deprecation is applied improperly.
"""
validate_deprecation_semver(removal_version, 'removal version')
if predicate():
warn_or_error(removal_version, entity_description, hint_message, stacklevel=stacklevel) | Marks a certain configuration as deprecated.
The predicate is used to determine if that configuration is deprecated. It is a function that
will be called, if true, then the deprecation warning will issue.
:param () -> bool predicate: A function that returns True if the deprecation warning should be on.
:param string removal_version: The pants version which will remove the deprecated functionality.
:param string entity_description: A description of the deprecated entity.
:param string hint_message: An optional hint pointing to alternatives to the deprecation.
:param int stacklevel: How far up in the stack do we go to find the calling fn to report
:raises DeprecationApplicationError if the deprecation is applied improperly. | Below is the the instruction that describes the task:
### Input:
Marks a certain configuration as deprecated.
The predicate is used to determine if that configuration is deprecated. It is a function that
will be called, if true, then the deprecation warning will issue.
:param () -> bool predicate: A function that returns True if the deprecation warning should be on.
:param string removal_version: The pants version which will remove the deprecated functionality.
:param string entity_description: A description of the deprecated entity.
:param string hint_message: An optional hint pointing to alternatives to the deprecation.
:param int stacklevel: How far up in the stack do we go to find the calling fn to report
:raises DeprecationApplicationError if the deprecation is applied improperly.
### Response:
def deprecated_conditional(predicate,
removal_version,
entity_description,
hint_message=None,
stacklevel=4):
"""Marks a certain configuration as deprecated.
The predicate is used to determine if that configuration is deprecated. It is a function that
will be called, if true, then the deprecation warning will issue.
:param () -> bool predicate: A function that returns True if the deprecation warning should be on.
:param string removal_version: The pants version which will remove the deprecated functionality.
:param string entity_description: A description of the deprecated entity.
:param string hint_message: An optional hint pointing to alternatives to the deprecation.
:param int stacklevel: How far up in the stack do we go to find the calling fn to report
:raises DeprecationApplicationError if the deprecation is applied improperly.
"""
validate_deprecation_semver(removal_version, 'removal version')
if predicate():
warn_or_error(removal_version, entity_description, hint_message, stacklevel=stacklevel) |
def _evaluate(self, indices, norm_distances, out=None):
"""Evaluate nearest interpolation."""
idx_res = []
for i, yi in zip(indices, norm_distances):
if self.variant == 'left':
idx_res.append(np.where(yi <= .5, i, i + 1))
else:
idx_res.append(np.where(yi < .5, i, i + 1))
idx_res = tuple(idx_res)
if out is not None:
out[:] = self.values[idx_res]
return out
else:
return self.values[idx_res] | Evaluate nearest interpolation. | Below is the the instruction that describes the task:
### Input:
Evaluate nearest interpolation.
### Response:
def _evaluate(self, indices, norm_distances, out=None):
"""Evaluate nearest interpolation."""
idx_res = []
for i, yi in zip(indices, norm_distances):
if self.variant == 'left':
idx_res.append(np.where(yi <= .5, i, i + 1))
else:
idx_res.append(np.where(yi < .5, i, i + 1))
idx_res = tuple(idx_res)
if out is not None:
out[:] = self.values[idx_res]
return out
else:
return self.values[idx_res] |
def surveys(self):
"""
Returns the list of timesteps when survey measures has been captured
None if xml document is mailformed
xpath: /scenario/monitoring/survey
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#survey-times-time-steps
"""
survey_time_list = list()
# Extract surveyTimes from /scenario/monitoring/surveys section
# Using root element instead of xpath to avoid problems with namespaces
# (root tag was <scenario> prior to schema 32, and then it was switched to <om:scenario>)
try:
for item in self.et.find("surveys").findall("surveyTime"):
# Converting to float first to allow values like 730.0
survey_time_list.append(int(item.text))
except AttributeError:
return None
return survey_time_list | Returns the list of timesteps when survey measures has been captured
None if xml document is mailformed
xpath: /scenario/monitoring/survey
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#survey-times-time-steps | Below is the the instruction that describes the task:
### Input:
Returns the list of timesteps when survey measures has been captured
None if xml document is mailformed
xpath: /scenario/monitoring/survey
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#survey-times-time-steps
### Response:
def surveys(self):
"""
Returns the list of timesteps when survey measures has been captured
None if xml document is mailformed
xpath: /scenario/monitoring/survey
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#survey-times-time-steps
"""
survey_time_list = list()
# Extract surveyTimes from /scenario/monitoring/surveys section
# Using root element instead of xpath to avoid problems with namespaces
# (root tag was <scenario> prior to schema 32, and then it was switched to <om:scenario>)
try:
for item in self.et.find("surveys").findall("surveyTime"):
# Converting to float first to allow values like 730.0
survey_time_list.append(int(item.text))
except AttributeError:
return None
return survey_time_list |
def get_assembly_size(assembly_file):
"""Returns the number of nucleotides and the size per contig for the
provided assembly file path
Parameters
----------
assembly_file : str
Path to assembly file.
Returns
-------
assembly_size : int
Size of the assembly in nucleotides
contig_size : dict
Length of each contig (contig name as key and length as value)
"""
assembly_size = 0
contig_size = {}
header = ""
with open(assembly_file) as fh:
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
if line.startswith(">"):
header = line.strip()[1:]
contig_size[header] = 0
else:
line_len = len(line.strip())
assembly_size += line_len
contig_size[header] += line_len
return assembly_size, contig_size | Returns the number of nucleotides and the size per contig for the
provided assembly file path
Parameters
----------
assembly_file : str
Path to assembly file.
Returns
-------
assembly_size : int
Size of the assembly in nucleotides
contig_size : dict
Length of each contig (contig name as key and length as value) | Below is the the instruction that describes the task:
### Input:
Returns the number of nucleotides and the size per contig for the
provided assembly file path
Parameters
----------
assembly_file : str
Path to assembly file.
Returns
-------
assembly_size : int
Size of the assembly in nucleotides
contig_size : dict
Length of each contig (contig name as key and length as value)
### Response:
def get_assembly_size(assembly_file):
"""Returns the number of nucleotides and the size per contig for the
provided assembly file path
Parameters
----------
assembly_file : str
Path to assembly file.
Returns
-------
assembly_size : int
Size of the assembly in nucleotides
contig_size : dict
Length of each contig (contig name as key and length as value)
"""
assembly_size = 0
contig_size = {}
header = ""
with open(assembly_file) as fh:
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
if line.startswith(">"):
header = line.strip()[1:]
contig_size[header] = 0
else:
line_len = len(line.strip())
assembly_size += line_len
contig_size[header] += line_len
return assembly_size, contig_size |
def register_on_serial_port_changed(self, callback):
"""Set the callback function to consume on serial port changed events.
Callback receives a ISerialPortChangedEvent object.
Returns the callback_id
"""
event_type = library.VBoxEventType.on_serial_port_changed
return self.event_source.register_callback(callback, event_type) | Set the callback function to consume on serial port changed events.
Callback receives a ISerialPortChangedEvent object.
Returns the callback_id | Below is the the instruction that describes the task:
### Input:
Set the callback function to consume on serial port changed events.
Callback receives a ISerialPortChangedEvent object.
Returns the callback_id
### Response:
def register_on_serial_port_changed(self, callback):
"""Set the callback function to consume on serial port changed events.
Callback receives a ISerialPortChangedEvent object.
Returns the callback_id
"""
event_type = library.VBoxEventType.on_serial_port_changed
return self.event_source.register_callback(callback, event_type) |
def _approximate_unkown_bond_lengths(self):
"""Completes the bond length database with approximations based on VDW radii"""
dataset = self.lengths[BOND_SINGLE]
for n1 in periodic.iter_numbers():
for n2 in periodic.iter_numbers():
if n1 <= n2:
pair = frozenset([n1, n2])
atom1 = periodic[n1]
atom2 = periodic[n2]
#if (pair not in dataset) and hasattr(atom1, "covalent_radius") and hasattr(atom2, "covalent_radius"):
if (pair not in dataset) and (atom1.covalent_radius is not None) and (atom2.covalent_radius is not None):
dataset[pair] = (atom1.covalent_radius + atom2.covalent_radius) | Completes the bond length database with approximations based on VDW radii | Below is the the instruction that describes the task:
### Input:
Completes the bond length database with approximations based on VDW radii
### Response:
def _approximate_unkown_bond_lengths(self):
"""Completes the bond length database with approximations based on VDW radii"""
dataset = self.lengths[BOND_SINGLE]
for n1 in periodic.iter_numbers():
for n2 in periodic.iter_numbers():
if n1 <= n2:
pair = frozenset([n1, n2])
atom1 = periodic[n1]
atom2 = periodic[n2]
#if (pair not in dataset) and hasattr(atom1, "covalent_radius") and hasattr(atom2, "covalent_radius"):
if (pair not in dataset) and (atom1.covalent_radius is not None) and (atom2.covalent_radius is not None):
dataset[pair] = (atom1.covalent_radius + atom2.covalent_radius) |
def get_enthalpy(self, temperature, electronic_energy = 'Default'):
"""Returns the internal energy of an adsorbed molecule.
Parameters
----------
temperature : numeric
temperature in K
electronic_energy : numeric
energy in eV
Returns
-------
internal_energy : numeric
Internal energy in eV
"""
if not temperature: # either None or 0
return(0, 0, 0)
if electronic_energy == 'Default':
electronic_energy = molecule_dict[self.name]['electronic_energy']
if overbinding == True:
electronic_energy += molecule_dict[self.name]['overbinding']
else:
ideal_gas_object = IdealGasThermo(vib_energies=self.get_vib_energies(),
potentialenergy=electronic_energy,
atoms=self.atom_object,
geometry=molecule_dict[self.name]['geometry'],
symmetrynumber=molecule_dict[self.name]['symmetrynumber'],
spin=molecule_dict[self.name]['spin'])
energy = ideal_gas_object.get_enthalpy(temperature=temperature, verbose=False)
self.enthalpy = energy
return(self.enthalpy) | Returns the internal energy of an adsorbed molecule.
Parameters
----------
temperature : numeric
temperature in K
electronic_energy : numeric
energy in eV
Returns
-------
internal_energy : numeric
Internal energy in eV | Below is the the instruction that describes the task:
### Input:
Returns the internal energy of an adsorbed molecule.
Parameters
----------
temperature : numeric
temperature in K
electronic_energy : numeric
energy in eV
Returns
-------
internal_energy : numeric
Internal energy in eV
### Response:
def get_enthalpy(self, temperature, electronic_energy = 'Default'):
"""Returns the internal energy of an adsorbed molecule.
Parameters
----------
temperature : numeric
temperature in K
electronic_energy : numeric
energy in eV
Returns
-------
internal_energy : numeric
Internal energy in eV
"""
if not temperature: # either None or 0
return(0, 0, 0)
if electronic_energy == 'Default':
electronic_energy = molecule_dict[self.name]['electronic_energy']
if overbinding == True:
electronic_energy += molecule_dict[self.name]['overbinding']
else:
ideal_gas_object = IdealGasThermo(vib_energies=self.get_vib_energies(),
potentialenergy=electronic_energy,
atoms=self.atom_object,
geometry=molecule_dict[self.name]['geometry'],
symmetrynumber=molecule_dict[self.name]['symmetrynumber'],
spin=molecule_dict[self.name]['spin'])
energy = ideal_gas_object.get_enthalpy(temperature=temperature, verbose=False)
self.enthalpy = energy
return(self.enthalpy) |
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data = get_form_data()[0]
if action in ('saveas'):
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ('saveas') and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
_('Chart [{}] was added to dashboard [{}]').format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
_('Dashboard [{}] just got created and chart [{}] was added '
'to it').format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
'dashboard_id': dash.id if dash else None,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response)) | Save or overwrite a slice | Below is the the instruction that describes the task:
### Input:
Save or overwrite a slice
### Response:
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data = get_form_data()[0]
if action in ('saveas'):
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ('saveas') and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
_('Chart [{}] was added to dashboard [{}]').format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
_('Dashboard [{}] just got created and chart [{}] was added '
'to it').format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
'dashboard_id': dash.id if dash else None,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response)) |
def module(self):
    """Return the ``Module`` instance this code element belongs to.

    Walks the ``parent`` chain upward until a ``Module`` node is found,
    then caches it in ``self._module`` so later calls are O(1).
    Returns ``None`` when the chain ends without reaching a ``Module``.
    """
    if self._module is not None:
        return self._module
    node = self
    while node is not None:
        if isinstance(node, Module):
            # Found the enclosing module; memoize for future lookups.
            self._module = node
            break
        node = node.parent
    return self._module
### Input:
Returns the module that this code element belongs to.
### Response:
def module(self):
"""Returns the module that this code element belongs to."""
if self._module is None:
root = self
while self._module is None and root is not None:
if isinstance(root, Module):
self._module = root
else:
root = root.parent
return self._module |
def install_plugin(pkgpath, plugin_type, install_path, register_func):
    """Install specified plugin.

    :param pkgpath: Name of plugin to be downloaded from online repo or path to plugin folder or zip file.
    :param plugin_type: Plugin category, forwarded to the online-repo installer.
    :param install_path: Path where plugin will be installed.
    :param register_func: Method used to register and validate plugin.
    :raises exceptions.PluginAlreadyInstalled: if a plugin with the same
        name already exists under ``install_path``.
    """
    service_name = os.path.basename(pkgpath)
    destination = os.path.join(install_path, service_name)
    if os.path.exists(destination):
        raise exceptions.PluginAlreadyInstalled(pkgpath)

    if not os.path.exists(pkgpath):
        # Nothing local by that name; fall back to the online repository.
        logger.debug("cannot find %s locally, checking github repo", pkgpath)
        click.secho("Collecting {}..".format(pkgpath))
        pip_status = install_from_repo(pkgpath, plugin_type, install_path, register_func)
    elif os.path.isdir(pkgpath):
        logger.debug("%s exists in filesystem", pkgpath)
        pip_status = install_dir(pkgpath, install_path, register_func)
    else:
        # A local file path: treat it as a plugin zip archive.
        logger.debug("%s exists in filesystem", pkgpath)
        pip_status = install_from_zip(pkgpath, install_path, register_func)

    if pip_status == 0:
        click.secho("[+] Great success!")
    else:
        # TODO: rephrase
        click.secho("[-] Service installed but something was odd with dependency install, please review debug logs")
:param pkgpath: Name of plugin to be downloaded from online repo or path to plugin folder or zip file.
:param install_path: Path where plugin will be installed.
:param register_func: Method used to register and validate plugin. | Below is the instruction that describes the task:
### Input:
Install specified plugin.
:param pkgpath: Name of plugin to be downloaded from online repo or path to plugin folder or zip file.
:param install_path: Path where plugin will be installed.
:param register_func: Method used to register and validate plugin.
### Response:
def install_plugin(pkgpath, plugin_type, install_path, register_func):
"""Install specified plugin.
:param pkgpath: Name of plugin to be downloaded from online repo or path to plugin folder or zip file.
:param install_path: Path where plugin will be installed.
:param register_func: Method used to register and validate plugin.
"""
service_name = os.path.basename(pkgpath)
if os.path.exists(os.path.join(install_path, service_name)):
raise exceptions.PluginAlreadyInstalled(pkgpath)
if os.path.exists(pkgpath):
logger.debug("%s exists in filesystem", pkgpath)
if os.path.isdir(pkgpath):
pip_status = install_dir(pkgpath, install_path, register_func)
else: # pkgpath is file
pip_status = install_from_zip(pkgpath, install_path, register_func)
else:
logger.debug("cannot find %s locally, checking github repo", pkgpath)
click.secho("Collecting {}..".format(pkgpath))
pip_status = install_from_repo(pkgpath, plugin_type, install_path, register_func)
if pip_status == 0:
click.secho("[+] Great success!")
else:
# TODO: rephrase
click.secho("[-] Service installed but something was odd with dependency install, please review debug logs") |
def should_stale_item_be_fetched_synchronously(self, delta, *args, **kwargs):
    """Decide whether a stale cache hit should be refreshed inline.

    :param delta: how far past freshness the cached item is.
    :return: ``True`` when the configured threshold (relative to the
        item lifetime) has been exceeded, ``False`` otherwise — and
        always ``False`` when no threshold is configured.
    """
    threshold = self.fetch_on_stale_threshold
    if threshold is None:
        # No synchronous-refresh threshold configured: never block the caller.
        return False
    return delta > threshold - self.lifetime
cache but stale | Below is the instruction that describes the task:
### Input:
Return whether to refresh an item synchronously when it is found in the
cache but stale
### Response:
def should_stale_item_be_fetched_synchronously(self, delta, *args, **kwargs):
"""
Return whether to refresh an item synchronously when it is found in the
cache but stale
"""
if self.fetch_on_stale_threshold is None:
return False
return delta > (self.fetch_on_stale_threshold - self.lifetime) |
def new_status(self, new_status):
    """Set the new_status of this BuildSetStatusChangedEvent.

    :param new_status: one of ``"NEW"``, ``"DONE"`` or ``"REJECTED"``
    :type: str
    :raises ValueError: when the value is not an allowed status
    """
    allowed_values = ["NEW", "DONE", "REJECTED"]
    if new_status in allowed_values:
        self._new_status = new_status
        return
    raise ValueError(
        "Invalid value for `new_status` ({0}), must be one of {1}"
        .format(new_status, allowed_values)
    )
:param new_status: The new_status of this BuildSetStatusChangedEvent.
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the new_status of this BuildSetStatusChangedEvent.
:param new_status: The new_status of this BuildSetStatusChangedEvent.
:type: str
### Response:
def new_status(self, new_status):
"""
Sets the new_status of this BuildSetStatusChangedEvent.
:param new_status: The new_status of this BuildSetStatusChangedEvent.
:type: str
"""
allowed_values = ["NEW", "DONE", "REJECTED"]
if new_status not in allowed_values:
raise ValueError(
"Invalid value for `new_status` ({0}), must be one of {1}"
.format(new_status, allowed_values)
)
self._new_status = new_status |
def scale(self, w=1.0, h=1.0):
    """Resizes the layer to the given width and height.

    When width ``w`` or height ``h`` is a floating-point number it is
    interpreted as a fraction of the current size (percentual scaling);
    an integer is an absolute size in pixels.

    :param w: target width — fraction if float, pixels if int.
    :param h: target height — fraction if float, pixels if int.
    """
    w0, h0 = self.img.size
    # isinstance(x, float) replaces the Python-2-only `types.FloatType`
    # (removed in Python 3); semantics are identical.
    if isinstance(w, float):
        w = int(w * w0)
    if isinstance(h, float):
        h = int(h * h0)
    # INTERPOLATION is a module-level resampling constant — presumably a
    # PIL filter such as BICUBIC; confirm against the module header.
    self.img = self.img.resize((w, h), INTERPOLATION)
    self.w = w
    self.h = h
When width w or height h is a floating-point number,
scales percentual,
otherwise scales to the given size in pixels. | Below is the the instruction that describes the task:
### Input:
Resizes the layer to the given width and height.
When width w or height h is a floating-point number,
scales percentual,
otherwise scales to the given size in pixels.
### Response:
def scale(self, w=1.0, h=1.0):
"""Resizes the layer to the given width and height.
When width w or height h is a floating-point number,
scales percentual,
otherwise scales to the given size in pixels.
"""
from types import FloatType
w0, h0 = self.img.size
if type(w) == FloatType: w = int(w*w0)
if type(h) == FloatType: h = int(h*h0)
self.img = self.img.resize((w,h), INTERPOLATION)
self.w = w
self.h = h |
def imshow(self, key):
    """Pop up a new pyplot figure rendering the array stored under *key*."""
    values = self.model.get_data()
    # Imported lazily so the editor model works without Spyder's pyplot.
    import spyder.pyplot as plt
    plt.figure()
    plt.imshow(values[key])
    plt.show()
### Input:
Show item's image
### Response:
def imshow(self, key):
"""Show item's image"""
data = self.model.get_data()
import spyder.pyplot as plt
plt.figure()
plt.imshow(data[key])
plt.show() |
def _load_all_in_directory(self) -> Dict[str, Iterable[DataSourceType]]:
    """
    Load every data file found (recursively) under the directory location.

    :return: an origin map — file path mapped to the data extracted
        from that file
    """
    pattern = "%s/**/*" % self._directory_location
    return {
        file_path: self.no_error_extract_data_from_file(file_path)
        for file_path in glob.iglob(pattern, recursive=True)
        if self.is_data_file(file_path)
    }
:return: an origin map of all the loaded data | Below is the instruction that describes the task:
### Input:
Loads all of the data from the files in directory location.
:return: a origin map of all the loaded data
### Response:
def _load_all_in_directory(self) -> Dict[str, Iterable[DataSourceType]]:
"""
Loads all of the data from the files in directory location.
:return: a origin map of all the loaded data
"""
origin_mapped_data = dict() # type: Dict[str, Iterable[DataSourceType]]
for file_path in glob.iglob("%s/**/*" % self._directory_location, recursive=True):
if self.is_data_file(file_path):
origin_mapped_data[file_path] = self.no_error_extract_data_from_file(file_path)
return origin_mapped_data |
def getStage(self, personID, nextStageIndex=0):
    """getStage(string, int) -> int

    Returns the type of the nth next stage
      0 for not-yet-departed
      1 for waiting
      2 for walking
      3 for driving
    nextStageIndex 0 retrieves value for the current stage.
    nextStageIndex must be lower than the value of getRemainingStages(personID)
    """
    # Start a GET_PERSON_VARIABLE command for VAR_STAGE; the declared payload
    # length is 1 type byte + 4 value bytes for the integer parameter below.
    self._connection._beginMessage(
        tc.CMD_GET_PERSON_VARIABLE, tc.VAR_STAGE, personID, 1 + 4)
    # Append the stage index as a TraCI typed integer (network byte order).
    self._connection._string += struct.pack("!Bi",
                                            tc.TYPE_INTEGER, nextStageIndex)
    # Send the message, validate the response header, and read back the
    # stage-type integer from the result payload.
    return self._connection._checkResult(tc.CMD_GET_PERSON_VARIABLE,
                                         tc.VAR_STAGE, personID).readInt()
Returns the type of the nth next stage
0 for not-yet-departed
1 for waiting
2 for walking
3 for driving
nextStageIndex 0 retrieves value for the current stage.
nextStageIndex must be lower then value of getRemainingStages(personID) | Below is the the instruction that describes the task:
### Input:
getStage(string, int) -> int
Returns the type of the nth next stage
0 for not-yet-departed
1 for waiting
2 for walking
3 for driving
nextStageIndex 0 retrieves value for the current stage.
nextStageIndex must be lower then value of getRemainingStages(personID)
### Response:
def getStage(self, personID, nextStageIndex=0):
"""getStage(string, int) -> int
Returns the type of the nth next stage
0 for not-yet-departed
1 for waiting
2 for walking
3 for driving
nextStageIndex 0 retrieves value for the current stage.
nextStageIndex must be lower then value of getRemainingStages(personID)
"""
self._connection._beginMessage(
tc.CMD_GET_PERSON_VARIABLE, tc.VAR_STAGE, personID, 1 + 4)
self._connection._string += struct.pack("!Bi",
tc.TYPE_INTEGER, nextStageIndex)
return self._connection._checkResult(tc.CMD_GET_PERSON_VARIABLE,
tc.VAR_STAGE, personID).readInt() |
def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
    """MLE for the Linear Regression Model with Gaussian noise.

    Minimizes the negative Normal log-likelihood jointly over
    theta = (beta_1, ..., beta_C, sigma), where sigma is the noise
    standard deviation, using a scipy solver.

    Parameters:
    -----------
    y : ndarray
        target variable with N observations
    X : ndarray
        The <N x C> design matrix with C independent
        variables, features, factors, etc.
    algorithm : str
        Optional. Default 'Nelder-Mead' (Simplex). One of
        'Nelder-Mead', 'CG', 'BFGS' for scipy.optimize.minimize.
    debug : bool
        Optional. If True, return scipy's full optimization result.

    Returns:
    --------
    beta : ndarray
        Estimated regression coefficients (the trailing sigma
        estimate is dropped).
    results : scipy.optimize.optimize.OptimizeResult
        Optional. If debug=True then only scipy's
        optimization result variable is returned.

    Raises:
    -------
    ValueError
        If `algorithm` is not one of the supported solvers.
    """
    import numpy as np
    import scipy.stats as sstat
    import scipy.optimize as sopt

    def objective_nll_linreg(theta, y, X):
        # Negative log-likelihood: theta[:-1] are the betas,
        # theta[-1] is the noise standard deviation sigma.
        yhat = np.dot(X, theta[:-1])  # =X*beta
        return -1.0 * sstat.norm.logpdf(y, loc=yhat, scale=theta[-1]).sum()

    # Validate the solver choice up front; ValueError (not a bare
    # Exception) so callers can handle it specifically.
    if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
        raise ValueError(
            'Optimization Algorithm not supported: %r' % (algorithm,))

    # Start values: all coefficients and sigma initialized to 1.
    theta0 = np.ones((X.shape[1] + 1, ))

    # Run the solver on the negative log-likelihood.
    results = sopt.minimize(
        objective_nll_linreg,
        theta0,
        args=(y, X),
        method=algorithm,
        options={'disp': False})

    # Return the raw solver output when debugging.
    if debug:
        return results

    # Drop the sigma estimate; return only the regression coefficients.
    return results.x[:-1]
Parameters:
-----------
y : ndarray
target variable with N observations
X : ndarray
The <N x C> design matrix with C independent
variables, features, factors, etc.
algorithm : str
Optional. Default 'Nelder-Mead' (Simplex).
The algorithm used in scipy.optimize.minimize
debug : bool
Optional.
Returns:
--------
beta : ndarray
Estimated regression coefficients.
results : scipy.optimize.optimize.OptimizeResult
Optional. If debug=True then only scipy's
optimization result variable is returned. | Below is the instruction that describes the task:
### Input:
MLE for Linear Regression Model
Parameters:
-----------
y : ndarray
target variable with N observations
X : ndarray
The <N x C> design matrix with C independent
variables, features, factors, etc.
algorithm : str
Optional. Default 'Nelder-Mead' (Simplex).
The algorithm used in scipy.optimize.minimize
debug : bool
Optional.
Returns:
--------
beta : ndarray
Estimated regression coefficients.
results : scipy.optimize.optimize.OptimizeResult
Optional. If debug=True then only scipy's
optimization result variable is returned.
### Response:
def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
"""MLE for Linear Regression Model
Parameters:
-----------
y : ndarray
target variable with N observations
X : ndarray
The <N x C> design matrix with C independent
variables, features, factors, etc.
algorithm : str
Optional. Default 'Nelder-Mead' (Simplex).
The algorithm used in scipy.optimize.minimize
debug : bool
Optional.
Returns:
--------
beta : ndarray
Estimated regression coefficients.
results : scipy.optimize.optimize.OptimizeResult
Optional. If debug=True then only scipy's
optimization result variable is returned.
"""
import numpy as np
import scipy.stats as sstat
import scipy.optimize as sopt
def objective_nll_linreg(theta, y, X):
yhat = np.dot(X, theta[:-1]) # =X*beta
return -1.0 * sstat.norm.logpdf(y, loc=yhat, scale=theta[-1]).sum()
# check eligible algorithm
if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
raise Exception('Optimization Algorithm not supported.')
# set start values
theta0 = np.ones((X.shape[1] + 1, ))
# run solver
results = sopt.minimize(
objective_nll_linreg,
theta0,
args=(y, X),
method=algorithm,
options={'disp': False})
# debug?
if debug:
return results
# done
return results.x[:-1] |
def _split_text(text, width, height, unicode_aware=True):
    """
    Split text to required dimensions.

    This will first try to split the text into multiple lines, then put a "..." on the last
    3 characters of the last line if this still doesn't fit.

    :param text: The text to split.
    :param width: The maximum width for any line.
    :param height: The maximum height for the resulting text.
    :param unicode_aware: Measure lines by display width (wcswidth) rather
        than character count.
    :return: A list of strings of the broken up text.
    """
    # At a high level, just try to split on whitespace for the best results.
    tokens = text.split(" ")
    result = []
    current_line = ""
    # Display-cell width for unicode text; plain character count otherwise.
    string_len = wcswidth if unicode_aware else len
    for token in tokens:
        # i > 0 means the token contained an explicit "\n", which always
        # forces a line break regardless of remaining width.
        for i, line_token in enumerate(token.split("\n")):
            if string_len(current_line + line_token) > width or i > 0:
                # Don't bother inserting completely blank lines - which should only happen on the very first
                # line (as the rest will inject whitespace/newlines)
                if len(current_line) > 0:
                    result.append(current_line.rstrip())
                current_line = line_token + " "
            else:
                current_line += line_token + " "

    # At this point we've either split nicely or have a hugely long unbroken string (e.g. because the
    # language doesn't use whitespace. Either way, break this last line up as best we can.
    current_line = current_line.rstrip()
    while string_len(current_line) > 0:
        new_line = _enforce_width(current_line, width, unicode_aware)
        result.append(new_line)
        current_line = current_line[len(new_line):]

    # Check for a height overrun and truncate.
    if len(result) > height:
        result = result[:height]
        # NOTE(review): slices by character count; in unicode_aware mode this
        # presumably should use display width like string_len above — confirm.
        result[height - 1] = result[height - 1][:width - 3] + "..."

    # Very small columns could be shorter than individual words - truncate
    # each line if necessary.
    for i, line in enumerate(result):
        # NOTE(review): uses len() even when unicode_aware=True — looks
        # inconsistent with the width measurement above; verify intended.
        if len(line) > width:
            result[i] = line[:width - 3] + "..."
    return result
This will first try to split the text into multiple lines, then put a "..." on the last
3 characters of the last line if this still doesn't fit.
:param text: The text to split.
:param width: The maximum width for any line.
:param height: The maximum height for the resulting text.
:return: A list of strings of the broken up text. | Below is the the instruction that describes the task:
### Input:
Split text to required dimensions.
This will first try to split the text into multiple lines, then put a "..." on the last
3 characters of the last line if this still doesn't fit.
:param text: The text to split.
:param width: The maximum width for any line.
:param height: The maximum height for the resulting text.
:return: A list of strings of the broken up text.
### Response:
def _split_text(text, width, height, unicode_aware=True):
"""
Split text to required dimensions.
This will first try to split the text into multiple lines, then put a "..." on the last
3 characters of the last line if this still doesn't fit.
:param text: The text to split.
:param width: The maximum width for any line.
:param height: The maximum height for the resulting text.
:return: A list of strings of the broken up text.
"""
# At a high level, just try to split on whitespace for the best results.
tokens = text.split(" ")
result = []
current_line = ""
string_len = wcswidth if unicode_aware else len
for token in tokens:
for i, line_token in enumerate(token.split("\n")):
if string_len(current_line + line_token) > width or i > 0:
# Don't bother inserting completely blank lines - which should only happen on the very first
# line (as the rest will inject whitespace/newlines)
if len(current_line) > 0:
result.append(current_line.rstrip())
current_line = line_token + " "
else:
current_line += line_token + " "
# At this point we've either split nicely or have a hugely long unbroken string (e.g. because the
# language doesn't use whitespace. Either way, break this last line up as best we can.
current_line = current_line.rstrip()
while string_len(current_line) > 0:
new_line = _enforce_width(current_line, width, unicode_aware)
result.append(new_line)
current_line = current_line[len(new_line):]
# Check for a height overrun and truncate.
if len(result) > height:
result = result[:height]
result[height - 1] = result[height - 1][:width - 3] + "..."
# Very small columns could be shorter than individual words - truncate
# each line if necessary.
for i, line in enumerate(result):
if len(line) > width:
result[i] = line[:width - 3] + "..."
return result |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.