code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _find_image_ext(path, number=None):
"""Find an image, tolerant of different file extensions."""
if number is not None:
path = path.format(number)
path = os.path.splitext(path)[0]
for ext in _KNOWN_IMG_EXTS:
this_path = '%s.%s' % (path, ext)
if os.path.isfile(this_path):
break
else:
ext = 'png'
return ('%s.%s' % (path, ext), ext) | Find an image, tolerant of different file extensions. | Below is the the instruction that describes the task:
### Input:
Find an image, tolerant of different file extensions.
### Response:
def _find_image_ext(path, number=None):
"""Find an image, tolerant of different file extensions."""
if number is not None:
path = path.format(number)
path = os.path.splitext(path)[0]
for ext in _KNOWN_IMG_EXTS:
this_path = '%s.%s' % (path, ext)
if os.path.isfile(this_path):
break
else:
ext = 'png'
return ('%s.%s' % (path, ext), ext) |
def generate(topic, add_punctuation, character_count=None):
""" Generate the text for a given topic """
corpus_cursor = db.markovify.find({'topic': topic})
if(corpus_cursor):
corpus = ''
for text in corpus_cursor:
corpus = punctuate(corpus, text['text'], add_punctuation)
text_model = markovify.Text(corpus)
sentence = text_model.make_short_sentence(character_count) \
if character_count else text_model.make_sentence()
if not sentence:
raise Exception('There is not enough in the corpus to generate a sentence.')
return sentence
raise Exception('No text found for topic: ' + topic) | Generate the text for a given topic | Below is the the instruction that describes the task:
### Input:
Generate the text for a given topic
### Response:
def generate(topic, add_punctuation, character_count=None):
""" Generate the text for a given topic """
corpus_cursor = db.markovify.find({'topic': topic})
if(corpus_cursor):
corpus = ''
for text in corpus_cursor:
corpus = punctuate(corpus, text['text'], add_punctuation)
text_model = markovify.Text(corpus)
sentence = text_model.make_short_sentence(character_count) \
if character_count else text_model.make_sentence()
if not sentence:
raise Exception('There is not enough in the corpus to generate a sentence.')
return sentence
raise Exception('No text found for topic: ' + topic) |
def combine_xml_points(l, units, handle_units):
"""Combine multiple Point tags into an array."""
ret = {}
for item in l:
for key, value in item.items():
ret.setdefault(key, []).append(value)
for key, value in ret.items():
if key != 'date':
ret[key] = handle_units(value, units.get(key, None))
return ret | Combine multiple Point tags into an array. | Below is the the instruction that describes the task:
### Input:
Combine multiple Point tags into an array.
### Response:
def combine_xml_points(l, units, handle_units):
"""Combine multiple Point tags into an array."""
ret = {}
for item in l:
for key, value in item.items():
ret.setdefault(key, []).append(value)
for key, value in ret.items():
if key != 'date':
ret[key] = handle_units(value, units.get(key, None))
return ret |
def _validate_controls(self, proposal):
'''Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list.
'''
self._control_ids = [c.model_id for c in proposal.value]
if len(set(self._control_ids)) != len(self._control_ids):
raise ControlException('duplicate control detected, only use each control once')
return proposal.value | Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list. | Below is the the instruction that describes the task:
### Input:
Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list.
### Response:
def _validate_controls(self, proposal):
'''Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list.
'''
self._control_ids = [c.model_id for c in proposal.value]
if len(set(self._control_ids)) != len(self._control_ids):
raise ControlException('duplicate control detected, only use each control once')
return proposal.value |
def get_prime(bits):
"""Creates (probable) prime number of given size
:param bits: size of number to generate
:return: prime number of given size
"""
while True:
num = random.randrange(2 ** (bits - 1), 2 ** bits)
if Integer(str(num)).is_probably_prime():
return num | Creates (probable) prime number of given size
:param bits: size of number to generate
:return: prime number of given size | Below is the the instruction that describes the task:
### Input:
Creates (probable) prime number of given size
:param bits: size of number to generate
:return: prime number of given size
### Response:
def get_prime(bits):
"""Creates (probable) prime number of given size
:param bits: size of number to generate
:return: prime number of given size
"""
while True:
num = random.randrange(2 ** (bits - 1), 2 ** bits)
if Integer(str(num)).is_probably_prime():
return num |
def _set_redist_static(self, v, load=False):
"""
Setter method for redist_static, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_static (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_redist_static is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redist_static() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=redist_static.redist_static, is_container='container', presence=False, yang_name="redist-static", rest_name="redist-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-static-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """redist_static must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=redist_static.redist_static, is_container='container', presence=False, yang_name="redist-static", rest_name="redist-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-static-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__redist_static = t
if hasattr(self, '_set'):
self._set() | Setter method for redist_static, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_static (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_redist_static is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redist_static() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for redist_static, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_static (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_redist_static is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redist_static() directly.
### Response:
def _set_redist_static(self, v, load=False):
"""
Setter method for redist_static, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_static (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_redist_static is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redist_static() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=redist_static.redist_static, is_container='container', presence=False, yang_name="redist-static", rest_name="redist-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-static-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """redist_static must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=redist_static.redist_static, is_container='container', presence=False, yang_name="redist-static", rest_name="redist-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-static-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__redist_static = t
if hasattr(self, '_set'):
self._set() |
def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False,
force_real=True):
r"""Returns the other component mass given one of the component masses
and the symmetric mass ratio.
This requires finding the roots of the quadratic equation:
.. math::
\eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0.
This has two solutions which correspond to :math:`m_1` being the heavier
mass or it being the lighter mass. By default, `known_mass` is assumed to
be the heavier (primary) mass, and the smaller solution is returned. Use
the `other_is_secondary` to invert.
Parameters
----------
known_mass : float
The known component mass.
eta : float
The symmetric mass ratio.
known_is_secondary : {False, bool}
Whether the known component mass is the primary or the secondary. If
True, `known_mass` is assumed to be the secondary (lighter) mass and
the larger solution is returned. Otherwise, the smaller solution is
returned. Default is False.
force_real : {True, bool}
Force the returned mass to be real.
Returns
-------
float
The other component mass.
"""
roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.])
if force_real:
roots = numpy.real(roots)
if known_is_secondary:
return roots[roots.argmax()]
else:
return roots[roots.argmin()] | r"""Returns the other component mass given one of the component masses
and the symmetric mass ratio.
This requires finding the roots of the quadratic equation:
.. math::
\eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0.
This has two solutions which correspond to :math:`m_1` being the heavier
mass or it being the lighter mass. By default, `known_mass` is assumed to
be the heavier (primary) mass, and the smaller solution is returned. Use
the `other_is_secondary` to invert.
Parameters
----------
known_mass : float
The known component mass.
eta : float
The symmetric mass ratio.
known_is_secondary : {False, bool}
Whether the known component mass is the primary or the secondary. If
True, `known_mass` is assumed to be the secondary (lighter) mass and
the larger solution is returned. Otherwise, the smaller solution is
returned. Default is False.
force_real : {True, bool}
Force the returned mass to be real.
Returns
-------
float
The other component mass. | Below is the the instruction that describes the task:
### Input:
r"""Returns the other component mass given one of the component masses
and the symmetric mass ratio.
This requires finding the roots of the quadratic equation:
.. math::
\eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0.
This has two solutions which correspond to :math:`m_1` being the heavier
mass or it being the lighter mass. By default, `known_mass` is assumed to
be the heavier (primary) mass, and the smaller solution is returned. Use
the `other_is_secondary` to invert.
Parameters
----------
known_mass : float
The known component mass.
eta : float
The symmetric mass ratio.
known_is_secondary : {False, bool}
Whether the known component mass is the primary or the secondary. If
True, `known_mass` is assumed to be the secondary (lighter) mass and
the larger solution is returned. Otherwise, the smaller solution is
returned. Default is False.
force_real : {True, bool}
Force the returned mass to be real.
Returns
-------
float
The other component mass.
### Response:
def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False,
force_real=True):
r"""Returns the other component mass given one of the component masses
and the symmetric mass ratio.
This requires finding the roots of the quadratic equation:
.. math::
\eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0.
This has two solutions which correspond to :math:`m_1` being the heavier
mass or it being the lighter mass. By default, `known_mass` is assumed to
be the heavier (primary) mass, and the smaller solution is returned. Use
the `other_is_secondary` to invert.
Parameters
----------
known_mass : float
The known component mass.
eta : float
The symmetric mass ratio.
known_is_secondary : {False, bool}
Whether the known component mass is the primary or the secondary. If
True, `known_mass` is assumed to be the secondary (lighter) mass and
the larger solution is returned. Otherwise, the smaller solution is
returned. Default is False.
force_real : {True, bool}
Force the returned mass to be real.
Returns
-------
float
The other component mass.
"""
roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.])
if force_real:
roots = numpy.real(roots)
if known_is_secondary:
return roots[roots.argmax()]
else:
return roots[roots.argmin()] |
def describe_cache_clusters(name=None, conn=None, region=None, key=None,
keyid=None, profile=None, **args):
'''
Return details about all (or just one) Elasticache cache clusters.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.describe_cache_clusters
salt myminion boto3_elasticache.describe_cache_clusters myelasticache
'''
return _describe_resource(name=name, name_param='CacheClusterId', res_type='cache_cluster',
info_node='CacheClusters', conn=conn, region=region, key=key,
keyid=keyid, profile=profile, **args) | Return details about all (or just one) Elasticache cache clusters.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.describe_cache_clusters
salt myminion boto3_elasticache.describe_cache_clusters myelasticache | Below is the the instruction that describes the task:
### Input:
Return details about all (or just one) Elasticache cache clusters.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.describe_cache_clusters
salt myminion boto3_elasticache.describe_cache_clusters myelasticache
### Response:
def describe_cache_clusters(name=None, conn=None, region=None, key=None,
keyid=None, profile=None, **args):
'''
Return details about all (or just one) Elasticache cache clusters.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.describe_cache_clusters
salt myminion boto3_elasticache.describe_cache_clusters myelasticache
'''
return _describe_resource(name=name, name_param='CacheClusterId', res_type='cache_cluster',
info_node='CacheClusters', conn=conn, region=region, key=key,
keyid=keyid, profile=profile, **args) |
def write(self)->None:
"Writes model gradient statistics to Tensorboard."
if len(self.gradients) == 0: return
norms = [x.data.norm() for x in self.gradients]
self._write_avg_norm(norms=norms)
self._write_median_norm(norms=norms)
self._write_max_norm(norms=norms)
self._write_min_norm(norms=norms)
self._write_num_zeros()
self._write_avg_gradient()
self._write_median_gradient()
self._write_max_gradient()
self._write_min_gradient() | Writes model gradient statistics to Tensorboard. | Below is the the instruction that describes the task:
### Input:
Writes model gradient statistics to Tensorboard.
### Response:
def write(self)->None:
"Writes model gradient statistics to Tensorboard."
if len(self.gradients) == 0: return
norms = [x.data.norm() for x in self.gradients]
self._write_avg_norm(norms=norms)
self._write_median_norm(norms=norms)
self._write_max_norm(norms=norms)
self._write_min_norm(norms=norms)
self._write_num_zeros()
self._write_avg_gradient()
self._write_median_gradient()
self._write_max_gradient()
self._write_min_gradient() |
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
name = obj.getFullname()
url = obj.absolute_url()
item["replace"]["getFullname"] = get_link(url, value=name)
return item | Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item | Below is the the instruction that describes the task:
### Input:
Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
### Response:
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
name = obj.getFullname()
url = obj.absolute_url()
item["replace"]["getFullname"] = get_link(url, value=name)
return item |
def sg_layer_func(func):
r"""Decorates a function `func` as a sg_layer function.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(tensor, **kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
tensor: A `tensor` (automatically passed by decorator).
kwargs:
shape: A list of integers. The shape of `tensor`. Inferred if not specified.
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
bn: Boolean. If True, batch normalization is applied.
ln: Boolean. If True, layer normalization is applied.
scale: If true, multiple by a trainable gamma variable. When the activation is
linear (relu included), this can be disabled because it can be implicitly
learned by the next layer. The default is True.
dout: A float of range [0, 100). A dropout rate. Set to 0 by default.
bias: Boolean. If True, biases are added. As a default, it is set to True
name: A name for the layer. As a default, the function name is assigned.
act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
regularizer: A string. None, 'l1' or 'l2'. The default is None
summary: If True, summaries are added. The default is True.
"""
from . import sg_initializer as init
from . import sg_activation
# kwargs parsing
opt = tf.sg_opt(kwargs) + sg_get_context()
# set default argument
try:
shape = tensor.get_shape().as_list()
# batch normalization off, layer normalization off, dropout off
opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1],
bn=False, ln=False, dout=0, summary=True, scale=True)
if opt.regularizer == 'l1':
opt.regularizer = lambda x: tf.reduce_mean(tf.abs(x))
elif opt.regularizer == 'l2':
opt.regularizer = lambda x: tf.square(tf.reduce_mean(tf.square(x)))
else:
opt.regularizer = None
assert not (opt.bn and opt.ln), 'one of batch normalization and layer normalization is available.'
# disable bias when normalization on
opt += tf.sg_opt(bias=not (opt.bn or opt.ln))
finally:
pass
# automatic layer naming
if opt.name is None:
# layer function name will be used as layer name
opt.name = func.__name__.replace('sg_', '')
# find existing layer names
exist_layers = []
for t in tf.global_variables():
scope_name = tf.get_variable_scope().name
prefix = scope_name + '/' if len(scope_name) > 0 else ''
i = t.name.rfind(prefix + opt.name)
if i >= 0:
exist_layers.append(t.name[i:].split('/')[-2])
exist_layers = list(set(exist_layers))
# layer name numbering
if len(exist_layers) == 0:
opt.name += '_1'
else:
opt.name += '_%d' % (max([int(n.split('_')[-1]) for n in exist_layers]) + 1)
with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:
# call layer function
out = func(tensor, opt)
out_shape = out.get_shape()
# apply batch normalization
if opt.bn:
beta = init.constant('beta', opt.dim, summary=opt.summary)
gamma = init.constant('gamma', opt.dim, value=1, summary=opt.summary, trainable=opt.scale)
# offset, scale parameter ( for inference )
mean_running = init.constant('mean', opt.dim, trainable=False, summary=opt.summary)
variance_running = init.constant('variance', opt.dim, value=1, trainable=False, summary=opt.summary)
# use fused batch norm if ndims in [2, 3, 4]
if out_shape.ndims in [2, 3, 4]:
# add HW dims if necessary, fused_batch_norm requires shape to be NHWC
if out_shape.ndims == 2:
out = tf.expand_dims(out, axis=1)
out = tf.expand_dims(out, axis=2)
elif out_shape.ndims == 3:
out = tf.expand_dims(out, axis=2)
fused_eps = tf.sg_eps if tf.sg_eps > 1e-5 else 1e-5
out, mean, variance = tf.cond(
_phase,
lambda: tf.nn.fused_batch_norm(out, gamma, beta, epsilon=fused_eps),
lambda: tf.nn.fused_batch_norm(out, gamma, beta, mean=mean_running, variance=variance_running, epsilon=fused_eps, is_training=False),
)
# restore original shape if HW dims was added
if out_shape.ndims == 2:
out = tf.squeeze(out, axis=[1, 2])
elif out_shape.ndims == 3:
out = tf.squeeze(out, axis=2)
# fallback to naive batch norm
else:
mean, variance = tf.nn.moments(out, axes=list(range(len(out.get_shape()) - 1)))
out = tf.cond(
_phase,
lambda: tf.nn.batch_normalization(out, mean, variance, beta, gamma, tf.sg_eps),
lambda: tf.nn.batch_normalization(out, mean_running, variance_running, beta, gamma, tf.sg_eps)
)
decay = 0.99
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_running.assign(mean_running * decay + mean * (1 - decay)))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_running.assign(variance_running * decay + variance * (1 - decay)))
# apply layer normalization
if opt.ln:
# offset, scale parameter
beta = init.constant('beta', opt.dim, summary=opt.summary)
if opt.scale:
gamma = init.constant('gamma', opt.dim, value=1, summary=opt.summary)
# calc layer mean, variance for final axis
mean, variance = tf.nn.moments(out, axes=[len(out.get_shape()) - 1], keep_dims=True)
# apply normalization
out = (out - mean) / tf.sqrt(variance + tf.sg_eps)
# apply parameter
if opt.scale:
out = gamma * out + beta
else:
out = out + beta
# apply activation
if opt.act:
out = getattr(sg_activation, 'sg_' + opt.act.lower())(out)
# apply dropout
if opt.dout:
out = tf.cond(_phase,
lambda: tf.nn.dropout(out, 1 - opt.dout),
lambda: out)
# rename tensor
out = tf.identity(out, 'out')
# add final output summary
if opt.summary:
tf.sg_summary_activation(out)
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(),
prev=tensor, is_layer=True, name=opt.name)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper | r"""Decorates a function `func` as a sg_layer function.
Args:
func: function to decorate | Below is the the instruction that describes the task:
### Input:
r"""Decorates a function `func` as a sg_layer function.
Args:
func: function to decorate
### Response:
def sg_layer_func(func):
r"""Decorates a function `func` as a sg_layer function.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(tensor, **kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
tensor: A `tensor` (automatically passed by decorator).
kwargs:
shape: A list of integers. The shape of `tensor`. Inferred if not specified.
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
bn: Boolean. If True, batch normalization is applied.
ln: Boolean. If True, layer normalization is applied.
scale: If true, multiple by a trainable gamma variable. When the activation is
linear (relu included), this can be disabled because it can be implicitly
learned by the next layer. The default is True.
dout: A float of range [0, 100). A dropout rate. Set to 0 by default.
bias: Boolean. If True, biases are added. As a default, it is set to True
name: A name for the layer. As a default, the function name is assigned.
act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
regularizer: A string. None, 'l1' or 'l2'. The default is None
summary: If True, summaries are added. The default is True.
"""
from . import sg_initializer as init
from . import sg_activation
# kwargs parsing
opt = tf.sg_opt(kwargs) + sg_get_context()
# set default argument
try:
shape = tensor.get_shape().as_list()
# batch normalization off, layer normalization off, dropout off
opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1],
bn=False, ln=False, dout=0, summary=True, scale=True)
if opt.regularizer == 'l1':
opt.regularizer = lambda x: tf.reduce_mean(tf.abs(x))
elif opt.regularizer == 'l2':
opt.regularizer = lambda x: tf.square(tf.reduce_mean(tf.square(x)))
else:
opt.regularizer = None
assert not (opt.bn and opt.ln), 'one of batch normalization and layer normalization is available.'
# disable bias when normalization on
opt += tf.sg_opt(bias=not (opt.bn or opt.ln))
finally:
pass
# automatic layer naming
if opt.name is None:
# layer function name will be used as layer name
opt.name = func.__name__.replace('sg_', '')
# find existing layer names
exist_layers = []
for t in tf.global_variables():
scope_name = tf.get_variable_scope().name
prefix = scope_name + '/' if len(scope_name) > 0 else ''
i = t.name.rfind(prefix + opt.name)
if i >= 0:
exist_layers.append(t.name[i:].split('/')[-2])
exist_layers = list(set(exist_layers))
# layer name numbering
if len(exist_layers) == 0:
opt.name += '_1'
else:
opt.name += '_%d' % (max([int(n.split('_')[-1]) for n in exist_layers]) + 1)
with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:
# call layer function
out = func(tensor, opt)
out_shape = out.get_shape()
# apply batch normalization
if opt.bn:
beta = init.constant('beta', opt.dim, summary=opt.summary)
gamma = init.constant('gamma', opt.dim, value=1, summary=opt.summary, trainable=opt.scale)
# offset, scale parameter ( for inference )
mean_running = init.constant('mean', opt.dim, trainable=False, summary=opt.summary)
variance_running = init.constant('variance', opt.dim, value=1, trainable=False, summary=opt.summary)
# use fused batch norm if ndims in [2, 3, 4]
if out_shape.ndims in [2, 3, 4]:
# add HW dims if necessary, fused_batch_norm requires shape to be NHWC
if out_shape.ndims == 2:
out = tf.expand_dims(out, axis=1)
out = tf.expand_dims(out, axis=2)
elif out_shape.ndims == 3:
out = tf.expand_dims(out, axis=2)
fused_eps = tf.sg_eps if tf.sg_eps > 1e-5 else 1e-5
out, mean, variance = tf.cond(
_phase,
lambda: tf.nn.fused_batch_norm(out, gamma, beta, epsilon=fused_eps),
lambda: tf.nn.fused_batch_norm(out, gamma, beta, mean=mean_running, variance=variance_running, epsilon=fused_eps, is_training=False),
)
# restore original shape if HW dims was added
if out_shape.ndims == 2:
out = tf.squeeze(out, axis=[1, 2])
elif out_shape.ndims == 3:
out = tf.squeeze(out, axis=2)
# fallback to naive batch norm
else:
mean, variance = tf.nn.moments(out, axes=list(range(len(out.get_shape()) - 1)))
out = tf.cond(
_phase,
lambda: tf.nn.batch_normalization(out, mean, variance, beta, gamma, tf.sg_eps),
lambda: tf.nn.batch_normalization(out, mean_running, variance_running, beta, gamma, tf.sg_eps)
)
decay = 0.99
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_running.assign(mean_running * decay + mean * (1 - decay)))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_running.assign(variance_running * decay + variance * (1 - decay)))
# apply layer normalization
if opt.ln:
# offset, scale parameter
beta = init.constant('beta', opt.dim, summary=opt.summary)
if opt.scale:
gamma = init.constant('gamma', opt.dim, value=1, summary=opt.summary)
# calc layer mean, variance for final axis
mean, variance = tf.nn.moments(out, axes=[len(out.get_shape()) - 1], keep_dims=True)
# apply normalization
out = (out - mean) / tf.sqrt(variance + tf.sg_eps)
# apply parameter
if opt.scale:
out = gamma * out + beta
else:
out = out + beta
# apply activation
if opt.act:
out = getattr(sg_activation, 'sg_' + opt.act.lower())(out)
# apply dropout
if opt.dout:
out = tf.cond(_phase,
lambda: tf.nn.dropout(out, 1 - opt.dout),
lambda: out)
# rename tensor
out = tf.identity(out, 'out')
# add final output summary
if opt.summary:
tf.sg_summary_activation(out)
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(),
prev=tensor, is_layer=True, name=opt.name)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper |
def _make_tasks_unique(tasks):
"""If some tasks of the workflow are the same they are deep copied."""
unique_tasks = []
prev_tasks = set()
for task in tasks:
if task in prev_tasks:
task = copy.deepcopy(task)
unique_tasks.append(task)
return unique_tasks | If some tasks of the workflow are the same they are deep copied. | Below is the the instruction that describes the task:
### Input:
If some tasks of the workflow are the same they are deep copied.
### Response:
def _make_tasks_unique(tasks):
"""If some tasks of the workflow are the same they are deep copied."""
unique_tasks = []
prev_tasks = set()
for task in tasks:
if task in prev_tasks:
task = copy.deepcopy(task)
unique_tasks.append(task)
return unique_tasks |
def evaluations_scipy(ty, pv):
"""
evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
ty, pv: ndarray
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if not (scipy != None and isinstance(ty, scipy.ndarray) and isinstance(pv, scipy.ndarray)):
raise TypeError("type of ty and pv must be ndarray")
if len(ty) != len(pv):
raise ValueError("len(ty) must be equal to len(pv)")
ACC = 100.0*(ty == pv).mean()
MSE = ((ty - pv)**2).mean()
l = len(ty)
sumv = pv.sum()
sumy = ty.sum()
sumvy = (pv*ty).sum()
sumvv = (pv*pv).sum()
sumyy = (ty*ty).sum()
with scipy.errstate(all = 'raise'):
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (float(ACC), float(MSE), float(SCC)) | evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
ty, pv: ndarray
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv). | Below is the the instruction that describes the task:
### Input:
evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
ty, pv: ndarray
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
### Response:
def evaluations_scipy(ty, pv):
"""
evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
ty, pv: ndarray
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if not (scipy != None and isinstance(ty, scipy.ndarray) and isinstance(pv, scipy.ndarray)):
raise TypeError("type of ty and pv must be ndarray")
if len(ty) != len(pv):
raise ValueError("len(ty) must be equal to len(pv)")
ACC = 100.0*(ty == pv).mean()
MSE = ((ty - pv)**2).mean()
l = len(ty)
sumv = pv.sum()
sumy = ty.sum()
sumvy = (pv*ty).sum()
sumvv = (pv*pv).sum()
sumyy = (ty*ty).sum()
with scipy.errstate(all = 'raise'):
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (float(ACC), float(MSE), float(SCC)) |
def getseed(myseed, i):
"""
Return a single seed from a long seed given by `genseeds`.
Parameters
----------
myseed : bytes
A long seed given by `genseeds(n)`.
i : int
An index less than n.
Returns
-------
rndseed : int
A seed (less than (2 ** 31))
"""
rndseed = int(myseed[(i - 1) * 8: i * 8], 16)
rndseed = rndseed % (2 ** 31) #XXX: trancate the first bit
return rndseed | Return a single seed from a long seed given by `genseeds`.
Parameters
----------
myseed : bytes
A long seed given by `genseeds(n)`.
i : int
An index less than n.
Returns
-------
rndseed : int
A seed (less than (2 ** 31)) | Below is the the instruction that describes the task:
### Input:
Return a single seed from a long seed given by `genseeds`.
Parameters
----------
myseed : bytes
A long seed given by `genseeds(n)`.
i : int
An index less than n.
Returns
-------
rndseed : int
A seed (less than (2 ** 31))
### Response:
def getseed(myseed, i):
"""
Return a single seed from a long seed given by `genseeds`.
Parameters
----------
myseed : bytes
A long seed given by `genseeds(n)`.
i : int
An index less than n.
Returns
-------
rndseed : int
A seed (less than (2 ** 31))
"""
rndseed = int(myseed[(i - 1) * 8: i * 8], 16)
rndseed = rndseed % (2 ** 31) #XXX: trancate the first bit
return rndseed |
def read(self, tid, length, offset, fh):
"""
Read from a file. Data is obtained from ``YTStor`` object (which is kept under `fh` descriptor) using its
``read`` method.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
length : int
Length of data to read.
offset : int
Posision from which reading will start.
fh : int
File descriptor.
Returns
-------
bytes
Movie data.
"""
try:
return self.fds[fh].read(offset, length, fh)
except AttributeError: # control file
if tid[1] not in (" next", " prev"):
raise FuseOSError(errno.EINVAL)
return self.__sh_script[offset:offset+length]
except KeyError: # descriptor does not exist.
raise FuseOSError(errno.EBADF)
except ConnectionError:
raise FuseOSError(errno.ESTALE) | Read from a file. Data is obtained from ``YTStor`` object (which is kept under `fh` descriptor) using its
``read`` method.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
length : int
Length of data to read.
offset : int
Posision from which reading will start.
fh : int
File descriptor.
Returns
-------
bytes
Movie data. | Below is the the instruction that describes the task:
### Input:
Read from a file. Data is obtained from ``YTStor`` object (which is kept under `fh` descriptor) using its
``read`` method.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
length : int
Length of data to read.
offset : int
Posision from which reading will start.
fh : int
File descriptor.
Returns
-------
bytes
Movie data.
### Response:
def read(self, tid, length, offset, fh):
"""
Read from a file. Data is obtained from ``YTStor`` object (which is kept under `fh` descriptor) using its
``read`` method.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
length : int
Length of data to read.
offset : int
Posision from which reading will start.
fh : int
File descriptor.
Returns
-------
bytes
Movie data.
"""
try:
return self.fds[fh].read(offset, length, fh)
except AttributeError: # control file
if tid[1] not in (" next", " prev"):
raise FuseOSError(errno.EINVAL)
return self.__sh_script[offset:offset+length]
except KeyError: # descriptor does not exist.
raise FuseOSError(errno.EBADF)
except ConnectionError:
raise FuseOSError(errno.ESTALE) |
def rename_property(self, old, new):
"""Replace the name of a property by a new one."""
self._properties.replace(old, new)
pairs = self._pairs
pairs |= {(o, new) for o in self._objects
if (o, old) in pairs and not pairs.remove((o, old))} | Replace the name of a property by a new one. | Below is the the instruction that describes the task:
### Input:
Replace the name of a property by a new one.
### Response:
def rename_property(self, old, new):
"""Replace the name of a property by a new one."""
self._properties.replace(old, new)
pairs = self._pairs
pairs |= {(o, new) for o in self._objects
if (o, old) in pairs and not pairs.remove((o, old))} |
def instance(
dt, tz=UTC # type: _datetime.datetime # type: Union[str, _Timezone, None]
): # type: (...) -> DateTime
"""
Create a DateTime instance from a datetime one.
"""
if not isinstance(dt, _datetime.datetime):
raise ValueError("instance() only accepts datetime objects.")
if isinstance(dt, DateTime):
return dt
tz = dt.tzinfo or tz
# Checking for pytz/tzinfo
if isinstance(tz, _datetime.tzinfo) and not isinstance(tz, _Timezone):
# pytz
if hasattr(tz, "localize") and tz.zone:
tz = tz.zone
else:
# We have no sure way to figure out
# the timezone name, we fallback
# on a fixed offset
tz = tz.utcoffset(dt).total_seconds() / 3600
return datetime(
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, tz=tz
) | Create a DateTime instance from a datetime one. | Below is the the instruction that describes the task:
### Input:
Create a DateTime instance from a datetime one.
### Response:
def instance(
dt, tz=UTC # type: _datetime.datetime # type: Union[str, _Timezone, None]
): # type: (...) -> DateTime
"""
Create a DateTime instance from a datetime one.
"""
if not isinstance(dt, _datetime.datetime):
raise ValueError("instance() only accepts datetime objects.")
if isinstance(dt, DateTime):
return dt
tz = dt.tzinfo or tz
# Checking for pytz/tzinfo
if isinstance(tz, _datetime.tzinfo) and not isinstance(tz, _Timezone):
# pytz
if hasattr(tz, "localize") and tz.zone:
tz = tz.zone
else:
# We have no sure way to figure out
# the timezone name, we fallback
# on a fixed offset
tz = tz.utcoffset(dt).total_seconds() / 3600
return datetime(
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, tz=tz
) |
def delete_additional_charge(self, recurring_billing_id):
"""
Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
"""
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._delete(self.url + fmt, headers=self.get_headers()) | Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns: | Below is the the instruction that describes the task:
### Input:
Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
### Response:
def delete_additional_charge(self, recurring_billing_id):
"""
Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
"""
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._delete(self.url + fmt, headers=self.get_headers()) |
async def parse_tag_results(soup):
"""
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
:return: A list of tags, Nothing else really useful there
"""
soup = soup.find_all('td', class_='tc3')
tags = []
for item in soup:
tags.append(item.a.string)
return tags | Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
:return: A list of tags, Nothing else really useful there | Below is the the instruction that describes the task:
### Input:
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
:return: A list of tags, Nothing else really useful there
### Response:
async def parse_tag_results(soup):
"""
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
:return: A list of tags, Nothing else really useful there
"""
soup = soup.find_all('td', class_='tc3')
tags = []
for item in soup:
tags.append(item.a.string)
return tags |
def pack(self, out: IO):
"""
Write the Field to the file-like object `out`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when saving a ClassFile.
:param out: Any file-like object providing `write()`
"""
out.write(self.access_flags.pack())
out.write(pack('>HH', self._name_index, self._descriptor_index))
self.attributes.pack(out) | Write the Field to the file-like object `out`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when saving a ClassFile.
:param out: Any file-like object providing `write()` | Below is the the instruction that describes the task:
### Input:
Write the Field to the file-like object `out`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when saving a ClassFile.
:param out: Any file-like object providing `write()`
### Response:
def pack(self, out: IO):
"""
Write the Field to the file-like object `out`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when saving a ClassFile.
:param out: Any file-like object providing `write()`
"""
out.write(self.access_flags.pack())
out.write(pack('>HH', self._name_index, self._descriptor_index))
self.attributes.pack(out) |
def dot(A, B):
"""Matrix multiplication between A and B
This function is equivalent to ``A @ B``, which is unfortunately
not possible under python 2.x.
Args:
A (sequence):
B (sequence):
Returns:
sequence:
"""
try:
result = A.__matmul__(B)
if result is NotImplemented:
result = B.__rmatmul__(A)
except AttributeError:
result = B.__rmatmul__(A)
return result | Matrix multiplication between A and B
This function is equivalent to ``A @ B``, which is unfortunately
not possible under python 2.x.
Args:
A (sequence):
B (sequence):
Returns:
sequence: | Below is the the instruction that describes the task:
### Input:
Matrix multiplication between A and B
This function is equivalent to ``A @ B``, which is unfortunately
not possible under python 2.x.
Args:
A (sequence):
B (sequence):
Returns:
sequence:
### Response:
def dot(A, B):
"""Matrix multiplication between A and B
This function is equivalent to ``A @ B``, which is unfortunately
not possible under python 2.x.
Args:
A (sequence):
B (sequence):
Returns:
sequence:
"""
try:
result = A.__matmul__(B)
if result is NotImplemented:
result = B.__rmatmul__(A)
except AttributeError:
result = B.__rmatmul__(A)
return result |
def _sendMsg(self, msg):
"""Send a line to graphite. Retry with exponential backoff."""
if not self.sock:
self.connect()
if not isinstance(msg, binary_type):
msg = msg.encode("UTF-8")
backoff = 0.001
while True:
try:
self.sock.sendall(msg)
break
except socket.error:
log.warning('Graphite connection error', exc_info = True)
self.disconnect()
time.sleep(random.uniform(0, 2.0*backoff))
backoff = min(backoff*2.0, 5.0)
self.connect() | Send a line to graphite. Retry with exponential backoff. | Below is the the instruction that describes the task:
### Input:
Send a line to graphite. Retry with exponential backoff.
### Response:
def _sendMsg(self, msg):
"""Send a line to graphite. Retry with exponential backoff."""
if not self.sock:
self.connect()
if not isinstance(msg, binary_type):
msg = msg.encode("UTF-8")
backoff = 0.001
while True:
try:
self.sock.sendall(msg)
break
except socket.error:
log.warning('Graphite connection error', exc_info = True)
self.disconnect()
time.sleep(random.uniform(0, 2.0*backoff))
backoff = min(backoff*2.0, 5.0)
self.connect() |
def cache(self, code, number=0):
"""Make a name for a block of code, and cache the code.
Parameters
----------
code : str
The Python source code to cache.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up.
"""
name = code_name(code, number)
entry = (len(code), time.time(),
[line+'\n' for line in code.splitlines()], name)
linecache.cache[name] = entry
linecache._ipython_cache[name] = entry
return name | Make a name for a block of code, and cache the code.
Parameters
----------
code : str
The Python source code to cache.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up. | Below is the the instruction that describes the task:
### Input:
Make a name for a block of code, and cache the code.
Parameters
----------
code : str
The Python source code to cache.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up.
### Response:
def cache(self, code, number=0):
"""Make a name for a block of code, and cache the code.
Parameters
----------
code : str
The Python source code to cache.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up.
"""
name = code_name(code, number)
entry = (len(code), time.time(),
[line+'\n' for line in code.splitlines()], name)
linecache.cache[name] = entry
linecache._ipython_cache[name] = entry
return name |
async def reconnect(self):
"""断线重连."""
self.clean()
try:
self.writer.close()
except:
pass
self.closed = True
await self.connect()
if self.debug:
print("reconnect to {}".format((self.hostname, self.port))) | 断线重连. | Below is the the instruction that describes the task:
### Input:
断线重连.
### Response:
async def reconnect(self):
"""断线重连."""
self.clean()
try:
self.writer.close()
except:
pass
self.closed = True
await self.connect()
if self.debug:
print("reconnect to {}".format((self.hostname, self.port))) |
def set_action(self,action):
"""Set the action of the item.
:Parameters:
- `action`: the new action or `None`.
:Types:
- `action`: `unicode`
"""
if action is None:
if self.xmlnode.hasProp("action"):
self.xmlnode.unsetProp("action")
return
if action not in ("remove","update"):
raise ValueError("Action must be 'update' or 'remove'")
action = unicode(action)
self.xmlnode.setProp("action", action.encode("utf-8")) | Set the action of the item.
:Parameters:
- `action`: the new action or `None`.
:Types:
- `action`: `unicode` | Below is the the instruction that describes the task:
### Input:
Set the action of the item.
:Parameters:
- `action`: the new action or `None`.
:Types:
- `action`: `unicode`
### Response:
def set_action(self,action):
"""Set the action of the item.
:Parameters:
- `action`: the new action or `None`.
:Types:
- `action`: `unicode`
"""
if action is None:
if self.xmlnode.hasProp("action"):
self.xmlnode.unsetProp("action")
return
if action not in ("remove","update"):
raise ValueError("Action must be 'update' or 'remove'")
action = unicode(action)
self.xmlnode.setProp("action", action.encode("utf-8")) |
def set_save_itrs_root(setting):
"""
Adjust the root timer save_itrs setting, such as for use in
multiprocessing, when a root timer may become a parallel subdivision (see
subdivide()).
Args:
setting (bool): Save individual iterations data, passed through bool()
Returns:
bool: Implemented setting value.
"""
setting = bool(setting)
f.root.times.save_itrs = setting
return setting | Adjust the root timer save_itrs setting, such as for use in
multiprocessing, when a root timer may become a parallel subdivision (see
subdivide()).
Args:
setting (bool): Save individual iterations data, passed through bool()
Returns:
bool: Implemented setting value. | Below is the the instruction that describes the task:
### Input:
Adjust the root timer save_itrs setting, such as for use in
multiprocessing, when a root timer may become a parallel subdivision (see
subdivide()).
Args:
setting (bool): Save individual iterations data, passed through bool()
Returns:
bool: Implemented setting value.
### Response:
def set_save_itrs_root(setting):
"""
Adjust the root timer save_itrs setting, such as for use in
multiprocessing, when a root timer may become a parallel subdivision (see
subdivide()).
Args:
setting (bool): Save individual iterations data, passed through bool()
Returns:
bool: Implemented setting value.
"""
setting = bool(setting)
f.root.times.save_itrs = setting
return setting |
def combine(self, other):
"""An instance of lunr.MatchData will be created for every term that
matches a document.
However only one instance is required in a lunr.Index~Result. This
method combines metadata from another instance of MatchData with this
object's metadata.
"""
for term in other.metadata.keys():
if term not in self.metadata:
self.metadata[term] = {}
fields = other.metadata[term].keys()
for field in fields:
if field not in self.metadata[term]:
self.metadata[term][field] = {}
keys = other.metadata[term][field].keys()
for key in keys:
if key not in self.metadata[term][field]:
self.metadata[term][field][key] = other.metadata[term][field][
key
]
else:
self.metadata[term][field][key].extend(
other.metadata[term][field][key]
) | An instance of lunr.MatchData will be created for every term that
matches a document.
However only one instance is required in a lunr.Index~Result. This
method combines metadata from another instance of MatchData with this
object's metadata. | Below is the the instruction that describes the task:
### Input:
An instance of lunr.MatchData will be created for every term that
matches a document.
However only one instance is required in a lunr.Index~Result. This
method combines metadata from another instance of MatchData with this
object's metadata.
### Response:
def combine(self, other):
"""An instance of lunr.MatchData will be created for every term that
matches a document.
However only one instance is required in a lunr.Index~Result. This
method combines metadata from another instance of MatchData with this
object's metadata.
"""
for term in other.metadata.keys():
if term not in self.metadata:
self.metadata[term] = {}
fields = other.metadata[term].keys()
for field in fields:
if field not in self.metadata[term]:
self.metadata[term][field] = {}
keys = other.metadata[term][field].keys()
for key in keys:
if key not in self.metadata[term][field]:
self.metadata[term][field][key] = other.metadata[term][field][
key
]
else:
self.metadata[term][field][key].extend(
other.metadata[term][field][key]
) |
def iter_links_by_attrib(self, element):
'''Iterate an element by looking at its attributes for links.'''
for attrib_name in element.attrib.keys():
attrib_value = element.attrib.get(attrib_name)
if attrib_name in self.LINK_ATTRIBUTES:
if self.javascript_scraper and \
attrib_value.lstrip().startswith('javascript:'):
for link in self.iter_links_by_js_attrib(
attrib_name, percent_decode(attrib_value)):
yield link
else:
yield attrib_name, attrib_value
elif self.javascript_scraper and \
attrib_name[:5] in self.DYNAMIC_ATTRIBUTES:
for link in self.iter_links_by_js_attrib(attrib_name,
attrib_value):
yield link
elif attrib_name.startswith('data-'):
if is_likely_link(attrib_value) \
and not is_unlikely_link(attrib_value):
yield attrib_name, attrib_value
elif attrib_name == 'srcset':
items = self.iter_links_by_srcset_attrib(
attrib_name, attrib_value)
for item in items:
yield item | Iterate an element by looking at its attributes for links. | Below is the the instruction that describes the task:
### Input:
Iterate an element by looking at its attributes for links.
### Response:
def iter_links_by_attrib(self, element):
'''Iterate an element by looking at its attributes for links.'''
for attrib_name in element.attrib.keys():
attrib_value = element.attrib.get(attrib_name)
if attrib_name in self.LINK_ATTRIBUTES:
if self.javascript_scraper and \
attrib_value.lstrip().startswith('javascript:'):
for link in self.iter_links_by_js_attrib(
attrib_name, percent_decode(attrib_value)):
yield link
else:
yield attrib_name, attrib_value
elif self.javascript_scraper and \
attrib_name[:5] in self.DYNAMIC_ATTRIBUTES:
for link in self.iter_links_by_js_attrib(attrib_name,
attrib_value):
yield link
elif attrib_name.startswith('data-'):
if is_likely_link(attrib_value) \
and not is_unlikely_link(attrib_value):
yield attrib_name, attrib_value
elif attrib_name == 'srcset':
items = self.iter_links_by_srcset_attrib(
attrib_name, attrib_value)
for item in items:
yield item |
def lognorm(x, mu, sigma=1.0):
""" Log-normal function from scipy """
return stats.lognorm(sigma, scale=mu).pdf(x) | Log-normal function from scipy | Below is the the instruction that describes the task:
### Input:
Log-normal function from scipy
### Response:
def lognorm(x, mu, sigma=1.0):
""" Log-normal function from scipy """
return stats.lognorm(sigma, scale=mu).pdf(x) |
def create_table_with_pk(self, table, fields, primary_keys):
""" Responsys.createTableWithPK call
Accepts:
InteractObject table
list fields
list primary_keys
Returns True on success
"""
table = table.get_soap_object(self.client)
return self.call('createTableWithPK', table, fields, primary_keys) | Responsys.createTableWithPK call
Accepts:
InteractObject table
list fields
list primary_keys
Returns True on success | Below is the the instruction that describes the task:
### Input:
Responsys.createTableWithPK call
Accepts:
InteractObject table
list fields
list primary_keys
Returns True on success
### Response:
def create_table_with_pk(self, table, fields, primary_keys):
""" Responsys.createTableWithPK call
Accepts:
InteractObject table
list fields
list primary_keys
Returns True on success
"""
table = table.get_soap_object(self.client)
return self.call('createTableWithPK', table, fields, primary_keys) |
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout() | Saves the image file to disk. | Below is the the instruction that describes the task:
### Input:
Saves the image file to disk.
### Response:
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout() |
def _get_oath2_access_token(client_key, client_secret):
'''
Query the vistara API and get an access_token
'''
if not client_key and not client_secret:
log.error(
"client_key and client_secret have not been specified "
"and are required parameters."
)
return False
method = 'POST'
url = 'https://api.vistara.io/auth/oauth/token'
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
params = {
'grant_type': 'client_credentials',
'client_id': client_key,
'client_secret': client_secret
}
resp = salt.utils.http.query(
url=url,
method=method,
header_dict=headers,
params=params,
opts=__opts__
)
respbody = resp.get('body', None)
if not respbody:
return False
access_token = salt.utils.json.loads(respbody)['access_token']
return access_token | Query the vistara API and get an access_token | Below is the the instruction that describes the task:
### Input:
Query the vistara API and get an access_token
### Response:
def _get_oath2_access_token(client_key, client_secret):
'''
Query the vistara API and get an access_token
'''
if not client_key and not client_secret:
log.error(
"client_key and client_secret have not been specified "
"and are required parameters."
)
return False
method = 'POST'
url = 'https://api.vistara.io/auth/oauth/token'
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
params = {
'grant_type': 'client_credentials',
'client_id': client_key,
'client_secret': client_secret
}
resp = salt.utils.http.query(
url=url,
method=method,
header_dict=headers,
params=params,
opts=__opts__
)
respbody = resp.get('body', None)
if not respbody:
return False
access_token = salt.utils.json.loads(respbody)['access_token']
return access_token |
def getValue(words):
"""Computes the sum of the values of the words."""
value = 0
for word in words:
for letter in word:
# shared.getConst will evaluate to the dictionary broadcasted by
# the root Future
value += shared.getConst('lettersValue')[letter]
return value | Computes the sum of the values of the words. | Below is the the instruction that describes the task:
### Input:
Computes the sum of the values of the words.
### Response:
def getValue(words):
"""Computes the sum of the values of the words."""
value = 0
for word in words:
for letter in word:
# shared.getConst will evaluate to the dictionary broadcasted by
# the root Future
value += shared.getConst('lettersValue')[letter]
return value |
def html_output(cls, cs, score_dict, output_filename, ds_loc, limit):
'''
Generates rendered HTML output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_groups List of results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest, and going up from there.
'''
checkers_html = []
for ds, score_groups in six.iteritems(score_dict):
for checker, (groups, errors) in six.iteritems(score_groups):
checkers_html.append(cs.checker_html_output(checker, groups,
ds, limit))
html = cs.html_output(checkers_html)
if output_filename == '-':
print(html)
else:
with io.open(output_filename, 'w', encoding='utf8') as f:
f.write(html)
return groups | Generates rendered HTML output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_groups List of results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest, and going up from there. | Below is the the instruction that describes the task:
### Input:
Generates rendered HTML output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_groups List of results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest, and going up from there.
### Response:
def html_output(cls, cs, score_dict, output_filename, ds_loc, limit):
'''
Generates rendered HTML output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_groups List of results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest, and going up from there.
'''
checkers_html = []
for ds, score_groups in six.iteritems(score_dict):
for checker, (groups, errors) in six.iteritems(score_groups):
checkers_html.append(cs.checker_html_output(checker, groups,
ds, limit))
html = cs.html_output(checkers_html)
if output_filename == '-':
print(html)
else:
with io.open(output_filename, 'w', encoding='utf8') as f:
f.write(html)
return groups |
def load(self, fileobj):
'''Load the dict from the file object'''
# try formats from most restrictive to least restrictive
for loader in (pickle.load, json.load, csv.reader):
fileobj.seek(0)
try:
return self.initial_update(loader(fileobj))
except Exception as e:
pass
raise ValueError('File not in a supported format') | Load the dict from the file object | Below is the the instruction that describes the task:
### Input:
Load the dict from the file object
### Response:
def load(self, fileobj):
'''Load the dict from the file object'''
# try formats from most restrictive to least restrictive
for loader in (pickle.load, json.load, csv.reader):
fileobj.seek(0)
try:
return self.initial_update(loader(fileobj))
except Exception as e:
pass
raise ValueError('File not in a supported format') |
def map(self, key, value):
"""
Args:
key: Image name
value: Image as jpeg byte data
Yields:
A tuple in the form of (key, value)
key: Constant dummy value
value: (l2sqr_dist, value)
"""
try:
image = imfeat.resize_image(imfeat.image_fromstring(value, {'type': 'numpy', 'mode': 'bgr', 'dtype': 'uint8'}),
100, 100)
except:
hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
return
# Distance metric
diff = image - self.target_image
dist = np.sum(diff * diff)
# Output
if dist < self.min_dist:
self.min_dist = dist
self.min_key = key
self.min_value = value | Args:
key: Image name
value: Image as jpeg byte data
Yields:
A tuple in the form of (key, value)
key: Constant dummy value
value: (l2sqr_dist, value) | Below is the the instruction that describes the task:
### Input:
Args:
key: Image name
value: Image as jpeg byte data
Yields:
A tuple in the form of (key, value)
key: Constant dummy value
value: (l2sqr_dist, value)
### Response:
def map(self, key, value):
"""
Args:
key: Image name
value: Image as jpeg byte data
Yields:
A tuple in the form of (key, value)
key: Constant dummy value
value: (l2sqr_dist, value)
"""
try:
image = imfeat.resize_image(imfeat.image_fromstring(value, {'type': 'numpy', 'mode': 'bgr', 'dtype': 'uint8'}),
100, 100)
except:
hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
return
# Distance metric
diff = image - self.target_image
dist = np.sum(diff * diff)
# Output
if dist < self.min_dist:
self.min_dist = dist
self.min_key = key
self.min_value = value |
def get_job_info():
"""
Get information about the job from the PBS server
"""
jobid = get_job_id()
if jobid == '':
return None
info = get_qstat_info('-ft {0}'.format(jobid), 'Job Id:')
# Select the dict for this job (there should only be one entry in any case)
info = info['Job Id: {}'.format(jobid)]
# Add the jobid to the dict and then return
info['Job_ID'] = jobid
return info | Get information about the job from the PBS server | Below is the the instruction that describes the task:
### Input:
Get information about the job from the PBS server
### Response:
def get_job_info():
"""
Get information about the job from the PBS server
"""
jobid = get_job_id()
if jobid == '':
return None
info = get_qstat_info('-ft {0}'.format(jobid), 'Job Id:')
# Select the dict for this job (there should only be one entry in any case)
info = info['Job Id: {}'.format(jobid)]
# Add the jobid to the dict and then return
info['Job_ID'] = jobid
return info |
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().count(**kwargs)
axis = kwargs.get("axis", 0)
map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs)
reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs)
return self._full_reduce(axis, map_func, reduce_func) | Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row. | Below is the the instruction that describes the task:
### Input:
Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
### Response:
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().count(**kwargs)
axis = kwargs.get("axis", 0)
map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs)
reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs)
return self._full_reduce(axis, map_func, reduce_func) |
def __get_menu_entries(self, kibiter_major):
    """Build the menu structure from the panels definition.

    Returns a list of parent menu dicts, each holding its dashboard
    child entries.
    """
    entries = []
    for menu_def in self.panels_menu:
        # Skip menus whose data source is not among the enabled ones.
        if menu_def['source'] not in self.data_sources:
            continue
        parent = {
            'name': menu_def['name'],
            'title': menu_def['name'],
            'description': "",
            'type': "menu",
            'dashboards': []
        }
        for submenu in menu_def['menu']:
            try:
                dash_name = get_dashboard_name(submenu['panel'])
            except FileNotFoundError:
                logging.error("Can't open dashboard file %s", submenu['panel'])
                continue
            # The entry name comes from self.panels_menu.
            parent['dashboards'].append({
                "name": submenu['name'],
                "title": submenu['name'],
                "description": "",
                "type": "entry",
                "panel_id": dash_name
            })
        entries.append(parent)
    return entries
### Input:
Get the menu entries from the panel definition
### Response:
def __get_menu_entries(self, kibiter_major):
""" Get the menu entries from the panel definition """
menu_entries = []
for entry in self.panels_menu:
if entry['source'] not in self.data_sources:
continue
parent_menu_item = {
'name': entry['name'],
'title': entry['name'],
'description': "",
'type': "menu",
'dashboards': []
}
for subentry in entry['menu']:
try:
dash_name = get_dashboard_name(subentry['panel'])
except FileNotFoundError:
logging.error("Can't open dashboard file %s", subentry['panel'])
continue
# The name for the entry is in self.panels_menu
child_item = {
"name": subentry['name'],
"title": subentry['name'],
"description": "",
"type": "entry",
"panel_id": dash_name
}
parent_menu_item['dashboards'].append(child_item)
menu_entries.append(parent_menu_item)
return menu_entries |
def pause():
    """
    Pause the timer so that subsequent time does not accumulate in the
    total. Leaves the timer inactive, disabling other timing commands.

    Returns:
        float: The current time.

    Raises:
        PausedError: If the timer is already paused.
        StoppedError: If the timer is already stopped.
    """
    now = timer()
    if f.t.stopped:
        raise StoppedError("Cannot pause stopped timer.")
    if f.t.paused:
        raise PausedError("Timer already paused.")
    f.t.paused = True
    # Bank the elapsed time since the last start before clearing markers.
    f.t.tmp_total += now - f.t.start_t
    f.t.start_t = None
    f.t.last_t = None
    return now
total. Renders the timer inactive, disabling other timing commands.
Returns:
float: The current time.
Raises:
PausedError: If timer already paused.
        StoppedError: If timer already stopped. | Below is the instruction that describes the task:
### Input:
Pause the timer, preventing subsequent time from accumulating in the
total. Renders the timer inactive, disabling other timing commands.
Returns:
float: The current time.
Raises:
PausedError: If timer already paused.
StoppedError: If timer already stopped.
### Response:
def pause():
"""
Pause the timer, preventing subsequent time from accumulating in the
total. Renders the timer inactive, disabling other timing commands.
Returns:
float: The current time.
Raises:
PausedError: If timer already paused.
StoppedError: If timer already stopped.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot pause stopped timer.")
if f.t.paused:
raise PausedError("Timer already paused.")
f.t.paused = True
f.t.tmp_total += t - f.t.start_t
f.t.start_t = None
f.t.last_t = None
return t |
def _closeResources(self):
    """ Disconnects the view box signals.

        Is called by self.finalize when the cti is deleted.
    """
    view_box = self.viewBox
    view_box.sigRangeChangedManually.disconnect(self.setAutoRangeOff)
    view_box.sigRangeChanged.disconnect(self.refreshMinMax)
Is called by self.finalize when the cti is deleted. | Below is the the instruction that describes the task:
### Input:
Disconnects signals.
Is called by self.finalize when the cti is deleted.
### Response:
def _closeResources(self):
""" Disconnects signals.
Is called by self.finalize when the cti is deleted.
"""
self.viewBox.sigRangeChangedManually.disconnect(self.setAutoRangeOff)
self.viewBox.sigRangeChanged.disconnect(self.refreshMinMax) |
def get_access_token(
    self, oauth_token, oauth_token_secret, oauth_verifier
):
    """Exchange the request tokens and verifier for an access token.

    :param oauth_token: oauth_token obtained via Twython's
        get_authentication_tokens()
    :param oauth_token_secret: oauth_token_secret obtained via Twython's
        get_authentication_tokens()
    :param oauth_verifier: oauth_verifier obtained from Twitter
    :type oauth_token: string
    :type oauth_token_secret: string
    :type oauth_verifier: string
    :return: access_token
    :rtype: dict
    """
    client = Twython(self.consumer_key,
                     self.consumer_secret,
                     oauth_token,
                     oauth_token_secret)
    return client.get_authorized_tokens(oauth_verifier)
get_authentication_tokens()
:param oauth_token_secret: oauth_token_secret retrieve by the
API Twython get_authentication_tokens()
:param oauth_verifier: oauth_verifier retrieve from Twitter
:type oauth_token: string
:type oauth_token_secret: string
:type oauth_verifier: string
:return: access_token
:rtype: dict | Below is the the instruction that describes the task:
### Input:
:param oauth_token: oauth_token retrieve by the API Twython
get_authentication_tokens()
:param oauth_token_secret: oauth_token_secret retrieve by the
API Twython get_authentication_tokens()
:param oauth_verifier: oauth_verifier retrieve from Twitter
:type oauth_token: string
:type oauth_token_secret: string
:type oauth_verifier: string
:return: access_token
:rtype: dict
### Response:
def get_access_token(
self, oauth_token, oauth_token_secret, oauth_verifier
):
"""
:param oauth_token: oauth_token retrieve by the API Twython
get_authentication_tokens()
:param oauth_token_secret: oauth_token_secret retrieve by the
API Twython get_authentication_tokens()
:param oauth_verifier: oauth_verifier retrieve from Twitter
:type oauth_token: string
:type oauth_token_secret: string
:type oauth_verifier: string
:return: access_token
:rtype: dict
"""
twitter = Twython(self.consumer_key,
self.consumer_secret,
oauth_token,
oauth_token_secret)
access_token = twitter.get_authorized_tokens(oauth_verifier)
return access_token |
def independent(repertoire):
    """Return True if the repertoire equals the product of its marginals."""
    marginals = [marginal(repertoire, dim) for dim in range(repertoire.ndim)]
    # Multiply all marginals together to form the joint distribution they
    # would produce if the dimensions were independent.
    product = marginals[0]
    for factor in marginals[1:]:
        product = product * factor
    # NOTE: no rounding is applied here (see config.PRECISION); the
    # comparison is exact.
    return np.array_equal(repertoire, product)
### Input:
Check whether the repertoire is independent.
### Response:
def independent(repertoire):
"""Check whether the repertoire is independent."""
marginals = [marginal(repertoire, i) for i in range(repertoire.ndim)]
# TODO: is there a way to do without an explicit iteration?
joint = marginals[0]
for m in marginals[1:]:
joint = joint * m
# TODO: should we round here?
# repertoire = repertoire.round(config.PRECISION)
# joint = joint.round(config.PRECISION)
return np.array_equal(repertoire, joint) |
def list_rooms(api_key=None):
    '''
    List all Slack rooms.

    :param api_key: The Slack admin api key.
    :return: The room list.

    CLI Example:

    .. code-block:: bash

        salt '*' slack.list_rooms
        salt '*' slack.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15
    '''
    # Fall back to the configured key when none is given on the CLI.
    key = api_key if api_key else _get_api_key()
    return salt.utils.slack.query(function='rooms',
                                  api_key=key,
                                  opts=__opts__)
:param api_key: The Slack admin api key.
:return: The room list.
CLI Example:
.. code-block:: bash
salt '*' slack.list_rooms
salt '*' slack.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 | Below is the the instruction that describes the task:
### Input:
List all Slack rooms.
:param api_key: The Slack admin api key.
:return: The room list.
CLI Example:
.. code-block:: bash
salt '*' slack.list_rooms
salt '*' slack.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15
### Response:
def list_rooms(api_key=None):
'''
List all Slack rooms.
:param api_key: The Slack admin api key.
:return: The room list.
CLI Example:
.. code-block:: bash
salt '*' slack.list_rooms
salt '*' slack.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15
'''
if not api_key:
api_key = _get_api_key()
return salt.utils.slack.query(function='rooms',
api_key=api_key,
opts=__opts__) |
def delete_internet_gateway(internet_gateway_id=None,
                            internet_gateway_name=None,
                            detach=False, region=None,
                            key=None, keyid=None, profile=None):
    '''
    Delete an internet gateway (by name or id).
    Returns True if the internet gateway was deleted and otherwise False.
    .. versionadded:: 2015.8.0
    CLI Examples:
    .. code-block:: bash
        salt myminion boto_vpc.delete_internet_gateway internet_gateway_id=igw-1a2b3c
        salt myminion boto_vpc.delete_internet_gateway internet_gateway_name=myigw
    '''
    try:
        # Resolve a gateway name to its id; bail out early if no such
        # gateway exists.
        if internet_gateway_name:
            internet_gateway_id = _get_resource_id('internet_gateway',
                                                   internet_gateway_name,
                                                   region=region, key=key,
                                                   keyid=keyid, profile=profile)
            if not internet_gateway_id:
                return {'deleted': False, 'error': {
                    'message': 'internet gateway {0} does not exist.'.format(
                        internet_gateway_name)}}
        # Optionally detach the gateway from its VPC before deleting it;
        # AWS refuses to delete an attached gateway.
        if detach:
            igw = _get_resource('internet_gateway',
                                resource_id=internet_gateway_id, region=region,
                                key=key, keyid=keyid, profile=profile)
            if not igw:
                return {'deleted': False, 'error': {
                    'message': 'internet gateway {0} does not exist.'.format(
                        internet_gateway_id)}}
            # Only the first attachment is considered; an IGW can be
            # attached to at most one VPC.
            if igw.attachments:
                conn = _get_conn(region=region, key=key, keyid=keyid,
                                 profile=profile)
                conn.detach_internet_gateway(internet_gateway_id,
                                             igw.attachments[0].vpc_id)
        return _delete_resource('internet_gateway',
                                resource_id=internet_gateway_id,
                                region=region, key=key, keyid=keyid,
                                profile=profile)
    except BotoServerError as e:
        return {'deleted': False, 'error': __utils__['boto.get_error'](e)} | Delete an internet gateway (by name or id).
Returns True if the internet gateway was deleted and otherwise False.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt myminion boto_vpc.delete_internet_gateway internet_gateway_id=igw-1a2b3c
        salt myminion boto_vpc.delete_internet_gateway internet_gateway_name=myigw | Below is the instruction that describes the task:
### Input:
Delete an internet gateway (by name or id).
Returns True if the internet gateway was deleted and otherwise False.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt myminion boto_vpc.delete_internet_gateway internet_gateway_id=igw-1a2b3c
salt myminion boto_vpc.delete_internet_gateway internet_gateway_name=myigw
### Response:
def delete_internet_gateway(internet_gateway_id=None,
internet_gateway_name=None,
detach=False, region=None,
key=None, keyid=None, profile=None):
'''
Delete an internet gateway (by name or id).
Returns True if the internet gateway was deleted and otherwise False.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt myminion boto_vpc.delete_internet_gateway internet_gateway_id=igw-1a2b3c
salt myminion boto_vpc.delete_internet_gateway internet_gateway_name=myigw
'''
try:
if internet_gateway_name:
internet_gateway_id = _get_resource_id('internet_gateway',
internet_gateway_name,
region=region, key=key,
keyid=keyid, profile=profile)
if not internet_gateway_id:
return {'deleted': False, 'error': {
'message': 'internet gateway {0} does not exist.'.format(
internet_gateway_name)}}
if detach:
igw = _get_resource('internet_gateway',
resource_id=internet_gateway_id, region=region,
key=key, keyid=keyid, profile=profile)
if not igw:
return {'deleted': False, 'error': {
'message': 'internet gateway {0} does not exist.'.format(
internet_gateway_id)}}
if igw.attachments:
conn = _get_conn(region=region, key=key, keyid=keyid,
profile=profile)
conn.detach_internet_gateway(internet_gateway_id,
igw.attachments[0].vpc_id)
return _delete_resource('internet_gateway',
resource_id=internet_gateway_id,
region=region, key=key, keyid=keyid,
profile=profile)
except BotoServerError as e:
return {'deleted': False, 'error': __utils__['boto.get_error'](e)} |
def is_attr_selected(attr_name,  # type: str
                     include=None,  # type: Union[str, Tuple[str]]
                     exclude=None  # type: Union[str, Tuple[str]]
                     ):
    """Decide whether an action has to be performed on the attribute or not,
    based on its name.

    :param attr_name: name of the attribute under consideration.
    :param include: if provided, only attributes listed here are selected.
    :param exclude: if provided, attributes listed here are rejected.
    :return: True if the attribute is selected, False otherwise.
    :raises ValueError: if both include and exclude are provided.
    """
    if include is not None and exclude is not None:
        raise ValueError('Only one of \'include\' or \'exclude\' argument should be provided.')
    # win time by not doing this
    # check_var(include, var_name='include', var_types=[str, tuple], enforce_not_none=False)
    # check_var(exclude, var_name='exclude', var_types=[str, tuple], enforce_not_none=False)
    # 'self' is never a real attribute to act on.
    # Bug fix: the original used `attr_name is 'self'`, an identity check that
    # only works when CPython happens to intern the string.
    if attr_name == 'self':
        return False
    if exclude and attr_name in exclude:
        return False
    if not include or attr_name in include:
        return True
    else:
        return False
### Input:
decide whether an action has to be performed on the attribute or not, based on its name
### Response:
def is_attr_selected(attr_name, # type: str
include=None, # type: Union[str, Tuple[str]]
exclude=None # type: Union[str, Tuple[str]]
):
"""decide whether an action has to be performed on the attribute or not, based on its name"""
if include is not None and exclude is not None:
raise ValueError('Only one of \'include\' or \'exclude\' argument should be provided.')
# win time by not doing this
# check_var(include, var_name='include', var_types=[str, tuple], enforce_not_none=False)
# check_var(exclude, var_name='exclude', var_types=[str, tuple], enforce_not_none=False)
if attr_name is 'self':
return False
if exclude and attr_name in exclude:
return False
if not include or attr_name in include:
return True
else:
return False |
def execute_update(args):
    """Execute the update based on command line args and returns a dictionary
    with 'execution result, ''response code', 'response info' and
    'process friendly message'.
    """
    provider_class = getattr(dnsupdater,
                             dnsupdater.AVAILABLE_PLUGINS.get(args.provider))
    updater_options = {}
    process_message = None
    auth = None
    # Three entry modes: --store (save credentials, optionally update),
    # explicit credentials on the CLI, or previously stored credentials.
    if args.store: # --store argument
        # auth_type 'T' means token-based auth; anything else is user/password.
        if provider_class.auth_type == 'T':
            user_arg = args.usertoken or utils.read_input(
                "Paste your auth token: ")
            auth = authinfo.ApiAuth(usertoken=user_arg)
        else:
            user_arg = args.usertoken or utils.read_input(
                "Type your username: ")
            pass_arg = args.password or getpass.getpass("Type your password: ")
            auth = authinfo.ApiAuth(user_arg, pass_arg)
        authinfo.store(auth, args.provider, args.config)
        exec_result = EXECUTION_RESULT_OK
        if not args.hostname:
            update_ddns = False
            process_message = "Auth info stored."
        else:
            update_ddns = True
    # informations arguments
    elif args.usertoken and args.hostname:
        if provider_class.auth_type == 'T':
            auth = authinfo.ApiAuth(args.usertoken)
        else:
            auth = authinfo.ApiAuth(args.usertoken, args.password)
        update_ddns = True
        exec_result = EXECUTION_RESULT_OK
    elif args.hostname:
        # Hostname only: fall back to credentials stored by a previous --store.
        if authinfo.exists(args.provider, args.config):
            auth = authinfo.load(args.provider, args.config)
            update_ddns = True
            exec_result = EXECUTION_RESULT_OK
        else:
            update_ddns = False
            exec_result = EXECUTION_RESULT_NOK
            process_message = "No stored auth information found for " \
                              "provider: '%s'" % args.provider
    else: # no arguments
        update_ddns = False
        exec_result = EXECUTION_RESULT_NOK
        process_message = "Warning: The hostname to be updated must be " \
                          "provided.\nUsertoken and password can be either " \
                          "provided via command line or stored with --store " \
                          "option.\nExecute noipy --help for more details."
    # The 'generic' provider needs an explicit, well-formed update URL.
    if update_ddns and args.provider == 'generic':
        if args.url:
            if not URL_RE.match(args.url):
                process_message = "Malformed URL."
                exec_result = EXECUTION_RESULT_NOK
                update_ddns = False
            else:
                updater_options['url'] = args.url
        else:
            process_message = "Must use --url if --provider is 'generic' " \
                              "(default)"
            exec_result = EXECUTION_RESULT_NOK
            update_ddns = False
    response_code = None
    response_text = None
    if update_ddns:
        # Use the explicit --ip when given, otherwise detect the public IP.
        ip_address = args.ip if args.ip else utils.get_ip()
        if not ip_address:
            process_message = "Unable to get IP address. Check connection."
            exec_result = EXECUTION_RESULT_NOK
        elif ip_address == utils.get_dns_ip(args.hostname):
            # DNS already points at this IP; skip the provider call.
            process_message = "No update required."
        else:
            updater = provider_class(auth, args.hostname, updater_options)
            print("Updating hostname '%s' with IP address %s "
                  "[provider: '%s']..."
                  % (args.hostname, ip_address, args.provider))
            response_code, response_text = updater.update_dns(ip_address)
            process_message = updater.status_message
    proc_result = {
        'exec_result': exec_result,
        'response_code': response_code,
        'response_text': response_text,
        'process_message': process_message,
    }
    return proc_result | Execute the update based on command line args and returns a dictionary
with 'execution result, ''response code', 'response info' and
'process friendly message'. | Below is the the instruction that describes the task:
### Input:
Execute the update based on command line args and returns a dictionary
with 'execution result, ''response code', 'response info' and
'process friendly message'.
### Response:
def execute_update(args):
"""Execute the update based on command line args and returns a dictionary
with 'execution result, ''response code', 'response info' and
'process friendly message'.
"""
provider_class = getattr(dnsupdater,
dnsupdater.AVAILABLE_PLUGINS.get(args.provider))
updater_options = {}
process_message = None
auth = None
if args.store: # --store argument
if provider_class.auth_type == 'T':
user_arg = args.usertoken or utils.read_input(
"Paste your auth token: ")
auth = authinfo.ApiAuth(usertoken=user_arg)
else:
user_arg = args.usertoken or utils.read_input(
"Type your username: ")
pass_arg = args.password or getpass.getpass("Type your password: ")
auth = authinfo.ApiAuth(user_arg, pass_arg)
authinfo.store(auth, args.provider, args.config)
exec_result = EXECUTION_RESULT_OK
if not args.hostname:
update_ddns = False
process_message = "Auth info stored."
else:
update_ddns = True
# informations arguments
elif args.usertoken and args.hostname:
if provider_class.auth_type == 'T':
auth = authinfo.ApiAuth(args.usertoken)
else:
auth = authinfo.ApiAuth(args.usertoken, args.password)
update_ddns = True
exec_result = EXECUTION_RESULT_OK
elif args.hostname:
if authinfo.exists(args.provider, args.config):
auth = authinfo.load(args.provider, args.config)
update_ddns = True
exec_result = EXECUTION_RESULT_OK
else:
update_ddns = False
exec_result = EXECUTION_RESULT_NOK
process_message = "No stored auth information found for " \
"provider: '%s'" % args.provider
else: # no arguments
update_ddns = False
exec_result = EXECUTION_RESULT_NOK
process_message = "Warning: The hostname to be updated must be " \
"provided.\nUsertoken and password can be either " \
"provided via command line or stored with --store " \
"option.\nExecute noipy --help for more details."
if update_ddns and args.provider == 'generic':
if args.url:
if not URL_RE.match(args.url):
process_message = "Malformed URL."
exec_result = EXECUTION_RESULT_NOK
update_ddns = False
else:
updater_options['url'] = args.url
else:
process_message = "Must use --url if --provider is 'generic' " \
"(default)"
exec_result = EXECUTION_RESULT_NOK
update_ddns = False
response_code = None
response_text = None
if update_ddns:
ip_address = args.ip if args.ip else utils.get_ip()
if not ip_address:
process_message = "Unable to get IP address. Check connection."
exec_result = EXECUTION_RESULT_NOK
elif ip_address == utils.get_dns_ip(args.hostname):
process_message = "No update required."
else:
updater = provider_class(auth, args.hostname, updater_options)
print("Updating hostname '%s' with IP address %s "
"[provider: '%s']..."
% (args.hostname, ip_address, args.provider))
response_code, response_text = updater.update_dns(ip_address)
process_message = updater.status_message
proc_result = {
'exec_result': exec_result,
'response_code': response_code,
'response_text': response_text,
'process_message': process_message,
}
return proc_result |
def solve(self, angles0, target):
    """Compute and return the joint angles that reach ``target``.

    ``angles0`` is the initial guess handed to the optimizer.
    """
    initial = np.array(angles0)
    return self.optimizer.optimize(initial, target)
### Input:
Calculate joint angles and returns it.
### Response:
def solve(self, angles0, target):
"""Calculate joint angles and returns it."""
return self.optimizer.optimize(np.array(angles0), target) |
def repository(self, owner, repository):
    """Fetch a single repository for the given owner/repository pair.

    :param str owner: (required) login of the repository owner
    :param str repository: (required) name of the repository
    :returns: :class:`Repository <github3.repos.Repository>` or None
    """
    json = None
    # Only hit the API when both parts of the slug are present.
    if owner and repository:
        url = self._build_url('repos', owner, repository)
        json = self._json(self._get(url), 200)
    return Repository(json, self) if json else None
owner and repository
:param str owner: (required)
:param str repository: (required)
:returns: :class:`Repository <github3.repos.Repository>` | Below is the the instruction that describes the task:
### Input:
Returns a Repository object for the specified combination of
owner and repository
:param str owner: (required)
:param str repository: (required)
:returns: :class:`Repository <github3.repos.Repository>`
### Response:
def repository(self, owner, repository):
"""Returns a Repository object for the specified combination of
owner and repository
:param str owner: (required)
:param str repository: (required)
:returns: :class:`Repository <github3.repos.Repository>`
"""
json = None
if owner and repository:
url = self._build_url('repos', owner, repository)
json = self._json(self._get(url), 200)
return Repository(json, self) if json else None |
def get_dpi():
    """Returns screen dpi resolution as (horizontal_dpi, vertical_dpi)."""
    def pxmm_2_dpi(px_mm):
        # px_mm is a (pixels, length_mm) pair for one screen dimension.
        pixels, length_mm = px_mm
        # Bug fix: one inch is exactly 25.4 mm, not 25.6.
        return pixels * 25.4 / length_mm
    return map(pxmm_2_dpi, zip(wx.GetDisplaySize(), wx.GetDisplaySizeMM()))
def _create_dict(self, format, args):
    """Handle the case where the outermost type of format is a dict.

    Returns a (variant, remaining_format, remaining_args) tuple, mirroring
    the other _create_* helpers.
    """
    builder = None
    if args is None or not args[0]:
        # empty value: we need to call _create() to parse the subtype,
        # and specify the element type precisely
        rest_format = self._create(format[2:], None)[1]
        rest_format = self._create(rest_format, None)[1]
        if not rest_format.startswith('}'):
            raise TypeError('dictionary type string not closed with }')
        rest_format = rest_format[1:] # eat the }
        # The consumed prefix of `format` is exactly the element type string.
        element_type = format[:len(format) - len(rest_format)]
        builder = GLib.VariantBuilder.new(variant_type_from_string(element_type))
    else:
        builder = GLib.VariantBuilder.new(variant_type_from_string('a{?*}'))
        for k, v in args[0].items():
            # Parse key then value from the format string for each entry.
            (key_v, rest_format, _) = self._create(format[2:], [k])
            (val_v, rest_format, _) = self._create(rest_format, [v])
            if not rest_format.startswith('}'):
                raise TypeError('dictionary type string not closed with }')
            rest_format = rest_format[1:] # eat the }
            entry = GLib.VariantBuilder.new(variant_type_from_string('{?*}'))
            entry.add_value(key_v)
            entry.add_value(val_v)
            builder.add_value(entry.end())
    # Consume the argument this dict was built from.
    if args is not None:
        args = args[1:]
    return (builder.end(), rest_format, args) | Handle the case where the outermost type of format is a dict.
### Input:
Handle the case where the outermost type of format is a dict.
### Response:
def _create_dict(self, format, args):
"""Handle the case where the outermost type of format is a dict."""
builder = None
if args is None or not args[0]:
# empty value: we need to call _create() to parse the subtype,
# and specify the element type precisely
rest_format = self._create(format[2:], None)[1]
rest_format = self._create(rest_format, None)[1]
if not rest_format.startswith('}'):
raise TypeError('dictionary type string not closed with }')
rest_format = rest_format[1:] # eat the }
element_type = format[:len(format) - len(rest_format)]
builder = GLib.VariantBuilder.new(variant_type_from_string(element_type))
else:
builder = GLib.VariantBuilder.new(variant_type_from_string('a{?*}'))
for k, v in args[0].items():
(key_v, rest_format, _) = self._create(format[2:], [k])
(val_v, rest_format, _) = self._create(rest_format, [v])
if not rest_format.startswith('}'):
raise TypeError('dictionary type string not closed with }')
rest_format = rest_format[1:] # eat the }
entry = GLib.VariantBuilder.new(variant_type_from_string('{?*}'))
entry.add_value(key_v)
entry.add_value(val_v)
builder.add_value(entry.end())
if args is not None:
args = args[1:]
return (builder.end(), rest_format, args) |
def resample(self, data, input_rate):
    """
    Microphone may not support our native processing sampling rate, so
    resample from input_rate to RATE_PROCESS here for webrtcvad and
    deepspeech.

    Args:
        data (binary): Input audio stream (raw little-endian int16 samples)
        input_rate (int): Input audio rate to resample from

    Returns:
        bytes: the resampled int16 audio.
    """
    # NOTE(review): the original implementation reads self.input_rate and
    # ignores the input_rate argument; behavior kept as-is for compatibility.
    # np.fromstring/tostring are deprecated; frombuffer/tobytes are the
    # supported equivalents.
    data16 = np.frombuffer(data, dtype=np.int16)
    resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS)
    resampled = signal.resample(data16, resample_size)
    resampled16 = np.array(resampled, dtype=np.int16)
    return resampled16.tobytes()
return resample16.tostring() | Microphone may not support our native processing sampling rate, so
resample from input_rate to RATE_PROCESS here for webrtcvad and
deepspeech
Args:
data (binary): Input audio stream
input_rate (int): Input audio rate to resample from | Below is the the instruction that describes the task:
### Input:
Microphone may not support our native processing sampling rate, so
resample from input_rate to RATE_PROCESS here for webrtcvad and
deepspeech
Args:
data (binary): Input audio stream
input_rate (int): Input audio rate to resample from
### Response:
def resample(self, data, input_rate):
"""
Microphone may not support our native processing sampling rate, so
resample from input_rate to RATE_PROCESS here for webrtcvad and
deepspeech
Args:
data (binary): Input audio stream
input_rate (int): Input audio rate to resample from
"""
data16 = np.fromstring(string=data, dtype=np.int16)
resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS)
resample = signal.resample(data16, resample_size)
resample16 = np.array(resample, dtype=np.int16)
return resample16.tostring() |
def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:
    """
    Print a plain table with the details of the given trainings.

    :param trainings: iterable of tuples (train_dir, configuration dict, trace)
    """
    rows = []
    for train_dir, config, trace in trainings:
        started = trace[TrainingTraceKeys.TRAIN_BEGIN]
        finished = trace[TrainingTraceKeys.TRAIN_END]
        if not started:
            # Training never began: no age or duration to report.
            age = CXF_NA_STR
            duration = CXF_NA_STR
        else:
            age = format_timedelta(datetime.now() - started) + ' ago'
            duration = format_timedelta(finished - started) if finished else CXF_NA_STR
        epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] or 0
        # Only the last component of each fully-qualified class name is shown.
        class_names = [fq_name.split('.')[-1] for fq_name in get_classes(config)]
        rows.append([path.basename(train_dir)] + class_names +
                    [age, duration, epochs_done])
    print(tabulate(rows, tablefmt='plain'))
:param trainings: iterable of tuples (train_dir, configuration dict, trace) | Below is the the instruction that describes the task:
### Input:
Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace)
### Response:
def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace)
"""
long_table = []
for train_dir, config, trace in trainings:
start_datetime, end_datetime = trace[TrainingTraceKeys.TRAIN_BEGIN], trace[TrainingTraceKeys.TRAIN_END]
if start_datetime:
age = format_timedelta(datetime.now() - start_datetime) + ' ago'
if end_datetime:
duration = format_timedelta(end_datetime - start_datetime)
else:
duration = CXF_NA_STR
else:
age = CXF_NA_STR
duration = CXF_NA_STR
epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] if trace[TrainingTraceKeys.EPOCHS_DONE] else 0
long_table.append([path.basename(train_dir)] +
list(map(lambda fq_name: fq_name.split('.')[-1], get_classes(config))) +
[age, duration, epochs_done])
print(tabulate(long_table, tablefmt='plain')) |
def get(key, adapter = MemoryAdapter):
    '''
    Fetch the cached value stored under ``key``.

    Returns None when the cache entry has expired.
    '''
    try:
        payload = adapter().get(key)
        return pickle.loads(payload)
    except CacheExpiredException:
        return None
### Input:
get the cache value
### Response:
def get(key, adapter = MemoryAdapter):
'''
get the cache value
'''
try:
return pickle.loads(adapter().get(key))
except CacheExpiredException:
return None |
def function(self, new_function):
    """Update this Monitor's function (MonitorState) on the server."""
    payload = {'Monitor[Function]': new_function.value}
    self._client.change_state(self._monitor_url, payload)
### Input:
Set the MonitorState of this Monitor.
### Response:
def function(self, new_function):
"""Set the MonitorState of this Monitor."""
self._client.change_state(
self._monitor_url,
{'Monitor[Function]': new_function.value}) |
def hmset(self, **kwargs):
    """
    This command on the model allow setting many instancehash fields with only
    one redis call. You must pass kwargs with field names as keys, with
    their value.

    Indexes of indexable fields are updated first and rolled back if the
    redis call (or any index update) fails.
    """
    # NOTE(review): this only rejects the call when *none* of the kwargs is
    # an InstanceHashField; a mix of valid and invalid names passes this
    # check — confirm that is intended.
    if kwargs and not any(kwarg in self._instancehash_fields for kwarg in iterkeys(kwargs)):
        raise ValueError("Only InstanceHashField can be used here.")
    indexed = []
    # main try block to revert indexes if something fail
    try:
        # Set indexes for indexable fields.
        for field_name, value in iteritems(kwargs):
            field = self.get_field(field_name)
            if field.indexable:
                indexed.append(field)
                field.deindex()
                field.index(value)
        # Call redis (waits for a dict)
        result = self._call_command('hmset', kwargs)
        return result
    except:
        # We revert indexes previously set if we have an exception, then
        # really raise the error
        for field in indexed:
            field._rollback_indexes()
        raise
    finally:
        for field in indexed:
            field._reset_indexes_caches() | This command on the model allow setting many instancehash fields with only
field._reset_indexes_caches() | This command on the model allow setting many instancehash fields with only
one redis call. You must pass kwargs with field names as keys, with
their value. | Below is the the instruction that describes the task:
### Input:
This command on the model allow setting many instancehash fields with only
one redis call. You must pass kwargs with field names as keys, with
their value.
### Response:
def hmset(self, **kwargs):
    """
    Set many InstanceHashField values with a single redis HMSET call.

    Pass field names as keyword arguments mapped to their new values.
    Raises ValueError when none of the kwargs is an InstanceHashField.
    Indexes of indexable fields are updated before the redis call; on
    any failure the index changes are rolled back and the error is
    re-raised.

    NOTE(review): the `any` below only rejects kwargs when *no* key is
    an InstanceHashField; a mix of hash and non-hash fields slips
    through -- verify whether `all` was intended.
    """
    if kwargs and not any(kwarg in self._instancehash_fields for kwarg in iterkeys(kwargs)):
        raise ValueError("Only InstanceHashField can be used here.")
    # Fields whose indexes were touched, kept for rollback/cache-reset.
    indexed = []
    # main try block to revert indexes if something fail
    try:
        # Set indexes for indexable fields.
        for field_name, value in iteritems(kwargs):
            field = self.get_field(field_name)
            if field.indexable:
                indexed.append(field)
                # Remove the old indexed value before indexing the new one.
                field.deindex()
                field.index(value)
        # Call redis (waits for a dict)
        result = self._call_command('hmset', kwargs)
        return result
    except:
        # We revert indexes previously set if we have an exception, then
        # really raise the error
        for field in indexed:
            field._rollback_indexes()
        raise
    finally:
        # Always drop per-field rollback caches, success or failure.
        for field in indexed:
            field._reset_indexes_caches() |
def add_topic(self, group_alias, title, content):
"""
创建话题(小心验证码~)
:param group_alias: 小组ID
:param title: 标题
:param content: 内容
:return: bool
"""
xml = self.api.req(API_GROUP_ADD_TOPIC % group_alias, 'post', data={
'ck': self.api.ck(),
'rev_title': title,
'rev_text': content,
'rev_submit': '好了,发言',
})
return not xml.url.startswith(API_GROUP_ADD_TOPIC % group_alias) | 创建话题(小心验证码~)
:param group_alias: 小组ID
:param title: 标题
:param content: 内容
:return: bool | Below is the instruction that describes the task:
### Input:
创建话题(小心验证码~)
:param group_alias: 小组ID
:param title: 标题
:param content: 内容
:return: bool
### Response:
def add_topic(self, group_alias, title, content):
    """
    Create a new topic in a group (beware of captchas~).

    :param group_alias: group ID
    :param title: topic title
    :param content: topic body text
    :return: bool -- True when the post appears to have succeeded
             (i.e. we were redirected away from the submission URL)
    """
    endpoint = API_GROUP_ADD_TOPIC % group_alias
    payload = {
        'ck': self.api.ck(),
        'rev_title': title,
        'rev_text': content,
        'rev_submit': '好了,发言',
    }
    response = self.api.req(endpoint, 'post', data=payload)
    return not response.url.startswith(endpoint)
def dumps(obj):
"""
Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict.
"""
(data, schema) = to_serializable(obj)
return _json.dumps({'data': data, 'schema': schema}) | Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict. | Below is the instruction that describes the task:
### Input:
Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict.
### Response:
def dumps(obj):
    """
    Serialize a supported object to a JSON string (valid per RFC 7159).

    Works like the built-in json.dumps, except that the input may be any
    of: SFrame, SArray, SGraph, a single flexible_type value (Image, int,
    long, float, datetime.datetime), a recursive flexible_type (list,
    dict, array.array), or a recursive variant_type (list/dict of the
    above). The result embeds both the data and its schema, because the
    schema is needed at load time to tell wrapped types (like Image)
    apart from plain dicts.
    """
    serialized_data, schema = to_serializable(obj)
    envelope = {'data': serialized_data, 'schema': schema}
    return _json.dumps(envelope)
def _get_tmp_account_id(cls, writer_spec):
"""Returns the account id to use with tmp bucket."""
# pick tmp id iff tmp bucket is set explicitly
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
    return cls._get_account_id(writer_spec) | Returns the account id to use with tmp bucket. | Below is the instruction that describes the task:
### Input:
Returns the account id to use with tmp bucket.
### Response:
def _get_tmp_account_id(cls, writer_spec):
"""Returns the account id to use with tmp bucket."""
# pick tmp id iff tmp bucket is set explicitly
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
return cls._get_account_id(writer_spec) |
def value_contains(self, value, attribute):
"""
Determine if any of the items in the value list for the given
attribute contain value.
"""
for item in self[attribute]:
if value in item:
return True
return False | Determine if any of the items in the value list for the given
attribute contain value. | Below is the instruction that describes the task:
### Input:
Determine if any of the items in the value list for the given
attribute contain value.
### Response:
def value_contains(self, value, attribute):
    """
    Return True when *value* occurs inside any of the items stored under
    *attribute*, False otherwise (including when the list is empty).
    """
    return any(value in item for item in self[attribute])
def install_default_formatters(self):
"""
Installs default formatters for the following tags:
b, i, u, s, list (and \*), quote, code, center, color, url
"""
self.add_simple_formatter('b', '<strong>%(value)s</strong>')
self.add_simple_formatter('i', '<em>%(value)s</em>')
self.add_simple_formatter('u', '<u>%(value)s</u>')
self.add_simple_formatter('s', '<strike>%(value)s</strike>')
self.add_simple_formatter('hr', '<hr />', standalone=True)
self.add_simple_formatter('sub', '<sub>%(value)s</sub>')
self.add_simple_formatter('sup', '<sup>%(value)s</sup>')
def _render_list(name, value, options, parent, context):
list_type = options['list'] if (options and 'list' in options) else '*'
css_opts = {
'1': 'decimal', '01': 'decimal-leading-zero',
'a': 'lower-alpha', 'A': 'upper-alpha',
'i': 'lower-roman', 'I': 'upper-roman',
}
tag = 'ol' if list_type in css_opts else 'ul'
css = ' style="list-style-type:%s;"' % css_opts[list_type] if list_type in css_opts else ''
return '<%s%s>%s</%s>' % (tag, css, value, tag)
self.add_formatter('list', _render_list, transform_newlines=False, strip=True, swallow_trailing_newline=True)
# Make sure transform_newlines = False for [*], so [code] tags can be embedded without transformation.
def _render_list_item(name, value, options, parent, context):
if not parent or parent.tag_name != 'list':
return '[*]%s<br />' % value
return '<li>%s</li>' % value
self.add_formatter('*', _render_list_item, newline_closes=True, transform_newlines=False,
same_tag_closes=True, strip=True)
self.add_simple_formatter('quote', '<blockquote>%(value)s</blockquote>', strip=True,
swallow_trailing_newline=True)
self.add_simple_formatter('code', '<code>%(value)s</code>', render_embedded=False, transform_newlines=False,
swallow_trailing_newline=True, replace_cosmetic=False)
self.add_simple_formatter('center', '<div style="text-align:center;">%(value)s</div>')
def _render_color(name, value, options, parent, context):
if 'color' in options:
color = options['color'].strip()
elif options:
color = list(options.keys())[0].strip()
else:
return value
match = re.match(r'^([a-z]+)|^(#[a-f0-9]{3,6})', color, re.I)
color = match.group() if match else 'inherit'
return '<span style="color:%(color)s;">%(value)s</span>' % {
'color': color,
'value': value,
}
self.add_formatter('color', _render_color)
def _render_url(name, value, options, parent, context):
if options and 'url' in options:
# Option values are not escaped for HTML output.
href = self._replace(options['url'], self.REPLACE_ESCAPE)
else:
href = value
# Completely ignore javascript: and data: "links".
if re.sub(r'[^a-z0-9+]', '', href.lower().split(':', 1)[0]) in ('javascript', 'data', 'vbscript'):
return ''
# Only add the missing http:// if it looks like it starts with a domain name.
if '://' not in href and _domain_re.match(href):
href = 'http://' + href
return self.url_template.format(href=href.replace('"', '%22'), text=value)
self.add_formatter('url', _render_url, replace_links=False, replace_cosmetic=False) | Installs default formatters for the following tags:
b, i, u, s, list (and \*), quote, code, center, color, url | Below is the instruction that describes the task:
### Input:
Installs default formatters for the following tags:
b, i, u, s, list (and \*), quote, code, center, color, url
### Response:
def install_default_formatters(self):
    """
    Installs default formatters for the following tags:
    b, i, u, s, hr, sub, sup, list (and \*), quote, code, center, color, url
    """
    # Simple inline tags: straight one-to-one HTML substitutions.
    self.add_simple_formatter('b', '<strong>%(value)s</strong>')
    self.add_simple_formatter('i', '<em>%(value)s</em>')
    self.add_simple_formatter('u', '<u>%(value)s</u>')
    self.add_simple_formatter('s', '<strike>%(value)s</strike>')
    self.add_simple_formatter('hr', '<hr />', standalone=True)
    self.add_simple_formatter('sub', '<sub>%(value)s</sub>')
    self.add_simple_formatter('sup', '<sup>%(value)s</sup>')
    # [list] / [list=TYPE]: map the BBCode list type to a CSS
    # list-style; unknown or absent types fall back to an unordered list.
    def _render_list(name, value, options, parent, context):
        list_type = options['list'] if (options and 'list' in options) else '*'
        css_opts = {
            '1': 'decimal', '01': 'decimal-leading-zero',
            'a': 'lower-alpha', 'A': 'upper-alpha',
            'i': 'lower-roman', 'I': 'upper-roman',
        }
        tag = 'ol' if list_type in css_opts else 'ul'
        css = ' style="list-style-type:%s;"' % css_opts[list_type] if list_type in css_opts else ''
        return '<%s%s>%s</%s>' % (tag, css, value, tag)
    self.add_formatter('list', _render_list, transform_newlines=False, strip=True, swallow_trailing_newline=True)
    # Make sure transform_newlines = False for [*], so [code] tags can be embedded without transformation.
    def _render_list_item(name, value, options, parent, context):
        # A stray [*] outside a [list] is echoed back as literal text.
        if not parent or parent.tag_name != 'list':
            return '[*]%s<br />' % value
        return '<li>%s</li>' % value
    self.add_formatter('*', _render_list_item, newline_closes=True, transform_newlines=False,
                       same_tag_closes=True, strip=True)
    self.add_simple_formatter('quote', '<blockquote>%(value)s</blockquote>', strip=True,
                              swallow_trailing_newline=True)
    self.add_simple_formatter('code', '<code>%(value)s</code>', render_embedded=False, transform_newlines=False,
                              swallow_trailing_newline=True, replace_cosmetic=False)
    self.add_simple_formatter('center', '<div style="text-align:center;">%(value)s</div>')
    # [color=NAME|#HEX]: only an alphabetic name or a 3-6 digit hex code
    # is accepted; anything else renders as 'inherit'.
    def _render_color(name, value, options, parent, context):
        if 'color' in options:
            color = options['color'].strip()
        elif options:
            color = list(options.keys())[0].strip()
        else:
            return value
        match = re.match(r'^([a-z]+)|^(#[a-f0-9]{3,6})', color, re.I)
        color = match.group() if match else 'inherit'
        return '<span style="color:%(color)s;">%(value)s</span>' % {
            'color': color,
            'value': value,
        }
    self.add_formatter('color', _render_color)
    # [url] / [url=HREF]: drop dangerous schemes and prepend http:// to
    # bare domain names.
    def _render_url(name, value, options, parent, context):
        if options and 'url' in options:
            # Option values are not escaped for HTML output.
            href = self._replace(options['url'], self.REPLACE_ESCAPE)
        else:
            href = value
        # Completely ignore javascript: and data: "links".
        if re.sub(r'[^a-z0-9+]', '', href.lower().split(':', 1)[0]) in ('javascript', 'data', 'vbscript'):
            return ''
        # Only add the missing http:// if it looks like it starts with a domain name.
        if '://' not in href and _domain_re.match(href):
            href = 'http://' + href
        return self.url_template.format(href=href.replace('"', '%22'), text=value)
    self.add_formatter('url', _render_url, replace_links=False, replace_cosmetic=False) |
def __get_view_tmpl(tag_key):
'''
根据分类uid的4位编码来找模板。如果4位的存在,则使用4位的;不然找其父类;再不然则使用通用模板
只有View需要,edit, list使用通用模板
:return String.
'''
the_view_file_4 = './templates/tmpl_{0}/tpl_view_{1}.html'.format(
KIND_DICS['kind_' + tag_key.split('_')[-1]],
tag_key.split('_')[1]
)
the_view_file_2 = './templates/tmpl_{0}/tpl_view_{1}.html'.format(
KIND_DICS['kind_' + tag_key.split('_')[-1]],
tag_key.split('_')[1][:2]
)
if os.path.exists(the_view_file_4):
the_view_sig_str = '_{0}'.format(tag_key.split('_')[1])
elif os.path.exists(the_view_file_2):
the_view_sig_str = '_{0}'.format(tag_key.split('_')[1][:2])
else:
the_view_sig_str = ''
return the_view_sig_str | 根据分类uid的4位编码来找模板。如果4位的存在,则使用4位的;不然找其父类;再不然则使用通用模板
只有View需要,edit, list使用通用模板
:return String. | Below is the instruction that describes the task:
### Input:
根据分类uid的4位编码来找模板。如果4位的存在,则使用4位的;不然找其父类;再不然则使用通用模板
只有View需要,edit, list使用通用模板
:return String.
### Response:
def __get_view_tmpl(tag_key):
    '''
    Resolve the view-template suffix for a category uid.

    Look for a template matching the 4-character code first; if absent,
    fall back to the 2-character (parent) code; otherwise use the
    generic template (empty suffix). Only the "view" page needs this;
    edit and list pages always use the generic template.
    :return String.
    '''
    kind = KIND_DICS['kind_' + tag_key.split('_')[-1]]
    code4 = tag_key.split('_')[1]
    code2 = code4[:2]
    tmpl4 = './templates/tmpl_{0}/tpl_view_{1}.html'.format(kind, code4)
    tmpl2 = './templates/tmpl_{0}/tpl_view_{1}.html'.format(kind, code2)
    if os.path.exists(tmpl4):
        return '_{0}'.format(code4)
    if os.path.exists(tmpl2):
        return '_{0}'.format(code2)
    return ''
def configmap_install_id_plugin(scout, app, map_name=None, namespace="default"):
"""
Scout id_plugin that uses a Kubernetes configmap to store the install ID.
:param scout: Scout instance that's calling the plugin
:param app: Name of the application that's using Scout
:param map_name: Optional ConfigMap name to use; defaults to "scout.config.$app"
:param namespace: Optional Kubernetes namespace to use; defaults to "default"
This plugin assumes that the KUBERNETES_SERVICE_{HOST,PORT,PORT_HTTPS}
environment variables are set correctly, and it assumes the default Kubernetes
namespace unless the 'namespace' keyword argument is used to select a different
namespace.
If KUBERNETES_ACCESS_TOKEN is set in the environment, use that for the apiserver
access token -- otherwise, the plugin assumes that it's running in a Kubernetes
pod and tries to read its token from /var/run/secrets.
"""
plugin_response = None
if not map_name:
map_name = "scout.config.{0}".format(app)
kube_host = os.environ.get('KUBERNETES_SERVICE_HOST', None)
try:
kube_port = int(os.environ.get('KUBERNETES_SERVICE_PORT', 443))
except ValueError:
scout.logger.debug("Scout: KUBERNETES_SERVICE_PORT isn't numeric, defaulting to 443")
kube_port = 443
kube_proto = "https" if (kube_port == 443) else "http"
kube_token = os.environ.get('KUBERNETES_ACCESS_TOKEN', None)
if not kube_host:
# We're not running in Kubernetes. Fall back to the usual filesystem stuff.
scout.logger.debug("Scout: no KUBERNETES_SERVICE_HOST, not running in Kubernetes")
return None
if not kube_token:
try:
kube_token = open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r").read()
except OSError:
pass
if not kube_token:
# We're not running in Kubernetes. Fall back to the usual filesystem stuff.
scout.logger.debug("Scout: not running in Kubernetes")
return None
# OK, we're in a cluster. Load our map.
base_url = "%s://%s:%s" % (kube_proto, kube_host, kube_port)
url_path = "api/v1/namespaces/%s/configmaps" % namespace
auth_headers = { "Authorization": "Bearer " + kube_token }
install_id = None
cm_url = "%s/%s" % (base_url, url_path)
fetch_url = "%s/%s" % (cm_url, map_name)
scout.logger.debug("Scout: trying %s" % fetch_url)
try:
r = requests.get(fetch_url, headers=auth_headers, verify=False)
if r.status_code == 200:
# OK, the map is present. What do we see?
map_data = r.json()
if "data" not in map_data:
# This is "impossible".
scout.logger.error("Scout: no map data in returned map???")
else:
map_data = map_data.get("data", {})
scout.logger.debug("Scout: configmap has map data %s" % json.dumps(map_data))
install_id = map_data.get("install_id", None)
if install_id:
scout.logger.debug("Scout: got install_id %s from map" % install_id)
plugin_response = { "install_id": install_id }
except OSError as e:
scout.logger.debug("Scout: could not read configmap (map %s, namespace %s): %s" %
(map_name, namespace, e))
if not install_id:
# No extant install_id. Try to create a new one.
install_id = str(uuid4())
cm = {
"apiVersion":"v1",
"kind":"ConfigMap",
"metadata":{
"name": map_name,
"namespace": namespace,
},
"data": {
"install_id": install_id
}
}
scout.logger.debug("Scout: saving new install_id %s" % install_id)
saved = False
try:
r = requests.post(cm_url, headers=auth_headers, verify=False, json=cm)
if r.status_code == 201:
saved = True
scout.logger.debug("Scout: saved install_id %s" % install_id)
plugin_response = {
"install_id": install_id,
"new_install": True
}
else:
scout.logger.error("Scout: could not save install_id: {0}, {1}".format(r.status_code, r.text))
except OSError as e:
logging.debug("Scout: could not write configmap (map %s, namespace %s): %s" %
(map_name, namespace, e))
scout.logger.debug("Scout: plugin_response %s" % json.dumps(plugin_response))
return plugin_response | Scout id_plugin that uses a Kubernetes configmap to store the install ID.
:param scout: Scout instance that's calling the plugin
:param app: Name of the application that's using Scout
:param map_name: Optional ConfigMap name to use; defaults to "scout.config.$app"
:param namespace: Optional Kubernetes namespace to use; defaults to "default"
This plugin assumes that the KUBERNETES_SERVICE_{HOST,PORT,PORT_HTTPS}
environment variables are set correctly, and it assumes the default Kubernetes
namespace unless the 'namespace' keyword argument is used to select a different
namespace.
If KUBERNETES_ACCESS_TOKEN is set in the environment, use that for the apiserver
access token -- otherwise, the plugin assumes that it's running in a Kubernetes
pod and tries to read its token from /var/run/secrets. | Below is the instruction that describes the task:
### Input:
Scout id_plugin that uses a Kubernetes configmap to store the install ID.
:param scout: Scout instance that's calling the plugin
:param app: Name of the application that's using Scout
:param map_name: Optional ConfigMap name to use; defaults to "scout.config.$app"
:param namespace: Optional Kubernetes namespace to use; defaults to "default"
This plugin assumes that the KUBERNETES_SERVICE_{HOST,PORT,PORT_HTTPS}
environment variables are set correctly, and it assumes the default Kubernetes
namespace unless the 'namespace' keyword argument is used to select a different
namespace.
If KUBERNETES_ACCESS_TOKEN is set in the environment, use that for the apiserver
access token -- otherwise, the plugin assumes that it's running in a Kubernetes
pod and tries to read its token from /var/run/secrets.
### Response:
def configmap_install_id_plugin(scout, app, map_name=None, namespace="default"):
    """
    Scout id_plugin that uses a Kubernetes configmap to store the install ID.

    :param scout: Scout instance that's calling the plugin
    :param app: Name of the application that's using Scout
    :param map_name: Optional ConfigMap name to use; defaults to "scout.config.$app"
    :param namespace: Optional Kubernetes namespace to use; defaults to "default"
    :return: dict with "install_id" (plus "new_install": True when freshly
             created), or None when not running in Kubernetes or when the
             ID could neither be read nor saved

    This plugin assumes that the KUBERNETES_SERVICE_{HOST,PORT,PORT_HTTPS}
    environment variables are set correctly, and it assumes the default Kubernetes
    namespace unless the 'namespace' keyword argument is used to select a different
    namespace.

    If KUBERNETES_ACCESS_TOKEN is set in the environment, use that for the apiserver
    access token -- otherwise, the plugin assumes that it's running in a Kubernetes
    pod and tries to read its token from /var/run/secrets.
    """
    plugin_response = None

    if not map_name:
        map_name = "scout.config.{0}".format(app)

    kube_host = os.environ.get('KUBERNETES_SERVICE_HOST', None)

    try:
        kube_port = int(os.environ.get('KUBERNETES_SERVICE_PORT', 443))
    except ValueError:
        scout.logger.debug("Scout: KUBERNETES_SERVICE_PORT isn't numeric, defaulting to 443")
        kube_port = 443

    kube_proto = "https" if (kube_port == 443) else "http"
    kube_token = os.environ.get('KUBERNETES_ACCESS_TOKEN', None)

    if not kube_host:
        # We're not running in Kubernetes. Fall back to the usual filesystem stuff.
        scout.logger.debug("Scout: no KUBERNETES_SERVICE_HOST, not running in Kubernetes")
        return None

    if not kube_token:
        try:
            # Context manager guarantees the token file is closed (the
            # previous open(...).read() leaked the file handle).
            with open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r") as token_file:
                kube_token = token_file.read()
        except OSError:
            pass

    if not kube_token:
        # We're not running in Kubernetes. Fall back to the usual filesystem stuff.
        scout.logger.debug("Scout: not running in Kubernetes")
        return None

    # OK, we're in a cluster. Load our map.
    base_url = "%s://%s:%s" % (kube_proto, kube_host, kube_port)
    url_path = "api/v1/namespaces/%s/configmaps" % namespace
    auth_headers = { "Authorization": "Bearer " + kube_token }

    install_id = None

    cm_url = "%s/%s" % (base_url, url_path)
    fetch_url = "%s/%s" % (cm_url, map_name)
    scout.logger.debug("Scout: trying %s" % fetch_url)

    try:
        # NOTE(review): verify=False disables TLS verification against the
        # apiserver; the in-cluster CA bundle would be safer -- confirm intent.
        r = requests.get(fetch_url, headers=auth_headers, verify=False)
        if r.status_code == 200:
            # OK, the map is present. What do we see?
            map_data = r.json()

            if "data" not in map_data:
                # This is "impossible".
                scout.logger.error("Scout: no map data in returned map???")
            else:
                map_data = map_data.get("data", {})
                scout.logger.debug("Scout: configmap has map data %s" % json.dumps(map_data))
                install_id = map_data.get("install_id", None)

                if install_id:
                    scout.logger.debug("Scout: got install_id %s from map" % install_id)
                    plugin_response = { "install_id": install_id }
    except OSError as e:
        # NOTE(review): requests raises RequestException (not OSError) on most
        # network failures, so this handler may rarely fire -- verify intent.
        scout.logger.debug("Scout: could not read configmap (map %s, namespace %s): %s" %
                           (map_name, namespace, e))

    if not install_id:
        # No extant install_id. Try to create a new one.
        install_id = str(uuid4())

        cm = {
            "apiVersion":"v1",
            "kind":"ConfigMap",
            "metadata":{
                "name": map_name,
                "namespace": namespace,
            },
            "data": {
                "install_id": install_id
            }
        }

        scout.logger.debug("Scout: saving new install_id %s" % install_id)

        try:
            r = requests.post(cm_url, headers=auth_headers, verify=False, json=cm)

            if r.status_code == 201:
                scout.logger.debug("Scout: saved install_id %s" % install_id)
                plugin_response = {
                    "install_id": install_id,
                    "new_install": True
                }
            else:
                scout.logger.error("Scout: could not save install_id: {0}, {1}".format(r.status_code, r.text))
        except OSError as e:
            # Use the scout logger like the rest of this function (this line
            # previously went to the root logger via logging.debug).
            scout.logger.debug("Scout: could not write configmap (map %s, namespace %s): %s" %
                               (map_name, namespace, e))

    scout.logger.debug("Scout: plugin_response %s" % json.dumps(plugin_response))
    return plugin_response
def core_periphery_dir(W, gamma=1, C0=None, seed=None):
'''
The optimal core/periphery subdivision is a partition of the network
into two nonoverlapping groups of nodes, a core group and a periphery
group. The number of core-group edges is maximized, and the number of
within periphery edges is minimized.
The core-ness is a statistic which quantifies the goodness of the
optimal core/periphery subdivision (with arbitrary relative value).
The algorithm uses a variation of the Kernighan-Lin graph partitioning
algorithm to optimize a core-structure objective described in
Borgatti & Everett (2000) Soc Networks 21:375-395
See Rubinov, Ypma et al. (2015) PNAS 112:10032-7
Parameters
----------
W : NxN np.ndarray
directed connection matrix
gamma : core-ness resolution parameter
Default value = 1
gamma > 1 detects small core, large periphery
0 < gamma < 1 detects large core, small periphery
C0 : NxN np.ndarray
Initial core structure
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
'''
rng = get_rng(seed)
n = len(W)
np.fill_diagonal(W, 0)
if C0 == None:
C = rng.randint(2, size=(n,))
else:
C = C0.copy()
#methodological note, the core-detection null model is not corrected
#for degree cf community detection (to enable detection of hubs)
s = np.sum(W)
p = np.mean(W)
b = W - gamma * p
B = (b + b.T) / (2 * s)
cix, = np.where(C)
ncix, = np.where(np.logical_not(C))
q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
#sqish
flag = True
it = 0
while flag:
it += 1
if it > 100:
raise BCTParamError('Infinite Loop aborted')
flag = False
#initial node indices
ixes = np.arange(n)
Ct = C.copy()
while len(ixes) > 0:
Qt = np.zeros((n,))
ctix, = np.where(Ct)
nctix, = np.where(np.logical_not(Ct))
q0 = (np.sum(B[np.ix_(ctix, ctix)]) -
np.sum(B[np.ix_(nctix, nctix)]))
Qt[ctix] = q0 - 2 * np.sum(B[ctix, :], axis=1)
Qt[nctix] = q0 + 2 * np.sum(B[nctix, :], axis=1)
max_Qt = np.max(Qt[ixes])
u, = np.where(np.abs(Qt[ixes]-max_Qt) < 1e-10)
#tunourn
u = u[rng.randint(len(u))]
Ct[ixes[u]] = np.logical_not(Ct[ixes[u]])
#casga
ixes = np.delete(ixes, u)
if max_Qt - q > 1e-10:
flag = True
C = Ct.copy()
cix, = np.where(C)
ncix, = np.where(np.logical_not(C))
q = (np.sum(B[np.ix_(cix, cix)]) -
np.sum(B[np.ix_(ncix, ncix)]))
cix, = np.where(C)
ncix, = np.where(np.logical_not(C))
q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
return C, q | The optimal core/periphery subdivision is a partition of the network
into two nonoverlapping groups of nodes, a core group and a periphery
group. The number of core-group edges is maximized, and the number of
within periphery edges is minimized.
The core-ness is a statistic which quantifies the goodness of the
optimal core/periphery subdivision (with arbitrary relative value).
The algorithm uses a variation of the Kernighan-Lin graph partitioning
algorithm to optimize a core-structure objective described in
Borgatti & Everett (2000) Soc Networks 21:375-395
See Rubinov, Ypma et al. (2015) PNAS 112:10032-7
Parameters
----------
W : NxN np.ndarray
directed connection matrix
gamma : core-ness resolution parameter
Default value = 1
gamma > 1 detects small core, large periphery
0 < gamma < 1 detects large core, small periphery
C0 : NxN np.ndarray
Initial core structure
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value. | Below is the instruction that describes the task:
### Input:
The optimal core/periphery subdivision is a partition of the network
into two nonoverlapping groups of nodes, a core group and a periphery
group. The number of core-group edges is maximized, and the number of
within periphery edges is minimized.
The core-ness is a statistic which quantifies the goodness of the
optimal core/periphery subdivision (with arbitrary relative value).
The algorithm uses a variation of the Kernighan-Lin graph partitioning
algorithm to optimize a core-structure objective described in
Borgatti & Everett (2000) Soc Networks 21:375-395
See Rubinov, Ypma et al. (2015) PNAS 112:10032-7
Parameters
----------
W : NxN np.ndarray
directed connection matrix
gamma : core-ness resolution parameter
Default value = 1
gamma > 1 detects small core, large periphery
0 < gamma < 1 detects large core, small periphery
C0 : NxN np.ndarray
Initial core structure
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
### Response:
def core_periphery_dir(W, gamma=1, C0=None, seed=None):
    '''
    The optimal core/periphery subdivision is a partition of the network
    into two nonoverlapping groups of nodes, a core group and a periphery
    group. The number of core-group edges is maximized, and the number of
    within periphery edges is minimized.

    The core-ness is a statistic which quantifies the goodness of the
    optimal core/periphery subdivision (with arbitrary relative value).

    The algorithm uses a variation of the Kernighan-Lin graph partitioning
    algorithm to optimize a core-structure objective described in
    Borgatti & Everett (2000) Soc Networks 21:375-395

    See Rubinov, Ypma et al. (2015) PNAS 112:10032-7

    Parameters
    ----------
    W : NxN np.ndarray
        directed connection matrix
    gamma : core-ness resolution parameter
        Default value = 1
        gamma > 1 detects small core, large periphery
        0 < gamma < 1 detects large core, small periphery
    C0 : NxN np.ndarray
        Initial core structure
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.

    Returns
    -------
    C : Nx1 np.ndarray
        binary vector marking the nodes assigned to the core
    q : float
        maximized core-ness statistic

    Notes
    -----
    NOTE: W is modified in place (its diagonal is zeroed).
    '''
    rng = get_rng(seed)
    n = len(W)
    # Self-connections must not contribute to the core-ness objective.
    np.fill_diagonal(W, 0)

    # Bug fix: `C0 == None` compares elementwise when C0 is an ndarray,
    # making the `if` raise "truth value is ambiguous"; identity test is
    # the correct (and PEP 8) way to check for the missing argument.
    if C0 is None:
        C = rng.randint(2, size=(n,))
    else:
        C = C0.copy()

    #methodological note, the core-detection null model is not corrected
    #for degree cf community detection (to enable detection of hubs)

    s = np.sum(W)
    p = np.mean(W)
    b = W - gamma * p
    B = (b + b.T) / (2 * s)
    cix, = np.where(C)
    ncix, = np.where(np.logical_not(C))
    q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])

    #sqish
    flag = True
    it = 0
    while flag:
        it += 1
        if it > 100:
            raise BCTParamError('Infinite Loop aborted')

        flag = False
        #initial node indices
        ixes = np.arange(n)

        Ct = C.copy()
        while len(ixes) > 0:
            Qt = np.zeros((n,))
            ctix, = np.where(Ct)
            nctix, = np.where(np.logical_not(Ct))
            q0 = (np.sum(B[np.ix_(ctix, ctix)]) -
                  np.sum(B[np.ix_(nctix, nctix)]))
            # Score for flipping each node between core and periphery.
            Qt[ctix] = q0 - 2 * np.sum(B[ctix, :], axis=1)
            Qt[nctix] = q0 + 2 * np.sum(B[nctix, :], axis=1)

            max_Qt = np.max(Qt[ixes])
            # Break ties between equally-good moves at random.
            u, = np.where(np.abs(Qt[ixes]-max_Qt) < 1e-10)
            #tunourn
            u = u[rng.randint(len(u))]
            Ct[ixes[u]] = np.logical_not(Ct[ixes[u]])
            #casga
            ixes = np.delete(ixes, u)

            if max_Qt - q > 1e-10:
                flag = True
                C = Ct.copy()
                cix, = np.where(C)
                ncix, = np.where(np.logical_not(C))
                q = (np.sum(B[np.ix_(cix, cix)]) -
                     np.sum(B[np.ix_(ncix, ncix)]))

    cix, = np.where(C)
    ncix, = np.where(np.logical_not(C))
    q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
    return C, q
def _get_numeric_status(self, key):
"""Extract the numeric value from the statuses object."""
value = self._get_status(key)
if value and any(i.isdigit() for i in value):
return float(re.sub("[^0-9.]", "", value))
    return None | Extract the numeric value from the statuses object. | Below is the instruction that describes the task:
### Input:
Extract the numeric value from the statuses object.
### Response:
def _get_numeric_status(self, key):
"""Extract the numeric value from the statuses object."""
value = self._get_status(key)
if value and any(i.isdigit() for i in value):
return float(re.sub("[^0-9.]", "", value))
return None |
def _save_and_log_checkpoint(self, actor):
"""Save an actor checkpoint if necessary and log any errors.
Args:
actor: The actor to checkpoint.
Returns:
The result of the actor's user-defined `save_checkpoint` method.
"""
actor_id = self._worker.actor_id
checkpoint_info = self._worker.actor_checkpoint_info[actor_id]
checkpoint_info.num_tasks_since_last_checkpoint += 1
now = int(1000 * time.time())
checkpoint_context = ray.actor.CheckpointContext(
actor_id, checkpoint_info.num_tasks_since_last_checkpoint,
now - checkpoint_info.last_checkpoint_timestamp)
# If we should take a checkpoint, notify raylet to prepare a checkpoint
# and then call `save_checkpoint`.
if actor.should_checkpoint(checkpoint_context):
try:
now = int(1000 * time.time())
checkpoint_id = (self._worker.raylet_client.
prepare_actor_checkpoint(actor_id))
checkpoint_info.checkpoint_ids.append(checkpoint_id)
actor.save_checkpoint(actor_id, checkpoint_id)
if (len(checkpoint_info.checkpoint_ids) >
ray._config.num_actor_checkpoints_to_keep()):
actor.checkpoint_expired(
actor_id,
checkpoint_info.checkpoint_ids.pop(0),
)
checkpoint_info.num_tasks_since_last_checkpoint = 0
checkpoint_info.last_checkpoint_timestamp = now
except Exception:
# Checkpoint save or reload failed. Notify the driver.
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
ray.utils.push_error_to_driver(
self._worker,
ray_constants.CHECKPOINT_PUSH_ERROR,
traceback_str,
driver_id=self._worker.task_driver_id) | Save an actor checkpoint if necessary and log any errors.
Args:
actor: The actor to checkpoint.
Returns:
The result of the actor's user-defined `save_checkpoint` method. | Below is the the instruction that describes the task:
### Input:
Save an actor checkpoint if necessary and log any errors.
Args:
actor: The actor to checkpoint.
Returns:
The result of the actor's user-defined `save_checkpoint` method.
### Response:
def _save_and_log_checkpoint(self, actor):
"""Save an actor checkpoint if necessary and log any errors.
Args:
actor: The actor to checkpoint.
Returns:
The result of the actor's user-defined `save_checkpoint` method.
"""
actor_id = self._worker.actor_id
checkpoint_info = self._worker.actor_checkpoint_info[actor_id]
checkpoint_info.num_tasks_since_last_checkpoint += 1
now = int(1000 * time.time())
checkpoint_context = ray.actor.CheckpointContext(
actor_id, checkpoint_info.num_tasks_since_last_checkpoint,
now - checkpoint_info.last_checkpoint_timestamp)
# If we should take a checkpoint, notify raylet to prepare a checkpoint
# and then call `save_checkpoint`.
if actor.should_checkpoint(checkpoint_context):
try:
now = int(1000 * time.time())
checkpoint_id = (self._worker.raylet_client.
prepare_actor_checkpoint(actor_id))
checkpoint_info.checkpoint_ids.append(checkpoint_id)
actor.save_checkpoint(actor_id, checkpoint_id)
if (len(checkpoint_info.checkpoint_ids) >
ray._config.num_actor_checkpoints_to_keep()):
actor.checkpoint_expired(
actor_id,
checkpoint_info.checkpoint_ids.pop(0),
)
checkpoint_info.num_tasks_since_last_checkpoint = 0
checkpoint_info.last_checkpoint_timestamp = now
except Exception:
# Checkpoint save or reload failed. Notify the driver.
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
ray.utils.push_error_to_driver(
self._worker,
ray_constants.CHECKPOINT_PUSH_ERROR,
traceback_str,
driver_id=self._worker.task_driver_id) |
def obbTree(self):
"""obbTree is an object to generate oriented bounding box (OBB)
trees. An oriented bounding box is a bounding box that does not
necessarily line up along coordinate axes. The OBB tree is a
hierarchical tree structure of such boxes, where deeper levels of OBB
confine smaller regions of space.
"""
if not hasattr(self, '_obbTree'):
self._obbTree = vtk.vtkOBBTree()
self._obbTree.SetDataSet(self)
self._obbTree.BuildLocator()
return self._obbTree | obbTree is an object to generate oriented bounding box (OBB)
trees. An oriented bounding box is a bounding box that does not
necessarily line up along coordinate axes. The OBB tree is a
hierarchical tree structure of such boxes, where deeper levels of OBB
confine smaller regions of space. | Below is the the instruction that describes the task:
### Input:
obbTree is an object to generate oriented bounding box (OBB)
trees. An oriented bounding box is a bounding box that does not
necessarily line up along coordinate axes. The OBB tree is a
hierarchical tree structure of such boxes, where deeper levels of OBB
confine smaller regions of space.
### Response:
def obbTree(self):
"""obbTree is an object to generate oriented bounding box (OBB)
trees. An oriented bounding box is a bounding box that does not
necessarily line up along coordinate axes. The OBB tree is a
hierarchical tree structure of such boxes, where deeper levels of OBB
confine smaller regions of space.
"""
if not hasattr(self, '_obbTree'):
self._obbTree = vtk.vtkOBBTree()
self._obbTree.SetDataSet(self)
self._obbTree.BuildLocator()
return self._obbTree |
def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:
"""Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
"""
jsonl_flag, json_flag, yaml_flag = False, False, False
if fn == "-" or "jsonl" in fn:
jsonl_flag = True
elif "json" in fn:
json_flag = True
elif re.search("ya?ml", fn):
yaml_flag = True
else:
log.error("Do not recognize nanopub file format - neither json nor jsonl format.")
return {}
try:
if re.search("gz$", fn):
f = gzip.open(fn, "rt")
else:
try:
f = click.open_file(fn, mode="rt")
except Exception as e:
log.info(f"Can not open file {fn} Error: {e}")
quit()
if jsonl_flag:
for line in f:
yield json.loads(line)
elif json_flag:
nanopubs = json.load(f)
for nanopub in nanopubs:
yield nanopub
elif yaml_flag:
nanopubs = yaml.load(f, Loader=yaml.SafeLoader)
for nanopub in nanopubs:
yield nanopub
except Exception as e:
log.error(f"Could not open file: {fn}") | Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format | Below is the the instruction that describes the task:
### Input:
Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
### Response:
def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:
"""Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
"""
jsonl_flag, json_flag, yaml_flag = False, False, False
if fn == "-" or "jsonl" in fn:
jsonl_flag = True
elif "json" in fn:
json_flag = True
elif re.search("ya?ml", fn):
yaml_flag = True
else:
log.error("Do not recognize nanopub file format - neither json nor jsonl format.")
return {}
try:
if re.search("gz$", fn):
f = gzip.open(fn, "rt")
else:
try:
f = click.open_file(fn, mode="rt")
except Exception as e:
log.info(f"Can not open file {fn} Error: {e}")
quit()
if jsonl_flag:
for line in f:
yield json.loads(line)
elif json_flag:
nanopubs = json.load(f)
for nanopub in nanopubs:
yield nanopub
elif yaml_flag:
nanopubs = yaml.load(f, Loader=yaml.SafeLoader)
for nanopub in nanopubs:
yield nanopub
except Exception as e:
log.error(f"Could not open file: {fn}") |
def numpy_binning(data, bins=10, range=None, *args, **kwargs) -> NumpyBinning:
"""Construct binning schema compatible with numpy.histogram
Parameters
----------
data: array_like, optional
This is optional if both bins and range are set
bins: int or array_like
range: Optional[tuple]
(min, max)
includes_right_edge: Optional[bool]
default: True
See Also
--------
numpy.histogram
"""
if isinstance(bins, int):
if range:
bins = np.linspace(range[0], range[1], bins + 1)
else:
start = data.min()
stop = data.max()
bins = np.linspace(start, stop, bins + 1)
elif np.iterable(bins):
bins = np.asarray(bins)
else:
# Some numpy edge case
_, bins = np.histogram(data, bins, **kwargs)
return NumpyBinning(bins) | Construct binning schema compatible with numpy.histogram
Parameters
----------
data: array_like, optional
This is optional if both bins and range are set
bins: int or array_like
range: Optional[tuple]
(min, max)
includes_right_edge: Optional[bool]
default: True
See Also
--------
numpy.histogram | Below is the the instruction that describes the task:
### Input:
Construct binning schema compatible with numpy.histogram
Parameters
----------
data: array_like, optional
This is optional if both bins and range are set
bins: int or array_like
range: Optional[tuple]
(min, max)
includes_right_edge: Optional[bool]
default: True
See Also
--------
numpy.histogram
### Response:
def numpy_binning(data, bins=10, range=None, *args, **kwargs) -> NumpyBinning:
"""Construct binning schema compatible with numpy.histogram
Parameters
----------
data: array_like, optional
This is optional if both bins and range are set
bins: int or array_like
range: Optional[tuple]
(min, max)
includes_right_edge: Optional[bool]
default: True
See Also
--------
numpy.histogram
"""
if isinstance(bins, int):
if range:
bins = np.linspace(range[0], range[1], bins + 1)
else:
start = data.min()
stop = data.max()
bins = np.linspace(start, stop, bins + 1)
elif np.iterable(bins):
bins = np.asarray(bins)
else:
# Some numpy edge case
_, bins = np.histogram(data, bins, **kwargs)
return NumpyBinning(bins) |
def _hsig_input(self, index):
'''
inputs for the hsig hash
'''
hsig_input = z.ZcashByteData()
hsig_input += self.tx_joinsplits[index].random_seed
hsig_input += self.tx_joinsplits[index].nullifiers
hsig_input += self.joinsplit_pubkey
return hsig_input.to_bytes() | inputs for the hsig hash | Below is the the instruction that describes the task:
### Input:
inputs for the hsig hash
### Response:
def _hsig_input(self, index):
'''
inputs for the hsig hash
'''
hsig_input = z.ZcashByteData()
hsig_input += self.tx_joinsplits[index].random_seed
hsig_input += self.tx_joinsplits[index].nullifiers
hsig_input += self.joinsplit_pubkey
return hsig_input.to_bytes() |
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None | Cleanup an event loop and close it down forever. | Below is the the instruction that describes the task:
### Input:
Cleanup an event loop and close it down forever.
### Response:
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None |
def option(self, name, description=None, action=None, resolve=None):
"""
Add or get option.
Here are some examples::
command.option('-v, --verbose', 'show more log')
command.option('--tag <tag>', 'tag of the package')
command.option('-s, --source <source>', 'the source repo')
:param name: arguments of the option
:param description: description of the option
:param action: a function to be invoked
:param resolve: a function to resolve the data
"""
if isinstance(name, Option):
option = name
else:
name = name.strip()
option = Option(
name=name, description=description,
action=action, resolve=resolve,
)
self._option_list.append(option)
return self | Add or get option.
Here are some examples::
command.option('-v, --verbose', 'show more log')
command.option('--tag <tag>', 'tag of the package')
command.option('-s, --source <source>', 'the source repo')
:param name: arguments of the option
:param description: description of the option
:param action: a function to be invoked
:param resolve: a function to resolve the data | Below is the the instruction that describes the task:
### Input:
Add or get option.
Here are some examples::
command.option('-v, --verbose', 'show more log')
command.option('--tag <tag>', 'tag of the package')
command.option('-s, --source <source>', 'the source repo')
:param name: arguments of the option
:param description: description of the option
:param action: a function to be invoked
:param resolve: a function to resolve the data
### Response:
def option(self, name, description=None, action=None, resolve=None):
"""
Add or get option.
Here are some examples::
command.option('-v, --verbose', 'show more log')
command.option('--tag <tag>', 'tag of the package')
command.option('-s, --source <source>', 'the source repo')
:param name: arguments of the option
:param description: description of the option
:param action: a function to be invoked
:param resolve: a function to resolve the data
"""
if isinstance(name, Option):
option = name
else:
name = name.strip()
option = Option(
name=name, description=description,
action=action, resolve=resolve,
)
self._option_list.append(option)
return self |
def get_current_url(request, ignore_params=None):
"""
Giving a django request, return the current http url, possibly ignoring some GET parameters
:param django.http.HttpRequest request: The current request object.
:param set ignore_params: An optional set of GET parameters to ignore
:return: The URL of the current page, possibly omitting some parameters from
``ignore_params`` in the querystring.
:rtype: unicode
"""
if ignore_params is None:
ignore_params = set()
protocol = u'https' if request.is_secure() else u"http"
service_url = u"%s://%s%s" % (protocol, request.get_host(), request.path)
if request.GET:
params = copy_params(request.GET, ignore_params)
if params:
service_url += u"?%s" % urlencode(params)
return service_url | Giving a django request, return the current http url, possibly ignoring some GET parameters
:param django.http.HttpRequest request: The current request object.
:param set ignore_params: An optional set of GET parameters to ignore
:return: The URL of the current page, possibly omitting some parameters from
``ignore_params`` in the querystring.
:rtype: unicode | Below is the the instruction that describes the task:
### Input:
Giving a django request, return the current http url, possibly ignoring some GET parameters
:param django.http.HttpRequest request: The current request object.
:param set ignore_params: An optional set of GET parameters to ignore
:return: The URL of the current page, possibly omitting some parameters from
``ignore_params`` in the querystring.
:rtype: unicode
### Response:
def get_current_url(request, ignore_params=None):
"""
Giving a django request, return the current http url, possibly ignoring some GET parameters
:param django.http.HttpRequest request: The current request object.
:param set ignore_params: An optional set of GET parameters to ignore
:return: The URL of the current page, possibly omitting some parameters from
``ignore_params`` in the querystring.
:rtype: unicode
"""
if ignore_params is None:
ignore_params = set()
protocol = u'https' if request.is_secure() else u"http"
service_url = u"%s://%s%s" % (protocol, request.get_host(), request.path)
if request.GET:
params = copy_params(request.GET, ignore_params)
if params:
service_url += u"?%s" % urlencode(params)
return service_url |
def stage_http_response1(self, conn_id, version, status, reason, headers):
"""Set response http info including headers, status, etc.
conn_id unused here. Used in log"""
# pylint: disable=attribute-defined-outside-init
self._http_response_version = version
self._http_response_status = status
self._http_response_reason = reason
self._http_response_headers = headers | Set response http info including headers, status, etc.
conn_id unused here. Used in log | Below is the the instruction that describes the task:
### Input:
Set response http info including headers, status, etc.
conn_id unused here. Used in log
### Response:
def stage_http_response1(self, conn_id, version, status, reason, headers):
"""Set response http info including headers, status, etc.
conn_id unused here. Used in log"""
# pylint: disable=attribute-defined-outside-init
self._http_response_version = version
self._http_response_status = status
self._http_response_reason = reason
self._http_response_headers = headers |
def corr_dw_v1(self):
"""Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
old = self.sequences.states.fastaccess_old
new = self.sequences.states.fastaccess_new
idx = der.toy[self.idx_sim]
if (con.maxdw[idx] > 0.) and ((old.w-new.w) > con.maxdw[idx]):
new.w = old.w-con.maxdw[idx]
self.interp_v()
flu.qa = flu.qz+(old.v-new.v)/der.seconds | Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963) | Below is the the instruction that describes the task:
### Input:
Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
### Response:
def corr_dw_v1(self):
"""Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
old = self.sequences.states.fastaccess_old
new = self.sequences.states.fastaccess_new
idx = der.toy[self.idx_sim]
if (con.maxdw[idx] > 0.) and ((old.w-new.w) > con.maxdw[idx]):
new.w = old.w-con.maxdw[idx]
self.interp_v()
flu.qa = flu.qz+(old.v-new.v)/der.seconds |
def evaluate(dataset, predictions, output_folder, **kwargs):
    """Dispatch evaluation to the routine matching the dataset's type.

    Args:
        dataset: Dataset object to evaluate against.
        predictions (list[BoxList]): each item holds the prediction
            results for one image.
        output_folder: directory where evaluation files/results are saved.
        **kwargs: extra arguments forwarded to the evaluation routine.

    Returns:
        The evaluation result produced by the type-specific routine.

    Raises:
        NotImplementedError: if no evaluator exists for the dataset type.
    """
    merged_args = dict(
        dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs
    )
    if isinstance(dataset, datasets.COCODataset):
        return coco_evaluation(**merged_args)
    if isinstance(dataset, datasets.PascalVOCDataset):
        return voc_evaluation(**merged_args)
    raise NotImplementedError(
        "Unsupported dataset type {}.".format(dataset.__class__.__name__)
    )
Args:
dataset: Dataset object
predictions(list[BoxList]): each item in the list represents the
prediction results for one image.
output_folder: output folder, to save evaluation files or results.
**kwargs: other args.
Returns:
evaluation result | Below is the the instruction that describes the task:
### Input:
evaluate dataset using different methods based on dataset type.
Args:
dataset: Dataset object
predictions(list[BoxList]): each item in the list represents the
prediction results for one image.
output_folder: output folder, to save evaluation files or results.
**kwargs: other args.
Returns:
evaluation result
### Response:
def evaluate(dataset, predictions, output_folder, **kwargs):
    """Dispatch evaluation to the routine matching the dataset's type.

    Args:
        dataset: Dataset object to evaluate against.
        predictions (list[BoxList]): each item holds the prediction
            results for one image.
        output_folder: directory where evaluation files/results are saved.
        **kwargs: extra arguments forwarded to the evaluation routine.

    Returns:
        The evaluation result produced by the type-specific routine.

    Raises:
        NotImplementedError: if no evaluator exists for the dataset type.
    """
    merged_args = dict(
        dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs
    )
    if isinstance(dataset, datasets.COCODataset):
        return coco_evaluation(**merged_args)
    if isinstance(dataset, datasets.PascalVOCDataset):
        return voc_evaluation(**merged_args)
    raise NotImplementedError(
        "Unsupported dataset type {}.".format(dataset.__class__.__name__)
    )
def create_job(JobType=None, Resources=None, Description=None, AddressId=None, KmsKeyARN=None, RoleARN=None, SnowballCapacityPreference=None, ShippingOption=None, Notification=None, ClusterId=None, SnowballType=None, ForwardingAddressId=None):
    """Create an AWS Snowball job to import or export data between Amazon S3
    and an on-premises data center.

    The AWS account must have the required trust policies and permissions in
    place to create a job for Snowball. When creating a job for a node in a
    cluster, only the ClusterId value needs to be provided; the other job
    attributes are inherited from the cluster.

    See also: AWS API Documentation (Snowball CreateJob).

    :type JobType: string
    :param JobType: The type of job being created: 'IMPORT' | 'EXPORT' | 'LOCAL_USE'.
    :type Resources: dict
    :param Resources: The resources associated with this job.
        S3Resources (list of dict): each entry names a bucket via BucketArn
        and, for export jobs, may carry an optional KeyRange with an
        inclusive BeginMarker and/or EndMarker (UTF-8 binary sorted).
        LambdaResources (list of dict): each entry carries a LambdaArn and
        EventTriggers, a list of EventResourceARN entries naming the local
        S3 resources whose PUT actions trigger the function.
    :type Description: string
    :param Description: Optional description of this job,
        for example 'Important Photos 2016-08-11'.
    :type AddressId: string
    :param AddressId: The ID for the address the Snowball is shipped to.
    :type KmsKeyARN: string
    :param KmsKeyARN: The KMS key ARN to associate with this job
        (created with the CreateKey AWS KMS API action).
    :type RoleARN: string
    :param RoleARN: The IAM role ARN to associate with this job
        (created with the CreateRole AWS IAM API action).
    :type SnowballCapacityPreference: string
    :param SnowballCapacityPreference: 'T50' | 'T80' | 'T100' | 'NoPreference'.
        Selectable only in US regions; elsewhere Snowballs ship with 80 TB.
    :type ShippingOption: string
    :param ShippingOption: 'SECOND_DAY' | 'NEXT_DAY' | 'EXPRESS' | 'STANDARD'.
        Governs how quickly the Snowball moves to its destination in transit;
        available speeds vary by region.
    :type Notification: dict
    :param Notification: Amazon SNS notification settings for this job:
        SnsTopicARN (string), JobStatesToNotify (list of job-state strings)
        and NotifyAll (boolean).
    :type ClusterId: string
    :param ClusterId: The ID of a cluster. For a cluster-node job only this
        value is required; other attributes are inherited from the cluster.
    :type SnowballType: string
    :param SnowballType: 'STANDARD' | 'EDGE'. Currently the only supported
        appliance type for cluster jobs is EDGE.
    :type ForwardingAddressId: string
    :param ForwardingAddressId: The forwarding address ID for a job.
        This field is not supported in most regions.
    :rtype: dict
    :return: {'JobId': 'string'}
    """
    pass
See also: AWS API Documentation
Examples
Creates a job to import or export data between Amazon S3 and your on-premises data center. Your AWS account must have the right trust policies and permissions in place to create a job for Snowball. If you're creating a job for a node in a cluster, you only need to provide the clusterId value; the other job attributes are inherited from the cluster.
Expected Output:
:example: response = client.create_job(
JobType='IMPORT'|'EXPORT'|'LOCAL_USE',
Resources={
'S3Resources': [
{
'BucketArn': 'string',
'KeyRange': {
'BeginMarker': 'string',
'EndMarker': 'string'
}
},
],
'LambdaResources': [
{
'LambdaArn': 'string',
'EventTriggers': [
{
'EventResourceARN': 'string'
},
]
},
]
},
Description='string',
AddressId='string',
KmsKeyARN='string',
RoleARN='string',
SnowballCapacityPreference='T50'|'T80'|'T100'|'NoPreference',
ShippingOption='SECOND_DAY'|'NEXT_DAY'|'EXPRESS'|'STANDARD',
Notification={
'SnsTopicARN': 'string',
'JobStatesToNotify': [
'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',
],
'NotifyAll': True|False
},
ClusterId='string',
SnowballType='STANDARD'|'EDGE',
ForwardingAddressId='string'
)
:type JobType: string
:param JobType: Defines the type of job that you're creating.
:type Resources: dict
:param Resources: Defines the Amazon S3 buckets associated with this job.
With IMPORT jobs, you specify the bucket or buckets that your transferred data will be imported into.
With EXPORT jobs, you specify the bucket or buckets that your transferred data will be exported from. Optionally, you can also specify a KeyRange value. If you choose to export a range, you define the length of the range by providing either an inclusive BeginMarker value, an inclusive EndMarker value, or both. Ranges are UTF-8 binary sorted.
S3Resources (list) --An array of S3Resource objects.
(dict) --Each S3Resource object represents an Amazon S3 bucket that your transferred data will be exported from or imported into. For export jobs, this object can have an optional KeyRange value. The length of the range is defined at job creation, and has either an inclusive BeginMarker , an inclusive EndMarker , or both. Ranges are UTF-8 binary sorted.
BucketArn (string) --The Amazon Resource Name (ARN) of an Amazon S3 bucket.
KeyRange (dict) --For export jobs, you can provide an optional KeyRange within a specific Amazon S3 bucket. The length of the range is defined at job creation, and has either an inclusive BeginMarker , an inclusive EndMarker , or both. Ranges are UTF-8 binary sorted.
BeginMarker (string) --The key that starts an optional key range for an export job. Ranges are inclusive and UTF-8 binary sorted.
EndMarker (string) --The key that ends an optional key range for an export job. Ranges are inclusive and UTF-8 binary sorted.
LambdaResources (list) --The Python-language Lambda functions for this job.
(dict) --Identifies
LambdaArn (string) --An Amazon Resource Name (ARN) that represents an AWS Lambda function to be triggered by PUT object actions on the associated local Amazon S3 resource.
EventTriggers (list) --The array of ARNs for S3Resource objects to trigger the LambdaResource objects associated with this job.
(dict) --The container for the EventTriggerDefinition$EventResourceARN .
EventResourceARN (string) --The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an AWS Lambda function's event trigger associated with this job.
:type Description: string
:param Description: Defines an optional description of this specific job, for example Important Photos 2016-08-11 .
:type AddressId: string
:param AddressId: The ID for the address that you want the Snowball shipped to.
:type KmsKeyARN: string
:param KmsKeyARN: The KmsKeyARN that you want to associate with this job. KmsKeyARN s are created using the CreateKey AWS Key Management Service (KMS) API action.
:type RoleARN: string
:param RoleARN: The RoleARN that you want to associate with this job. RoleArn s are created using the CreateRole AWS Identity and Access Management (IAM) API action.
:type SnowballCapacityPreference: string
:param SnowballCapacityPreference: If your job is being created in one of the US regions, you have the option of specifying what size Snowball you'd like for this job. In all other regions, Snowballs come with 80 TB in storage capacity.
:type ShippingOption: string
:param ShippingOption: The shipping speed for this job. This speed doesn't dictate how soon you'll get the Snowball, rather it represents how quickly the Snowball moves to its destination while in transit. Regional shipping speeds are as follows:
In Australia, you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day.
In the European Union (EU), you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.
In India, Snowballs are delivered in one to seven days.
In the US, you have access to one-day shipping and two-day shipping.
:type Notification: dict
:param Notification: Defines the Amazon Simple Notification Service (Amazon SNS) notification settings for this job.
SnsTopicARN (string) --The new SNS TopicArn that you want to associate with this job. You can create Amazon Resource Names (ARNs) for topics by using the CreateTopic Amazon SNS API action.
You can subscribe email addresses to an Amazon SNS topic through the AWS Management Console, or by using the Subscribe AWS Simple Notification Service (SNS) API action.
JobStatesToNotify (list) --The list of job states that will trigger a notification for this job.
(string) --
NotifyAll (boolean) --Any change in job state will trigger a notification for this job.
:type ClusterId: string
:param ClusterId: The ID of a cluster. If you're creating a job for a node in a cluster, you need to provide only this clusterId value. The other job attributes are inherited from the cluster.
:type SnowballType: string
:param SnowballType: The type of AWS Snowball appliance to use for this job. Currently, the only supported appliance type for cluster jobs is EDGE .
:type ForwardingAddressId: string
:param ForwardingAddressId: The forwarding address ID for a job. This field is not supported in most regions.
:rtype: dict
:return: {
'JobId': 'string'
} | Below is the the instruction that describes the task:
### Input:
Creates a job to import or export data between Amazon S3 and your on-premises data center. Your AWS account must have the right trust policies and permissions in place to create a job for Snowball. If you're creating a job for a node in a cluster, you only need to provide the clusterId value; the other job attributes are inherited from the cluster.
See also: AWS API Documentation
Examples
Creates a job to import or export data between Amazon S3 and your on-premises data center. Your AWS account must have the right trust policies and permissions in place to create a job for Snowball. If you're creating a job for a node in a cluster, you only need to provide the clusterId value; the other job attributes are inherited from the cluster.
Expected Output:
:example: response = client.create_job(
JobType='IMPORT'|'EXPORT'|'LOCAL_USE',
Resources={
'S3Resources': [
{
'BucketArn': 'string',
'KeyRange': {
'BeginMarker': 'string',
'EndMarker': 'string'
}
},
],
'LambdaResources': [
{
'LambdaArn': 'string',
'EventTriggers': [
{
'EventResourceARN': 'string'
},
]
},
]
},
Description='string',
AddressId='string',
KmsKeyARN='string',
RoleARN='string',
SnowballCapacityPreference='T50'|'T80'|'T100'|'NoPreference',
ShippingOption='SECOND_DAY'|'NEXT_DAY'|'EXPRESS'|'STANDARD',
Notification={
'SnsTopicARN': 'string',
'JobStatesToNotify': [
'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',
],
'NotifyAll': True|False
},
ClusterId='string',
SnowballType='STANDARD'|'EDGE',
ForwardingAddressId='string'
)
:type JobType: string
:param JobType: Defines the type of job that you're creating.
:type Resources: dict
:param Resources: Defines the Amazon S3 buckets associated with this job.
With IMPORT jobs, you specify the bucket or buckets that your transferred data will be imported into.
With EXPORT jobs, you specify the bucket or buckets that your transferred data will be exported from. Optionally, you can also specify a KeyRange value. If you choose to export a range, you define the length of the range by providing either an inclusive BeginMarker value, an inclusive EndMarker value, or both. Ranges are UTF-8 binary sorted.
S3Resources (list) --An array of S3Resource objects.
(dict) --Each S3Resource object represents an Amazon S3 bucket that your transferred data will be exported from or imported into. For export jobs, this object can have an optional KeyRange value. The length of the range is defined at job creation, and has either an inclusive BeginMarker , an inclusive EndMarker , or both. Ranges are UTF-8 binary sorted.
BucketArn (string) --The Amazon Resource Name (ARN) of an Amazon S3 bucket.
KeyRange (dict) --For export jobs, you can provide an optional KeyRange within a specific Amazon S3 bucket. The length of the range is defined at job creation, and has either an inclusive BeginMarker , an inclusive EndMarker , or both. Ranges are UTF-8 binary sorted.
BeginMarker (string) --The key that starts an optional key range for an export job. Ranges are inclusive and UTF-8 binary sorted.
EndMarker (string) --The key that ends an optional key range for an export job. Ranges are inclusive and UTF-8 binary sorted.
LambdaResources (list) --The Python-language Lambda functions for this job.
(dict) --Identifies
LambdaArn (string) --An Amazon Resource Name (ARN) that represents an AWS Lambda function to be triggered by PUT object actions on the associated local Amazon S3 resource.
EventTriggers (list) --The array of ARNs for S3Resource objects to trigger the LambdaResource objects associated with this job.
(dict) --The container for the EventTriggerDefinition$EventResourceARN .
EventResourceARN (string) --The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an AWS Lambda function's event trigger associated with this job.
:type Description: string
:param Description: Defines an optional description of this specific job, for example Important Photos 2016-08-11 .
:type AddressId: string
:param AddressId: The ID for the address that you want the Snowball shipped to.
:type KmsKeyARN: string
:param KmsKeyARN: The KmsKeyARN that you want to associate with this job. KmsKeyARN s are created using the CreateKey AWS Key Management Service (KMS) API action.
:type RoleARN: string
:param RoleARN: The RoleARN that you want to associate with this job. RoleArn s are created using the CreateRole AWS Identity and Access Management (IAM) API action.
:type SnowballCapacityPreference: string
:param SnowballCapacityPreference: If your job is being created in one of the US regions, you have the option of specifying what size Snowball you'd like for this job. In all other regions, Snowballs come with 80 TB in storage capacity.
:type ShippingOption: string
:param ShippingOption: The shipping speed for this job. This speed doesn't dictate how soon you'll get the Snowball, rather it represents how quickly the Snowball moves to its destination while in transit. Regional shipping speeds are as follows:
In Australia, you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day.
In the European Union (EU), you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.
In India, Snowballs are delivered in one to seven days.
In the US, you have access to one-day shipping and two-day shipping.
:type Notification: dict
:param Notification: Defines the Amazon Simple Notification Service (Amazon SNS) notification settings for this job.
SnsTopicARN (string) --The new SNS TopicArn that you want to associate with this job. You can create Amazon Resource Names (ARNs) for topics by using the CreateTopic Amazon SNS API action.
You can subscribe email addresses to an Amazon SNS topic through the AWS Management Console, or by using the Subscribe AWS Simple Notification Service (SNS) API action.
JobStatesToNotify (list) --The list of job states that will trigger a notification for this job.
(string) --
NotifyAll (boolean) --Any change in job state will trigger a notification for this job.
:type ClusterId: string
:param ClusterId: The ID of a cluster. If you're creating a job for a node in a cluster, you need to provide only this clusterId value. The other job attributes are inherited from the cluster.
:type SnowballType: string
:param SnowballType: The type of AWS Snowball appliance to use for this job. Currently, the only supported appliance type for cluster jobs is EDGE .
:type ForwardingAddressId: string
:param ForwardingAddressId: The forwarding address ID for a job. This field is not supported in most regions.
:rtype: dict
:return: {
'JobId': 'string'
}
### Response:
def create_job(JobType=None, Resources=None, Description=None, AddressId=None, KmsKeyARN=None, RoleARN=None, SnowballCapacityPreference=None, ShippingOption=None, Notification=None, ClusterId=None, SnowballType=None, ForwardingAddressId=None):
    """Create an AWS Snowball job to import or export data between Amazon S3
    and an on-premises data center.

    The AWS account must have the required trust policies and permissions in
    place to create a job for Snowball. When creating a job for a node in a
    cluster, only the ClusterId value needs to be provided; the other job
    attributes are inherited from the cluster.

    See also: AWS API Documentation (Snowball CreateJob).

    :type JobType: string
    :param JobType: The type of job being created: 'IMPORT' | 'EXPORT' | 'LOCAL_USE'.
    :type Resources: dict
    :param Resources: The resources associated with this job.
        S3Resources (list of dict): each entry names a bucket via BucketArn
        and, for export jobs, may carry an optional KeyRange with an
        inclusive BeginMarker and/or EndMarker (UTF-8 binary sorted).
        LambdaResources (list of dict): each entry carries a LambdaArn and
        EventTriggers, a list of EventResourceARN entries naming the local
        S3 resources whose PUT actions trigger the function.
    :type Description: string
    :param Description: Optional description of this job,
        for example 'Important Photos 2016-08-11'.
    :type AddressId: string
    :param AddressId: The ID for the address the Snowball is shipped to.
    :type KmsKeyARN: string
    :param KmsKeyARN: The KMS key ARN to associate with this job
        (created with the CreateKey AWS KMS API action).
    :type RoleARN: string
    :param RoleARN: The IAM role ARN to associate with this job
        (created with the CreateRole AWS IAM API action).
    :type SnowballCapacityPreference: string
    :param SnowballCapacityPreference: 'T50' | 'T80' | 'T100' | 'NoPreference'.
        Selectable only in US regions; elsewhere Snowballs ship with 80 TB.
    :type ShippingOption: string
    :param ShippingOption: 'SECOND_DAY' | 'NEXT_DAY' | 'EXPRESS' | 'STANDARD'.
        Governs how quickly the Snowball moves to its destination in transit;
        available speeds vary by region.
    :type Notification: dict
    :param Notification: Amazon SNS notification settings for this job:
        SnsTopicARN (string), JobStatesToNotify (list of job-state strings)
        and NotifyAll (boolean).
    :type ClusterId: string
    :param ClusterId: The ID of a cluster. For a cluster-node job only this
        value is required; other attributes are inherited from the cluster.
    :type SnowballType: string
    :param SnowballType: 'STANDARD' | 'EDGE'. Currently the only supported
        appliance type for cluster jobs is EDGE.
    :type ForwardingAddressId: string
    :param ForwardingAddressId: The forwarding address ID for a job.
        This field is not supported in most regions.
    :rtype: dict
    :return: {'JobId': 'string'}
    """
    pass
def form_adverb_from_adjective(adjective):
    """
    Forms an adverb from the input adjective, f.ex. "happy" => "happily".
    Adverbs are generated using rules from: http://www.edufind.com/english-grammar/forming-adverbs-adjectives/
    :param adjective: adjective
    :return: adverb form of the input adjective
    """
    # If the adjective ends in -able, -ible, or -le, replace the -e with -y,
    # e.g. "comfortable" => "comfortably".
    if adjective.endswith(("able", "ible", "le")):
        return adjective[:-1] + "y"
    # If the adjective ends in -y, replace the y with i and add -ly,
    # e.g. "happy" => "happily".
    elif adjective.endswith("y"):
        return adjective[:-1] + "ily"
    # If the adjective ends in -ic, add -ally, e.g. "basic" => "basically".
    # Bug fix: previously the "-ic" suffix was stripped before appending
    # "-ally" (adjective[:-2] + "ally"), yielding "basally" instead of
    # "basically", contrary to the rule stated in the docstring.
    elif adjective.endswith("ic"):
        return adjective + "ally"
    # In most cases, an adverb is formed by adding -ly to an adjective.
    return adjective + "ly"
Adverbs are generated using rules from: http://www.edufind.com/english-grammar/forming-adverbs-adjectives/
:param adjective: adjective
:return: adverb form of the input adjective | Below is the the instruction that describes the task:
### Input:
Forms an adverb from the input adjective, f.ex. "happy" => "happily".
Adverbs are generated using rules from: http://www.edufind.com/english-grammar/forming-adverbs-adjectives/
:param adjective: adjective
:return: adverb form of the input adjective
### Response:
def form_adverb_from_adjective(adjective):
"""
Forms an adverb from the input adjective, f.ex. "happy" => "happily".
Adverbs are generated using rules from: http://www.edufind.com/english-grammar/forming-adverbs-adjectives/
:param adjective: adjective
:return: adverb form of the input adjective
"""
# If the adjective ends in -able, -ible, or -le, replace the -e with -y
if adjective.endswith("able") or adjective.endswith("ible") or adjective.endswith("le"):
return adjective[:-1] + "y"
# If the adjective ends in -y, replace the y with i and add -ly
elif adjective.endswith("y"):
return adjective[:-1] + "ily"
# If the adjective ends in -ic, add -ally
elif adjective.endswith("ic"):
return adjective[:-2] + "ally"
# In most cases, an adverb is formed by adding -ly to an adjective
return adjective + "ly" |
def tx2genefile(gtf, out_file=None):
"""
write out a file of transcript->gene mappings.
use the installed tx2gene.csv if it exists, else write a new one out
"""
installed_tx2gene = os.path.join(os.path.dirname(gtf), "tx2gene.csv")
if file_exists(installed_tx2gene):
return installed_tx2gene
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for k, v in transcript_to_gene(gtf).items():
out_handle.write(",".join([k, v]) + "\n")
return out_file | write out a file of transcript->gene mappings.
use the installed tx2gene.csv if it exists, else write a new one out | Below is the the instruction that describes the task:
### Input:
write out a file of transcript->gene mappings.
use the installed tx2gene.csv if it exists, else write a new one out
### Response:
def tx2genefile(gtf, out_file=None):
"""
write out a file of transcript->gene mappings.
use the installed tx2gene.csv if it exists, else write a new one out
"""
installed_tx2gene = os.path.join(os.path.dirname(gtf), "tx2gene.csv")
if file_exists(installed_tx2gene):
return installed_tx2gene
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for k, v in transcript_to_gene(gtf).items():
out_handle.write(",".join([k, v]) + "\n")
return out_file |
def GetFileObject(self, data_stream_name=''):
"""Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FakeFileIO: a file-like object or None if not available.
Raises:
IOError: if the file entry is not a file.
OSError: if the file entry is not a file.
"""
if not self.IsFile():
raise IOError('Cannot open non-file.')
if data_stream_name:
return None
location = getattr(self.path_spec, 'location', None)
if location is None:
return None
file_data = self._file_system.GetDataByPath(location)
file_object = fake_file_io.FakeFile(self._resolver_context, file_data)
file_object.open(path_spec=self.path_spec)
return file_object | Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FakeFileIO: a file-like object or None if not available.
Raises:
IOError: if the file entry is not a file.
OSError: if the file entry is not a file. | Below is the the instruction that describes the task:
### Input:
Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FakeFileIO: a file-like object or None if not available.
Raises:
IOError: if the file entry is not a file.
OSError: if the file entry is not a file.
### Response:
def GetFileObject(self, data_stream_name=''):
"""Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FakeFileIO: a file-like object or None if not available.
Raises:
IOError: if the file entry is not a file.
OSError: if the file entry is not a file.
"""
if not self.IsFile():
raise IOError('Cannot open non-file.')
if data_stream_name:
return None
location = getattr(self.path_spec, 'location', None)
if location is None:
return None
file_data = self._file_system.GetDataByPath(location)
file_object = fake_file_io.FakeFile(self._resolver_context, file_data)
file_object.open(path_spec=self.path_spec)
return file_object |
def mux_pilot_blocks(IQ_data, Np):
"""
Parameters
----------
IQ_data : a 2D array of input QAM symbols with the columns
representing the NF carrier frequencies and each
row the QAM symbols used to form an OFDM symbol
Np : the period of the pilot blocks; e.g., a pilot block is
inserted every Np OFDM symbols (Np-1 OFDM data symbols
of width Nf are inserted in between the pilot blocks.
Returns
-------
IQ_datap : IQ_data with pilot blocks inserted
See Also
--------
OFDM_tx
Notes
-----
A helper function called by :func:`OFDM_tx` that inserts pilot block for use
in channel estimation when a delay spread channel is present.
"""
N_OFDM = IQ_data.shape[0]
Npb = N_OFDM // (Np - 1)
N_OFDM_rem = N_OFDM - Npb * (Np - 1)
Nf = IQ_data.shape[1]
IQ_datap = np.zeros((N_OFDM + Npb + 1, Nf), dtype=np.complex128)
pilots = np.ones(Nf) # The pilot symbol is simply 1 + j0
for k in range(Npb):
IQ_datap[Np * k:Np * (k + 1), :] = np.vstack((pilots,
IQ_data[(Np - 1) * k:(Np - 1) * (k + 1), :]))
IQ_datap[Np * Npb:Np * (Npb + N_OFDM_rem), :] = np.vstack((pilots,
IQ_data[(Np - 1) * Npb:, :]))
return IQ_datap | Parameters
----------
IQ_data : a 2D array of input QAM symbols with the columns
representing the NF carrier frequencies and each
row the QAM symbols used to form an OFDM symbol
Np : the period of the pilot blocks; e.g., a pilot block is
inserted every Np OFDM symbols (Np-1 OFDM data symbols
of width Nf are inserted in between the pilot blocks.
Returns
-------
IQ_datap : IQ_data with pilot blocks inserted
See Also
--------
OFDM_tx
Notes
-----
A helper function called by :func:`OFDM_tx` that inserts pilot block for use
in channel estimation when a delay spread channel is present. | Below is the the instruction that describes the task:
### Input:
Parameters
----------
IQ_data : a 2D array of input QAM symbols with the columns
representing the NF carrier frequencies and each
row the QAM symbols used to form an OFDM symbol
Np : the period of the pilot blocks; e.g., a pilot block is
inserted every Np OFDM symbols (Np-1 OFDM data symbols
of width Nf are inserted in between the pilot blocks.
Returns
-------
IQ_datap : IQ_data with pilot blocks inserted
See Also
--------
OFDM_tx
Notes
-----
A helper function called by :func:`OFDM_tx` that inserts pilot block for use
in channel estimation when a delay spread channel is present.
### Response:
def mux_pilot_blocks(IQ_data, Np):
"""
Parameters
----------
IQ_data : a 2D array of input QAM symbols with the columns
representing the NF carrier frequencies and each
row the QAM symbols used to form an OFDM symbol
Np : the period of the pilot blocks; e.g., a pilot block is
inserted every Np OFDM symbols (Np-1 OFDM data symbols
of width Nf are inserted in between the pilot blocks.
Returns
-------
IQ_datap : IQ_data with pilot blocks inserted
See Also
--------
OFDM_tx
Notes
-----
A helper function called by :func:`OFDM_tx` that inserts pilot block for use
in channel estimation when a delay spread channel is present.
"""
N_OFDM = IQ_data.shape[0]
Npb = N_OFDM // (Np - 1)
N_OFDM_rem = N_OFDM - Npb * (Np - 1)
Nf = IQ_data.shape[1]
IQ_datap = np.zeros((N_OFDM + Npb + 1, Nf), dtype=np.complex128)
pilots = np.ones(Nf) # The pilot symbol is simply 1 + j0
for k in range(Npb):
IQ_datap[Np * k:Np * (k + 1), :] = np.vstack((pilots,
IQ_data[(Np - 1) * k:(Np - 1) * (k + 1), :]))
IQ_datap[Np * Npb:Np * (Npb + N_OFDM_rem), :] = np.vstack((pilots,
IQ_data[(Np - 1) * Npb:, :]))
return IQ_datap |
def long_press(self, locator, duration=1000):
""" Long press the element with optional duration """
driver = self._current_application()
element = self._element_find(locator, True, True)
action = TouchAction(driver)
action.press(element).wait(duration).release().perform() | Long press the element with optional duration | Below is the the instruction that describes the task:
### Input:
Long press the element with optional duration
### Response:
def long_press(self, locator, duration=1000):
""" Long press the element with optional duration """
driver = self._current_application()
element = self._element_find(locator, True, True)
action = TouchAction(driver)
action.press(element).wait(duration).release().perform() |
def select_by_mtime(self, min_time=0, max_time=ts_2100,
recursive=True):
"""
Select file path by modify time.
:param min_time: lower bound timestamp
:param max_time: upper bound timestamp
**中文文档**
选择所有 :attr:`pathlib_mate.pathlib2.Path.mtime` 在一定范围内的文件。
"""
def filters(p): return min_time <= p.mtime <= max_time
return self.select_file(filters, recursive) | Select file path by modify time.
:param min_time: lower bound timestamp
:param max_time: upper bound timestamp
**中文文档**
选择所有 :attr:`pathlib_mate.pathlib2.Path.mtime` 在一定范围内的文件。 | Below is the the instruction that describes the task:
### Input:
Select file path by modify time.
:param min_time: lower bound timestamp
:param max_time: upper bound timestamp
**中文文档**
选择所有 :attr:`pathlib_mate.pathlib2.Path.mtime` 在一定范围内的文件。
### Response:
def select_by_mtime(self, min_time=0, max_time=ts_2100,
recursive=True):
"""
Select file path by modify time.
:param min_time: lower bound timestamp
:param max_time: upper bound timestamp
**中文文档**
选择所有 :attr:`pathlib_mate.pathlib2.Path.mtime` 在一定范围内的文件。
"""
def filters(p): return min_time <= p.mtime <= max_time
return self.select_file(filters, recursive) |
def getexcfo(e):
'''
Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)):
'''
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
path, line, name, src = '', '', '', None
if tbinfo:
path, line, name, sorc = tbinfo[-1]
retd = {
'msg': str(e),
'file': path,
'line': line,
'name': name,
'src': src
}
if isinstance(e, s_exc.SynErr):
retd['syn:err'] = e.errinfo
return (e.__class__.__name__, retd) | Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)): | Below is the the instruction that describes the task:
### Input:
Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)):
### Response:
def getexcfo(e):
'''
Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)):
'''
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
path, line, name, src = '', '', '', None
if tbinfo:
path, line, name, sorc = tbinfo[-1]
retd = {
'msg': str(e),
'file': path,
'line': line,
'name': name,
'src': src
}
if isinstance(e, s_exc.SynErr):
retd['syn:err'] = e.errinfo
return (e.__class__.__name__, retd) |
def from_file(filename,
output_path,
options=None,
toc=None,
cover=None,
css=None,
config=None,
cover_first=None):
"""
Convert HTML file/files to IMG file/files
:param filename: path of HTML file or list with paths or file-like object
:param output_path: path to output PDF file/files. False means file will be returned as string
:param options: (optional) dict with wkhtmltopdf global and page options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
:param css: style of input
:param config: (optional) instance of imgkit.config.Config()
:param cover_first: (optional) if True, cover always precedes TOC
:return: True when success
"""
rtn = IMGKit(filename,
'file',
options=options,
toc=toc,
cover=cover,
css=css,
config=config,
cover_first=cover_first)
return rtn.to_img(output_path) | Convert HTML file/files to IMG file/files
:param filename: path of HTML file or list with paths or file-like object
:param output_path: path to output PDF file/files. False means file will be returned as string
:param options: (optional) dict with wkhtmltopdf global and page options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
:param css: style of input
:param config: (optional) instance of imgkit.config.Config()
:param cover_first: (optional) if True, cover always precedes TOC
:return: True when success | Below is the the instruction that describes the task:
### Input:
Convert HTML file/files to IMG file/files
:param filename: path of HTML file or list with paths or file-like object
:param output_path: path to output PDF file/files. False means file will be returned as string
:param options: (optional) dict with wkhtmltopdf global and page options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
:param css: style of input
:param config: (optional) instance of imgkit.config.Config()
:param cover_first: (optional) if True, cover always precedes TOC
:return: True when success
### Response:
def from_file(filename,
output_path,
options=None,
toc=None,
cover=None,
css=None,
config=None,
cover_first=None):
"""
Convert HTML file/files to IMG file/files
:param filename: path of HTML file or list with paths or file-like object
:param output_path: path to output PDF file/files. False means file will be returned as string
:param options: (optional) dict with wkhtmltopdf global and page options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
:param css: style of input
:param config: (optional) instance of imgkit.config.Config()
:param cover_first: (optional) if True, cover always precedes TOC
:return: True when success
"""
rtn = IMGKit(filename,
'file',
options=options,
toc=toc,
cover=cover,
css=css,
config=config,
cover_first=cover_first)
return rtn.to_img(output_path) |
def send_message(self, id: str, message: str) -> Dict[str, Any]:
"""Send a message to a channel
For formatting options, see the documentation:
https://discordapp.com/developers/docs/resources/channel#create-message
Args:
id: channel snowflake id
message: your message (string)
Returns:
Dictionary object of the new message
"""
if not self.connected:
raise ValueError('Websocket not connected')
return self._query(f'channels/{id}/messages', 'POST', {'content': message}) | Send a message to a channel
For formatting options, see the documentation:
https://discordapp.com/developers/docs/resources/channel#create-message
Args:
id: channel snowflake id
message: your message (string)
Returns:
Dictionary object of the new message | Below is the the instruction that describes the task:
### Input:
Send a message to a channel
For formatting options, see the documentation:
https://discordapp.com/developers/docs/resources/channel#create-message
Args:
id: channel snowflake id
message: your message (string)
Returns:
Dictionary object of the new message
### Response:
def send_message(self, id: str, message: str) -> Dict[str, Any]:
"""Send a message to a channel
For formatting options, see the documentation:
https://discordapp.com/developers/docs/resources/channel#create-message
Args:
id: channel snowflake id
message: your message (string)
Returns:
Dictionary object of the new message
"""
if not self.connected:
raise ValueError('Websocket not connected')
return self._query(f'channels/{id}/messages', 'POST', {'content': message}) |
def covstr(strings):
""" convert string to int or float. """
try:
result = int(strings)
except ValueError:
result = float(strings)
return result | convert string to int or float. | Below is the the instruction that describes the task:
### Input:
convert string to int or float.
### Response:
def covstr(strings):
""" convert string to int or float. """
try:
result = int(strings)
except ValueError:
result = float(strings)
return result |
def prepare_info(self, ts=None):
"""Return all session unique ids recorded in prepare phase.
:param ts: timestamp, default to current timestamp
:return: set of session unique ids
"""
sp_key = "%s:session_prepare" % self.namespace(ts or int(time.time()))
return set(s(m) for m in self.r.smembers(sp_key)) | Return all session unique ids recorded in prepare phase.
:param ts: timestamp, default to current timestamp
:return: set of session unique ids | Below is the the instruction that describes the task:
### Input:
Return all session unique ids recorded in prepare phase.
:param ts: timestamp, default to current timestamp
:return: set of session unique ids
### Response:
def prepare_info(self, ts=None):
"""Return all session unique ids recorded in prepare phase.
:param ts: timestamp, default to current timestamp
:return: set of session unique ids
"""
sp_key = "%s:session_prepare" % self.namespace(ts or int(time.time()))
return set(s(m) for m in self.r.smembers(sp_key)) |
def get_vertices_per_edge(mesh_v, mesh_f):
"""Returns an Ex2 array of adjacencies between vertices, where
each element in the array is a vertex index. Each edge is included
only once. If output of get_faces_per_edge is provided, this is used to
avoid call to get_vert_connectivity()"""
vc = sp.coo_matrix(get_vert_connectivity(mesh_v, mesh_f))
result = np.hstack((col(vc.row), col(vc.col)))
result = result[result[:,0] < result[:,1]] # for uniqueness
return result | Returns an Ex2 array of adjacencies between vertices, where
each element in the array is a vertex index. Each edge is included
only once. If output of get_faces_per_edge is provided, this is used to
avoid call to get_vert_connectivity() | Below is the the instruction that describes the task:
### Input:
Returns an Ex2 array of adjacencies between vertices, where
each element in the array is a vertex index. Each edge is included
only once. If output of get_faces_per_edge is provided, this is used to
avoid call to get_vert_connectivity()
### Response:
def get_vertices_per_edge(mesh_v, mesh_f):
"""Returns an Ex2 array of adjacencies between vertices, where
each element in the array is a vertex index. Each edge is included
only once. If output of get_faces_per_edge is provided, this is used to
avoid call to get_vert_connectivity()"""
vc = sp.coo_matrix(get_vert_connectivity(mesh_v, mesh_f))
result = np.hstack((col(vc.row), col(vc.col)))
result = result[result[:,0] < result[:,1]] # for uniqueness
return result |
def functions_shadowed(self):
'''
Return the list of functions shadowed
Returns:
list(core.Function)
'''
candidates = [c.functions_not_inherited for c in self.contract.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == self.full_name] | Return the list of functions shadowed
Returns:
list(core.Function) | Below is the the instruction that describes the task:
### Input:
Return the list of functions shadowed
Returns:
list(core.Function)
### Response:
def functions_shadowed(self):
'''
Return the list of functions shadowed
Returns:
list(core.Function)
'''
candidates = [c.functions_not_inherited for c in self.contract.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == self.full_name] |
def delete_relay(self, relayid, data):
"""Delete relay settings"""
return self.api_call(
ENDPOINTS['relays']['delete'],
dict(relayid=relayid),
body=data) | Delete relay settings | Below is the the instruction that describes the task:
### Input:
Delete relay settings
### Response:
def delete_relay(self, relayid, data):
"""Delete relay settings"""
return self.api_call(
ENDPOINTS['relays']['delete'],
dict(relayid=relayid),
body=data) |
def _get_dep_statuses(self, ti, session, dep_context):
"""
Determines whether a task is ready to be rescheduled. Only tasks in
NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is
considered as passed. This dependency fails if the latest reschedule
request's reschedule date is still in future.
"""
if dep_context.ignore_in_reschedule_period:
yield self._passing_status(
reason="The context specified that being in a reschedule period was "
"permitted.")
return
if ti.state not in self.RESCHEDULEABLE_STATES:
yield self._passing_status(
reason="The task instance is not in State_UP_FOR_RESCHEDULE or NONE state.")
return
task_reschedules = TaskReschedule.find_for_task_instance(task_instance=ti)
if not task_reschedules:
yield self._passing_status(
reason="There is no reschedule request for this task instance.")
return
now = timezone.utcnow()
next_reschedule_date = task_reschedules[-1].reschedule_date
if now >= next_reschedule_date:
yield self._passing_status(
reason="Task instance id ready for reschedule.")
return
yield self._failing_status(
reason="Task is not ready for reschedule yet but will be rescheduled "
"automatically. Current date is {0} and task will be rescheduled "
"at {1}.".format(now.isoformat(), next_reschedule_date.isoformat())) | Determines whether a task is ready to be rescheduled. Only tasks in
NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is
considered as passed. This dependency fails if the latest reschedule
request's reschedule date is still in future. | Below is the the instruction that describes the task:
### Input:
Determines whether a task is ready to be rescheduled. Only tasks in
NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is
considered as passed. This dependency fails if the latest reschedule
request's reschedule date is still in future.
### Response:
def _get_dep_statuses(self, ti, session, dep_context):
"""
Determines whether a task is ready to be rescheduled. Only tasks in
NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is
considered as passed. This dependency fails if the latest reschedule
request's reschedule date is still in future.
"""
if dep_context.ignore_in_reschedule_period:
yield self._passing_status(
reason="The context specified that being in a reschedule period was "
"permitted.")
return
if ti.state not in self.RESCHEDULEABLE_STATES:
yield self._passing_status(
reason="The task instance is not in State_UP_FOR_RESCHEDULE or NONE state.")
return
task_reschedules = TaskReschedule.find_for_task_instance(task_instance=ti)
if not task_reschedules:
yield self._passing_status(
reason="There is no reschedule request for this task instance.")
return
now = timezone.utcnow()
next_reschedule_date = task_reschedules[-1].reschedule_date
if now >= next_reschedule_date:
yield self._passing_status(
reason="Task instance id ready for reschedule.")
return
yield self._failing_status(
reason="Task is not ready for reschedule yet but will be rescheduled "
"automatically. Current date is {0} and task will be rescheduled "
"at {1}.".format(now.isoformat(), next_reschedule_date.isoformat())) |
def set_value(self, value, block_events=False):
"""
Sets the current value of the number box.
Setting block_events=True will temporarily block the widget from
sending any signals when setting the value.
"""
if block_events: self.block_events()
self._widget.setValue(value)
if block_events: self.unblock_events() | Sets the current value of the number box.
Setting block_events=True will temporarily block the widget from
sending any signals when setting the value. | Below is the the instruction that describes the task:
### Input:
Sets the current value of the number box.
Setting block_events=True will temporarily block the widget from
sending any signals when setting the value.
### Response:
def set_value(self, value, block_events=False):
"""
Sets the current value of the number box.
Setting block_events=True will temporarily block the widget from
sending any signals when setting the value.
"""
if block_events: self.block_events()
self._widget.setValue(value)
if block_events: self.unblock_events() |
def _validate(self, writing=False):
"""Verify that the box obeys the specifications."""
if self.colorspace is not None and self.icc_profile is not None:
msg = ("Colorspace and icc_profile cannot both be set when "
"creating a ColourSpecificationBox.")
self._dispatch_validation_error(msg, writing=writing)
if self.method not in _COLORSPACE_METHODS.keys():
msg = "Invalid colorspace method value ({method})."
msg = msg.format(method=self.method)
if writing:
raise IOError(msg)
else:
warnings.warn(msg, UserWarning)
if self.approximation not in _APPROXIMATION_MEASURES.keys():
msg = "Invalid colr approximation value ({approx})."
msg = msg.format(approx=self.approximation)
if not writing:
# Don't bother to check this for the case of writing=True
# because it's already handles in the wrapping code.
warnings.warn(msg, UserWarning) | Verify that the box obeys the specifications. | Below is the the instruction that describes the task:
### Input:
Verify that the box obeys the specifications.
### Response:
def _validate(self, writing=False):
"""Verify that the box obeys the specifications."""
if self.colorspace is not None and self.icc_profile is not None:
msg = ("Colorspace and icc_profile cannot both be set when "
"creating a ColourSpecificationBox.")
self._dispatch_validation_error(msg, writing=writing)
if self.method not in _COLORSPACE_METHODS.keys():
msg = "Invalid colorspace method value ({method})."
msg = msg.format(method=self.method)
if writing:
raise IOError(msg)
else:
warnings.warn(msg, UserWarning)
if self.approximation not in _APPROXIMATION_MEASURES.keys():
msg = "Invalid colr approximation value ({approx})."
msg = msg.format(approx=self.approximation)
if not writing:
# Don't bother to check this for the case of writing=True
# because it's already handles in the wrapping code.
warnings.warn(msg, UserWarning) |
def to_float(b:Collection[Tensor])->Collection[Tensor]:
"Recursively map lists of tensors in `b ` to FP16."
if is_listy(b): return [to_float(o) for o in b]
return b.float() if b.dtype not in [torch.int64, torch.int32, torch.int16] else b | Recursively map lists of tensors in `b ` to FP16. | Below is the the instruction that describes the task:
### Input:
Recursively map lists of tensors in `b ` to FP16.
### Response:
def to_float(b:Collection[Tensor])->Collection[Tensor]:
"Recursively map lists of tensors in `b ` to FP16."
if is_listy(b): return [to_float(o) for o in b]
return b.float() if b.dtype not in [torch.int64, torch.int32, torch.int16] else b |
def create_uaa(self, admin_secret, **kwargs):
"""
Creates an instance of UAA Service.
:param admin_secret: The secret password for administering the service
such as adding clients and users.
"""
uaa = predix.admin.uaa.UserAccountAuthentication(**kwargs)
if not uaa.exists():
uaa.create(admin_secret, **kwargs)
uaa.add_to_manifest(self)
return uaa | Creates an instance of UAA Service.
:param admin_secret: The secret password for administering the service
such as adding clients and users. | Below is the the instruction that describes the task:
### Input:
Creates an instance of UAA Service.
:param admin_secret: The secret password for administering the service
such as adding clients and users.
### Response:
def create_uaa(self, admin_secret, **kwargs):
"""
Creates an instance of UAA Service.
:param admin_secret: The secret password for administering the service
such as adding clients and users.
"""
uaa = predix.admin.uaa.UserAccountAuthentication(**kwargs)
if not uaa.exists():
uaa.create(admin_secret, **kwargs)
uaa.add_to_manifest(self)
return uaa |
def handle_oauth1_response(self, args):
"""Handles an oauth1 authorization response."""
client = self.make_client()
client.verifier = args.get('oauth_verifier')
tup = session.get('%s_oauthtok' % self.name)
if not tup:
raise OAuthException(
'Token not found, maybe you disabled cookie',
type='token_not_found'
)
client.resource_owner_key = tup[0]
client.resource_owner_secret = tup[1]
uri, headers, data = client.sign(
self.expand_url(self.access_token_url),
_encode(self.access_token_method)
)
headers.update(self._access_token_headers)
resp, content = self.http_request(
uri, headers, to_bytes(data, self.encoding),
method=self.access_token_method
)
data = parse_response(resp, content)
if resp.code not in (200, 201):
raise OAuthException(
'Invalid response from %s' % self.name,
type='invalid_response', data=data
)
return data | Handles an oauth1 authorization response. | Below is the the instruction that describes the task:
### Input:
Handles an oauth1 authorization response.
### Response:
def handle_oauth1_response(self, args):
"""Handles an oauth1 authorization response."""
client = self.make_client()
client.verifier = args.get('oauth_verifier')
tup = session.get('%s_oauthtok' % self.name)
if not tup:
raise OAuthException(
'Token not found, maybe you disabled cookie',
type='token_not_found'
)
client.resource_owner_key = tup[0]
client.resource_owner_secret = tup[1]
uri, headers, data = client.sign(
self.expand_url(self.access_token_url),
_encode(self.access_token_method)
)
headers.update(self._access_token_headers)
resp, content = self.http_request(
uri, headers, to_bytes(data, self.encoding),
method=self.access_token_method
)
data = parse_response(resp, content)
if resp.code not in (200, 201):
raise OAuthException(
'Invalid response from %s' % self.name,
type='invalid_response', data=data
)
return data |
def interpolate_nearest(self, lons, lats, data):
"""
Interpolate using nearest-neighbour approximation
Returns the same as interpolate(lons,lats,data,order=0)
"""
return self.interpolate(lons, lats, data, order=0) | Interpolate using nearest-neighbour approximation
Returns the same as interpolate(lons,lats,data,order=0) | Below is the the instruction that describes the task:
### Input:
Interpolate using nearest-neighbour approximation
Returns the same as interpolate(lons,lats,data,order=0)
### Response:
def interpolate_nearest(self, lons, lats, data):
"""
Interpolate using nearest-neighbour approximation
Returns the same as interpolate(lons,lats,data,order=0)
"""
return self.interpolate(lons, lats, data, order=0) |
def _set_access_mac_vlan_classification(self, v, load=False):
"""
Setter method for access_mac_vlan_classification, mapped from YANG variable /interface/ethernet/switchport/access_mac_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="access-mac-vlan-classification", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'hidden': u'full', u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_mac_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="access-mac-vlan-classification", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'hidden': u'full', u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__access_mac_vlan_classification = t
if hasattr(self, '_set'):
self._set() | Setter method for access_mac_vlan_classification, mapped from YANG variable /interface/ethernet/switchport/access_mac_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_vlan_classification() directly. | Below is the instruction that describes the task:
### Input:
Setter method for access_mac_vlan_classification, mapped from YANG variable /interface/ethernet/switchport/access_mac_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_vlan_classification() directly.
### Response:
def _set_access_mac_vlan_classification(self, v, load=False):
  """
  Setter method for access_mac_vlan_classification, mapped from YANG variable /interface/ethernet/switchport/access_mac_vlan_classification (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_access_mac_vlan_classification is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_access_mac_vlan_classification() directly.
  """
  # pyangbind-generated setter: do not edit the YANGDynClass arguments by
  # hand -- they are derived from the brocade-interface YANG model.
  # Unwrap user-facing typedef wrappers to the underlying value first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce/validate the value into the YANG container type.
    t = YANGDynClass(v,base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="access-mac-vlan-classification", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'hidden': u'full', u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with the model metadata so callers see what type was expected.
    raise ValueError({
      'error-string': """access_mac_vlan_classification must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="access-mac-vlan-classification", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'hidden': u'full', u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
    })
  # Store the validated value (name-mangled attribute on this class).
  self.__access_mac_vlan_classification = t
  # Notify the backend hook, if one is registered, that config changed.
  if hasattr(self, '_set'):
    self._set()
async def handle_request(self, channel: Channel, body, envelope,
properties, futurize=True):
"""
the 'futurize' param is simply because aioamqp doesnt send another job until
this method returns (completes), so we ensure the future of
ourselves and return immediately so we can handle many requests
at a time.
"""
if futurize:
asyncio.ensure_future(
self.handle_request(channel, body, envelope, properties,
futurize=False))
return
self._counter += 1
headers = properties.headers or {}
query = headers.pop('x-wasp-query-string', '').lstrip('?')
correlation_id = properties.correlation_id
message_id = properties.message_id
reply_to = properties.reply_to
route = envelope.routing_key
method, path = route.split('.', 1)
try:
method = Methods(method.upper())
except ValueError:
path = route
method = 'POST'
path = path.replace('.', '/')
# need to use `?` to represent `.` in rabbit
# since its not valid in a path, it should work correctly everywhere
path = path.replace('?', '.')
path = urllib.parse.unquote(path)
request = Request(
headers=headers,
path=path,
correlation_id=correlation_id,
method=method,
query_string=query,
body=body,
)
if properties.content_type:
headers['content-type'] = properties.content_type
request.content_type = properties.content_type
if properties.content_encoding:
headers['content-encoding'] = properties.content_encoding
logger.debug('received incoming request via rabbitmq: %s', request)
response = await self._handler(request)
if response is None:
# task got cancelled. Dont send a response.
return
if reply_to:
response.headers['Status'] = str(response.status.value)
payload = response.raw_body or b'null'
properties = {
'correlation_id': response.correlation_id,
'headers': response.headers,
'content_type': response.content_type,
'message_id': message_id,
'expiration': '30000',
}
await self._channel_ready.wait()
await channel.basic_publish(exchange_name='',
payload=payload,
routing_key=reply_to,
properties=properties)
if self._use_acks:
await self.channel.basic_client_ack(delivery_tag=envelope.delivery_tag)
self._counter -= 1 | the 'futurize' param is simply because aioamqp doesnt send another job until
this method returns (completes), so we ensure the future of
ourselves and return immediately so we can handle many requests
at a time. | Below is the instruction that describes the task:
### Input:
the 'futurize' param is simply because aioamqp doesnt send another job until
this method returns (completes), so we ensure the future of
ourselves and return immediately so we can handle many requests
at a time.
### Response:
async def handle_request(self, channel: Channel, body, envelope,
                         properties, futurize=True):
    """
    Handle one AMQP delivery as an HTTP-style request/response.

    the 'futurize' param is simply because aioamqp doesnt send another job until
    this method returns (completes), so we ensure the future of
    ourselves and return immediately so we can handle many requests
    at a time.
    """
    if futurize:
        # Detach into a task and return immediately so aioamqp can
        # dispatch the next delivery concurrently.
        asyncio.ensure_future(
            self.handle_request(channel, body, envelope, properties,
                                futurize=False))
        return
    self._counter += 1
    try:
        headers = properties.headers or {}
        query = headers.pop('x-wasp-query-string', '').lstrip('?')
        correlation_id = properties.correlation_id
        message_id = properties.message_id
        reply_to = properties.reply_to
        route = envelope.routing_key
        # Routing keys look like "<method>.<dotted path>".
        method, path = route.split('.', 1)
        try:
            method = Methods(method.upper())
        except ValueError:
            # First segment is not a recognized method: treat the whole
            # routing key as the path and default to POST.
            # NOTE(review): this leaves `method` as the string 'POST'
            # rather than a Methods member as in the success branch --
            # confirm Request normalizes strings before unifying.
            path = route
            method = 'POST'
        path = path.replace('.', '/')
        # need to use `?` to represent `.` in rabbit
        # since its not valid in a path, it should work correctly everywhere
        path = path.replace('?', '.')
        path = urllib.parse.unquote(path)
        request = Request(
            headers=headers,
            path=path,
            correlation_id=correlation_id,
            method=method,
            query_string=query,
            body=body,
        )
        # `headers` is the same dict the Request holds, so these updates
        # are visible on the request object as well.
        if properties.content_type:
            headers['content-type'] = properties.content_type
            request.content_type = properties.content_type
        if properties.content_encoding:
            headers['content-encoding'] = properties.content_encoding
        logger.debug('received incoming request via rabbitmq: %s', request)
        response = await self._handler(request)
        if response is None:
            # task got cancelled. Dont send a response.
            return
        if reply_to:
            response.headers['Status'] = str(response.status.value)
            payload = response.raw_body or b'null'
            # Renamed from `properties` so the incoming parameter is no
            # longer shadowed.
            reply_properties = {
                'correlation_id': response.correlation_id,
                'headers': response.headers,
                'content_type': response.content_type,
                'message_id': message_id,
                'expiration': '30000',
            }
            await self._channel_ready.wait()
            await channel.basic_publish(exchange_name='',
                                        payload=payload,
                                        routing_key=reply_to,
                                        properties=reply_properties)
        if self._use_acks:
            # NOTE(review): acks go through self.channel while the reply is
            # published on the `channel` argument -- confirm this asymmetry
            # is intentional.
            await self.channel.basic_client_ack(delivery_tag=envelope.delivery_tag)
    finally:
        # BUGFIX: decrement in a finally block so the in-flight counter
        # cannot leak. Previously an exception from the handler, or the
        # early return on a cancelled task (response is None), left the
        # counter permanently incremented.
        self._counter -= 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.