code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def clear_text(self, label):
"""stub"""
if label not in self.my_osid_object_form._my_map['texts']:
raise NotFound()
del self.my_osid_object_form._my_map['texts'][label] | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def clear_text(self, label):
"""stub"""
if label not in self.my_osid_object_form._my_map['texts']:
raise NotFound()
del self.my_osid_object_form._my_map['texts'][label] |
def __process_by_python(self):
"""!
@brief Performs cluster analysis using python code.
"""
if self.__data_type == 'points':
self.__kdtree = kdtree(self.__sample_pointer, range(len(self.__sample_pointer)))
self.__allocate_clusters()
if (self.__amount_clusters is not None) and (self.__amount_clusters != len(self.get_clusters())):
analyser = ordering_analyser(self.get_ordering())
radius, _ = analyser.calculate_connvectivity_radius(self.__amount_clusters)
if radius is not None:
self.__eps = radius
self.__allocate_clusters() | !
@brief Performs cluster analysis using python code. | Below is the the instruction that describes the task:
### Input:
!
@brief Performs cluster analysis using python code.
### Response:
def __process_by_python(self):
"""!
@brief Performs cluster analysis using python code.
"""
if self.__data_type == 'points':
self.__kdtree = kdtree(self.__sample_pointer, range(len(self.__sample_pointer)))
self.__allocate_clusters()
if (self.__amount_clusters is not None) and (self.__amount_clusters != len(self.get_clusters())):
analyser = ordering_analyser(self.get_ordering())
radius, _ = analyser.calculate_connvectivity_radius(self.__amount_clusters)
if radius is not None:
self.__eps = radius
self.__allocate_clusters() |
def get_config_item(self, key):
"""
Returns the value for a given config key.
A list is returned when multiple values are set.
"""
value = _lxc.Container.get_config_item(self, key)
if value is False:
return False
elif value.endswith("\n"):
return value.rstrip("\n").split("\n")
else:
return value | Returns the value for a given config key.
A list is returned when multiple values are set. | Below is the the instruction that describes the task:
### Input:
Returns the value for a given config key.
A list is returned when multiple values are set.
### Response:
def get_config_item(self, key):
"""
Returns the value for a given config key.
A list is returned when multiple values are set.
"""
value = _lxc.Container.get_config_item(self, key)
if value is False:
return False
elif value.endswith("\n"):
return value.rstrip("\n").split("\n")
else:
return value |
def _create_skeleton_3(pc, l, num_section):
"""
Bottom level: {"measurement": [], "model": [{summary, distributions, ensemble}]}
Fill in measurement and model tables with N number of EMPTY meas, summary, ensemble, and distributions.
:param str pc: Paleo or Chron "mode"
:param list l:
:param dict num_section:
:return dict:
"""
logger_excel.info("enter create_skeleton_inner_2")
# Table Template: Model
template_model = {"summaryTable": {}, "ensembleTable": {}, "distributionTable": []}
# Build string appropriate for paleo/chron mode
pc_meas = "{}MeasurementTable".format(pc)
pc_mod = "{}Model".format(pc)
# Loop for each table count
for idx1, table in num_section.items():
try:
# Create N number of empty measurement lists
l[idx1 - 1][pc_meas] = [None] * num_section[idx1]["ct_meas"]
# Create N number of empty model table lists
l[idx1 - 1][pc_mod] = [copy.deepcopy(template_model)] * num_section[idx1]["ct_model"]
# Create N number of empty model tables at list index
#
for idx2, nums in table["ct_in_model"].items():
dists = []
try:
# Create N number of empty distributions at list index
[dists.append({}) for i in range(0, nums["ct_dist"])]
except IndexError as e:
logger_excel.debug("excel: create_metadata_skeleton: paleo tables messed up, {}".format(e))
# Model template complete, insert it at list index
l[idx1 - 1][pc_mod][idx2-1] = {"summaryTable": {}, "ensembleTable": {}, "distributionTable": dists}
except IndexError as e:
logger_excel.warn("create_skeleton_inner_tables: IndexError: {}".format(e))
except KeyError as e:
logger_excel.warn("create_skeleton_inner_tables: KeyError: {}".format(e))
return l | Bottom level: {"measurement": [], "model": [{summary, distributions, ensemble}]}
Fill in measurement and model tables with N number of EMPTY meas, summary, ensemble, and distributions.
:param str pc: Paleo or Chron "mode"
:param list l:
:param dict num_section:
:return dict: | Below is the the instruction that describes the task:
### Input:
Bottom level: {"measurement": [], "model": [{summary, distributions, ensemble}]}
Fill in measurement and model tables with N number of EMPTY meas, summary, ensemble, and distributions.
:param str pc: Paleo or Chron "mode"
:param list l:
:param dict num_section:
:return dict:
### Response:
def _create_skeleton_3(pc, l, num_section):
"""
Bottom level: {"measurement": [], "model": [{summary, distributions, ensemble}]}
Fill in measurement and model tables with N number of EMPTY meas, summary, ensemble, and distributions.
:param str pc: Paleo or Chron "mode"
:param list l:
:param dict num_section:
:return dict:
"""
logger_excel.info("enter create_skeleton_inner_2")
# Table Template: Model
template_model = {"summaryTable": {}, "ensembleTable": {}, "distributionTable": []}
# Build string appropriate for paleo/chron mode
pc_meas = "{}MeasurementTable".format(pc)
pc_mod = "{}Model".format(pc)
# Loop for each table count
for idx1, table in num_section.items():
try:
# Create N number of empty measurement lists
l[idx1 - 1][pc_meas] = [None] * num_section[idx1]["ct_meas"]
# Create N number of empty model table lists
l[idx1 - 1][pc_mod] = [copy.deepcopy(template_model)] * num_section[idx1]["ct_model"]
# Create N number of empty model tables at list index
#
for idx2, nums in table["ct_in_model"].items():
dists = []
try:
# Create N number of empty distributions at list index
[dists.append({}) for i in range(0, nums["ct_dist"])]
except IndexError as e:
logger_excel.debug("excel: create_metadata_skeleton: paleo tables messed up, {}".format(e))
# Model template complete, insert it at list index
l[idx1 - 1][pc_mod][idx2-1] = {"summaryTable": {}, "ensembleTable": {}, "distributionTable": dists}
except IndexError as e:
logger_excel.warn("create_skeleton_inner_tables: IndexError: {}".format(e))
except KeyError as e:
logger_excel.warn("create_skeleton_inner_tables: KeyError: {}".format(e))
return l |
def score_wu(CIJ, s):
'''
The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core
'''
CIJscore = CIJ.copy()
while True:
str = strengths_und(CIJscore) # get strengths of matrix
# find nodes with strength <s
ff, = np.where(np.logical_and(str < s, str > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
CIJscore[ff, :] = 0
CIJscore[:, ff] = 0
sn = np.sum(str > 0)
return CIJscore, sn | The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core | Below is the the instruction that describes the task:
### Input:
The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core
### Response:
def score_wu(CIJ, s):
'''
The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core
'''
CIJscore = CIJ.copy()
while True:
str = strengths_und(CIJscore) # get strengths of matrix
# find nodes with strength <s
ff, = np.where(np.logical_and(str < s, str > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
CIJscore[ff, :] = 0
CIJscore[:, ff] = 0
sn = np.sum(str > 0)
return CIJscore, sn |
def _set_config_src(self, v, load=False):
"""
Setter method for config_src, mapped from YANG variable /brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/config_src (config-src-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_config_src is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config_src() directly.
YANG Description: Tunnel configuration source; indicates how
tunnel was created.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'bgp-evpn': {'value': 3}, u'vtep-controller': {'value': 1}, u'site-config': {'value': 2}},), is_leaf=True, yang_name="config-src", rest_name="config-src", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='config-src-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config_src must be of a type compatible with config-src-type""",
'defined-type': "brocade-tunnels-ext:config-src-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'bgp-evpn': {'value': 3}, u'vtep-controller': {'value': 1}, u'site-config': {'value': 2}},), is_leaf=True, yang_name="config-src", rest_name="config-src", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='config-src-type', is_config=True)""",
})
self.__config_src = t
if hasattr(self, '_set'):
self._set() | Setter method for config_src, mapped from YANG variable /brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/config_src (config-src-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_config_src is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config_src() directly.
YANG Description: Tunnel configuration source; indicates how
tunnel was created. | Below is the the instruction that describes the task:
### Input:
Setter method for config_src, mapped from YANG variable /brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/config_src (config-src-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_config_src is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config_src() directly.
YANG Description: Tunnel configuration source; indicates how
tunnel was created.
### Response:
def _set_config_src(self, v, load=False):
"""
Setter method for config_src, mapped from YANG variable /brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/config_src (config-src-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_config_src is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config_src() directly.
YANG Description: Tunnel configuration source; indicates how
tunnel was created.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'bgp-evpn': {'value': 3}, u'vtep-controller': {'value': 1}, u'site-config': {'value': 2}},), is_leaf=True, yang_name="config-src", rest_name="config-src", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='config-src-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config_src must be of a type compatible with config-src-type""",
'defined-type': "brocade-tunnels-ext:config-src-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'bgp-evpn': {'value': 3}, u'vtep-controller': {'value': 1}, u'site-config': {'value': 2}},), is_leaf=True, yang_name="config-src", rest_name="config-src", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='config-src-type', is_config=True)""",
})
self.__config_src = t
if hasattr(self, '_set'):
self._set() |
def get_tokens(tu, extent):
"""Helper method to return all tokens in an extent.
This functionality is needed multiple places in this module. We define
it here because it seems like a logical place.
"""
tokens_memory = POINTER(Token)()
tokens_count = c_uint()
conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
byref(tokens_count))
count = int(tokens_count.value)
# If we get no tokens, no memory was allocated. Be sure not to return
# anything and potentially call a destructor on nothing.
if count < 1:
return
tokens_array = cast(tokens_memory, POINTER(Token * count)).contents
token_group = TokenGroup(tu, tokens_memory, tokens_count)
for i in xrange(0, count):
token = Token()
token.int_data = tokens_array[i].int_data
token.ptr_data = tokens_array[i].ptr_data
token._tu = tu
token._group = token_group
yield token | Helper method to return all tokens in an extent.
This functionality is needed multiple places in this module. We define
it here because it seems like a logical place. | Below is the the instruction that describes the task:
### Input:
Helper method to return all tokens in an extent.
This functionality is needed multiple places in this module. We define
it here because it seems like a logical place.
### Response:
def get_tokens(tu, extent):
"""Helper method to return all tokens in an extent.
This functionality is needed multiple places in this module. We define
it here because it seems like a logical place.
"""
tokens_memory = POINTER(Token)()
tokens_count = c_uint()
conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
byref(tokens_count))
count = int(tokens_count.value)
# If we get no tokens, no memory was allocated. Be sure not to return
# anything and potentially call a destructor on nothing.
if count < 1:
return
tokens_array = cast(tokens_memory, POINTER(Token * count)).contents
token_group = TokenGroup(tu, tokens_memory, tokens_count)
for i in xrange(0, count):
token = Token()
token.int_data = tokens_array[i].int_data
token.ptr_data = tokens_array[i].ptr_data
token._tu = tu
token._group = token_group
yield token |
def value_output(value, quote_method='all', none_handle='strict'):
'''
Format types:
'all'.
(default) everything is embraced with quotes
'needed'.
Quote only if needed. Values are on placed in quotes if:
a. the value contains a quote
b. there is whitespace at the beginning or end of string
'none'.
Quote nothing.
'''
p = str(value)
if value is None:
if none_handle=='strict':
return ""
elif none_handle=='empty':
return ' ""'
elif none_handle=='None':
p = "None"
else:
raise "none handler "+str(none_handle)+" not recognized"
if quote_method=='all':
return ' "'+p+'"'
elif quote_method=='by_need':
if len(p)!=len(p.strip()):
return ' "'+p+'"'
if '"' in p:
return ' "'+p+'"'
return " "+p
elif quote_method=='none':
return " "+p
else:
raise "quote method "+str(quote_method)+" not recognized"
return | Format types:
'all'.
(default) everything is embraced with quotes
'needed'.
Quote only if needed. Values are on placed in quotes if:
a. the value contains a quote
b. there is whitespace at the beginning or end of string
'none'.
Quote nothing. | Below is the the instruction that describes the task:
### Input:
Format types:
'all'.
(default) everything is embraced with quotes
'needed'.
Quote only if needed. Values are on placed in quotes if:
a. the value contains a quote
b. there is whitespace at the beginning or end of string
'none'.
Quote nothing.
### Response:
def value_output(value, quote_method='all', none_handle='strict'):
'''
Format types:
'all'.
(default) everything is embraced with quotes
'needed'.
Quote only if needed. Values are on placed in quotes if:
a. the value contains a quote
b. there is whitespace at the beginning or end of string
'none'.
Quote nothing.
'''
p = str(value)
if value is None:
if none_handle=='strict':
return ""
elif none_handle=='empty':
return ' ""'
elif none_handle=='None':
p = "None"
else:
raise "none handler "+str(none_handle)+" not recognized"
if quote_method=='all':
return ' "'+p+'"'
elif quote_method=='by_need':
if len(p)!=len(p.strip()):
return ' "'+p+'"'
if '"' in p:
return ' "'+p+'"'
return " "+p
elif quote_method=='none':
return " "+p
else:
raise "quote method "+str(quote_method)+" not recognized"
return |
def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args) | Logs a message to stderr only if verbose is enabled. | Below is the the instruction that describes the task:
### Input:
Logs a message to stderr only if verbose is enabled.
### Response:
def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args) |
def factory(db, collection, action):
"""
Instantiate trigger
:param db: db descriptor
:param collection: collection to be updated
:param action: ACTION_BEFORE_DELETE, ACTION_AFTER_DELETE, ACTION_BEFORE_UPSERT, ACTION_AFTER_DELETE
:return: trigger instance if trigger configured in triggers.json or None
"""
triggers_cfg = na3x_cfg[NA3X_TRIGGERS]
if (collection in triggers_cfg) and (action in triggers_cfg[collection]):
return obj_for_name(triggers_cfg[collection][action])(db, collection)
else:
return None | Instantiate trigger
:param db: db descriptor
:param collection: collection to be updated
:param action: ACTION_BEFORE_DELETE, ACTION_AFTER_DELETE, ACTION_BEFORE_UPSERT, ACTION_AFTER_DELETE
:return: trigger instance if trigger configured in triggers.json or None | Below is the the instruction that describes the task:
### Input:
Instantiate trigger
:param db: db descriptor
:param collection: collection to be updated
:param action: ACTION_BEFORE_DELETE, ACTION_AFTER_DELETE, ACTION_BEFORE_UPSERT, ACTION_AFTER_DELETE
:return: trigger instance if trigger configured in triggers.json or None
### Response:
def factory(db, collection, action):
"""
Instantiate trigger
:param db: db descriptor
:param collection: collection to be updated
:param action: ACTION_BEFORE_DELETE, ACTION_AFTER_DELETE, ACTION_BEFORE_UPSERT, ACTION_AFTER_DELETE
:return: trigger instance if trigger configured in triggers.json or None
"""
triggers_cfg = na3x_cfg[NA3X_TRIGGERS]
if (collection in triggers_cfg) and (action in triggers_cfg[collection]):
return obj_for_name(triggers_cfg[collection][action])(db, collection)
else:
return None |
def get_gdns_publisher(config, metrics, **kwargs):
"""Get a GDNSPublisher client.
A factory function that validates configuration and returns a
publisher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud DNS API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
publisher.
Returns:
A :class:`GDNSPublisher` instance.
"""
builder = gdns_publisher.GDNSPublisherBuilder(
config, metrics, **kwargs)
return builder.build_publisher() | Get a GDNSPublisher client.
A factory function that validates configuration and returns a
publisher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud DNS API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
publisher.
Returns:
A :class:`GDNSPublisher` instance. | Below is the the instruction that describes the task:
### Input:
Get a GDNSPublisher client.
A factory function that validates configuration and returns a
publisher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud DNS API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
publisher.
Returns:
A :class:`GDNSPublisher` instance.
### Response:
def get_gdns_publisher(config, metrics, **kwargs):
"""Get a GDNSPublisher client.
A factory function that validates configuration and returns a
publisher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud DNS API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
publisher.
Returns:
A :class:`GDNSPublisher` instance.
"""
builder = gdns_publisher.GDNSPublisherBuilder(
config, metrics, **kwargs)
return builder.build_publisher() |
def folders(self):
'''gets the property value for folders'''
if self._folders is None :
self.__init()
if self._folders is not None and isinstance(self._folders, list):
if len(self._folders) == 0:
self._loadFolders()
return self._folders | gets the property value for folders | Below is the the instruction that describes the task:
### Input:
gets the property value for folders
### Response:
def folders(self):
'''gets the property value for folders'''
if self._folders is None :
self.__init()
if self._folders is not None and isinstance(self._folders, list):
if len(self._folders) == 0:
self._loadFolders()
return self._folders |
def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
"""
if safe:
_wait = self._wait_or_catch_exc
else:
_wait = Process.wait
if timeout is None:
return [_wait(process) for process in self]
else:
final = time.time() + timeout
return [_wait(process, final - time.time()) for process in self] | Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context. | Below is the the instruction that describes the task:
### Input:
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
### Response:
def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
"""
if safe:
_wait = self._wait_or_catch_exc
else:
_wait = Process.wait
if timeout is None:
return [_wait(process) for process in self]
else:
final = time.time() + timeout
return [_wait(process, final - time.time()) for process in self] |
def get_safe_redirect_target(arg='next'):
"""Get URL to redirect to and ensure that it is local.
:param arg: URL argument.
:returns: The redirect target or ``None``.
"""
for target in request.args.get(arg), request.referrer:
if target:
redirect_uri = urisplit(target)
allowed_hosts = current_app.config.get('APP_ALLOWED_HOSTS', [])
if redirect_uri.host in allowed_hosts:
return target
elif redirect_uri.path:
return uricompose(
path=redirect_uri.path,
query=redirect_uri.query,
fragment=redirect_uri.fragment
)
return None | Get URL to redirect to and ensure that it is local.
:param arg: URL argument.
:returns: The redirect target or ``None``. | Below is the the instruction that describes the task:
### Input:
Get URL to redirect to and ensure that it is local.
:param arg: URL argument.
:returns: The redirect target or ``None``.
### Response:
def get_safe_redirect_target(arg='next'):
"""Get URL to redirect to and ensure that it is local.
:param arg: URL argument.
:returns: The redirect target or ``None``.
"""
for target in request.args.get(arg), request.referrer:
if target:
redirect_uri = urisplit(target)
allowed_hosts = current_app.config.get('APP_ALLOWED_HOSTS', [])
if redirect_uri.host in allowed_hosts:
return target
elif redirect_uri.path:
return uricompose(
path=redirect_uri.path,
query=redirect_uri.query,
fragment=redirect_uri.fragment
)
return None |
def _setup_logger(self, level, log_file):
"""Setup log level and log file if set"""
if logger.handlers:
return
level = getattr(logging, level.upper())
logger.setLevel(level)
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
return
try:
handler = TimedRotatingFileHandler(log_file)
except IOError:
logger.error("Could not write to %s, falling back to stdout",
log_file)
else:
logger.addHandler(handler)
handler.setFormatter(formatter) | Setup log level and log file if set | Below is the the instruction that describes the task:
### Input:
Setup log level and log file if set
### Response:
def _setup_logger(self, level, log_file):
"""Setup log level and log file if set"""
if logger.handlers:
return
level = getattr(logging, level.upper())
logger.setLevel(level)
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
return
try:
handler = TimedRotatingFileHandler(log_file)
except IOError:
logger.error("Could not write to %s, falling back to stdout",
log_file)
else:
logger.addHandler(handler)
handler.setFormatter(formatter) |
def _cache_dataset(dataset, prefix):
"""Cache the processed npy dataset the dataset into a npz
Parameters
----------
dataset : SimpleDataset
file_path : str
"""
if not os.path.exists(_constants.CACHE_PATH):
os.makedirs(_constants.CACHE_PATH)
src_data = np.concatenate([e[0] for e in dataset])
tgt_data = np.concatenate([e[1] for e in dataset])
src_cumlen = np.cumsum([0]+[len(e[0]) for e in dataset])
tgt_cumlen = np.cumsum([0]+[len(e[1]) for e in dataset])
np.savez(os.path.join(_constants.CACHE_PATH, prefix + '.npz'),
src_data=src_data, tgt_data=tgt_data,
src_cumlen=src_cumlen, tgt_cumlen=tgt_cumlen) | Cache the processed npy dataset the dataset into a npz
Parameters
----------
dataset : SimpleDataset
file_path : str | Below is the the instruction that describes the task:
### Input:
Cache the processed npy dataset the dataset into a npz
Parameters
----------
dataset : SimpleDataset
file_path : str
### Response:
def _cache_dataset(dataset, prefix):
"""Cache the processed npy dataset the dataset into a npz
Parameters
----------
dataset : SimpleDataset
file_path : str
"""
if not os.path.exists(_constants.CACHE_PATH):
os.makedirs(_constants.CACHE_PATH)
src_data = np.concatenate([e[0] for e in dataset])
tgt_data = np.concatenate([e[1] for e in dataset])
src_cumlen = np.cumsum([0]+[len(e[0]) for e in dataset])
tgt_cumlen = np.cumsum([0]+[len(e[1]) for e in dataset])
np.savez(os.path.join(_constants.CACHE_PATH, prefix + '.npz'),
src_data=src_data, tgt_data=tgt_data,
src_cumlen=src_cumlen, tgt_cumlen=tgt_cumlen) |
def instantiate_tasks(self):
""" All loaded tasks are initialized. Depending on configuration fails in such instantiations may be silent """
self.tasks_instances = {}
for task_name, task_class in self.tasks_classes.items():
try:
self.tasks_instances[task_name] = task_class()
except Exception as ex:
if not self.configuration[Configuration.ALGORITHM][Configuration.IOSF]:
raise GOSTaskException("An exception happened during the task instantiation."
"{exception}".format(exception=ex)) | All loaded tasks are initialized. Depending on configuration fails in such instantiations may be silent | Below is the the instruction that describes the task:
### Input:
All loaded tasks are initialized. Depending on configuration fails in such instantiations may be silent
### Response:
def instantiate_tasks(self):
""" All loaded tasks are initialized. Depending on configuration fails in such instantiations may be silent """
self.tasks_instances = {}
for task_name, task_class in self.tasks_classes.items():
try:
self.tasks_instances[task_name] = task_class()
except Exception as ex:
if not self.configuration[Configuration.ALGORITHM][Configuration.IOSF]:
raise GOSTaskException("An exception happened during the task instantiation."
"{exception}".format(exception=ex)) |
def get_references(chebi_ids):
'''Returns references'''
references = []
chebi_ids = [str(chebi_id) for chebi_id in chebi_ids]
filename = get_file('reference.tsv.gz')
with io.open(filename, 'r', encoding='cp1252') as textfile:
next(textfile)
for line in textfile:
tokens = line.strip().split('\t')
if tokens[0] in chebi_ids:
# Append Reference:
if len(tokens) > 3:
ref = Reference(tokens[1], tokens[2], tokens[3],
tokens[4])
else:
ref = Reference(tokens[1], tokens[2])
references.append(ref)
return references | Returns references | Below is the the instruction that describes the task:
### Input:
Returns references
### Response:
def get_references(chebi_ids):
'''Returns references'''
references = []
chebi_ids = [str(chebi_id) for chebi_id in chebi_ids]
filename = get_file('reference.tsv.gz')
with io.open(filename, 'r', encoding='cp1252') as textfile:
next(textfile)
for line in textfile:
tokens = line.strip().split('\t')
if tokens[0] in chebi_ids:
# Append Reference:
if len(tokens) > 3:
ref = Reference(tokens[1], tokens[2], tokens[3],
tokens[4])
else:
ref = Reference(tokens[1], tokens[2])
references.append(ref)
return references |
def _update_versions():
"""Update :attr:`_versions` with the registered plotter methods"""
for pm_name in plot._plot_methods:
pm = getattr(plot, pm_name)
plugin = pm._plugin
if (plugin is not None and plugin not in _versions and
pm.module in sys.modules):
            _versions.update(get_versions(key=lambda s: s == plugin)) | Update :attr:`_versions` with the registered plotter methods | Below is the instruction that describes the task:
### Input:
Update :attr:`_versions` with the registered plotter methods
### Response:
def _update_versions():
"""Update :attr:`_versions` with the registered plotter methods"""
for pm_name in plot._plot_methods:
pm = getattr(plot, pm_name)
plugin = pm._plugin
if (plugin is not None and plugin not in _versions and
pm.module in sys.modules):
_versions.update(get_versions(key=lambda s: s == plugin)) |
def filter_records(root, head, update, filters=()):
"""Apply the filters to the records."""
root, head, update = freeze(root), freeze(head), freeze(update)
for filter_ in filters:
root, head, update = filter_(root, head, update)
    return thaw(root), thaw(head), thaw(update) | Apply the filters to the records. | Below is the instruction that describes the task:
### Input:
Apply the filters to the records.
### Response:
def filter_records(root, head, update, filters=()):
"""Apply the filters to the records."""
root, head, update = freeze(root), freeze(head), freeze(update)
for filter_ in filters:
root, head, update = filter_(root, head, update)
return thaw(root), thaw(head), thaw(update) |
def create_from_request_pdu(pdu):
""" Create instance from request PDU.
:param pdu: A response PDU.
"""
_, address, value = \
struct.unpack('>BH' + conf.MULTI_BIT_VALUE_FORMAT_CHARACTER, pdu)
instance = WriteSingleRegister()
instance.address = address
instance.value = value
return instance | Create instance from request PDU.
    :param pdu: A response PDU. | Below is the instruction that describes the task:
### Input:
Create instance from request PDU.
:param pdu: A response PDU.
### Response:
def create_from_request_pdu(pdu):
""" Create instance from request PDU.
:param pdu: A response PDU.
"""
_, address, value = \
struct.unpack('>BH' + conf.MULTI_BIT_VALUE_FORMAT_CHARACTER, pdu)
instance = WriteSingleRegister()
instance.address = address
instance.value = value
return instance |
def localization_diff(localizable_file, translated_file, excluded_strings_file, output_translation_file):
""" Generates a strings file representing the strings that were yet to be translated.
Args:
localizable_file (str): The path to the localization strings file, meaning the file that represents the strings
that require translation.
translated_file (str): The path to the translated strings file, meaning the file containing the strings that
were already translated.
excluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and
from future diffs.
output_translation_file (str): The path to the output file, which will contain the strings the require
translation, but are not in the already given translation file.
"""
old_translated_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(translated_file)
if excluded_strings_file is not None and os.path.isfile(excluded_strings_file):
excluded_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(excluded_strings_file)
else:
excluded_file_dictionary = {}
# The reason we keep a list of the keys, and not just pop is because values can repeat themselves.
translated_list = old_translated_file_dictionary.keys()
output_dictionary = {}
output_file_elements = []
f = open_strings_file(localizable_file, "r")
output_file_elements.append(Comment(u"""
/**
* This file contains all the strings that were extracted from our app and that need to be translated.
* Each entry may or may not have a comment explaining context, and a "key" = "%s" equation.
* To localize, you need to fill the right side of the equation with the translation of the left side.
* Please keep special expressions such as '%%@' or '%%1$@' as is. Usually the comment will explain their context.
*/
""" % (VALUE_PLACEHOLDER,)))
for _header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
if key in translated_list or key in excluded_file_dictionary:
if key in old_translated_file_dictionary:
old_translated_file_dictionary.pop(key)
elif value in output_dictionary:
output_dictionary[value].add_comments(comments)
output_file_elements.append(Comment(
u"/* There was a value '%s' here but it was a duplicate of an older value and removed. */\n" % value))
else:
loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER)
output_dictionary[value] = loc_obj
output_file_elements.append(loc_obj)
for key, removed_trans in old_translated_file_dictionary.items():
output_file_elements.append(Comment(u"""
/*
* Entry removed from previous translation file:
* %s
* "%s" = "%s";
*/
""" % (", ".join(removed_trans.comments), removed_trans.key, removed_trans.value)))
write_file_elements_to_strings_file(output_translation_file, output_file_elements) | Generates a strings file representing the strings that were yet to be translated.
Args:
localizable_file (str): The path to the localization strings file, meaning the file that represents the strings
that require translation.
translated_file (str): The path to the translated strings file, meaning the file containing the strings that
were already translated.
excluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and
from future diffs.
output_translation_file (str): The path to the output file, which will contain the strings the require
            translation, but are not in the already given translation file. | Below is the instruction that describes the task:
### Input:
Generates a strings file representing the strings that were yet to be translated.
Args:
localizable_file (str): The path to the localization strings file, meaning the file that represents the strings
that require translation.
translated_file (str): The path to the translated strings file, meaning the file containing the strings that
were already translated.
excluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and
from future diffs.
output_translation_file (str): The path to the output file, which will contain the strings the require
translation, but are not in the already given translation file.
### Response:
def localization_diff(localizable_file, translated_file, excluded_strings_file, output_translation_file):
""" Generates a strings file representing the strings that were yet to be translated.
Args:
localizable_file (str): The path to the localization strings file, meaning the file that represents the strings
that require translation.
translated_file (str): The path to the translated strings file, meaning the file containing the strings that
were already translated.
excluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and
from future diffs.
output_translation_file (str): The path to the output file, which will contain the strings the require
translation, but are not in the already given translation file.
"""
old_translated_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(translated_file)
if excluded_strings_file is not None and os.path.isfile(excluded_strings_file):
excluded_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(excluded_strings_file)
else:
excluded_file_dictionary = {}
# The reason we keep a list of the keys, and not just pop is because values can repeat themselves.
translated_list = old_translated_file_dictionary.keys()
output_dictionary = {}
output_file_elements = []
f = open_strings_file(localizable_file, "r")
output_file_elements.append(Comment(u"""
/**
* This file contains all the strings that were extracted from our app and that need to be translated.
* Each entry may or may not have a comment explaining context, and a "key" = "%s" equation.
* To localize, you need to fill the right side of the equation with the translation of the left side.
* Please keep special expressions such as '%%@' or '%%1$@' as is. Usually the comment will explain their context.
*/
""" % (VALUE_PLACEHOLDER,)))
for _header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
if key in translated_list or key in excluded_file_dictionary:
if key in old_translated_file_dictionary:
old_translated_file_dictionary.pop(key)
elif value in output_dictionary:
output_dictionary[value].add_comments(comments)
output_file_elements.append(Comment(
u"/* There was a value '%s' here but it was a duplicate of an older value and removed. */\n" % value))
else:
loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER)
output_dictionary[value] = loc_obj
output_file_elements.append(loc_obj)
for key, removed_trans in old_translated_file_dictionary.items():
output_file_elements.append(Comment(u"""
/*
* Entry removed from previous translation file:
* %s
* "%s" = "%s";
*/
""" % (", ".join(removed_trans.comments), removed_trans.key, removed_trans.value)))
write_file_elements_to_strings_file(output_translation_file, output_file_elements) |
def _InitApiApprovalFromAff4Object(api_approval, approval_obj):
"""Initializes Api(Client|Hunt|CronJob)Approval from an AFF4 object."""
api_approval.id = approval_obj.urn.Basename()
api_approval.reason = approval_obj.Get(approval_obj.Schema.REASON)
api_approval.requestor = approval_obj.Get(approval_obj.Schema.REQUESTOR)
# We should check the approval validity from the standpoint of the user
# who had requested it.
test_token = access_control.ACLToken(
username=approval_obj.Get(approval_obj.Schema.REQUESTOR))
try:
approval_obj.CheckAccess(test_token)
api_approval.is_valid = True
except access_control.UnauthorizedAccess as e:
api_approval.is_valid = False
api_approval.is_valid_message = utils.SmartStr(e)
notified_users = approval_obj.Get(approval_obj.Schema.NOTIFIED_USERS)
if notified_users:
api_approval.notified_users = sorted(
u.strip() for u in notified_users.split(","))
api_approval.email_message_id = approval_obj.Get(
approval_obj.Schema.EMAIL_MSG_ID)
email_cc = approval_obj.Get(approval_obj.Schema.EMAIL_CC)
email_cc_addresses = sorted(s.strip() for s in email_cc.split(","))
api_approval.email_cc_addresses = (
set(email_cc_addresses) - set(api_approval.notified_users))
api_approval.approvers = sorted(approval_obj.GetNonExpiredApprovers())
  return api_approval | Initializes Api(Client|Hunt|CronJob)Approval from an AFF4 object. | Below is the instruction that describes the task:
### Input:
Initializes Api(Client|Hunt|CronJob)Approval from an AFF4 object.
### Response:
def _InitApiApprovalFromAff4Object(api_approval, approval_obj):
"""Initializes Api(Client|Hunt|CronJob)Approval from an AFF4 object."""
api_approval.id = approval_obj.urn.Basename()
api_approval.reason = approval_obj.Get(approval_obj.Schema.REASON)
api_approval.requestor = approval_obj.Get(approval_obj.Schema.REQUESTOR)
# We should check the approval validity from the standpoint of the user
# who had requested it.
test_token = access_control.ACLToken(
username=approval_obj.Get(approval_obj.Schema.REQUESTOR))
try:
approval_obj.CheckAccess(test_token)
api_approval.is_valid = True
except access_control.UnauthorizedAccess as e:
api_approval.is_valid = False
api_approval.is_valid_message = utils.SmartStr(e)
notified_users = approval_obj.Get(approval_obj.Schema.NOTIFIED_USERS)
if notified_users:
api_approval.notified_users = sorted(
u.strip() for u in notified_users.split(","))
api_approval.email_message_id = approval_obj.Get(
approval_obj.Schema.EMAIL_MSG_ID)
email_cc = approval_obj.Get(approval_obj.Schema.EMAIL_CC)
email_cc_addresses = sorted(s.strip() for s in email_cc.split(","))
api_approval.email_cc_addresses = (
set(email_cc_addresses) - set(api_approval.notified_users))
api_approval.approvers = sorted(approval_obj.GetNonExpiredApprovers())
return api_approval |
def fix_orientation(image):
""" adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
"""
exif_orientation_tag = 0x0112
exif_transpose_sequences = [
[],
[],
[PIL.Image.FLIP_LEFT_RIGHT],
[PIL.Image.ROTATE_180],
[PIL.Image.FLIP_TOP_BOTTOM],
[PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_270],
[PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_90],
]
try:
# pylint:disable=protected-access
orientation = image._getexif()[exif_orientation_tag]
sequence = exif_transpose_sequences[orientation]
return functools.reduce(type(image).transpose, sequence, image)
except (TypeError, AttributeError, KeyError):
# either no EXIF tags or no orientation tag
pass
return image | adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
    1 through 8. Other values are reserved. | Below is the instruction that describes the task:
### Input:
adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
### Response:
def fix_orientation(image):
""" adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
"""
exif_orientation_tag = 0x0112
exif_transpose_sequences = [
[],
[],
[PIL.Image.FLIP_LEFT_RIGHT],
[PIL.Image.ROTATE_180],
[PIL.Image.FLIP_TOP_BOTTOM],
[PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_270],
[PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_90],
]
try:
# pylint:disable=protected-access
orientation = image._getexif()[exif_orientation_tag]
sequence = exif_transpose_sequences[orientation]
return functools.reduce(type(image).transpose, sequence, image)
except (TypeError, AttributeError, KeyError):
# either no EXIF tags or no orientation tag
pass
return image |
def create_tar_file(self, full_archive=False):
"""
Create tar file to be compressed
"""
tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
ext = "" if self.compressor == "none" else ".%s" % self.compressor
tar_file_name = tar_file_name + ".tar" + ext
logger.debug("Tar File: " + tar_file_name)
subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
self.get_compression_flag(self.compressor),
tar_file_name,
# for the docker "uber archive,"use archive_dir
# rather than tmp_dir for all the files we tar,
# because all the individual archives are in there
self.tmp_dir if not full_archive else self.archive_dir)),
stderr=subprocess.PIPE)
self.delete_archive_dir()
logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
        return tar_file_name | Create tar file to be compressed | Below is the instruction that describes the task:
### Input:
Create tar file to be compressed
### Response:
def create_tar_file(self, full_archive=False):
"""
Create tar file to be compressed
"""
tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
ext = "" if self.compressor == "none" else ".%s" % self.compressor
tar_file_name = tar_file_name + ".tar" + ext
logger.debug("Tar File: " + tar_file_name)
subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
self.get_compression_flag(self.compressor),
tar_file_name,
# for the docker "uber archive,"use archive_dir
# rather than tmp_dir for all the files we tar,
# because all the individual archives are in there
self.tmp_dir if not full_archive else self.archive_dir)),
stderr=subprocess.PIPE)
self.delete_archive_dir()
logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
return tar_file_name |
def __get_lookup(in_fn, selected_type=None):
"""Determine which lookup func to use based on inpt files and type option."""
lookup_func = None
if selected_type is not None:
lookup_func = get_lookup_by_filetype(selected_type)
else:
extension = os.path.splitext(in_fn)[1]
lookup_func = get_lookup_by_file_extension(extension)
assert(lookup_func is not None)
    return lookup_func | Determine which lookup func to use based on inpt files and type option. | Below is the instruction that describes the task:
### Input:
Determine which lookup func to use based on inpt files and type option.
### Response:
def __get_lookup(in_fn, selected_type=None):
"""Determine which lookup func to use based on inpt files and type option."""
lookup_func = None
if selected_type is not None:
lookup_func = get_lookup_by_filetype(selected_type)
else:
extension = os.path.splitext(in_fn)[1]
lookup_func = get_lookup_by_file_extension(extension)
assert(lookup_func is not None)
return lookup_func |
def is_different(old_value, new_value):
"""Numpy aware comparison between two values."""
if opt.has_numpy:
return not opt.np.array_equal(old_value, new_value)
else:
        return old_value != new_value | Numpy aware comparison between two values. | Below is the instruction that describes the task:
### Input:
Numpy aware comparison between two values.
### Response:
def is_different(old_value, new_value):
"""Numpy aware comparison between two values."""
if opt.has_numpy:
return not opt.np.array_equal(old_value, new_value)
else:
return old_value != new_value |
def push(self, item):
'''Push the value item onto the heap, maintaining the heap invariant.
If the item is not hashable, a TypeError is raised.
'''
hash(item)
heapq.heappush(self._items, item) | Push the value item onto the heap, maintaining the heap invariant.
    If the item is not hashable, a TypeError is raised. | Below is the instruction that describes the task:
### Input:
Push the value item onto the heap, maintaining the heap invariant.
If the item is not hashable, a TypeError is raised.
### Response:
def push(self, item):
'''Push the value item onto the heap, maintaining the heap invariant.
If the item is not hashable, a TypeError is raised.
'''
hash(item)
heapq.heappush(self._items, item) |
def pickTextBackgroundColor(self):
"""
Prompts the user to select a text color.
"""
clr = QColorDialog.getColor(self.textBackgroundColor(),
self.window(),
'Pick Background Color')
if clr.isValid():
            self.setTextBackgroundColor(clr) | Prompts the user to select a text color. | Below is the instruction that describes the task:
### Input:
Prompts the user to select a text color.
### Response:
def pickTextBackgroundColor(self):
"""
Prompts the user to select a text color.
"""
clr = QColorDialog.getColor(self.textBackgroundColor(),
self.window(),
'Pick Background Color')
if clr.isValid():
self.setTextBackgroundColor(clr) |
def sent2vec(self, words, transformer):
"""
Used with sqrt kernel
:param words:
:param transformer:
:return:
"""
sent_vec = np.zeros(transformer.vector_size)
numw = 0
for w in words:
try:
sent_vec = np.add(sent_vec, transformer.wv[w])
numw += 1
except:
continue
return sent_vec / np.sqrt(sent_vec.dot(sent_vec)) | Used with sqrt kernel
:param words:
:param transformer:
        :return: | Below is the instruction that describes the task:
### Input:
Used with sqrt kernel
:param words:
:param transformer:
:return:
### Response:
def sent2vec(self, words, transformer):
"""
Used with sqrt kernel
:param words:
:param transformer:
:return:
"""
sent_vec = np.zeros(transformer.vector_size)
numw = 0
for w in words:
try:
sent_vec = np.add(sent_vec, transformer.wv[w])
numw += 1
except:
continue
return sent_vec / np.sqrt(sent_vec.dot(sent_vec)) |
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
# --- begin patch ---
collector = PolymorphicAwareNestedObjects(using=using)
# --- end patch ---
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
model_count = {model._meta.verbose_name_plural: len(objs) for model, objs in collector.model_objs.items()}
return to_delete, model_count, perms_needed, protected | Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter. | Below is the instruction that describes the task:
### Input:
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
### Response:
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
# --- begin patch ---
collector = PolymorphicAwareNestedObjects(using=using)
# --- end patch ---
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
model_count = {model._meta.verbose_name_plural: len(objs) for model, objs in collector.model_objs.items()}
return to_delete, model_count, perms_needed, protected |
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen)) | Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
        A string, the dotted decimal netmask string. | Below is the instruction that describes the task:
### Input:
Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
### Response:
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen)) |
def show_input_endpoint(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Show an input endpoint associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f show_input_endpoint my-azure service=myservice \\
deployment=mydeployment name=SSH
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_input_endpoint function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('An endpoint name must be specified as "name"')
data = list_input_endpoints(kwargs=kwargs, call='function')
return data.get(kwargs['name'], None) | .. versionadded:: 2015.8.0
Show an input endpoint associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f show_input_endpoint my-azure service=myservice \\
        deployment=mydeployment name=SSH | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
Show an input endpoint associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f show_input_endpoint my-azure service=myservice \\
deployment=mydeployment name=SSH
### Response:
def show_input_endpoint(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Show an input endpoint associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f show_input_endpoint my-azure service=myservice \\
deployment=mydeployment name=SSH
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_input_endpoint function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('An endpoint name must be specified as "name"')
data = list_input_endpoints(kwargs=kwargs, call='function')
return data.get(kwargs['name'], None) |
def map(self, key_pattern, func, all_args, timeout=None):
'''Cache return value of multiple calls.
Args:
key_pattern (str): the key pattern to use for generating
keys for caches of the decorated function.
func (function): the function to call.
all_args (list): a list of args to be used to make calls to
the function.
timeout (int): the cache timeout
Returns:
A list of the return values of the calls.
Example::
def add(a, b):
return a + b
cache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7]
'''
results = []
keys = [
make_key(key_pattern, func, args, {})
for args in all_args
]
cached = dict(zip(keys, self.get_many(keys)))
cache_to_add = {}
for key, args in zip(keys, all_args):
val = cached[key]
if val is None:
val = func(*args)
cache_to_add[key] = val if val is not None else NONE_RESULT
if val == NONE_RESULT:
val = None
results.append(val)
if cache_to_add:
self.set_many(cache_to_add, timeout)
return results | Cache return value of multiple calls.
Args:
key_pattern (str): the key pattern to use for generating
keys for caches of the decorated function.
func (function): the function to call.
all_args (list): a list of args to be used to make calls to
the function.
timeout (int): the cache timeout
Returns:
A list of the return values of the calls.
Example::
def add(a, b):
return a + b
        cache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7] | Below is the instruction that describes the task:
### Input:
Cache return value of multiple calls.
Args:
key_pattern (str): the key pattern to use for generating
keys for caches of the decorated function.
func (function): the function to call.
all_args (list): a list of args to be used to make calls to
the function.
timeout (int): the cache timeout
Returns:
A list of the return values of the calls.
Example::
def add(a, b):
return a + b
cache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7]
### Response:
def map(self, key_pattern, func, all_args, timeout=None):
'''Cache return value of multiple calls.
Args:
key_pattern (str): the key pattern to use for generating
keys for caches of the decorated function.
func (function): the function to call.
all_args (list): a list of args to be used to make calls to
the function.
timeout (int): the cache timeout
Returns:
A list of the return values of the calls.
Example::
def add(a, b):
return a + b
cache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7]
'''
results = []
keys = [
make_key(key_pattern, func, args, {})
for args in all_args
]
cached = dict(zip(keys, self.get_many(keys)))
cache_to_add = {}
for key, args in zip(keys, all_args):
val = cached[key]
if val is None:
val = func(*args)
cache_to_add[key] = val if val is not None else NONE_RESULT
if val == NONE_RESULT:
val = None
results.append(val)
if cache_to_add:
self.set_many(cache_to_add, timeout)
return results |
def NormalizeRelativePath(path):
"""Normalize camelCase entries in path."""
path_components = path.split('/')
normalized_components = []
for component in path_components:
if re.match(r'{[A-Za-z0-9_]+}$', component):
normalized_components.append(
'{%s}' % Names.CleanName(component[1:-1]))
else:
normalized_components.append(component)
    return '/'.join(normalized_components) | Normalize camelCase entries in path. | Below is the instruction that describes the task:
### Input:
Normalize camelCase entries in path.
### Response:
def NormalizeRelativePath(path):
"""Normalize camelCase entries in path."""
path_components = path.split('/')
normalized_components = []
for component in path_components:
if re.match(r'{[A-Za-z0-9_]+}$', component):
normalized_components.append(
'{%s}' % Names.CleanName(component[1:-1]))
else:
normalized_components.append(component)
return '/'.join(normalized_components) |
def fast_distance(r1: 'Region', r2: 'Region'):
""" A quicker way of calculating approximate distance. Lower accuracy but faster results."""
    return abs(r1.x - r2.x) + abs(r1.y - r2.y) | A quicker way of calculating approximate distance. Lower accuracy but faster results. | Below is the instruction that describes the task:
### Input:
A quicker way of calculating approximate distance. Lower accuracy but faster results.
### Response:
def fast_distance(r1: 'Region', r2: 'Region'):
""" A quicker way of calculating approximate distance. Lower accuracy but faster results."""
return abs(r1.x - r2.x) + abs(r1.y - r2.y) |
def load_extension(self, module_str):
        """Load an IPython extension by its module name.
        If :func:`load_ipython_extension` returns anything, this function
        will return that object.

        :param module_str: dotted module name of the extension to import.
        :return: whatever the extension's ``load_ipython_extension`` hook
            returns (often ``None``).
        """
        from IPython.utils.syspathcontext import prepended_to_syspath
        # Import only if not already imported; a module already present in
        # sys.modules is reused as-is (no reload on repeated calls).
        if module_str not in sys.modules:
            # Temporarily put the user's extension directory first on
            # sys.path so extensions located there take precedence.
            with prepended_to_syspath(self.ipython_extension_dir):
                __import__(module_str)
        mod = sys.modules[module_str]
        # Delegate to the module's load_ipython_extension hook (if any).
        return self._call_load_ipython_extension(mod)
If :func:`load_ipython_extension` returns anything, this function
will return that object. | Below is the the instruction that describes the task:
### Input:
Load an IPython extension by its module name.
If :func:`load_ipython_extension` returns anything, this function
will return that object.
### Response:
def load_extension(self, module_str):
"""Load an IPython extension by its module name.
If :func:`load_ipython_extension` returns anything, this function
will return that object.
"""
from IPython.utils.syspathcontext import prepended_to_syspath
if module_str not in sys.modules:
with prepended_to_syspath(self.ipython_extension_dir):
__import__(module_str)
mod = sys.modules[module_str]
return self._call_load_ipython_extension(mod) |
def _redis_process_checkpoint(host, port):
    '''this helper method checks if
    redis server is available in the sys
    if not fires up one

    :param host: interface the spawned redis-server should bind to.
    :param port: port the spawned redis-server should listen on.
    '''
    try:
        # pgrep exits non-zero (raising CalledProcessError) when no
        # running redis process is found.
        subprocess.check_output("pgrep redis", shell=True)
    except Exception:
        logger.warning(
            'Your redis server is offline, fake2db will try to launch it now!',
            extra=extra_information)
        # close_fds = True argument is the flag that is responsible
        # for Popen to launch the process completely independent
        subprocess.Popen("redis-server --bind %s --port %s" % (host, port),
                         close_fds=True,
                         shell=True)
        # Give the freshly launched server a moment to start accepting
        # connections before callers attempt to use it.
        time.sleep(3)
redis server is available in the sys
if not fires up one | Below is the the instruction that describes the task:
### Input:
this helper method checks if
redis server is available in the sys
if not fires up one
### Response:
def _redis_process_checkpoint(host, port):
'''this helper method checks if
redis server is available in the sys
if not fires up one
'''
try:
subprocess.check_output("pgrep redis", shell=True)
except Exception:
logger.warning(
'Your redis server is offline, fake2db will try to launch it now!',
extra=extra_information)
# close_fds = True argument is the flag that is responsible
# for Popen to launch the process completely independent
subprocess.Popen("redis-server --bind %s --port %s" % (host, port),
close_fds=True,
shell=True)
time.sleep(3) |
def select(*cases):
    """
    Select the first case that becomes ready.
    If a default case (:class:`goless.dcase`) is present,
    return that if no other cases are ready.
    If there is no default case and no case is ready,
    block until one becomes ready.
    See Go's ``reflect.Select`` method for an analog
    (http://golang.org/pkg/reflect/#Select).
    :param cases: List of case instances, such as
      :class:`goless.rcase`, :class:`goless.scase`, or :class:`goless.dcase`.
    :return: ``(chosen case, received value)``.
      If the chosen case is not an :class:`goless.rcase`, it will be None.
    """
    # No cases at all: return None immediately.
    if len(cases) == 0:
        return
    # If the first argument is a list, it should be the only argument
    if isinstance(cases[0], list):
        if len(cases) != 1:
            raise TypeError('Select can be called either with a list of cases '
                            'or multiple case arguments, but not both.')
        cases = cases[0]
        if not cases:
            # Handle the case of an empty list as an argument,
            # and prevent the raising of a SystemError by libev.
            return
    # First sweep: return the first already-ready case; remember the
    # (at most one) default case encountered along the way.
    default = None
    for c in cases:
        if c.ready():
            return c, c.exec_()
        if isinstance(c, dcase):
            assert default is None, 'Only one default case is allowd.'
            default = c
    # Nothing was ready but a default exists: take the default branch,
    # which (like Go's select) carries no received value.
    if default is not None:
        # noinspection PyCallingNonCallable
        return default, None
    # We need to check for deadlocks before selecting.
    # We can't rely on the underlying backend to do it,
    # as we do for channels, since we don't do an actual send or recv here.
    # It's possible to still have a deadlock unless we move the check into
    # the loop, but since the check is slow
    # (gevent doesn't provide a fast way), let's leave it out here.
    if _be.would_deadlock():
        raise _Deadlock('No other tasklets running, cannot select.')
    # Busy-poll: re-check every case, yielding to the backend scheduler
    # between sweeps so other tasklets can make progress.
    while True:
        for c in cases:
            if c.ready():
                return c, c.exec_()
        _be.yield_()
If a default case (:class:`goless.dcase`) is present,
return that if no other cases are ready.
If there is no default case and no case is ready,
block until one becomes ready.
See Go's ``reflect.Select`` method for an analog
(http://golang.org/pkg/reflect/#Select).
:param cases: List of case instances, such as
:class:`goless.rcase`, :class:`goless.scase`, or :class:`goless.dcase`.
:return: ``(chosen case, received value)``.
If the chosen case is not an :class:`goless.rcase`, it will be None. | Below is the the instruction that describes the task:
### Input:
Select the first case that becomes ready.
If a default case (:class:`goless.dcase`) is present,
return that if no other cases are ready.
If there is no default case and no case is ready,
block until one becomes ready.
See Go's ``reflect.Select`` method for an analog
(http://golang.org/pkg/reflect/#Select).
:param cases: List of case instances, such as
:class:`goless.rcase`, :class:`goless.scase`, or :class:`goless.dcase`.
:return: ``(chosen case, received value)``.
If the chosen case is not an :class:`goless.rcase`, it will be None.
### Response:
def select(*cases):
"""
Select the first case that becomes ready.
If a default case (:class:`goless.dcase`) is present,
return that if no other cases are ready.
If there is no default case and no case is ready,
block until one becomes ready.
See Go's ``reflect.Select`` method for an analog
(http://golang.org/pkg/reflect/#Select).
:param cases: List of case instances, such as
:class:`goless.rcase`, :class:`goless.scase`, or :class:`goless.dcase`.
:return: ``(chosen case, received value)``.
If the chosen case is not an :class:`goless.rcase`, it will be None.
"""
if len(cases) == 0:
return
# If the first argument is a list, it should be the only argument
if isinstance(cases[0], list):
if len(cases) != 1:
raise TypeError('Select can be called either with a list of cases '
'or multiple case arguments, but not both.')
cases = cases[0]
if not cases:
# Handle the case of an empty list as an argument,
# and prevent the raising of a SystemError by libev.
return
default = None
for c in cases:
if c.ready():
return c, c.exec_()
if isinstance(c, dcase):
assert default is None, 'Only one default case is allowd.'
default = c
if default is not None:
# noinspection PyCallingNonCallable
return default, None
# We need to check for deadlocks before selecting.
# We can't rely on the underlying backend to do it,
# as we do for channels, since we don't do an actual send or recv here.
# It's possible to still have a deadlock unless we move the check into
# the loop, but since the check is slow
# (gevent doesn't provide a fast way), let's leave it out here.
if _be.would_deadlock():
raise _Deadlock('No other tasklets running, cannot select.')
while True:
for c in cases:
if c.ready():
return c, c.exec_()
_be.yield_() |
def default_instruction_to_svg(self, instruction):
    """Render ``instruction`` using only the ``default.svg`` template.

    This is the fallback used by :meth:`instruction_to_svg` when no
    dedicated file exists for an instruction: the placeholder text
    ``{instruction.type}`` inside the loaded ``default.svg`` is replaced
    and the result serialized back to an SVG string. If no
    ``default.svg`` was loaded, an empty string is returned.
    """
    return xmltodict.unparse(
        self.default_instruction_to_svg_dict(instruction))
file into account.
In case no file is found for an instruction in
:meth:`instruction_to_svg`,
this method is used to determine the default svg for it.
The content is created by replacing the text ``{instruction.type}`` in
the whole svg file named ``default.svg``.
If no file ``default.svg`` was loaded, an empty string is returned. | Below is the the instruction that describes the task:
### Input:
As :meth:`instruction_to_svg` but it only takes the ``default.svg``
file into account.
In case no file is found for an instruction in
:meth:`instruction_to_svg`,
this method is used to determine the default svg for it.
The content is created by replacing the text ``{instruction.type}`` in
the whole svg file named ``default.svg``.
If no file ``default.svg`` was loaded, an empty string is returned.
### Response:
def default_instruction_to_svg(self, instruction):
"""As :meth:`instruction_to_svg` but it only takes the ``default.svg``
file into account.
In case no file is found for an instruction in
:meth:`instruction_to_svg`,
this method is used to determine the default svg for it.
The content is created by replacing the text ``{instruction.type}`` in
the whole svg file named ``default.svg``.
If no file ``default.svg`` was loaded, an empty string is returned.
"""
svg_dict = self.default_instruction_to_svg_dict(instruction)
return xmltodict.unparse(svg_dict) |
def list_roots():
    '''
    Return all of the files names in all available environments
    '''
    # Each environment maps to a single-element list wrapping the result
    # of list_env(), matching the historical return shape of this module.
    return {saltenv: [list_env(saltenv)]
            for saltenv in __opts__['pillar_roots']}
### Input:
Return all of the files names in all available environments
### Response:
def list_roots():
'''
Return all of the files names in all available environments
'''
ret = {}
for saltenv in __opts__['pillar_roots']:
ret[saltenv] = []
ret[saltenv].append(list_env(saltenv))
return ret |
def get_event_questions(self, id, **data):
    """
    GET /events/:id/questions/

    Eventbrite lets event organizers attach custom registration questions
    that attendees fill out; use this endpoint to discover which custom
    fields a given event collects. Returns :format:`question`.
    """
    endpoint = "/events/{0}/questions/".format(id)
    return self.get(endpoint, data=data)
Eventbrite allows event organizers to add custom questions that attendees fill
out upon registration. This endpoint can be helpful for determining what
custom information is collected and available per event.
This endpoint will return :format:`question`. | Below is the the instruction that describes the task:
### Input:
GET /events/:id/questions/
Eventbrite allows event organizers to add custom questions that attendees fill
out upon registration. This endpoint can be helpful for determining what
custom information is collected and available per event.
This endpoint will return :format:`question`.
### Response:
def get_event_questions(self, id, **data):
"""
GET /events/:id/questions/
Eventbrite allows event organizers to add custom questions that attendees fill
out upon registration. This endpoint can be helpful for determining what
custom information is collected and available per event.
This endpoint will return :format:`question`.
"""
return self.get("/events/{0}/questions/".format(id), data=data) |
def parse_chinese_morphemes(seq, context=False):
    """
    Parse a Chinese syllable and return its basic structure.

    :param seq: the syllable, either as an IPA string or as a list of
        already-segmented tokens.
    :param context: if True, additionally return context strings for the
        non-empty syllable slots.
    :return: a list ``[I, M, N, C, T]`` of initial, medial, nucleus, coda
        and tone (``'-'`` for empty slots); when ``context`` is True, a
        tuple of that list and the list of non-empty context strings.
    """
    # get the tokens
    if isinstance(seq, list):
        tokens = [s for s in seq]
    else:
        tokens = lingpy.ipa2tokens(seq, merge_vowels=False)
    # get the sound classes according to the art-model
    arts = [int(x) for x in lingpy.tokens2class(tokens, _art, cldf=True)]
    # get the pro-string
    prostring = lingpy.prosodic_string(arts)
    # parse the zip of tokens and arts
    I,M,N,C,T = '','','','',''
    ini = False
    med = False
    nuc = False
    cod = False
    ton = False
    triples = [('?','?','?')]+list(zip(
        tokens,arts,prostring))+[('?','?','?')]
    for i in range(1,len(triples)-1): #enumerate(triples[1:-1]): #zip(tokens,arts,prostring):
        t,c,p = triples[i]
        _t,_c,_p = triples[i-1]
        t_,c_,p_ = triples[i+1]
        # check for initial entry first
        if p == 'A' and _t == '?':
            # now, if we have a j-sound and a vowel follows, we go directly to
            # medial environment
            if t[0] in 'jɥw':
                med = True
                ini,nuc,cod,ton = False,False,False,False
            else:
                ini = True
                # Bugfix: the original assigned to a stray name ``doc``
                # instead of ``cod`` here, leaving the coda flag stale.
                med,nuc,cod,ton = False,False,False,False
        # check for initial vowel
        elif p == 'X' and _t == '?':
            if t[0] in 'iuy' and c_ == '7':
                med = True
                ini,nuc,cod,ton = False,False,False,False
            else:
                nuc = True
                ini,med,cod,ton = False,False,False,False
        # check for medial after initial
        elif p == 'C':
            med = True
            ini,nuc,cod,ton = False,False,False,False
        # check for vowel medial
        elif p == 'X' and p_ == 'Y':
            # if we have a medial vowel, we classify it as medial
            if t in 'iyu':
                med = True
                ini,nuc,cod,ton = False,False,False,False
            else:
                nuc = True
                ini,med,cod,ton = False,False,False,False
        # check for vowel without medial
        elif p == 'X' or p == 'Y':
            # NOTE(review): ``p_ in 'LTY'`` is substring membership, so an
            # empty p_ would also match -- presumably never occurs here.
            if p_ in 'LTY' or p_ == '?':
                nuc = True
                ini,med,cod,ton = False,False,False,False
            elif p == 'Y':
                nuc = True
                ini,med,cod,ton = 4 * [False]
            else:
                cod = True
                ini,med,nuc,ton = 4 * [False]
        # check for consonant
        elif p == 'L':
            cod = True
            ini,med,nuc,ton = 4 * [False]
        # check for tone
        elif p == 'T':
            ton = True
            ini,med,nuc,cod = 4 * [False]
        # append the token to whichever slot is currently active
        if ini:
            I += t
        elif med:
            M += t
        elif nuc:
            N += t
        elif cod:
            C += t
        else:
            T += t
    # bad conversion for output, but makes what it is supposed to do
    out = [I,M,N,C,T]
    tf = lambda x: x if x else '-'
    out = [tf(x) for x in out]
    # transform tones to normal letters
    tones = dict(zip('¹²³⁴⁵⁶⁷⁸⁹⁰₁₂₃₄₅₆₇₈₉₀','1234567890123456789'))
    # now, if context is wanted, we'll yield that
    ic = '1' if [x for x in I if x in 'bdgmnŋȵɳɴ'] else '0'
    mc = '1' if [m for m in M+N if m in 'ijyɥ'] else '0'
    cc = '1' if C in 'ptkʔ' else '0'
    tc = ''.join([tones.get(x, x) for x in T])
    IC = '/'.join(['I',ic,mc,cc,tc]) if I else ''
    MC = '/'.join(['M',ic,mc,cc,tc]) if M else ''
    NC = '/'.join(['N',ic,mc,cc,tc]) if N else ''
    CC = '/'.join(['C',ic,mc,cc,tc]) if C else ''
    TC = '/'.join(['T',ic,mc,cc,tc]) if T else ''
    if context:
        return out, [x for x in [IC,MC,NC,CC,TC] if x]
    return out
### Input:
Parse a Chinese syllable and return its basic structure.
### Response:
def parse_chinese_morphemes(seq, context=False):
"""
Parse a Chinese syllable and return its basic structure.
"""
# get the tokens
if isinstance(seq, list):
tokens = [s for s in seq]
else:
tokens = lingpy.ipa2tokens(seq, merge_vowels=False)
# get the sound classes according to the art-model
arts = [int(x) for x in lingpy.tokens2class(tokens, _art, cldf=True)]
# get the pro-string
prostring = lingpy.prosodic_string(arts)
# parse the zip of tokens and arts
I,M,N,C,T = '','','','',''
ini = False
med = False
nuc = False
cod = False
ton = False
triples = [('?','?','?')]+list(zip(
tokens,arts,prostring))+[('?','?','?')]
for i in range(1,len(triples)-1): #enumerate(triples[1:-1]): #zip(tokens,arts,prostring):
t,c,p = triples[i]
_t,_c,_p = triples[i-1]
t_,c_,p_ = triples[i+1]
# check for initial entry first
if p == 'A' and _t == '?':
# now, if we have a j-sound and a vowel follows, we go directly to
# medial environment
if t[0] in 'jɥw':
med = True
ini,nuc,cod,ton = False,False,False,False
else:
ini = True
med,nuc,doc,ton = False,False,False,False
# check for initial vowel
elif p == 'X' and _t == '?':
if t[0] in 'iuy' and c_ == '7':
med = True
ini,nuc,cod,ton = False,False,False,False
else:
nuc = True
ini,med,cod,ton = False,False,False,False
# check for medial after initial
elif p == 'C':
med = True
ini,nuc,cod,ton = False,False,False,False
# check for vowel medial
elif p == 'X' and p_ == 'Y':
# if we have a medial vowel, we classify it as medial
if t in 'iyu':
med = True
ini,nuc,cod,ton = False,False,False,False
else:
nuc = True
ini,med,cod,ton = False,False,False,False
# check for vowel without medial
elif p == 'X' or p == 'Y':
if p_ in 'LTY' or p_ == '?':
nuc = True
ini,med,cod,ton = False,False,False,False
elif p == 'Y':
nuc = True
ini,med,cod,ton = 4 * [False]
else:
cod = True
ini,med,nuc,ton = 4 * [False]
# check for consonant
elif p == 'L':
cod = True
ini,med,nuc,ton = 4 * [False]
# check for tone
elif p == 'T':
ton = True
ini,med,nuc,cod = 4 * [False]
if ini:
I += t
elif med:
M += t
elif nuc:
N += t
elif cod:
C += t
else:
T += t
# bad conversion for output, but makes what it is supposed to do
out = [I,M,N,C,T]
tf = lambda x: x if x else '-'
out = [tf(x) for x in out]
# transform tones to normal letters
tones = dict(zip('¹²³⁴⁵⁶⁷⁸⁹⁰₁₂₃₄₅₆₇₈₉₀','1234567890123456789'))
# now, if context is wanted, we'll yield that
ic = '1' if [x for x in I if x in 'bdgmnŋȵɳɴ'] else '0'
mc = '1' if [m for m in M+N if m in 'ijyɥ'] else '0'
cc = '1' if C in 'ptkʔ' else '0'
tc = ''.join([tones.get(x, x) for x in T])
IC = '/'.join(['I',ic,mc,cc,tc]) if I else ''
MC = '/'.join(['M',ic,mc,cc,tc]) if M else ''
NC = '/'.join(['N',ic,mc,cc,tc]) if N else ''
CC = '/'.join(['C',ic,mc,cc,tc]) if C else ''
TC = '/'.join(['T',ic,mc,cc,tc]) if T else ''
if context:
return out, [x for x in [IC,MC,NC,CC,TC] if x]
return out |
def transformer_ada_lmpackedbase_dialog():
  """Set of hyperparameters."""
  # Start from the adaptive VQ packed base and override a few knobs.
  hparams = transformer_base_vq_ada_32ex_packed()
  hparams.batch_size = 4096
  hparams.ffn_layer = "dense_relu_dense"
  hparams.max_length = 1024
  return hparams
### Input:
Set of hyperparameters.
### Response:
def transformer_ada_lmpackedbase_dialog():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.max_length = 1024
hparams.ffn_layer = "dense_relu_dense"
hparams.batch_size = 4096
return hparams |
def docx_preprocess(docx, batch=False):
    """
    Load docx files from local filepath if not already b64 encoded.

    :param docx: either a filesystem path to a .docx file, an already
        base64-encoded document, or (with ``batch=True``) a list of such
        values.
    :param batch: when True, treat ``docx`` as a list and preprocess each
        entry individually.
    :return: base64-encoded file contents for paths that exist on disk;
        otherwise the input is passed through unchanged (assumed to be
        already encoded). A list of such results when ``batch=True``.
    """
    if batch:
        return [docx_preprocess(doc, batch=False) for doc in docx]
    if os.path.isfile(docx):
        # A filepath was provided: read and encode it. Use a context
        # manager so the handle is closed promptly (the original leaked
        # the file object until garbage collection).
        with open(docx, 'rb') as handle:
            return b64encode(handle.read())
    # assume doc is already b64 encoded
    return docx
### Input:
Load docx files from local filepath if not already b64 encoded
### Response:
def docx_preprocess(docx, batch=False):
"""
Load docx files from local filepath if not already b64 encoded
"""
if batch:
return [docx_preprocess(doc, batch=False) for doc in docx]
if os.path.isfile(docx):
# a filepath is provided, read and encode
return b64encode(open(docx, 'rb').read())
else:
# assume doc is already b64 encoded
return docx |
def readbhfv1(filename, load_data=False, bdfext='.bdf', bhfext='.bhf'):
    """Read header data from bdf/bhf file (Bessy Data Format v1)
    Input:
        filename: the name of the file
        load_data: if the matrices are to be loaded
    Output:
        bdf: the BDF header structure
    Adapted the bdf_read.m macro from Sylvio Haas.
    """
    # strip the bhf or bdf extension if there.
    if filename.endswith(bdfext):
        basename = filename[:-len(bdfext)]
    elif filename.endswith(bhfext):
        basename = filename[:-len(bhfext)]
    else:  # assume a single file of header and data.
        basename, bhfext = os.path.splitext(filename)
        bdfext = bhfext
    headername = basename + bhfext
    dataname = basename + bdfext
    bdf = {}
    bdf['his'] = []  # empty list for history
    bdf['C'] = {}  # empty list for bdf file descriptions
    # Per-category (#C?L name / #C?V value) accumulators; paired up below.
    namelists = {}
    valuelists = {}
    # NOTE(review): the header is opened in binary mode but its lines are
    # compared against str literals below -- this looks like Python 2-era
    # code; verify behavior under Python 3.
    with open(headername, 'rb') as fid:  # if fails, an exception is raised
        for line in fid:
            if not line.strip():
                continue  # empty line
            mat = line.split(None, 1)
            prefix = mat[0]
            if prefix == '#C':
                # Plain "#C key = value" metadata line.
                left, right = mat[1].split('=', 1)
                left = left.strip()
                right = right.strip()
                if left in ['xdim', 'ydim']:
                    bdf[left] = int(right)
                elif left in ['type', 'bdf']:
                    bdf[left] = right
                if left in ['Sendtime']:
                    bdf['C'][left] = float(right)
                elif left in ['xdim', 'ydim']:
                    bdf['C'][left] = int(right)
                else:
                    bdf['C'][left] = misc.parse_number(right)
            elif prefix.startswith("#H"):
                # History lines are accumulated verbatim.
                bdf['his'].append(mat[1])
            # elif prefix.startswith("#DATA"):
            # if not load_data:
            # break
            # darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim']))
            # bdf['data'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() # this weird transformation is needed to get the matrix in the same form as bdf_read.m gets it.
            # elif prefix.startswith('#ERROR'):
            # if not load_data:
            # break
            # darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim']))
            # bdf['error'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy()
            else:
                # "#C{M,G,S,T}L" lines carry names, "#C{M,G,S,T}V" lines
                # carry the corresponding values, in order.
                for prf in ['M', 'G', 'S', 'T']:
                    if prefix.startswith('#C%sL' % prf):
                        if prf not in namelists:
                            namelists[prf] = []
                        namelists[prf].extend(mat[1].split())
                    elif prefix.startswith('#C%sV' % prf):
                        if prf not in valuelists:
                            valuelists[prf] = []
                        valuelists[prf].extend([float(x)
                                                for x in mat[1].split()])
                    else:
                        continue
    # Pair up the accumulated names and values for each category.
    for dictname, prfname in zip(['M', 'CG', 'CS', 'CT'], ['M', 'G', 'S', 'T']):
        bdf[dictname] = dict(
            list(zip(namelists[prfname], valuelists[prfname])))
    bdf['__Origin__'] = 'BDFv1'
    bdf['__particle__'] = 'photon'
    if load_data:
        # NOTE(review): the data file is opened in text mode 'r' but
        # parsed as raw bytes, and the handle is never closed -- confirm
        # whether this is intentional (Python 2 heritage).
        f = open(dataname, 'r')
        try:
            s = f.read()
        except IOError as ioe:
            # an ugly bug (M$ KB899149) in W!nd0w$ causes an error if loading too
            # large a file from a network drive and opening it read-only.
            if ioe.errno == 22:
                f.close()
                try:
                    # one work-around is to open it read-write.
                    f = open(dataname, 'r+b')
                    s = f.read()
                except IOError:
                    # if this does not work, inform the user to either obtain
                    # write permission for that file or copy it to a local
                    # drive
                    f.close()
                    raise IOError(22, """
You were probably trying to open a read-only file from a network drive on
Windows, weren\'t you? There is a bug in Windows causing this error
(see http://support.microsoft.com/default.aspx?scid=kb;en-us;899149).
To work around this, please either obtain write permission for that file
(I won't write anything to it, I promise!!!) or copy it to a local drive.
Sorry for the inconvenience.""", ioe.filename)
        # Locate every "#NAME[xsize:ysize]" dataset header in the raw data.
        datasets = re.findall(
            '#\s*(?P<name>\w+)\[(?P<xsize>\d+):(?P<ysize>\d+)\]', s)
        names = [d[0] for d in datasets]
        xsize = [int(d[1]) for d in datasets]
        ysize = [int(d[2]) for d in datasets]
        dt = np.dtype(bdf['type'])
        for i in range(len(datasets)):
            # Each dataset spans from its own header up to the next one
            # (or the end of the file).
            start = s.find('#%s' % names[i])
            if i < len(datasets) - 1:
                end = s.find('#%s' % (names[i + 1]))
            else:
                end = len(s)
            s1 = s[start:end]
            datasize = xsize[i] * ysize[i] * dt.itemsize
            # NOTE(review): np.fromstring is deprecated in modern NumPy in
            # favor of np.frombuffer -- consider migrating.
            if datasize > len(s1):
                # assume we are dealing with a BOOL matrix
                bdf[names[i]] = np.fromstring(
                    s1[-xsize[i] * ysize[i]:], dtype=np.uint8)
            else:
                bdf[names[i]] = np.fromstring(
                    s1[-xsize[i] * ysize[i] * dt.itemsize:], dtype=dt)
            # conversion: Matlab saves the array in Fortran-style ordering (columns first).
            # Python however loads in C-style: rows first. We need to take care:
            # 1) reshape from linear to (ysize,xsize) and not (xsize,ysize)
            # 2) transpose (swaps columns and rows)
            # After these operations, we only have to rotate this counter-clockwise by 90
            # degrees because bdf2_write rotates by +270 degrees before saving.
            bdf[names[i]] = np.rot90(
                bdf[names[i]].reshape((ysize[i], xsize[i]), order='F'), 1)
    return bdf
Input:
filename: the name of the file
load_data: if the matrices are to be loaded
Output:
bdf: the BDF header structure
Adapted the bdf_read.m macro from Sylvio Haas. | Below is the the instruction that describes the task:
### Input:
Read header data from bdf/bhf file (Bessy Data Format v1)
Input:
filename: the name of the file
load_data: if the matrices are to be loaded
Output:
bdf: the BDF header structure
Adapted the bdf_read.m macro from Sylvio Haas.
### Response:
def readbhfv1(filename, load_data=False, bdfext='.bdf', bhfext='.bhf'):
"""Read header data from bdf/bhf file (Bessy Data Format v1)
Input:
filename: the name of the file
load_data: if the matrices are to be loaded
Output:
bdf: the BDF header structure
Adapted the bdf_read.m macro from Sylvio Haas.
"""
# strip the bhf or bdf extension if there.
if filename.endswith(bdfext):
basename = filename[:-len(bdfext)]
elif filename.endswith(bhfext):
basename = filename[:-len(bhfext)]
else: # assume a single file of header and data.
basename, bhfext = os.path.splitext(filename)
bdfext = bhfext
headername = basename + bhfext
dataname = basename + bdfext
bdf = {}
bdf['his'] = [] # empty list for history
bdf['C'] = {} # empty list for bdf file descriptions
namelists = {}
valuelists = {}
with open(headername, 'rb') as fid: # if fails, an exception is raised
for line in fid:
if not line.strip():
continue # empty line
mat = line.split(None, 1)
prefix = mat[0]
if prefix == '#C':
left, right = mat[1].split('=', 1)
left = left.strip()
right = right.strip()
if left in ['xdim', 'ydim']:
bdf[left] = int(right)
elif left in ['type', 'bdf']:
bdf[left] = right
if left in ['Sendtime']:
bdf['C'][left] = float(right)
elif left in ['xdim', 'ydim']:
bdf['C'][left] = int(right)
else:
bdf['C'][left] = misc.parse_number(right)
elif prefix.startswith("#H"):
bdf['his'].append(mat[1])
# elif prefix.startswith("#DATA"):
# if not load_data:
# break
# darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim']))
# bdf['data'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() # this weird transformation is needed to get the matrix in the same form as bdf_read.m gets it.
# elif prefix.startswith('#ERROR'):
# if not load_data:
# break
# darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim']))
# bdf['error'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy()
else:
for prf in ['M', 'G', 'S', 'T']:
if prefix.startswith('#C%sL' % prf):
if prf not in namelists:
namelists[prf] = []
namelists[prf].extend(mat[1].split())
elif prefix.startswith('#C%sV' % prf):
if prf not in valuelists:
valuelists[prf] = []
valuelists[prf].extend([float(x)
for x in mat[1].split()])
else:
continue
for dictname, prfname in zip(['M', 'CG', 'CS', 'CT'], ['M', 'G', 'S', 'T']):
bdf[dictname] = dict(
list(zip(namelists[prfname], valuelists[prfname])))
bdf['__Origin__'] = 'BDFv1'
bdf['__particle__'] = 'photon'
if load_data:
f = open(dataname, 'r')
try:
s = f.read()
except IOError as ioe:
# an ugly bug (M$ KB899149) in W!nd0w$ causes an error if loading too
# large a file from a network drive and opening it read-only.
if ioe.errno == 22:
f.close()
try:
# one work-around is to open it read-write.
f = open(dataname, 'r+b')
s = f.read()
except IOError:
# if this does not work, inform the user to either obtain
# write permission for that file or copy it to a local
# drive
f.close()
raise IOError(22, """
You were probably trying to open a read-only file from a network drive on
Windows, weren\'t you? There is a bug in Windows causing this error
(see http://support.microsoft.com/default.aspx?scid=kb;en-us;899149).
To work around this, please either obtain write permission for that file
(I won't write anything to it, I promise!!!) or copy it to a local drive.
Sorry for the inconvenience.""", ioe.filename)
datasets = re.findall(
'#\s*(?P<name>\w+)\[(?P<xsize>\d+):(?P<ysize>\d+)\]', s)
names = [d[0] for d in datasets]
xsize = [int(d[1]) for d in datasets]
ysize = [int(d[2]) for d in datasets]
dt = np.dtype(bdf['type'])
for i in range(len(datasets)):
start = s.find('#%s' % names[i])
if i < len(datasets) - 1:
end = s.find('#%s' % (names[i + 1]))
else:
end = len(s)
s1 = s[start:end]
datasize = xsize[i] * ysize[i] * dt.itemsize
if datasize > len(s1):
# assume we are dealing with a BOOL matrix
bdf[names[i]] = np.fromstring(
s1[-xsize[i] * ysize[i]:], dtype=np.uint8)
else:
bdf[names[i]] = np.fromstring(
s1[-xsize[i] * ysize[i] * dt.itemsize:], dtype=dt)
# conversion: Matlab saves the array in Fortran-style ordering (columns first).
# Python however loads in C-style: rows first. We need to take care:
# 1) reshape from linear to (ysize,xsize) and not (xsize,ysize)
# 2) transpose (swaps columns and rows)
# After these operations, we only have to rotate this counter-clockwise by 90
# degrees because bdf2_write rotates by +270 degrees before saving.
bdf[names[i]] = np.rot90(
bdf[names[i]].reshape((ysize[i], xsize[i]), order='F'), 1)
return bdf |
def chapters(self, title):
        """
        Get a list of chapters for a visual novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.

        :param str title: The title of the novel you want chapters from
        :return OrderedDict: An OrderedDict which contains the chapters found for the visual novel supplied
        :raises requests.HTTPError: if the project page does not return 200
        """
        # Spaces are replaced with underscores to match MediaWiki titles.
        r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")),
                         headers=self.header)
        if r.status_code != 200:
            raise requests.HTTPError("Not Found")
        else:
            parsed = soup(r.text, 'html.parser')
            dd = parsed.find_all("a")
            # Collect anchors whose href matches the chapter pattern,
            # skipping image links and in-page (#fragment) anchors.
            volumes = []
            for link in dd:
                if 'class' in link.attrs:
                    if 'image' in link.get('class'):
                        continue
                if 'href' in link.attrs:
                    if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'):
                        volumes.append(link)
            # Group the links by chapter number (leading zeros stripped),
            # preferring the title attribute over the link text when
            # matching against separate_regex.
            seplist = OrderedDict()
            for item in volumes:
                if 'title' in item.attrs:
                    result = re.search(self.separate_regex, item.get('title').lower())
                else:
                    result = re.search(self.separate_regex, item.text.lower())
                if result and result.groups():
                    if result.group('chapter').lstrip('0') in seplist:
                        seplist[result.group('chapter').lstrip('0')].append([item.get('href'),
                                                                            item.get('title') if 'title' in item.attrs else item.text])
                    else:
                        seplist[result.group('chapter').lstrip('0')] = [[item.get('href'),
                                                                         item.get('title') if 'title' in item.attrs else item.text]]
            return seplist
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the visual novel supplied | Below is the the instruction that describes the task:
### Input:
Get a list of chapters for a visual novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the visual novel supplied
### Response:
def chapters(self, title):
"""
Get a list of chapters for a visual novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the visual novel supplied
"""
r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")),
headers=self.header)
if r.status_code != 200:
raise requests.HTTPError("Not Found")
else:
parsed = soup(r.text, 'html.parser')
dd = parsed.find_all("a")
volumes = []
for link in dd:
if 'class' in link.attrs:
if 'image' in link.get('class'):
continue
if 'href' in link.attrs:
if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'):
volumes.append(link)
seplist = OrderedDict()
for item in volumes:
if 'title' in item.attrs:
result = re.search(self.separate_regex, item.get('title').lower())
else:
result = re.search(self.separate_regex, item.text.lower())
if result and result.groups():
if result.group('chapter').lstrip('0') in seplist:
seplist[result.group('chapter').lstrip('0')].append([item.get('href'),
item.get('title') if 'title' in item.attrs else item.text])
else:
seplist[result.group('chapter').lstrip('0')] = [[item.get('href'),
item.get('title') if 'title' in item.attrs else item.text]]
return seplist |
def query(self, time_indices):
"""Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded).
"""
if self._disposed:
raise ValueError(
'Cannot query: this _WatchStore instance is already disposed')
if not isinstance(time_indices, (tuple, list)):
time_indices = [time_indices]
output = []
for time_index in time_indices:
if isinstance(self._data[time_index], _TensorValueDiscarded):
output.append(None)
else:
data_item = self._data[time_index]
if (hasattr(data_item, 'dtype') and
tensor_helper.translate_dtype(data_item.dtype) == 'string'):
_, _, data_item = tensor_helper.array_view(data_item)
data_item = np.array(
tensor_helper.process_buffers_for_display(data_item),
dtype=np.object)
output.append(data_item)
return output | Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded). | Below is the instruction that describes the task:
### Input:
Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded).
### Response:
def query(self, time_indices):
    """Query the values at given time indices.
    Args:
      time_indices: 0-based time indices to query, as a `list` of `int`.
    Returns:
      Values as a list of `numpy.ndarray` (for time indices in memory) or
      `None` (for time indices discarded).
    Raises:
      ValueError: If this _WatchStore instance has already been disposed.
    """
    if self._disposed:
        raise ValueError(
            'Cannot query: this _WatchStore instance is already disposed')
    # Accept a single index for convenience; normalize to a list.
    if not isinstance(time_indices, (tuple, list)):
        time_indices = [time_indices]
    output = []
    for time_index in time_indices:
        # Discarded values are stored as _TensorValueDiscarded
        # placeholders; report them to the caller as None.
        if isinstance(self._data[time_index], _TensorValueDiscarded):
            output.append(None)
        else:
            data_item = self._data[time_index]
            # String tensors need their byte buffers converted into a
            # displayable object array before being returned.
            if (hasattr(data_item, 'dtype') and
                tensor_helper.translate_dtype(data_item.dtype) == 'string'):
                _, _, data_item = tensor_helper.array_view(data_item)
                data_item = np.array(
                    tensor_helper.process_buffers_for_display(data_item),
                    dtype=np.object)
            output.append(data_item)
    return output
def convert_regex_to_flask_path(url_path):
"""
Converts a regex matching url to one that can be used with flask
"""
for token in ["$"]:
url_path = url_path.replace(token, "")
def caller(reg):
match_name, match_pattern = reg.groups()
return '<regex("{0}"):{1}>'.format(match_pattern, match_name)
url_path = re.sub("\(\?P<(.*?)>(.*?)\)", caller, url_path)
if url_path.endswith("/?"):
# Flask does own handling of trailing slashes
url_path = url_path.rstrip("/?")
return url_path | Converts a regex matching url to one that can be used with flask | Below is the the instruction that describes the task:
### Input:
Converts a regex matching url to one that can be used with flask
### Response:
def convert_regex_to_flask_path(url_path):
    """
    Convert a regex-style URL pattern into a Flask route path.

    ``(?P<name>pattern)`` groups become ``<regex("pattern"):name>``
    converters, ``$`` anchors are dropped, and a trailing ``/?`` is
    removed because Flask does its own trailing-slash handling.

    :param str url_path: regex URL pattern, e.g. ``"/item/(?P<id>\\d+)/?$"``
    :return: the equivalent Flask route string
    :rtype: str
    """
    # Flask paths have no end-of-string anchor.
    url_path = url_path.replace("$", "")

    def _to_converter(match):
        group_name, group_pattern = match.groups()
        return '<regex("{0}"):{1}>'.format(group_pattern, group_name)

    # Raw string: the pattern itself is full of backslash escapes.
    url_path = re.sub(r"\(\?P<(.*?)>(.*?)\)", _to_converter, url_path)
    if url_path.endswith("/?"):
        # Drop exactly the optional-trailing-slash marker. The previous
        # rstrip("/?") also ate any run of legitimate trailing slashes
        # preceding it (e.g. "x//?" -> "x" instead of "x/").
        url_path = url_path[:-2]
    return url_path
def connect(self):
"""Connect to LASAF through a CAM-socket."""
self.socket = socket.socket()
self.socket.connect((self.host, self.port))
self.socket.settimeout(False) # non-blocking
sleep(self.delay) # wait for response
self.welcome_msg = self.socket.recv(
self.buffer_size) | Connect to LASAF through a CAM-socket. | Below is the the instruction that describes the task:
### Input:
Connect to LASAF through a CAM-socket.
### Response:
def connect(self):
    """Open a CAM socket connection to LASAF and store its greeting.

    Side effects: sets ``self.socket`` (switched to non-blocking mode)
    and ``self.welcome_msg`` (the server's initial response).
    """
    self.socket = socket.socket()
    self.socket.connect((self.host, self.port))
    # Non-blocking mode: subsequent recv calls return immediately.
    self.socket.settimeout(False)
    # Give LASAF a moment to answer before reading the greeting.
    sleep(self.delay)
    self.welcome_msg = self.socket.recv(self.buffer_size)
def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):
"""
Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Specie.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
"""
shannon_radii = []
for s in species_list:
try:
radius = s.get_shannon_radius(cn_roman)
shannon_radii.append({
'species': s, 'radius': radius,
'radii_diff': radius - radius_to_compare})
except KeyError:
pass
return shannon_radii | Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Specie.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument. | Below is the the instruction that describes the task:
### Input:
Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Specie.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
### Response:
def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):
"""
Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Specie.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
"""
shannon_radii = []
for s in species_list:
try:
radius = s.get_shannon_radius(cn_roman)
shannon_radii.append({
'species': s, 'radius': radius,
'radii_diff': radius - radius_to_compare})
except KeyError:
pass
return shannon_radii |
def _addSpecfile(self, specfile, path):
"""Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files
"""
datatypeStatus = {'rm': False, 'ci': False, 'smi': False, 'sai': False,
'si': False
}
self.info[specfile] = {'path': path, 'status': datatypeStatus} | Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files | Below is the the instruction that describes the task:
### Input:
Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files
### Response:
def _addSpecfile(self, specfile, path):
"""Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files
"""
datatypeStatus = {'rm': False, 'ci': False, 'smi': False, 'sai': False,
'si': False
}
self.info[specfile] = {'path': path, 'status': datatypeStatus} |
def _set_widget_background_color(widget, color):
"""
Changes the base color of a widget (background).
:param widget: widget to modify
:param color: the color to apply
"""
pal = widget.palette()
pal.setColor(pal.Base, color)
widget.setPalette(pal) | Changes the base color of a widget (background).
:param widget: widget to modify
:param color: the color to apply | Below is the the instruction that describes the task:
### Input:
Changes the base color of a widget (background).
:param widget: widget to modify
:param color: the color to apply
### Response:
def _set_widget_background_color(widget, color):
    """Apply *color* as the widget's base (background) palette color.

    :param widget: widget whose palette is modified in place
    :param color: the color to apply
    """
    palette = widget.palette()
    # "Base" is the background role for text-entry style widgets.
    palette.setColor(palette.Base, color)
    widget.setPalette(palette)
def bgp_normalize_table_data(bgp_table):
"""The 'show bgp all summary vrf all' table can have entries that wrap multiple lines.
2001:db8:4:701::2
4 65535 163664 163693 145 0 0 3w2d 3
2001:db8:e0:dd::1
4 10 327491 327278 145 0 0 3w1d 4
Normalize this so the line wrap doesn't exit.
"""
bgp_table = bgp_table.strip()
bgp_multiline_pattern = r"({})\s*\n".format(IPV4_OR_IPV6_REGEX)
# Strip out the newline
return re.sub(bgp_multiline_pattern, r"\1", bgp_table) | The 'show bgp all summary vrf all' table can have entries that wrap multiple lines.
2001:db8:4:701::2
4 65535 163664 163693 145 0 0 3w2d 3
2001:db8:e0:dd::1
4 10 327491 327278 145 0 0 3w1d 4
Normalize this so the line wrap doesn't exit. | Below is the the instruction that describes the task:
### Input:
The 'show bgp all summary vrf all' table can have entries that wrap multiple lines.
2001:db8:4:701::2
4 65535 163664 163693 145 0 0 3w2d 3
2001:db8:e0:dd::1
4 10 327491 327278 145 0 0 3w1d 4
Normalize this so the line wrap doesn't exit.
### Response:
def bgp_normalize_table_data(bgp_table):
    """Join wrapped rows in 'show bgp all summary vrf all' output.

    Long IPv4/IPv6 neighbor addresses push the rest of the row onto the
    following line, e.g.::

        2001:db8:4:701::2
                        4 65535  163664  163693      145    0    0     3w2d 3

    Remove the newline after the address so every entry occupies one line.
    """
    wrapped_neighbor = r"({})\s*\n".format(IPV4_OR_IPV6_REGEX)
    # \1 keeps the address itself while dropping the line break.
    return re.sub(wrapped_neighbor, r"\1", bgp_table.strip())
def check(self, window_name, object_name):
"""
Check item.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
# FIXME: Check for object type
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
if object_handle.AXValue == 1:
# Already checked
return 1
# AXPress doesn't work with Instruments
# So did the following work around
self._grabfocus(object_handle)
x, y, width, height = self._getobjectsize(object_handle)
# Mouse left click on the object
# Note: x + width/2, y + height / 2 doesn't work
self.generatemouseevent(x + width / 2, y + height / 2, "b1c")
return 1 | Check item.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer | Below is the the instruction that describes the task:
### Input:
Check item.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
### Response:
def check(self, window_name, object_name):
    """
    Check item.
    @param window_name: Window name to look for, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to look for, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @return: 1 on success.
    @rtype: integer
    @raise LdtpServerException: if the object is disabled.
    """
    # FIXME: Check for object type
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    # AXValue == 1 means the box is already ticked; nothing to do.
    if object_handle.AXValue == 1:
        # Already checked
        return 1
    # AXPress doesn't work with Instruments
    # So did the following work around
    self._grabfocus(object_handle)
    x, y, width, height = self._getobjectsize(object_handle)
    # Mouse left click on the object
    # Note: x + width/2, y + height / 2 doesn't work
    self.generatemouseevent(x + width / 2, y + height / 2, "b1c")
    return 1
def walk_propagation_tree(logger):
"""
Walk through the propagation hierarchy of the given logger.
:param logger: The logger whose hierarchy to walk (a
:class:`~logging.Logger` object).
:returns: A generator of :class:`~logging.Logger` objects.
.. note:: This uses the undocumented :class:`logging.Logger.parent`
attribute to find higher level loggers, however it won't
raise an exception if the attribute isn't available.
"""
while isinstance(logger, logging.Logger):
# Yield the logger to our caller.
yield logger
# Check if the logger has propagation enabled.
if logger.propagate:
# Continue with the parent logger. We use getattr() because the
# `parent' attribute isn't documented so properly speaking we
# shouldn't break if it's not available.
logger = getattr(logger, 'parent', None)
else:
# The propagation chain stops here.
logger = None | Walk through the propagation hierarchy of the given logger.
:param logger: The logger whose hierarchy to walk (a
:class:`~logging.Logger` object).
:returns: A generator of :class:`~logging.Logger` objects.
.. note:: This uses the undocumented :class:`logging.Logger.parent`
attribute to find higher level loggers, however it won't
raise an exception if the attribute isn't available. | Below is the the instruction that describes the task:
### Input:
Walk through the propagation hierarchy of the given logger.
:param logger: The logger whose hierarchy to walk (a
:class:`~logging.Logger` object).
:returns: A generator of :class:`~logging.Logger` objects.
.. note:: This uses the undocumented :class:`logging.Logger.parent`
attribute to find higher level loggers, however it won't
raise an exception if the attribute isn't available.
### Response:
def walk_propagation_tree(logger):
    """
    Yield *logger* followed by each ancestor it propagates records to.

    :param logger: The starting :class:`~logging.Logger` object.
    :returns: A generator of :class:`~logging.Logger` objects.
    .. note:: This relies on the undocumented
              :class:`logging.Logger.parent` attribute; if the attribute
              is missing the walk simply stops instead of raising.
    """
    current = logger
    while isinstance(current, logging.Logger):
        yield current
        # Propagation disabled: the chain ends at this logger.
        if not current.propagate:
            break
        # getattr() because `parent` is undocumented and may be absent.
        current = getattr(current, 'parent', None)
logger = None |
def update_ar_listing_catalog(portal):
"""Add Indexes/Metadata to bika_catalog_analysisrequest_listing
"""
cat_id = CATALOG_ANALYSIS_REQUEST_LISTING
catalog = api.get_tool(cat_id)
logger.info("Updating Indexes/Metadata of Catalog '{}'".format(cat_id))
indexes_to_add = [
# name, attribute, metatype
("getClientID", "getClientID", "FieldIndex"),
("is_active", "is_active", "BooleanIndex"),
("is_received", "is_received", "BooleanIndex"),
]
metadata_to_add = [
"getClientID",
]
for index in indexes_to_add:
add_index(portal, cat_id, *index)
for metadata in metadata_to_add:
refresh = metadata not in catalog.schema()
add_metadata(portal, cat_id, metadata, refresh_catalog=refresh) | Add Indexes/Metadata to bika_catalog_analysisrequest_listing | Below is the the instruction that describes the task:
### Input:
Add Indexes/Metadata to bika_catalog_analysisrequest_listing
### Response:
def update_ar_listing_catalog(portal):
    """Add Indexes/Metadata to bika_catalog_analysisrequest_listing

    :param portal: the portal object the catalog tool is fetched from
    """
    cat_id = CATALOG_ANALYSIS_REQUEST_LISTING
    catalog = api.get_tool(cat_id)
    logger.info("Updating Indexes/Metadata of Catalog '{}'".format(cat_id))
    indexes_to_add = [
        # name, attribute, metatype
        ("getClientID", "getClientID", "FieldIndex"),
        ("is_active", "is_active", "BooleanIndex"),
        ("is_received", "is_received", "BooleanIndex"),
    ]
    metadata_to_add = [
        "getClientID",
    ]
    for index in indexes_to_add:
        add_index(portal, cat_id, *index)
    for metadata in metadata_to_add:
        # Only refresh the catalog when the metadata column is actually
        # new; re-refreshing for an existing column would be wasted work.
        refresh = metadata not in catalog.schema()
        add_metadata(portal, cat_id, metadata, refresh_catalog=refresh)
def iter_random_chars(bits,
keyspace=string.ascii_letters + string.digits + '#/.',
rng=None):
""" Yields a cryptographically secure random key of desired @bits of
entropy within @keyspace using :class:random.SystemRandom
@bits: (#int) minimum bits of entropy
@keyspace: (#str) or iterable allowed output chars
..
from vital.security import iter_rand
for char in iter_rand(512):
do_something_with(char)
"""
if bits < 8:
raise ValueError('Bits cannot be <8')
else:
chars = chars_in(bits, keyspace)
rng = rng or random.SystemRandom()
for char in range(int(ceil(chars))):
yield rng.choice(keyspace) | Yields a cryptographically secure random key of desired @bits of
entropy within @keyspace using :class:random.SystemRandom
@bits: (#int) minimum bits of entropy
@keyspace: (#str) or iterable allowed output chars
..
from vital.security import iter_rand
for char in iter_rand(512):
do_something_with(char) | Below is the the instruction that describes the task:
### Input:
Yields a cryptographically secure random key of desired @bits of
entropy within @keyspace using :class:random.SystemRandom
@bits: (#int) minimum bits of entropy
@keyspace: (#str) or iterable allowed output chars
..
from vital.security import iter_rand
for char in iter_rand(512):
do_something_with(char)
### Response:
def iter_random_chars(bits,
                      keyspace=string.ascii_letters + string.digits + '#/.',
                      rng=None):
    """ Yields single characters of a cryptographically secure random key
        holding at least @bits of entropy, drawn from @keyspace.
        @bits: (#int) minimum bits of entropy, must be >= 8
        @keyspace: (#str) or iterable of allowed output chars
        @rng: optional random generator; defaults to
              :class:random.SystemRandom
        ..
            from vital.security import iter_rand
            for char in iter_rand(512):
                do_something_with(char)
    """
    if bits < 8:
        raise ValueError('Bits cannot be <8')
    generator = rng or random.SystemRandom()
    # chars_in() converts the entropy requirement into a character count
    # for this keyspace; round up so we never fall short.
    needed = int(ceil(chars_in(bits, keyspace)))
    for _ in range(needed):
        yield generator.choice(keyspace)
def remove_bond(self, particle_pair):
"""Deletes a bond between a pair of Particles
Parameters
----------
particle_pair : indexable object, length=2, dtype=mb.Compound
The pair of Particles to remove the bond between
"""
from mbuild.port import Port
if self.root.bond_graph is None or not self.root.bond_graph.has_edge(
*particle_pair):
warn("Bond between {} and {} doesn't exist!".format(*particle_pair))
return
self.root.bond_graph.remove_edge(*particle_pair)
bond_vector = particle_pair[0].pos - particle_pair[1].pos
if np.allclose(bond_vector, np.zeros(3)):
warn("Particles {} and {} overlap! Ports will not be added."
"".format(*particle_pair))
return
distance = np.linalg.norm(bond_vector)
particle_pair[0].parent.add(Port(anchor=particle_pair[0],
orientation=-bond_vector,
separation=distance / 2), 'port[$]')
particle_pair[1].parent.add(Port(anchor=particle_pair[1],
orientation=bond_vector,
separation=distance / 2), 'port[$]') | Deletes a bond between a pair of Particles
Parameters
----------
particle_pair : indexable object, length=2, dtype=mb.Compound
The pair of Particles to remove the bond between | Below is the the instruction that describes the task:
### Input:
Deletes a bond between a pair of Particles
Parameters
----------
particle_pair : indexable object, length=2, dtype=mb.Compound
The pair of Particles to remove the bond between
### Response:
def remove_bond(self, particle_pair):
    """Deletes a bond between a pair of Particles
    Parameters
    ----------
    particle_pair : indexable object, length=2, dtype=mb.Compound
        The pair of Particles to remove the bond between
    """
    from mbuild.port import Port
    # Nothing to do when there is no bond graph or no edge between the pair.
    if self.root.bond_graph is None or not self.root.bond_graph.has_edge(
            *particle_pair):
        warn("Bond between {} and {} doesn't exist!".format(*particle_pair))
        return
    self.root.bond_graph.remove_edge(*particle_pair)
    bond_vector = particle_pair[0].pos - particle_pair[1].pos
    # Overlapping particles give a zero-length bond vector, so no
    # meaningful Port orientation can be derived.
    if np.allclose(bond_vector, np.zeros(3)):
        warn("Particles {} and {} overlap! Ports will not be added."
             "".format(*particle_pair))
        return
    distance = np.linalg.norm(bond_vector)
    # Add a Port on each particle, oriented along the removed bond and
    # positioned halfway between the two particles.
    particle_pair[0].parent.add(Port(anchor=particle_pair[0],
                                     orientation=-bond_vector,
                                     separation=distance / 2), 'port[$]')
    particle_pair[1].parent.add(Port(anchor=particle_pair[1],
                                     orientation=bond_vector,
                                     separation=distance / 2), 'port[$]')
def _needs_new_cc_config_for_update(old_template, old_version, new_template, new_version):
"""
Given two templates and their respective versions, return True if a new cookiecutter
config needs to be obtained from the user
"""
if old_template != new_template:
return True
else:
return _cookiecutter_configs_have_changed(new_template,
old_version,
new_version) | Given two templates and their respective versions, return True if a new cookiecutter
config needs to be obtained from the user | Below is the the instruction that describes the task:
### Input:
Given two templates and their respective versions, return True if a new cookiecutter
config needs to be obtained from the user
### Response:
def _needs_new_cc_config_for_update(old_template, old_version, new_template, new_version):
"""
Given two templates and their respective versions, return True if a new cookiecutter
config needs to be obtained from the user
"""
if old_template != new_template:
return True
else:
return _cookiecutter_configs_have_changed(new_template,
old_version,
new_version) |
def _initialize_client_from_environment():
''' Initialize a KeenClient instance using environment variables. '''
global _client, project_id, write_key, read_key, master_key, base_url
if _client is None:
# check environment for project ID and keys
project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
read_key = read_key or os.environ.get("KEEN_READ_KEY")
master_key = master_key or os.environ.get("KEEN_MASTER_KEY")
base_url = base_url or os.environ.get("KEEN_BASE_URL")
if not project_id:
raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
_client = KeenClient(project_id,
write_key=write_key,
read_key=read_key,
master_key=master_key,
base_url=base_url) | Initialize a KeenClient instance using environment variables. | Below is the the instruction that describes the task:
### Input:
Initialize a KeenClient instance using environment variables.
### Response:
def _initialize_client_from_environment():
    ''' Initialize a KeenClient instance using environment variables.

    Lazily builds the module-level `_client` singleton; later calls are
    no-ops. Explicitly set module-level values take precedence over the
    corresponding KEEN_* environment variables.
    '''
    global _client, project_id, write_key, read_key, master_key, base_url
    if _client is None:
        # check environment for project ID and keys
        project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
        write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
        read_key = read_key or os.environ.get("KEEN_READ_KEY")
        master_key = master_key or os.environ.get("KEEN_MASTER_KEY")
        base_url = base_url or os.environ.get("KEEN_BASE_URL")
        # The project ID is the only strictly required setting.
        if not project_id:
            raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
        _client = KeenClient(project_id,
                             write_key=write_key,
                             read_key=read_key,
                             master_key=master_key,
                             base_url=base_url)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).iteritems():
exec "self.config.%s = v"%k in locals(), globals() | self.parsed_data->self.config | Below is the the instruction that describes the task:
### Input:
self.parsed_data->self.config
### Response:
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).iteritems():
exec "self.config.%s = v"%k in locals(), globals() |
def validatePrepare(self, prepare: Prepare, sender: str) -> bool:
"""
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
"""
key = (prepare.viewNo, prepare.ppSeqNo)
primaryStatus = self.isPrimaryForMsg(prepare)
ppReq = self.getPrePrepare(*key)
# If a non primary replica and receiving a PREPARE request before a
# PRE-PREPARE request, then proceed
# PREPARE should not be sent from primary
if self.isMsgFromPrimary(prepare, sender):
raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)
# If non primary replica
if primaryStatus is False:
if self.prepares.hasPrepareFrom(prepare, sender):
raise SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare)
# If PRE-PREPARE not received for the PREPARE, might be slow
# network
if not ppReq:
self.enqueue_prepare(prepare, sender)
self._setup_last_ordered_for_non_master()
return False
# If primary replica
if primaryStatus is True:
if self.prepares.hasPrepareFrom(prepare, sender):
raise SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare)
# If PRE-PREPARE was not sent for this PREPARE, certainly
# malicious behavior
elif not ppReq:
raise SuspiciousNode(
sender, Suspicions.UNKNOWN_PR_SENT, prepare)
if primaryStatus is None and not ppReq:
self.enqueue_prepare(prepare, sender)
self._setup_last_ordered_for_non_master()
return False
if prepare.digest != ppReq.digest:
raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
elif prepare.stateRootHash != ppReq.stateRootHash:
raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
prepare)
elif prepare.txnRootHash != ppReq.txnRootHash:
raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
prepare)
elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
prepare)
try:
self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq)
except Exception as ex:
self.logger.warning('{} encountered exception in replica '
'hook {} : {}'.
format(self, ReplicaHooks.VALIDATE_PR, ex))
raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION,
prepare)
# BLS multi-sig:
self._bls_bft_replica.validate_prepare(prepare, sender)
return True | Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise | Below is the the instruction that describes the task:
### Input:
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
### Response:
def validatePrepare(self, prepare: Prepare, sender: str) -> bool:
    """
    Return whether the PREPARE specified is valid.
    :param prepare: the PREPARE to validate
    :param sender: the name of the node that sent the PREPARE
    :return: True if PREPARE is valid, False otherwise (False means the
        message was enqueued for later processing, not rejected)
    :raises SuspiciousNode: if the PREPARE indicates faulty or malicious
        behavior by the sender
    """
    key = (prepare.viewNo, prepare.ppSeqNo)
    # True if this replica is primary for the message, False if it is a
    # non-primary, None if the primary is not yet known.
    primaryStatus = self.isPrimaryForMsg(prepare)
    ppReq = self.getPrePrepare(*key)
    # If a non primary replica and receiving a PREPARE request before a
    # PRE-PREPARE request, then proceed
    # PREPARE should not be sent from primary
    if self.isMsgFromPrimary(prepare, sender):
        raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)
    # If non primary replica
    if primaryStatus is False:
        if self.prepares.hasPrepareFrom(prepare, sender):
            raise SuspiciousNode(
                sender, Suspicions.DUPLICATE_PR_SENT, prepare)
        # If PRE-PREPARE not received for the PREPARE, might be slow
        # network
        if not ppReq:
            self.enqueue_prepare(prepare, sender)
            self._setup_last_ordered_for_non_master()
            return False
    # If primary replica
    if primaryStatus is True:
        if self.prepares.hasPrepareFrom(prepare, sender):
            raise SuspiciousNode(
                sender, Suspicions.DUPLICATE_PR_SENT, prepare)
        # If PRE-PREPARE was not sent for this PREPARE, certainly
        # malicious behavior
        elif not ppReq:
            raise SuspiciousNode(
                sender, Suspicions.UNKNOWN_PR_SENT, prepare)
    # Primary not yet known and no PRE-PREPARE: defer processing.
    if primaryStatus is None and not ppReq:
        self.enqueue_prepare(prepare, sender)
        self._setup_last_ordered_for_non_master()
        return False
    # The PREPARE must agree with the matching PRE-PREPARE on the digest
    # and every root hash; any mismatch is suspicious.
    if prepare.digest != ppReq.digest:
        raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
    elif prepare.stateRootHash != ppReq.stateRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
                             prepare)
    elif prepare.txnRootHash != ppReq.txnRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
                             prepare)
    elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
                             prepare)
    # Plugin hooks may veto the PREPARE; a hook failure is reported as
    # suspicious behavior instead of crashing the replica.
    try:
        self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq)
    except Exception as ex:
        self.logger.warning('{} encountered exception in replica '
                            'hook {} : {}'.
                            format(self, ReplicaHooks.VALIDATE_PR, ex))
        raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION,
                             prepare)
    # BLS multi-sig:
    self._bls_bft_replica.validate_prepare(prepare, sender)
    return True
def send_file(self, url, name, **fileinfo):
"""Send a pre-uploaded file to the room.
See http://matrix.org/docs/spec/r0.2.0/client_server.html#m-file for
fileinfo.
Args:
url (str): The mxc url of the file.
name (str): The filename of the image.
fileinfo (): Extra information about the file
"""
return self.client.api.send_content(
self.room_id, url, name, "m.file",
extra_information=fileinfo
) | Send a pre-uploaded file to the room.
See http://matrix.org/docs/spec/r0.2.0/client_server.html#m-file for
fileinfo.
Args:
url (str): The mxc url of the file.
name (str): The filename of the image.
fileinfo (): Extra information about the file | Below is the the instruction that describes the task:
### Input:
Send a pre-uploaded file to the room.
See http://matrix.org/docs/spec/r0.2.0/client_server.html#m-file for
fileinfo.
Args:
url (str): The mxc url of the file.
name (str): The filename of the image.
fileinfo (): Extra information about the file
### Response:
def send_file(self, url, name, **fileinfo):
"""Send a pre-uploaded file to the room.
See http://matrix.org/docs/spec/r0.2.0/client_server.html#m-file for
fileinfo.
Args:
url (str): The mxc url of the file.
name (str): The filename of the image.
fileinfo (): Extra information about the file
"""
return self.client.api.send_content(
self.room_id, url, name, "m.file",
extra_information=fileinfo
) |
def transaction(self, tx_hash):
"""The transaction details endpoint provides information on a single
transaction.
`GET /transactions/{hash}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-single.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:return: A single transaction's details.
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}'.format(tx_hash=tx_hash)
return self.query(endpoint) | The transaction details endpoint provides information on a single
transaction.
`GET /transactions/{hash}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-single.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:return: A single transaction's details.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
The transaction details endpoint provides information on a single
transaction.
`GET /transactions/{hash}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-single.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:return: A single transaction's details.
:rtype: dict
### Response:
def transaction(self, tx_hash):
"""The transaction details endpoint provides information on a single
transaction.
`GET /transactions/{hash}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-single.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:return: A single transaction's details.
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}'.format(tx_hash=tx_hash)
return self.query(endpoint) |
def make_configure_tab(self):
""" initial set up of configure tab"""
# Setup the choice between single and multicolor
modeframe = tk.Frame(self.tab_configure)
self.mode = tk.IntVar()
singlecolor = tk.Radiobutton(modeframe, text="Single color", variable=self.mode,
value=1, command=lambda: self.disable_multicolor())
multicolor = tk.Radiobutton(modeframe, text="Three color", variable=self.mode,
value=3, command=lambda: self.disable_singlecolor())
self.mode.set(3)
singlecolor.pack(side=tk.LEFT)
multicolor.pack(side=tk.LEFT)
updatebutton = tk.Button(master=modeframe, text="Update",
command=self.update_button_action)
updatebutton.pack(side=tk.RIGHT)
modeframe.grid(row=0, column=0)
self.setup_multicolor()
self.setup_singlecolor() | initial set up of configure tab | Below is the the instruction that describes the task:
### Input:
initial set up of configure tab
### Response:
def make_configure_tab(self):
""" initial set up of configure tab"""
# Setup the choice between single and multicolor
modeframe = tk.Frame(self.tab_configure)
self.mode = tk.IntVar()
singlecolor = tk.Radiobutton(modeframe, text="Single color", variable=self.mode,
value=1, command=lambda: self.disable_multicolor())
multicolor = tk.Radiobutton(modeframe, text="Three color", variable=self.mode,
value=3, command=lambda: self.disable_singlecolor())
self.mode.set(3)
singlecolor.pack(side=tk.LEFT)
multicolor.pack(side=tk.LEFT)
updatebutton = tk.Button(master=modeframe, text="Update",
command=self.update_button_action)
updatebutton.pack(side=tk.RIGHT)
modeframe.grid(row=0, column=0)
self.setup_multicolor()
self.setup_singlecolor() |
def _create_xml_node(cls):
"""Create XML node from "_xml_map".
"""
try:
xml_map = cls._xml_map
except AttributeError:
raise ValueError("This model has no XML definition")
return _create_xml_node(
xml_map.get('name', cls.__name__),
xml_map.get("prefix", None),
xml_map.get("ns", None)
) | Create XML node from "_xml_map". | Below is the the instruction that describes the task:
### Input:
Create XML node from "_xml_map".
### Response:
def _create_xml_node(cls):
"""Create XML node from "_xml_map".
"""
try:
xml_map = cls._xml_map
except AttributeError:
raise ValueError("This model has no XML definition")
return _create_xml_node(
xml_map.get('name', cls.__name__),
xml_map.get("prefix", None),
xml_map.get("ns", None)
) |
def remove(self, cb):
"""Removes a callback."""
if cb in self.callbacks:
self.callbacks.remove(cb)
new_monitor = 0
for c in self.callbacks:
new_monitor |= c.bit
if new_monitor != self.monitor:
self.monitor = new_monitor
yield from self.pi._pigpio_aio_command(
_PI_CMD_NB, self.handle, self.monitor) | Removes a callback. | Below is the the instruction that describes the task:
### Input:
Removes a callback.
### Response:
def remove(self, cb):
"""Removes a callback."""
if cb in self.callbacks:
self.callbacks.remove(cb)
new_monitor = 0
for c in self.callbacks:
new_monitor |= c.bit
if new_monitor != self.monitor:
self.monitor = new_monitor
yield from self.pi._pigpio_aio_command(
_PI_CMD_NB, self.handle, self.monitor) |
def create_gzip_cache(pelican):
'''Create a gzip cache file for every file that a webserver would
reasonably want to cache (e.g., text type files).
:param pelican: The Pelican instance
'''
for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']):
for name in filenames:
if should_compress(name):
filepath = os.path.join(dirpath, name)
create_gzip_file(filepath, should_overwrite(pelican.settings)) | Create a gzip cache file for every file that a webserver would
reasonably want to cache (e.g., text type files).
:param pelican: The Pelican instance | Below is the the instruction that describes the task:
### Input:
Create a gzip cache file for every file that a webserver would
reasonably want to cache (e.g., text type files).
:param pelican: The Pelican instance
### Response:
def create_gzip_cache(pelican):
'''Create a gzip cache file for every file that a webserver would
reasonably want to cache (e.g., text type files).
:param pelican: The Pelican instance
'''
for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']):
for name in filenames:
if should_compress(name):
filepath = os.path.join(dirpath, name)
create_gzip_file(filepath, should_overwrite(pelican.settings)) |
def setVerbosity(self, verbose):
"""
Set verbosity of the SenseApi object.
@param verbose (boolean) - True of False
@return (boolean) - Boolean indicating whether setVerbosity succeeded
"""
if not (verbose == True or verbose == False):
return False
else:
self.__verbose__ = verbose
return True | Set verbosity of the SenseApi object.
@param verbose (boolean) - True of False
@return (boolean) - Boolean indicating whether setVerbosity succeeded | Below is the the instruction that describes the task:
### Input:
Set verbosity of the SenseApi object.
@param verbose (boolean) - True of False
@return (boolean) - Boolean indicating whether setVerbosity succeeded
### Response:
def setVerbosity(self, verbose):
"""
Set verbosity of the SenseApi object.
@param verbose (boolean) - True of False
@return (boolean) - Boolean indicating whether setVerbosity succeeded
"""
if not (verbose == True or verbose == False):
return False
else:
self.__verbose__ = verbose
return True |
def remove_pattern(self, pattern):
"""
Removes given pattern from the Model.
:param pattern: Pattern.
:type pattern: unicode
:return: Method success.
:rtype: bool
"""
for index, node in enumerate(self.root_node.children):
if node.name != pattern:
continue
LOGGER.debug("> Removing '{0}' at '{1}' index.".format(pattern, index))
self.beginRemoveRows(self.get_node_index(self.root_node), index, index)
pattern_node = self.root_node.child(index)
self.root_node.remove_child(index)
self.endRemoveRows()
self.pattern_removed.emit(pattern_node)
return True | Removes given pattern from the Model.
:param pattern: Pattern.
:type pattern: unicode
:return: Method success.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Removes given pattern from the Model.
:param pattern: Pattern.
:type pattern: unicode
:return: Method success.
:rtype: bool
### Response:
def remove_pattern(self, pattern):
"""
Removes given pattern from the Model.
:param pattern: Pattern.
:type pattern: unicode
:return: Method success.
:rtype: bool
"""
for index, node in enumerate(self.root_node.children):
if node.name != pattern:
continue
LOGGER.debug("> Removing '{0}' at '{1}' index.".format(pattern, index))
self.beginRemoveRows(self.get_node_index(self.root_node), index, index)
pattern_node = self.root_node.child(index)
self.root_node.remove_child(index)
self.endRemoveRows()
self.pattern_removed.emit(pattern_node)
return True |
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host) | Get filenames of raw GSOD station data. | Below is the the instruction that describes the task:
### Input:
Get filenames of raw GSOD station data.
### Response:
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host) |
def logCdfNormal(z):
"""
Robust implementations of log cdf of a standard normal.
@see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
in C from Matthias Seeger.
"""
if (abs(z) < ERF_CODY_LIMIT1):
# Phi(z) approx (1+y R_3(y^2))/2, y=z/sqrt(2)
return np.log1p((z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z)) - M_LN2
elif (z < 0.0):
# Phi(z) approx N(z)Q(-z)/(-z), z<0
return logPdfNormal(z) - np.log(-z) + np.log(_erfRationalHelper(-z))
else:
return np.log1p(-(np.exp(logPdfNormal(z))) * _erfRationalHelper(z) / z) | Robust implementations of log cdf of a standard normal.
@see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
in C from Matthias Seeger. | Below is the the instruction that describes the task:
### Input:
Robust implementations of log cdf of a standard normal.
@see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
in C from Matthias Seeger.
### Response:
def logCdfNormal(z):
"""
Robust implementations of log cdf of a standard normal.
@see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
in C from Matthias Seeger.
"""
if (abs(z) < ERF_CODY_LIMIT1):
# Phi(z) approx (1+y R_3(y^2))/2, y=z/sqrt(2)
return np.log1p((z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z)) - M_LN2
elif (z < 0.0):
# Phi(z) approx N(z)Q(-z)/(-z), z<0
return logPdfNormal(z) - np.log(-z) + np.log(_erfRationalHelper(-z))
else:
return np.log1p(-(np.exp(logPdfNormal(z))) * _erfRationalHelper(z) / z) |
def hybridize(self, active=True, **kwargs):
"""Activates or deactivates `HybridBlock` s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
"""
if self._children and all(isinstance(c, HybridBlock) for c in self._children.values()):
warnings.warn(
"All children of this Sequential layer '%s' are HybridBlocks. Consider "
"using HybridSequential for the best performance."%self.prefix, stacklevel=2)
super(Sequential, self).hybridize(active, **kwargs) | Activates or deactivates `HybridBlock` s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator. | Below is the the instruction that describes the task:
### Input:
Activates or deactivates `HybridBlock` s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
### Response:
def hybridize(self, active=True, **kwargs):
"""Activates or deactivates `HybridBlock` s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
"""
if self._children and all(isinstance(c, HybridBlock) for c in self._children.values()):
warnings.warn(
"All children of this Sequential layer '%s' are HybridBlocks. Consider "
"using HybridSequential for the best performance."%self.prefix, stacklevel=2)
super(Sequential, self).hybridize(active, **kwargs) |
def geom_reflect(g, nv):
""" Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Transform the geometry and return
refl_g = np.dot(mtx_refl(nv, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return refl_g | Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring | Below is the the instruction that describes the task:
### Input:
Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring
### Response:
def geom_reflect(g, nv):
""" Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Transform the geometry and return
refl_g = np.dot(mtx_refl(nv, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return refl_g |
def QA_fetch_get_hkstock_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==31 or market==48') | [summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB | Below is the the instruction that describes the task:
### Input:
[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
### Response:
def QA_fetch_get_hkstock_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==31 or market==48') |
def pop_events(self, regex_pattern, timeout):
"""Pop events whose names match a regex pattern.
If such event(s) exist, pop one event from each event queue that
satisfies the condition. Otherwise, wait for an event that satisfies
the condition to occur, with timeout.
Results are sorted by timestamp in ascending order.
Args:
regex_pattern: The regular expression pattern that an event name
should match in order to be popped.
timeout: Number of seconds to wait for events in case no event
matching the condition exits when the function is called.
Returns:
Events whose names match a regex pattern.
Empty if none exist and the wait timed out.
Raises:
IllegalStateError: Raised if pop is called before the dispatcher
starts polling.
queue.Empty: Raised if no event was found before time out.
"""
if not self.started:
raise IllegalStateError(
"Dispatcher needs to be started before popping.")
deadline = time.time() + timeout
while True:
#TODO: fix the sleep loop
results = self._match_and_pop(regex_pattern)
if len(results) != 0 or time.time() > deadline:
break
time.sleep(1)
if len(results) == 0:
raise queue.Empty('Timeout after {}s waiting for event: {}'.format(
timeout, regex_pattern))
return sorted(results, key=lambda event: event['time']) | Pop events whose names match a regex pattern.
If such event(s) exist, pop one event from each event queue that
satisfies the condition. Otherwise, wait for an event that satisfies
the condition to occur, with timeout.
Results are sorted by timestamp in ascending order.
Args:
regex_pattern: The regular expression pattern that an event name
should match in order to be popped.
timeout: Number of seconds to wait for events in case no event
matching the condition exits when the function is called.
Returns:
Events whose names match a regex pattern.
Empty if none exist and the wait timed out.
Raises:
IllegalStateError: Raised if pop is called before the dispatcher
starts polling.
queue.Empty: Raised if no event was found before time out. | Below is the the instruction that describes the task:
### Input:
Pop events whose names match a regex pattern.
If such event(s) exist, pop one event from each event queue that
satisfies the condition. Otherwise, wait for an event that satisfies
the condition to occur, with timeout.
Results are sorted by timestamp in ascending order.
Args:
regex_pattern: The regular expression pattern that an event name
should match in order to be popped.
timeout: Number of seconds to wait for events in case no event
matching the condition exits when the function is called.
Returns:
Events whose names match a regex pattern.
Empty if none exist and the wait timed out.
Raises:
IllegalStateError: Raised if pop is called before the dispatcher
starts polling.
queue.Empty: Raised if no event was found before time out.
### Response:
def pop_events(self, regex_pattern, timeout):
"""Pop events whose names match a regex pattern.
If such event(s) exist, pop one event from each event queue that
satisfies the condition. Otherwise, wait for an event that satisfies
the condition to occur, with timeout.
Results are sorted by timestamp in ascending order.
Args:
regex_pattern: The regular expression pattern that an event name
should match in order to be popped.
timeout: Number of seconds to wait for events in case no event
matching the condition exits when the function is called.
Returns:
Events whose names match a regex pattern.
Empty if none exist and the wait timed out.
Raises:
IllegalStateError: Raised if pop is called before the dispatcher
starts polling.
queue.Empty: Raised if no event was found before time out.
"""
if not self.started:
raise IllegalStateError(
"Dispatcher needs to be started before popping.")
deadline = time.time() + timeout
while True:
#TODO: fix the sleep loop
results = self._match_and_pop(regex_pattern)
if len(results) != 0 or time.time() > deadline:
break
time.sleep(1)
if len(results) == 0:
raise queue.Empty('Timeout after {}s waiting for event: {}'.format(
timeout, regex_pattern))
return sorted(results, key=lambda event: event['time']) |
def _handleModemNotification(self, lines):
""" Handler for unsolicited notifications from the modem
This method simply spawns a separate thread to handle the actual notification
(in order to release the read thread so that the handlers are able to write back to the modem, etc)
:param lines The lines that were read
"""
threading.Thread(target=self.__threadedHandleModemNotification, kwargs={'lines': lines}).start() | Handler for unsolicited notifications from the modem
This method simply spawns a separate thread to handle the actual notification
(in order to release the read thread so that the handlers are able to write back to the modem, etc)
:param lines The lines that were read | Below is the the instruction that describes the task:
### Input:
Handler for unsolicited notifications from the modem
This method simply spawns a separate thread to handle the actual notification
(in order to release the read thread so that the handlers are able to write back to the modem, etc)
:param lines The lines that were read
### Response:
def _handleModemNotification(self, lines):
""" Handler for unsolicited notifications from the modem
This method simply spawns a separate thread to handle the actual notification
(in order to release the read thread so that the handlers are able to write back to the modem, etc)
:param lines The lines that were read
"""
threading.Thread(target=self.__threadedHandleModemNotification, kwargs={'lines': lines}).start() |
def copy_with_new_atts(self, **attributes):
"""Returns a new FmtStr with the same content but new formatting"""
return FmtStr(*[Chunk(bfs.s, bfs.atts.extend(attributes))
for bfs in self.chunks]) | Returns a new FmtStr with the same content but new formatting | Below is the the instruction that describes the task:
### Input:
Returns a new FmtStr with the same content but new formatting
### Response:
def copy_with_new_atts(self, **attributes):
"""Returns a new FmtStr with the same content but new formatting"""
return FmtStr(*[Chunk(bfs.s, bfs.atts.extend(attributes))
for bfs in self.chunks]) |
def generate_folds(node_label_matrix, labelled_node_indices, number_of_categories, percentage, number_of_folds=10):
"""
Form the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds.
"""
number_of_labeled_nodes = labelled_node_indices.size
training_set_size = int(np.ceil(percentage*number_of_labeled_nodes/100))
####################################################################################################################
# Generate folds
####################################################################################################################
train_list = list()
test_list = list()
for trial in np.arange(number_of_folds):
train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
training_set_size,
number_of_categories,
trial)
train = labelled_node_indices[train]
test = labelled_node_indices[test]
train_list.append(train)
test_list.append(test)
folds = ((train, test) for train, test in zip(train_list, test_list))
return folds | Form the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds. | Below is the the instruction that describes the task:
### Input:
Form the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds.
### Response:
def generate_folds(node_label_matrix, labelled_node_indices, number_of_categories, percentage, number_of_folds=10):
"""
Form the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds.
"""
number_of_labeled_nodes = labelled_node_indices.size
training_set_size = int(np.ceil(percentage*number_of_labeled_nodes/100))
####################################################################################################################
# Generate folds
####################################################################################################################
train_list = list()
test_list = list()
for trial in np.arange(number_of_folds):
train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
training_set_size,
number_of_categories,
trial)
train = labelled_node_indices[train]
test = labelled_node_indices[test]
train_list.append(train)
test_list.append(test)
folds = ((train, test) for train, test in zip(train_list, test_list))
return folds |
def register_success(self, nick, message, channel, cmd_channel):
"""\
Received registration acknowledgement from the BotnetBot, as well as the
name of the command channel, so join up and indicate that registration
succeeded
"""
# the boss will tell what channel to join
self.channel = cmd_channel
self.conn.join(self.channel)
# indicate that registered so we'll stop trying
self.registered.set() | \
Received registration acknowledgement from the BotnetBot, as well as the
name of the command channel, so join up and indicate that registration
succeeded | Below is the the instruction that describes the task:
### Input:
\
Received registration acknowledgement from the BotnetBot, as well as the
name of the command channel, so join up and indicate that registration
succeeded
### Response:
def register_success(self, nick, message, channel, cmd_channel):
"""\
Received registration acknowledgement from the BotnetBot, as well as the
name of the command channel, so join up and indicate that registration
succeeded
"""
# the boss will tell what channel to join
self.channel = cmd_channel
self.conn.join(self.channel)
# indicate that registered so we'll stop trying
self.registered.set() |
def delete_policy(name, policy_name, region=None, key=None, keyid=None,
profile=None):
'''
Delete an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.delete_policy myelb mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
return True
try:
conn.delete_lb_policy(name, policy_name)
log.info('Deleted policy %s on ELB %s', policy_name, name)
return True
except boto.exception.BotoServerError as e:
log.error('Failed to delete policy %s on ELB %s: %s',
policy_name, name, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False | Delete an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.delete_policy myelb mypolicy | Below is the the instruction that describes the task:
### Input:
Delete an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.delete_policy myelb mypolicy
### Response:
def delete_policy(name, policy_name, region=None, key=None, keyid=None,
profile=None):
'''
Delete an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.delete_policy myelb mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
return True
try:
conn.delete_lb_policy(name, policy_name)
log.info('Deleted policy %s on ELB %s', policy_name, name)
return True
except boto.exception.BotoServerError as e:
log.error('Failed to delete policy %s on ELB %s: %s',
policy_name, name, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False |
def init_environment(self):
"""Configure the user's environment.
"""
env = os.environ
# These two ensure 'ls' produces nice coloring on BSD-derived systems
env['TERM'] = 'xterm-color'
env['CLICOLOR'] = '1'
# Since normal pagers don't work at all (over pexpect we don't have
# single-key control of the subprocess), try to disable paging in
# subprocesses as much as possible.
env['PAGER'] = 'cat'
env['GIT_PAGER'] = 'cat'
# And install the payload version of page.
install_payload_page() | Configure the user's environment. | Below is the the instruction that describes the task:
### Input:
Configure the user's environment.
### Response:
def init_environment(self):
"""Configure the user's environment.
"""
env = os.environ
# These two ensure 'ls' produces nice coloring on BSD-derived systems
env['TERM'] = 'xterm-color'
env['CLICOLOR'] = '1'
# Since normal pagers don't work at all (over pexpect we don't have
# single-key control of the subprocess), try to disable paging in
# subprocesses as much as possible.
env['PAGER'] = 'cat'
env['GIT_PAGER'] = 'cat'
# And install the payload version of page.
install_payload_page() |
def add_noise(weights, other_weights):
'''add noise to the layer.
'''
w_range = np.ptp(other_weights.flatten())
noise_range = NOISE_RATIO * w_range
noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
return np.add(noise, weights) | add noise to the layer. | Below is the the instruction that describes the task:
### Input:
add noise to the layer.
### Response:
def add_noise(weights, other_weights):
'''add noise to the layer.
'''
w_range = np.ptp(other_weights.flatten())
noise_range = NOISE_RATIO * w_range
noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
return np.add(noise, weights) |
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and
(isinstance(asset, (Asset, ContinuousFuture)))) | Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup. | Below is the the instruction that describes the task:
### Input:
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
### Response:
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and
(isinstance(asset, (Asset, ContinuousFuture)))) |
def generate_keypair(keypair_file):
'''generate_keypair is used by some of the helpers that need a keypair.
The function should be used if the client doesn't have the attribute
self.key. We generate the key and return it.
We use pycryptodome (3.7.2)
Parameters
=========
keypair_file: fullpath to where to save keypair
'''
from Crypto.PublicKey import RSA
key = RSA.generate(2048)
# Ensure helper directory exists
keypair_dir = os.path.dirname(keypair_file)
if not os.path.exists(keypair_dir):
os.makedirs(keypair_dir)
# Save key
with open(keypair_file, 'wb') as filey:
filey.write(key.exportKey('PEM'))
return key | generate_keypair is used by some of the helpers that need a keypair.
The function should be used if the client doesn't have the attribute
self.key. We generate the key and return it.
We use pycryptodome (3.7.2)
Parameters
=========
keypair_file: fullpath to where to save keypair | Below is the the instruction that describes the task:
### Input:
generate_keypair is used by some of the helpers that need a keypair.
The function should be used if the client doesn't have the attribute
self.key. We generate the key and return it.
We use pycryptodome (3.7.2)
Parameters
=========
keypair_file: fullpath to where to save keypair
### Response:
def generate_keypair(keypair_file):
'''generate_keypair is used by some of the helpers that need a keypair.
The function should be used if the client doesn't have the attribute
self.key. We generate the key and return it.
We use pycryptodome (3.7.2)
Parameters
=========
keypair_file: fullpath to where to save keypair
'''
from Crypto.PublicKey import RSA
key = RSA.generate(2048)
# Ensure helper directory exists
keypair_dir = os.path.dirname(keypair_file)
if not os.path.exists(keypair_dir):
os.makedirs(keypair_dir)
# Save key
with open(keypair_file, 'wb') as filey:
filey.write(key.exportKey('PEM'))
return key |
def _convert_to_storage_entity_with_encryption_metadata(
self, options, store_raw_metadata, sa, entity, vio, is_file,
container, dir, file_snapshot):
# type: (SourcePath, StorageCredentials, any, bool, StorageAccount,
# any, blobxfer.models.metadata.VectoredStripe, bool, str,
# str) -> StorageEntity
"""Convert entity into StorageEntity with encryption metadata if avail
:param SourcePath self: this
:param StorageCredentials creds: storage creds
:param object options: download or synccopy options
:param bool store_raw_metadata: store raw metadata
:param StorageAccount sa: storage account
:param object entity: Storage File or Blob object
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool is_file: is a file object
:param str container: container
:param str dir: Azure File directory structure
:rtype: StorageEntity
:return: Azure storage entity object
"""
if (not store_raw_metadata and
blobxfer.models.crypto.EncryptionMetadata.
encryption_metadata_exists(entity.metadata)):
ed = blobxfer.models.crypto.EncryptionMetadata()
ed.convert_from_json(
entity.metadata, entity.name, options.rsa_private_key)
else:
ed = None
ase = blobxfer.models.azure.StorageEntity(container, ed)
if is_file:
ase.populate_from_file(
sa, entity, dir, vio=vio,
store_raw_metadata=store_raw_metadata, snapshot=file_snapshot)
else:
ase.populate_from_blob(
sa, entity, vio=vio, store_raw_metadata=store_raw_metadata)
return ase | Convert entity into StorageEntity with encryption metadata if avail
:param SourcePath self: this
:param StorageCredentials creds: storage creds
:param object options: download or synccopy options
:param bool store_raw_metadata: store raw metadata
:param StorageAccount sa: storage account
:param object entity: Storage File or Blob object
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool is_file: is a file object
:param str container: container
:param str dir: Azure File directory structure
:rtype: StorageEntity
:return: Azure storage entity object | Below is the the instruction that describes the task:
### Input:
Convert entity into StorageEntity with encryption metadata if avail
:param SourcePath self: this
:param StorageCredentials creds: storage creds
:param object options: download or synccopy options
:param bool store_raw_metadata: store raw metadata
:param StorageAccount sa: storage account
:param object entity: Storage File or Blob object
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool is_file: is a file object
:param str container: container
:param str dir: Azure File directory structure
:rtype: StorageEntity
:return: Azure storage entity object
### Response:
def _convert_to_storage_entity_with_encryption_metadata(
self, options, store_raw_metadata, sa, entity, vio, is_file,
container, dir, file_snapshot):
# type: (SourcePath, StorageCredentials, any, bool, StorageAccount,
# any, blobxfer.models.metadata.VectoredStripe, bool, str,
# str) -> StorageEntity
"""Convert entity into StorageEntity with encryption metadata if avail
:param SourcePath self: this
:param StorageCredentials creds: storage creds
:param object options: download or synccopy options
:param bool store_raw_metadata: store raw metadata
:param StorageAccount sa: storage account
:param object entity: Storage File or Blob object
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool is_file: is a file object
:param str container: container
:param str dir: Azure File directory structure
:rtype: StorageEntity
:return: Azure storage entity object
"""
if (not store_raw_metadata and
blobxfer.models.crypto.EncryptionMetadata.
encryption_metadata_exists(entity.metadata)):
ed = blobxfer.models.crypto.EncryptionMetadata()
ed.convert_from_json(
entity.metadata, entity.name, options.rsa_private_key)
else:
ed = None
ase = blobxfer.models.azure.StorageEntity(container, ed)
if is_file:
ase.populate_from_file(
sa, entity, dir, vio=vio,
store_raw_metadata=store_raw_metadata, snapshot=file_snapshot)
else:
ase.populate_from_blob(
sa, entity, vio=vio, store_raw_metadata=store_raw_metadata)
return ase |
def map_alleles(self, mapping, copy=True):
"""Transform alleles via a mapping.
Parameters
----------
mapping : ndarray, int8, shape (n_variants, max_allele)
An array defining the allele mapping for each variant.
copy : bool, optional
If True, return a new array; if False, apply mapping in place
(only applies for arrays with dtype int8; all other dtypes
require a copy).
Returns
-------
gm : GenotypeArray
Examples
--------
>>> import allel
>>> import numpy as np
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[1, 2], [2, 1]],
... [[2, 2], [-1, -1]]], dtype='i1')
>>> mapping = np.array([[1, 2, 0],
... [2, 0, 1],
... [2, 1, 0],
... [0, 2, 1]], dtype='i1')
>>> g.map_alleles(mapping)
<GenotypeArray shape=(4, 2, 2) dtype=int8>
1/1 1/2
2/1 0/0
1/0 0/1
1/1 ./.
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(4, 2) dtype=int8>
0/0 0/2 1/2 2/2
>>> v.map_alleles(mapping)
<GenotypeVector shape=(4, 2) dtype=int8>
1/1 2/1 1/0 1/1
Notes
-----
If a mask has been set, it is ignored by this function.
For arrays with dtype int8 an optimised implementation is used which is
faster and uses far less memory. It is recommended to convert arrays to
dtype int8 where possible before calling this method.
See Also
--------
create_allele_mapping
"""
h = self.to_haplotypes()
hm = h.map_alleles(mapping, copy=copy)
if self.ndim == 2:
gm = GenotypeVector(hm)
else:
gm = hm.to_genotypes(ploidy=self.ploidy)
return gm | Transform alleles via a mapping.
Parameters
----------
mapping : ndarray, int8, shape (n_variants, max_allele)
An array defining the allele mapping for each variant.
copy : bool, optional
If True, return a new array; if False, apply mapping in place
(only applies for arrays with dtype int8; all other dtypes
require a copy).
Returns
-------
gm : GenotypeArray
Examples
--------
>>> import allel
>>> import numpy as np
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[1, 2], [2, 1]],
... [[2, 2], [-1, -1]]], dtype='i1')
>>> mapping = np.array([[1, 2, 0],
... [2, 0, 1],
... [2, 1, 0],
... [0, 2, 1]], dtype='i1')
>>> g.map_alleles(mapping)
<GenotypeArray shape=(4, 2, 2) dtype=int8>
1/1 1/2
2/1 0/0
1/0 0/1
1/1 ./.
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(4, 2) dtype=int8>
0/0 0/2 1/2 2/2
>>> v.map_alleles(mapping)
<GenotypeVector shape=(4, 2) dtype=int8>
1/1 2/1 1/0 1/1
Notes
-----
If a mask has been set, it is ignored by this function.
For arrays with dtype int8 an optimised implementation is used which is
faster and uses far less memory. It is recommended to convert arrays to
dtype int8 where possible before calling this method.
See Also
--------
create_allele_mapping | Below is the the instruction that describes the task:
### Input:
Transform alleles via a mapping.
Parameters
----------
mapping : ndarray, int8, shape (n_variants, max_allele)
An array defining the allele mapping for each variant.
copy : bool, optional
If True, return a new array; if False, apply mapping in place
(only applies for arrays with dtype int8; all other dtypes
require a copy).
Returns
-------
gm : GenotypeArray
Examples
--------
>>> import allel
>>> import numpy as np
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[1, 2], [2, 1]],
... [[2, 2], [-1, -1]]], dtype='i1')
>>> mapping = np.array([[1, 2, 0],
... [2, 0, 1],
... [2, 1, 0],
... [0, 2, 1]], dtype='i1')
>>> g.map_alleles(mapping)
<GenotypeArray shape=(4, 2, 2) dtype=int8>
1/1 1/2
2/1 0/0
1/0 0/1
1/1 ./.
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(4, 2) dtype=int8>
0/0 0/2 1/2 2/2
>>> v.map_alleles(mapping)
<GenotypeVector shape=(4, 2) dtype=int8>
1/1 2/1 1/0 1/1
Notes
-----
If a mask has been set, it is ignored by this function.
For arrays with dtype int8 an optimised implementation is used which is
faster and uses far less memory. It is recommended to convert arrays to
dtype int8 where possible before calling this method.
See Also
--------
create_allele_mapping
### Response:
def map_alleles(self, mapping, copy=True):
"""Transform alleles via a mapping.
Parameters
----------
mapping : ndarray, int8, shape (n_variants, max_allele)
An array defining the allele mapping for each variant.
copy : bool, optional
If True, return a new array; if False, apply mapping in place
(only applies for arrays with dtype int8; all other dtypes
require a copy).
Returns
-------
gm : GenotypeArray
Examples
--------
>>> import allel
>>> import numpy as np
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[1, 2], [2, 1]],
... [[2, 2], [-1, -1]]], dtype='i1')
>>> mapping = np.array([[1, 2, 0],
... [2, 0, 1],
... [2, 1, 0],
... [0, 2, 1]], dtype='i1')
>>> g.map_alleles(mapping)
<GenotypeArray shape=(4, 2, 2) dtype=int8>
1/1 1/2
2/1 0/0
1/0 0/1
1/1 ./.
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(4, 2) dtype=int8>
0/0 0/2 1/2 2/2
>>> v.map_alleles(mapping)
<GenotypeVector shape=(4, 2) dtype=int8>
1/1 2/1 1/0 1/1
Notes
-----
If a mask has been set, it is ignored by this function.
For arrays with dtype int8 an optimised implementation is used which is
faster and uses far less memory. It is recommended to convert arrays to
dtype int8 where possible before calling this method.
See Also
--------
create_allele_mapping
"""
h = self.to_haplotypes()
hm = h.map_alleles(mapping, copy=copy)
if self.ndim == 2:
gm = GenotypeVector(hm)
else:
gm = hm.to_genotypes(ploidy=self.ploidy)
return gm |
def send(self, ws, seq):
"""
Sends heartbeat message to Discord
Attributes:
ws: Websocket connection to discord
seq: Sequence number of heartbeat
"""
payload = {'op': 1, 'd': seq}
payload = json.dumps(payload)
logger.debug("Sending heartbeat with payload {}".format(payload))
ws.send(payload)
return | Sends heartbeat message to Discord
Attributes:
ws: Websocket connection to discord
seq: Sequence number of heartbeat | Below is the the instruction that describes the task:
### Input:
Sends heartbeat message to Discord
Attributes:
ws: Websocket connection to discord
seq: Sequence number of heartbeat
### Response:
def send(self, ws, seq):
"""
Sends heartbeat message to Discord
Attributes:
ws: Websocket connection to discord
seq: Sequence number of heartbeat
"""
payload = {'op': 1, 'd': seq}
payload = json.dumps(payload)
logger.debug("Sending heartbeat with payload {}".format(payload))
ws.send(payload)
return |
def _main():
""" Command line interface for testing.
"""
import pprint
import tempfile
try:
image = sys.argv[1]
except IndexError:
print("Usage: python -m pyrobase.webservice.imgur <url>")
else:
try:
pprint.pprint(copy_image_from_url(image, cache_dir=tempfile.gettempdir()))
except UploadError as exc:
print("Upload error. %s" % exc) | Command line interface for testing. | Below is the the instruction that describes the task:
### Input:
Command line interface for testing.
### Response:
def _main():
""" Command line interface for testing.
"""
import pprint
import tempfile
try:
image = sys.argv[1]
except IndexError:
print("Usage: python -m pyrobase.webservice.imgur <url>")
else:
try:
pprint.pprint(copy_image_from_url(image, cache_dir=tempfile.gettempdir()))
except UploadError as exc:
print("Upload error. %s" % exc) |
def _add_trace_frame(self, graph: TraceGraph, trace_frame: TraceFrame) -> None:
""" Copies the trace frame from 'graph' to this (self) graph.
Also copies all the trace_frame-leaf assocs since we don't
know which ones are needed until we know the issue that reaches it
"""
trace_frame_id = trace_frame.id.local_id
self.add_trace_frame(trace_frame)
self._populate_shared_text(graph, trace_frame.filename_id)
self._populate_shared_text(graph, trace_frame.caller_id)
self._populate_shared_text(graph, trace_frame.callee_id)
for (leaf_id, depth) in graph._trace_frame_leaf_assoc[trace_frame_id]:
leaf = graph._shared_texts[leaf_id]
if leaf_id not in self._shared_texts:
self.add_shared_text(leaf)
self.add_trace_frame_leaf_assoc(trace_frame, leaf, depth) | Copies the trace frame from 'graph' to this (self) graph.
Also copies all the trace_frame-leaf assocs since we don't
know which ones are needed until we know the issue that reaches it | Below is the the instruction that describes the task:
### Input:
Copies the trace frame from 'graph' to this (self) graph.
Also copies all the trace_frame-leaf assocs since we don't
know which ones are needed until we know the issue that reaches it
### Response:
def _add_trace_frame(self, graph: TraceGraph, trace_frame: TraceFrame) -> None:
""" Copies the trace frame from 'graph' to this (self) graph.
Also copies all the trace_frame-leaf assocs since we don't
know which ones are needed until we know the issue that reaches it
"""
trace_frame_id = trace_frame.id.local_id
self.add_trace_frame(trace_frame)
self._populate_shared_text(graph, trace_frame.filename_id)
self._populate_shared_text(graph, trace_frame.caller_id)
self._populate_shared_text(graph, trace_frame.callee_id)
for (leaf_id, depth) in graph._trace_frame_leaf_assoc[trace_frame_id]:
leaf = graph._shared_texts[leaf_id]
if leaf_id not in self._shared_texts:
self.add_shared_text(leaf)
self.add_trace_frame_leaf_assoc(trace_frame, leaf, depth) |
def set_brightness(self, brightness):
"""Set brightness of entire display to specified value (16 levels, from
0 to 15).
"""
if brightness < 0 or brightness > 15:
raise ValueError('Brightness must be a value of 0 to 15.')
self._device.writeList(HT16K33_CMD_BRIGHTNESS | brightness, []) | Set brightness of entire display to specified value (16 levels, from
0 to 15). | Below is the the instruction that describes the task:
### Input:
Set brightness of entire display to specified value (16 levels, from
0 to 15).
### Response:
def set_brightness(self, brightness):
"""Set brightness of entire display to specified value (16 levels, from
0 to 15).
"""
if brightness < 0 or brightness > 15:
raise ValueError('Brightness must be a value of 0 to 15.')
self._device.writeList(HT16K33_CMD_BRIGHTNESS | brightness, []) |
def assertTimeZoneNotEqual(self, dt, tz, msg=None):
'''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertNotEqual(dt.tzinfo, tz, msg=msg) | Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object. | Below is the the instruction that describes the task:
### Input:
Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
### Response:
def assertTimeZoneNotEqual(self, dt, tz, msg=None):
'''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertNotEqual(dt.tzinfo, tz, msg=msg) |
def filter(self, **kwargs):
'''
Only columns/attributes that have been specified as having an index with
the ``index=True`` option on the column definition can be filtered with
this method. Prefix, suffix, and pattern match filters must be provided
using the ``.startswith()``, ``.endswith()``, and the ``.like()``
methods on the query object, respectively. Geo location queries should
be performed using the ``.near()`` method.
Filters should be of the form::
# for numeric ranges, use None for open-ended ranges
attribute=(min, max)
# you can also query for equality by passing a single number
attribute=value
# for string searches, passing a plain string will require that
# string to be in the index as a literal
attribute=string
# to perform an 'or' query on strings, you can pass a list of
# strings
attribute=[string1, string2]
As an example, the following will return entities that have both
``hello`` and ``world`` in the ``String`` column ``scol`` and has a
``Numeric`` column ``ncol`` with value between 2 and 10 (including the
endpoints)::
results = MyModel.query \\
.filter(scol='hello') \\
.filter(scol='world') \\
.filter(ncol=(2, 10)) \\
.all()
If you only want to match a single value as part of your range query,
you can pass an integer, float, or Decimal object by itself, similar
to the ``Model.get_by()`` method::
results = MyModel.query \\
.filter(ncol=5) \\
.execute()
.. note:: Trying to use a range query `attribute=(min, max)` on indexed
string columns won't return any results.
.. note:: This method only filters columns that have been defined with
``index=True``.
'''
cur_filters = list(self._filters)
for attr, value in kwargs.items():
value = self._check(attr, value, which='filter')
if isinstance(value, NUMERIC_TYPES):
            # for simple numeric equality filters
value = (value, value)
if isinstance(value, six.string_types):
cur_filters.append('%s:%s'%(attr, value))
elif six.PY3 and isinstance(value, bytes):
cur_filters.append('%s:%s'%(attr, value.decode('latin-1')))
elif isinstance(value, tuple):
if value is NOT_NULL:
from .columns import OneToOne, ManyToOne
ctype = type(self._model._columns[attr])
if not issubclass(ctype, (OneToOne, ManyToOne)):
raise QueryError("Can only query for non-null column values " \
"on OneToOne or ManyToOne columns, %r is of type %r"%(attr, ctype))
if len(value) != 2:
raise QueryError("Numeric ranges require 2 endpoints, you provided %s with %r"%(len(value), value))
tt = []
for v in value:
if isinstance(v, date):
v = dt2ts(v)
if isinstance(v, dtime):
v = t2ts(v)
tt.append(v)
value = tt
cur_filters.append((attr, value[0], value[1]))
elif isinstance(value, list) and value:
cur_filters.append(['%s:%s'%(attr, _ts(v)) for v in value])
else:
raise QueryError("Sorry, we don't know how to filter %r by %r"%(attr, value))
return self.replace(filters=tuple(cur_filters)) | Only columns/attributes that have been specified as having an index with
the ``index=True`` option on the column definition can be filtered with
this method. Prefix, suffix, and pattern match filters must be provided
using the ``.startswith()``, ``.endswith()``, and the ``.like()``
methods on the query object, respectively. Geo location queries should
be performed using the ``.near()`` method.
Filters should be of the form::
# for numeric ranges, use None for open-ended ranges
attribute=(min, max)
# you can also query for equality by passing a single number
attribute=value
# for string searches, passing a plain string will require that
# string to be in the index as a literal
attribute=string
# to perform an 'or' query on strings, you can pass a list of
# strings
attribute=[string1, string2]
As an example, the following will return entities that have both
``hello`` and ``world`` in the ``String`` column ``scol`` and has a
``Numeric`` column ``ncol`` with value between 2 and 10 (including the
endpoints)::
results = MyModel.query \\
.filter(scol='hello') \\
.filter(scol='world') \\
.filter(ncol=(2, 10)) \\
.all()
If you only want to match a single value as part of your range query,
you can pass an integer, float, or Decimal object by itself, similar
to the ``Model.get_by()`` method::
results = MyModel.query \\
.filter(ncol=5) \\
.execute()
.. note:: Trying to use a range query `attribute=(min, max)` on indexed
string columns won't return any results.
.. note:: This method only filters columns that have been defined with
``index=True``. | Below is the the instruction that describes the task:
### Input:
Only columns/attributes that have been specified as having an index with
the ``index=True`` option on the column definition can be filtered with
this method. Prefix, suffix, and pattern match filters must be provided
using the ``.startswith()``, ``.endswith()``, and the ``.like()``
methods on the query object, respectively. Geo location queries should
be performed using the ``.near()`` method.
Filters should be of the form::
# for numeric ranges, use None for open-ended ranges
attribute=(min, max)
# you can also query for equality by passing a single number
attribute=value
# for string searches, passing a plain string will require that
# string to be in the index as a literal
attribute=string
# to perform an 'or' query on strings, you can pass a list of
# strings
attribute=[string1, string2]
As an example, the following will return entities that have both
``hello`` and ``world`` in the ``String`` column ``scol`` and has a
``Numeric`` column ``ncol`` with value between 2 and 10 (including the
endpoints)::
results = MyModel.query \\
.filter(scol='hello') \\
.filter(scol='world') \\
.filter(ncol=(2, 10)) \\
.all()
If you only want to match a single value as part of your range query,
you can pass an integer, float, or Decimal object by itself, similar
to the ``Model.get_by()`` method::
results = MyModel.query \\
.filter(ncol=5) \\
.execute()
.. note:: Trying to use a range query `attribute=(min, max)` on indexed
string columns won't return any results.
.. note:: This method only filters columns that have been defined with
``index=True``.
### Response:
def filter(self, **kwargs):
'''
Only columns/attributes that have been specified as having an index with
the ``index=True`` option on the column definition can be filtered with
this method. Prefix, suffix, and pattern match filters must be provided
using the ``.startswith()``, ``.endswith()``, and the ``.like()``
methods on the query object, respectively. Geo location queries should
be performed using the ``.near()`` method.
Filters should be of the form::
# for numeric ranges, use None for open-ended ranges
attribute=(min, max)
# you can also query for equality by passing a single number
attribute=value
# for string searches, passing a plain string will require that
# string to be in the index as a literal
attribute=string
# to perform an 'or' query on strings, you can pass a list of
# strings
attribute=[string1, string2]
As an example, the following will return entities that have both
``hello`` and ``world`` in the ``String`` column ``scol`` and has a
``Numeric`` column ``ncol`` with value between 2 and 10 (including the
endpoints)::
results = MyModel.query \\
.filter(scol='hello') \\
.filter(scol='world') \\
.filter(ncol=(2, 10)) \\
.all()
If you only want to match a single value as part of your range query,
you can pass an integer, float, or Decimal object by itself, similar
to the ``Model.get_by()`` method::
results = MyModel.query \\
.filter(ncol=5) \\
.execute()
.. note:: Trying to use a range query `attribute=(min, max)` on indexed
string columns won't return any results.
.. note:: This method only filters columns that have been defined with
``index=True``.
'''
cur_filters = list(self._filters)
for attr, value in kwargs.items():
value = self._check(attr, value, which='filter')
if isinstance(value, NUMERIC_TYPES):
            # for simple numeric equality filters
value = (value, value)
if isinstance(value, six.string_types):
cur_filters.append('%s:%s'%(attr, value))
elif six.PY3 and isinstance(value, bytes):
cur_filters.append('%s:%s'%(attr, value.decode('latin-1')))
elif isinstance(value, tuple):
if value is NOT_NULL:
from .columns import OneToOne, ManyToOne
ctype = type(self._model._columns[attr])
if not issubclass(ctype, (OneToOne, ManyToOne)):
raise QueryError("Can only query for non-null column values " \
"on OneToOne or ManyToOne columns, %r is of type %r"%(attr, ctype))
if len(value) != 2:
raise QueryError("Numeric ranges require 2 endpoints, you provided %s with %r"%(len(value), value))
tt = []
for v in value:
if isinstance(v, date):
v = dt2ts(v)
if isinstance(v, dtime):
v = t2ts(v)
tt.append(v)
value = tt
cur_filters.append((attr, value[0], value[1]))
elif isinstance(value, list) and value:
cur_filters.append(['%s:%s'%(attr, _ts(v)) for v in value])
else:
raise QueryError("Sorry, we don't know how to filter %r by %r"%(attr, value))
return self.replace(filters=tuple(cur_filters)) |
def get_substructure_mapping(self, other, limit=1):
"""
get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts
"""
i = self._matcher(other).subgraph_isomorphisms_iter()
if limit == 1:
m = next(i, None)
if m:
return {v: k for k, v in m.items()}
return
elif limit == 0:
return ({v: k for k, v in m.items()} for m in i)
return [{v: k for k, v in m.items()} for m in islice(i, limit)] | get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts | Below is the the instruction that describes the task:
### Input:
get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts
### Response:
def get_substructure_mapping(self, other, limit=1):
"""
get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts
"""
i = self._matcher(other).subgraph_isomorphisms_iter()
if limit == 1:
m = next(i, None)
if m:
return {v: k for k, v in m.items()}
return
elif limit == 0:
return ({v: k for k, v in m.items()} for m in i)
return [{v: k for k, v in m.items()} for m in islice(i, limit)] |
def start_thread(self, lpStartAddress, lpParameter=0, bSuspended = False):
"""
Remotely creates a new thread in the process.
@type lpStartAddress: int
@param lpStartAddress: Start address for the new thread.
@type lpParameter: int
@param lpParameter: Optional argument for the new thread.
@type bSuspended: bool
@param bSuspended: C{True} if the new thread should be suspended.
In that case use L{Thread.resume} to start execution.
"""
if bSuspended:
dwCreationFlags = win32.CREATE_SUSPENDED
else:
dwCreationFlags = 0
hProcess = self.get_handle( win32.PROCESS_CREATE_THREAD |
win32.PROCESS_QUERY_INFORMATION |
win32.PROCESS_VM_OPERATION |
win32.PROCESS_VM_WRITE |
win32.PROCESS_VM_READ )
hThread, dwThreadId = win32.CreateRemoteThread(
hProcess, 0, 0, lpStartAddress, lpParameter, dwCreationFlags)
aThread = Thread(dwThreadId, hThread, self)
self._add_thread(aThread)
return aThread | Remotely creates a new thread in the process.
@type lpStartAddress: int
@param lpStartAddress: Start address for the new thread.
@type lpParameter: int
@param lpParameter: Optional argument for the new thread.
@type bSuspended: bool
@param bSuspended: C{True} if the new thread should be suspended.
In that case use L{Thread.resume} to start execution. | Below is the the instruction that describes the task:
### Input:
Remotely creates a new thread in the process.
@type lpStartAddress: int
@param lpStartAddress: Start address for the new thread.
@type lpParameter: int
@param lpParameter: Optional argument for the new thread.
@type bSuspended: bool
@param bSuspended: C{True} if the new thread should be suspended.
In that case use L{Thread.resume} to start execution.
### Response:
def start_thread(self, lpStartAddress, lpParameter=0, bSuspended = False):
"""
Remotely creates a new thread in the process.
@type lpStartAddress: int
@param lpStartAddress: Start address for the new thread.
@type lpParameter: int
@param lpParameter: Optional argument for the new thread.
@type bSuspended: bool
@param bSuspended: C{True} if the new thread should be suspended.
In that case use L{Thread.resume} to start execution.
"""
if bSuspended:
dwCreationFlags = win32.CREATE_SUSPENDED
else:
dwCreationFlags = 0
hProcess = self.get_handle( win32.PROCESS_CREATE_THREAD |
win32.PROCESS_QUERY_INFORMATION |
win32.PROCESS_VM_OPERATION |
win32.PROCESS_VM_WRITE |
win32.PROCESS_VM_READ )
hThread, dwThreadId = win32.CreateRemoteThread(
hProcess, 0, 0, lpStartAddress, lpParameter, dwCreationFlags)
aThread = Thread(dwThreadId, hThread, self)
self._add_thread(aThread)
return aThread |
def retain_all(self, items):
"""
Removes the items which are not contained in the specified collection. In other words, only the items that
are contained in the specified collection will be retained.
:param items: (Collection), collection which includes the elements to be retained in this set.
:return: (bool), ``true`` if this queue changed as a result of the call.
"""
check_not_none(items, "Value can't be None")
data_items = []
for item in items:
check_not_none(item, "Value can't be None")
data_items.append(self._to_data(item))
return self._encode_invoke(queue_compare_and_retain_all_codec, data_list=data_items) | Removes the items which are not contained in the specified collection. In other words, only the items that
are contained in the specified collection will be retained.
:param items: (Collection), collection which includes the elements to be retained in this set.
:return: (bool), ``true`` if this queue changed as a result of the call. | Below is the the instruction that describes the task:
### Input:
Removes the items which are not contained in the specified collection. In other words, only the items that
are contained in the specified collection will be retained.
:param items: (Collection), collection which includes the elements to be retained in this set.
:return: (bool), ``true`` if this queue changed as a result of the call.
### Response:
def retain_all(self, items):
"""
Removes the items which are not contained in the specified collection. In other words, only the items that
are contained in the specified collection will be retained.
:param items: (Collection), collection which includes the elements to be retained in this set.
:return: (bool), ``true`` if this queue changed as a result of the call.
"""
check_not_none(items, "Value can't be None")
data_items = []
for item in items:
check_not_none(item, "Value can't be None")
data_items.append(self._to_data(item))
return self._encode_invoke(queue_compare_and_retain_all_codec, data_list=data_items) |
def _run_purecn(paired, work_dir):
    """Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs.

    Skips the run when outputs are already current or a previous attempt
    failed; returns the PureCN output-file dict, or None when no solution
    was produced.
    """
    # Dispatch table: segmentation normalizer keyed by the binning approach.
    segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk}
    out_base, out, all_files = _get_purecn_files(paired, work_dir)
    # Sentinel file: an earlier failed run short-circuits re-running PureCN.
    failed_file = out_base + "-failed.log"
    cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data)
    if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file):
        cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired)
        from bcbio import heterogeneity
        # Somatic variants only (include_germline=False), filtered to PASS/REJECT.
        vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"]
        vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir)
        with file_transaction(paired.tumor_data, out_base) as tx_out_base:
            # Use UCSC style naming for human builds to support BSgenome
            genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"]
                      else dd.get_genome_build(paired.tumor_data))
            cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base,
                   "--sampleid", dd.get_sample_name(paired.tumor_data),
                   "--genome", genome,
                   "--vcf", vcf_file, "--tumor", cnr_file,
                   "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"]
            if dd.get_num_cores(paired.tumor_data) > 1:
                cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))]
            try:
                cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(),
                                                             " ".join([str(x) for x in cmd]))
                do.run(cmd, "PureCN copy number calling")
            except subprocess.CalledProcessError as msg:
                # Expected PureCN failures are logged and recorded (not re-raised)
                # so the pipeline can continue without copy-number output.
                if _allowed_errors(str(msg)):
                    logger.info("PureCN failed to find solution for %s: skipping" %
                                dd.get_sample_name(paired.tumor_data))
                    with open(failed_file, "w") as out_handle:
                        out_handle.write(str(msg))
                else:
                    # NOTE(review): logger.exception() requires a message argument;
                    # called with none it raises TypeError, masking the real error.
                    logger.exception()
                    raise
            # Move transactional outputs into their final location.
            for f in all_files:
                if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)):
                    shutil.move(os.path.join(os.path.dirname(tx_out_base), f),
                                os.path.join(os.path.dirname(out_base), f))
    out = _get_purecn_files(paired, work_dir, require_exist=True)[1]
    return out if (out.get("rds") and os.path.exists(out["rds"])) else None | Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs. | Below is the the instruction that describes the task:
### Input:
Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs.
### Response:
def _run_purecn(paired, work_dir):
    """Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs.

    Skips the run when outputs are already current or a previous attempt
    failed; returns the PureCN output-file dict, or None when no solution
    was produced.
    """
    # Dispatch table: segmentation normalizer keyed by the binning approach.
    segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk}
    out_base, out, all_files = _get_purecn_files(paired, work_dir)
    # Sentinel file: an earlier failed run short-circuits re-running PureCN.
    failed_file = out_base + "-failed.log"
    cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data)
    if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file):
        cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired)
        from bcbio import heterogeneity
        # Somatic variants only (include_germline=False), filtered to PASS/REJECT.
        vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"]
        vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir)
        with file_transaction(paired.tumor_data, out_base) as tx_out_base:
            # Use UCSC style naming for human builds to support BSgenome
            genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"]
                      else dd.get_genome_build(paired.tumor_data))
            cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base,
                   "--sampleid", dd.get_sample_name(paired.tumor_data),
                   "--genome", genome,
                   "--vcf", vcf_file, "--tumor", cnr_file,
                   "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"]
            if dd.get_num_cores(paired.tumor_data) > 1:
                cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))]
            try:
                cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(),
                                                             " ".join([str(x) for x in cmd]))
                do.run(cmd, "PureCN copy number calling")
            except subprocess.CalledProcessError as msg:
                # Expected PureCN failures are logged and recorded (not re-raised)
                # so the pipeline can continue without copy-number output.
                if _allowed_errors(str(msg)):
                    logger.info("PureCN failed to find solution for %s: skipping" %
                                dd.get_sample_name(paired.tumor_data))
                    with open(failed_file, "w") as out_handle:
                        out_handle.write(str(msg))
                else:
                    # Bug fix: Logger.exception() requires a message argument;
                    # the bare call raised TypeError and masked the real error.
                    logger.exception("PureCN copy number calling failed for %s",
                                     dd.get_sample_name(paired.tumor_data))
                    raise
            # Move transactional outputs into their final location.
            for f in all_files:
                if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)):
                    shutil.move(os.path.join(os.path.dirname(tx_out_base), f),
                                os.path.join(os.path.dirname(out_base), f))
    out = _get_purecn_files(paired, work_dir, require_exist=True)[1]
    return out if (out.get("rds") and os.path.exists(out["rds"])) else None
def start(self):
    '''
    Starts a server on the port provided in the :class:`Server` constructor
    in a separate thread
    :rtype: Server
    :returns: server instance for chaining
    '''
    # Build the request-handler class from the configured rule sets.
    self._handler = _create_handler_class(self._rules, self._always_rules)
    # '' as the host binds to all available interfaces.
    self._server = HTTPServer(('', self._port), self._handler)
    # Daemon thread: it will not keep the interpreter alive at exit.
    self._thread = Thread(target=self._server.serve_forever, daemon=True)
    self._thread.start()
    self.running = True
    return self | Starts a server on the port provided in the :class:`Server` constructor
in a separate thread
:rtype: Server
:returns: server instance for chaining | Below is the instruction that describes the task:
### Input:
Starts a server on the port provided in the :class:`Server` constructor
in a separate thread
:rtype: Server
:returns: server instance for chaining
### Response:
def start(self):
    '''
    Launch the HTTP server on the port given to the :class:`Server`
    constructor, serving requests from a background daemon thread.
    :rtype: Server
    :returns: this server instance, to allow call chaining
    '''
    # Fresh handler class bound to the currently configured rules.
    self._handler = _create_handler_class(self._rules, self._always_rules)
    bind_address = ('', self._port)  # '' = listen on all interfaces
    self._server = HTTPServer(bind_address, self._handler)
    # Daemon thread so the process can exit without an explicit stop.
    serve = self._server.serve_forever
    self._thread = Thread(target=serve, daemon=True)
    self._thread.start()
    self.running = True
    return self
def nga_west2_epistemic_adjustment(magnitude, distance):
    """
    Applies the "average" adjustment factor for epistemic uncertainty
    as defined in Table 17 of Petersen et al., (2014)::
        | R < 10. | 10.0 <= R < 30.0 | R >= 30.0
    -----------------------------------------------------------
    M < 6.0 | 0.37 | 0.22 | 0.22
    6 <= M <7.0 | 0.25 | 0.23 | 0.23
    M >= 7.0 | 0.40 | 0.36 | 0.33

    :param magnitude: scalar magnitude selecting the table row.
    :param distance: numpy array of distances (km); result has same shape.
    """
    if magnitude < 6.0:
        # M < 6.0: 0.22 at all distances except 0.37 within 10 km.
        adjustment = 0.22 * np.ones_like(distance)
        adjustment[distance < 10.0] = 0.37
    elif magnitude >= 7.0:
        # M >= 7.0: all three distance bands differ.
        adjustment = 0.36 * np.ones_like(distance)
        adjustment[distance < 10.0] = 0.40
        adjustment[distance >= 30.0] = 0.33
    else:
        # 6.0 <= M < 7.0: 0.23 at all distances except 0.25 within 10 km.
        adjustment = 0.23 * np.ones_like(distance)
        adjustment[distance < 10.0] = 0.25
    return adjustment | Applies the "average" adjustment factor
as defined in Table 17 of Petersen et al., (2014)::
| R < 10. | 10.0 <= R < 30.0 | R >= 30.0
-----------------------------------------------------------
M < 6.0 | 0.37 | 0.22 | 0.22
6 <= M <7.0 | 0.25 | 0.23 | 0.23
M >= 7.0 | 0.40 | 0.36 | 0.33 | Below is the instruction that describes the task:
### Input:
Applies the "average" adjustment factor for epistemic uncertainty
as defined in Table 17 of Petersen et al., (2014)::
| R < 10. | 10.0 <= R < 30.0 | R >= 30.0
-----------------------------------------------------------
M < 6.0 | 0.37 | 0.22 | 0.22
6 <= M <7.0 | 0.25 | 0.23 | 0.23
M >= 7.0 | 0.40 | 0.36 | 0.33
### Response:
def nga_west2_epistemic_adjustment(magnitude, distance):
    """
    Return the "average" epistemic-uncertainty adjustment factor from
    Table 17 of Petersen et al., (2014)::
                    | R < 10. | 10.0 <= R < 30.0 | R >= 30.0
        -----------------------------------------------------------
        M < 6.0     |  0.37   |      0.22        |   0.22
        6 <= M <7.0 |  0.25   |      0.23        |   0.23
        M >= 7.0    |  0.40   |      0.36        |   0.33
    """
    # Select the (near, mid, far) distance-band factors for this magnitude.
    if magnitude < 6.0:
        near, mid, far = 0.37, 0.22, 0.22
    elif magnitude >= 7.0:
        near, mid, far = 0.40, 0.36, 0.33
    else:
        near, mid, far = 0.25, 0.23, 0.23
    # Fill with the mid-band value, then overwrite the near/far bands.
    factors = mid * np.ones_like(distance)
    factors[distance < 10.0] = near
    factors[distance >= 30.0] = far
    return factors
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.