code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def run_ahead(self, time, framerate):
"""Run the particle system for the specified time frame at the
specified framerate to move time forward as quickly as possible.
Useful for "warming up" the particle system to reach a steady-state
before anything is drawn or to simply "skip ahead" in time.
time -- The amount of simulation time to skip over.
framerate -- The framerate of the simulation in updates per unit
time. Higher values will increase simulation accuracy,
but will take longer to compute.
"""
if time:
td = 1.0 / framerate
update = self.update
for i in range(int(time / td)):
update(td) | Run the particle system for the specified time frame at the
specified framerate to move time forward as quickly as possible.
Useful for "warming up" the particle system to reach a steady-state
before anything is drawn or to simply "skip ahead" in time.
time -- The amount of simulation time to skip over.
framerate -- The framerate of the simulation in updates per unit
time. Higher values will increase simulation accuracy,
but will take longer to compute. | Below is the the instruction that describes the task:
### Input:
Run the particle system for the specified time frame at the
specified framerate to move time forward as quickly as possible.
Useful for "warming up" the particle system to reach a steady-state
before anything is drawn or to simply "skip ahead" in time.
time -- The amount of simulation time to skip over.
framerate -- The framerate of the simulation in updates per unit
time. Higher values will increase simulation accuracy,
but will take longer to compute.
### Response:
def run_ahead(self, time, framerate):
    """Run the particle system for the specified time frame at the
    specified framerate to move time forward as quickly as possible.
    Useful for "warming up" the particle system to reach a steady-state
    before anything is drawn or to simply "skip ahead" in time.

    time -- The amount of simulation time to skip over.

    framerate -- The framerate of the simulation in updates per unit
    time. Higher values will increase simulation accuracy,
    but will take longer to compute.
    """
    if time:
        dt = 1.0 / framerate
        # Derive the step count from time * framerate directly: dividing
        # time by dt and truncating can lose a step to float round-off
        # (e.g. 0.3 / 0.1 -> 2.999... -> int gives 2 steps instead of 3).
        steps = int(round(time * framerate))
        update = self.update  # hoist the attribute lookup out of the loop
        for _ in range(steps):
            update(dt)
def _file_path(self, uid):
"""Create and return full file path for DayOne entry"""
file_name = '%s.doentry' % (uid)
    return os.path.join(self.dayone_journal_path, file_name) | Create and return full file path for DayOne entry | Below is the instruction that describes the task:
### Input:
Create and return full file path for DayOne entry
### Response:
def _file_path(self, uid):
"""Create and return full file path for DayOne entry"""
file_name = '%s.doentry' % (uid)
return os.path.join(self.dayone_journal_path, file_name) |
def follow_hand(poppy, delta):
"""Tell the right hand to follow the left hand"""
right_arm_position = poppy.l_arm_chain.end_effector + delta
poppy.r_arm_chain.goto(right_arm_position, 0.5, wait=True) | Tell the right hand to follow the left hand | Below is the the instruction that describes the task:
### Input:
Tell the right hand to follow the left hand
### Response:
def follow_hand(poppy, delta):
    """Make the right hand track the left hand, offset by *delta*."""
    # Target = current left-hand end-effector position shifted by delta.
    target = poppy.l_arm_chain.end_effector + delta
    # Blocking 0.5 s motion of the right arm towards the target.
    poppy.r_arm_chain.goto(target, 0.5, wait=True)
def GetString(self, text, idx):
'''Internal method.
Retrieves a string from the #STRINGS buffer.
'''
next = string.find(text, '\x00', idx)
chunk = text[idx:next]
return chunk | Internal method.
Retrieves a string from the #STRINGS buffer. | Below is the the instruction that describes the task:
### Input:
Internal method.
Retrieves a string from the #STRINGS buffer.
### Response:
def GetString(self, text, idx):
    '''Internal method.
    Retrieves a NUL-terminated string from the #STRINGS buffer.

    text -- the raw #STRINGS buffer contents.
    idx  -- offset at which the string starts.

    Returns the substring from idx up to (not including) the next NUL
    byte, or the remainder of the buffer if no terminator is present.
    '''
    # str.find replaces the Python-2-only string.find(); it returns -1
    # when no NUL terminator exists, in which case the original slice
    # text[idx:-1] silently dropped the final character.
    end = text.find('\x00', idx)
    if end == -1:
        return text[idx:]
    return text[idx:end]
def normal_var(data, mean):
""" Creates a segment cost function for a time series with a
Normal distribution with changing variance
Args:
data (:obj:`list` of float): 1D time series data
variance (float): variance
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the last arg. Returns the cost of that segment
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
cumm = [0.0]
cumm.extend(np.cumsum(np.power(np.abs(data - mean), 2)))
def cost(s, t):
""" Cost function for normal distribution with variable variance
Args:
start (int): start index
end (int): end index
Returns:
float: Cost, from start to end
"""
dist = float(t - s)
diff = cumm[t] - cumm[s]
return dist * np.log(diff/dist)
return cost | Creates a segment cost function for a time series with a
Normal distribution with changing variance
Args:
data (:obj:`list` of float): 1D time series data
variance (float): variance
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the last arg. Returns the cost of that segment | Below is the the instruction that describes the task:
### Input:
Creates a segment cost function for a time series with a
Normal distribution with changing variance
Args:
data (:obj:`list` of float): 1D time series data
variance (float): variance
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the last arg. Returns the cost of that segment
### Response:
def normal_var(data, mean):
    """ Build a segment cost function for a time series with a Normal
    distribution of known mean and changing variance.

    Args:
        data (:obj:`list` of float): 1D time series data
        mean (float): known mean of the series

    Returns:
        function: Function with signature
            (int, int) -> float
        where the first arg is the starting index, and the second
        is the last arg. Returns the cost of that segment
    """
    series = np.asanyarray(data)
    # Prefix sums of squared deviations from the mean; prefix[t] - prefix[s]
    # is then the sum of squared deviations over data[s:t].
    prefix = np.concatenate(([0.0], np.cumsum(np.abs(series - mean) ** 2)))

    def cost(s, t):
        """ Cost of segment [s, t) under a variable-variance Normal model.

        Args:
            s (int): start index
            t (int): end index

        Returns:
            float: Cost, from start to end
        """
        length = float(t - s)
        ssd = prefix[t] - prefix[s]
        return length * np.log(ssd / length)

    return cost
def inverse_transform(self, y, exogenous=None):
"""Inverse transform a transformed array
Inverse the Box-Cox transformation on the transformed array. Note that
if truncation happened in the ``transform`` method, invertibility will
not be preserved, and the transformed array may not be perfectly
inverse-transformed.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
exogenous : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
exogenous : array-like or None
The inverse-transformed exogenous array
"""
check_is_fitted(self, "lam1_")
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_exog(y, exogenous)
if lam1 == 0:
return np.exp(y) - lam2, exog
numer = y * lam1 # remove denominator
numer += 1. # add 1 back to it
de_exp = numer ** (1. / lam1) # de-exponentiate
return de_exp - lam2, exog | Inverse transform a transformed array
Inverse the Box-Cox transformation on the transformed array. Note that
if truncation happened in the ``transform`` method, invertibility will
not be preserved, and the transformed array may not be perfectly
inverse-transformed.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
exogenous : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
exogenous : array-like or None
The inverse-transformed exogenous array | Below is the the instruction that describes the task:
### Input:
Inverse transform a transformed array
Inverse the Box-Cox transformation on the transformed array. Note that
if truncation happened in the ``transform`` method, invertibility will
not be preserved, and the transformed array may not be perfectly
inverse-transformed.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
exogenous : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
exogenous : array-like or None
The inverse-transformed exogenous array
### Response:
def inverse_transform(self, y, exogenous=None):
    """Invert the Box-Cox transformation on a transformed array.

    Note that if truncation happened in the ``transform`` method,
    invertibility is not preserved and the round-trip may not be exact.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The transformed endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. Not used for
        endogenous transformers. Default is None, and non-None values will
        serve as pass-through arrays.

    Returns
    -------
    y : array-like or None
        The inverse-transformed y array

    exogenous : array-like or None
        The inverse-transformed exogenous array
    """
    check_is_fitted(self, "lam1_")
    y, exog = self._check_y_exog(y, exogenous)
    lam1, lam2 = self.lam1_, self.lam2_

    # lam1 == 0 corresponds to the log transform, whose inverse is exp.
    if lam1 == 0:
        return np.exp(y) - lam2, exog

    # Otherwise invert ((y + lam2)**lam1 - 1) / lam1:
    # undo the division, add the 1 back, then de-exponentiate.
    restored = (y * lam1 + 1.) ** (1. / lam1)
    return restored - lam2, exog
def ip_rtm_config_route_static_bfd_bfd_static_route_bfd_interval_attributes_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
route = ET.SubElement(rtm_config, "route")
static = ET.SubElement(route, "static")
bfd = ET.SubElement(static, "bfd")
bfd_static_route = ET.SubElement(bfd, "bfd-static-route")
bfd_static_route_dest_key = ET.SubElement(bfd_static_route, "bfd-static-route-dest")
bfd_static_route_dest_key.text = kwargs.pop('bfd_static_route_dest')
bfd_static_route_src_key = ET.SubElement(bfd_static_route, "bfd-static-route-src")
bfd_static_route_src_key.text = kwargs.pop('bfd_static_route_src')
bfd_interval_attributes = ET.SubElement(bfd_static_route, "bfd-interval-attributes")
interval = ET.SubElement(bfd_interval_attributes, "interval")
interval.text = kwargs.pop('interval')
callback = kwargs.pop('callback', self._callback)
    return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ip_rtm_config_route_static_bfd_bfd_static_route_bfd_interval_attributes_interval(self, **kwargs):
    """Auto Generated Code

    Builds the XML config tree for the BFD static-route interval attribute
    and submits it through the callback (default: ``self._callback``).
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    # Descend route/static/bfd/bfd-static-route.
    node = ET.SubElement(rtm_config, "route")
    for tag in ("static", "bfd", "bfd-static-route"):
        node = ET.SubElement(node, tag)
    bfd_static_route = node
    # Key leaves identifying the static route.
    for key_tag, kwarg in (("bfd-static-route-dest", 'bfd_static_route_dest'),
                           ("bfd-static-route-src", 'bfd_static_route_src')):
        ET.SubElement(bfd_static_route, key_tag).text = kwargs.pop(kwarg)
    attributes = ET.SubElement(bfd_static_route, "bfd-interval-attributes")
    ET.SubElement(attributes, "interval").text = kwargs.pop('interval')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def describe_autocomplete(self, service, operation, param):
"""Describe operation and args needed for server side completion.
:type service: str
:param service: The AWS service name.
:type operation: str
:param operation: The AWS operation name.
:type param: str
:param param: The name of the parameter being completed. This must
match the casing in the service model (e.g. InstanceIds, not
--instance-ids).
:rtype: ServerCompletion
:return: A ServerCompletion object that describes what API call to make
in order to complete the response.
"""
service_index = self._index[service]
LOG.debug(service_index)
if param not in service_index.get('operations', {}).get(operation, {}):
LOG.debug("param not in index: %s", param)
return None
p = service_index['operations'][operation][param]
resource_name = p['resourceName']
resource_identifier = p['resourceIdentifier']
resource_index = service_index['resources'][resource_name]
completion_operation = resource_index['operation']
path = resource_index['resourceIdentifier'][resource_identifier]
return ServerCompletion(service=service, operation=completion_operation,
params={}, path=path) | Describe operation and args needed for server side completion.
:type service: str
:param service: The AWS service name.
:type operation: str
:param operation: The AWS operation name.
:type param: str
:param param: The name of the parameter being completed. This must
match the casing in the service model (e.g. InstanceIds, not
--instance-ids).
:rtype: ServerCompletion
:return: A ServerCompletion object that describes what API call to make
in order to complete the response. | Below is the the instruction that describes the task:
### Input:
Describe operation and args needed for server side completion.
:type service: str
:param service: The AWS service name.
:type operation: str
:param operation: The AWS operation name.
:type param: str
:param param: The name of the parameter being completed. This must
match the casing in the service model (e.g. InstanceIds, not
--instance-ids).
:rtype: ServerCompletion
:return: A ServerCompletion object that describes what API call to make
in order to complete the response.
### Response:
def describe_autocomplete(self, service, operation, param):
    """Describe operation and args needed for server side completion.

    :type service: str
    :param service: The AWS service name.

    :type operation: str
    :param operation: The AWS operation name.

    :type param: str
    :param param: The name of the parameter being completed.  This must
        match the casing in the service model (e.g. InstanceIds, not
        --instance-ids).

    :rtype: ServerCompletion
    :return: A ServerCompletion object that describes what API call to make
        in order to complete the response, or None when the parameter is
        not present in the completion index.
    """
    service_index = self._index[service]
    LOG.debug(service_index)
    operation_params = service_index.get('operations', {}).get(operation, {})
    if param not in operation_params:
        LOG.debug("param not in index: %s", param)
        return None
    param_info = operation_params[param]
    # Resolve which API call (and which response path) yields the values.
    resource = service_index['resources'][param_info['resourceName']]
    return ServerCompletion(
        service=service,
        operation=resource['operation'],
        params={},
        path=resource['resourceIdentifier'][param_info['resourceIdentifier']])
def matrix_asformat(lvl, name, format, blocksize=None):
"""Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage.
"""
desired_matrix = name + format
M = getattr(lvl, name)
if format == 'bsr':
desired_matrix += str(blocksize[0])+str(blocksize[1])
if hasattr(lvl, desired_matrix):
# if lvl already contains lvl.name+format
pass
elif M.format == format and format != 'bsr':
# is base_matrix already in the correct format?
setattr(lvl, desired_matrix, M)
elif M.format == format and format == 'bsr':
# convert to bsr with the right blocksize
# tobsr() will not do anything extra if this is uneeded
setattr(lvl, desired_matrix, M.tobsr(blocksize=blocksize))
else:
# convert
newM = getattr(M, 'to' + format)()
setattr(lvl, desired_matrix, newM)
return getattr(lvl, desired_matrix) | Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage. | Below is the the instruction that describes the task:
### Input:
Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage.
### Response:
def matrix_asformat(lvl, name, format, blocksize=None):
    """Return (and cache) the matrix ``lvl.<name>`` in the given format.

    Looks for the matrix "name" in the requested format as a member of the
    level instance, lvl.  For example, with name='A', format='bsr' and
    blocksize=(4,4), lvl.Absr44 is returned if it already exists with the
    correct blocksize; otherwise lvl.name is converted to the desired
    format and stored as a member of lvl.

    Only create such persistent copies of a matrix for routines such as
    presmoothing and postsmoothing, where the matrix conversion is done
    every cycle.

    Calling this function can _dramatically_ increase your memory costs.
    Be careful with its usage.
    """
    M = getattr(lvl, name)
    cache_attr = name + format
    if format == 'bsr':
        # Encode the blocksize in the attribute name, e.g. 'Absr44'.
        cache_attr += str(blocksize[0]) + str(blocksize[1])

    if not hasattr(lvl, cache_attr):
        if M.format != format:
            # Convert via the matrix's own to<format>() method.
            setattr(lvl, cache_attr, getattr(M, 'to' + format)())
        elif format == 'bsr':
            # Re-block if necessary; tobsr() does nothing extra if uneeded.
            setattr(lvl, cache_attr, M.tobsr(blocksize=blocksize))
        else:
            # Already in the desired format; store as-is.
            setattr(lvl, cache_attr, M)
    return getattr(lvl, cache_attr)
def _assert_ssl_exc_contains(exc, *msgs):
"""Check whether SSL exception contains either of messages provided."""
if len(msgs) < 1:
raise TypeError(
'_assert_ssl_exc_contains() requires '
'at least one message to be passed.',
)
err_msg_lower = str(exc).lower()
    return any(m.lower() in err_msg_lower for m in msgs) | Check whether SSL exception contains either of messages provided. | Below is the instruction that describes the task:
### Input:
Check whether SSL exception contains either of messages provided.
### Response:
def _assert_ssl_exc_contains(exc, *msgs):
"""Check whether SSL exception contains either of messages provided."""
if len(msgs) < 1:
raise TypeError(
'_assert_ssl_exc_contains() requires '
'at least one message to be passed.',
)
err_msg_lower = str(exc).lower()
return any(m.lower() in err_msg_lower for m in msgs) |
def _setsizes(self, cursor=None):
"""Set stored input and output sizes for cursor execution."""
if cursor is None:
cursor = self._cursor
if self._inputsizes:
cursor.setinputsizes(self._inputsizes)
for column, size in self._outputsizes.items():
if column is None:
cursor.setoutputsize(size)
else:
cursor.setoutputsize(size, column) | Set stored input and output sizes for cursor execution. | Below is the the instruction that describes the task:
### Input:
Set stored input and output sizes for cursor execution.
### Response:
def _setsizes(self, cursor=None):
"""Set stored input and output sizes for cursor execution."""
if cursor is None:
cursor = self._cursor
if self._inputsizes:
cursor.setinputsizes(self._inputsizes)
for column, size in self._outputsizes.items():
if column is None:
cursor.setoutputsize(size)
else:
cursor.setoutputsize(size, column) |
def text_if_str(to_type, text_or_primitive):
"""
Convert to a type, assuming that strings can be only unicode text (not a hexstr)
@param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
@param hexstr_or_primitive in bytes, str, or int.
"""
if isinstance(text_or_primitive, str):
(primitive, text) = (None, text_or_primitive)
else:
(primitive, text) = (text_or_primitive, None)
return to_type(primitive, text=text) | Convert to a type, assuming that strings can be only unicode text (not a hexstr)
@param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
@param hexstr_or_primitive in bytes, str, or int. | Below is the the instruction that describes the task:
### Input:
Convert to a type, assuming that strings can be only unicode text (not a hexstr)
@param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
@param hexstr_or_primitive in bytes, str, or int.
### Response:
def text_if_str(to_type, text_or_primitive):
    """
    Convert to a type, assuming that strings can be only unicode text (not a hexstr)

    @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),
    eg~ to_bytes, to_text, to_hex, to_int, etc
    @param text_or_primitive value in bytes, str, or int.
    """
    # A str is always routed through the text= keyword; anything else is
    # treated as a primitive value.
    if isinstance(text_or_primitive, str):
        return to_type(None, text=text_or_primitive)
    return to_type(text_or_primitive, text=None)
def isValidSemver(version):
"""Semantic version number - determines whether the version is qualified. The format is MAJOR.Minor.PATCH, more with https://semver.org/"""
if version and isinstance(version, string_types):
try:
semver.parse(version)
except (TypeError,ValueError):
return False
else:
return True
return False | Semantic version number - determines whether the version is qualified. The format is MAJOR.Minor.PATCH, more with https://semver.org/ | Below is the the instruction that describes the task:
### Input:
Semantic version number - determines whether the version is qualified. The format is MAJOR.Minor.PATCH, more with https://semver.org/
### Response:
def isValidSemver(version):
    """Return True when *version* is a well-formed semantic version number.

    The format is MAJOR.MINOR.PATCH; see https://semver.org/ for details.
    """
    if not version or not isinstance(version, string_types):
        return False
    try:
        semver.parse(version)
    except (TypeError, ValueError):
        # Parser rejected the string: not a valid semver.
        return False
    return True
def query_image_metadata(self, image, metadata_type=""):
'''**Description**
Find the image with the tag <image> and return its metadata.
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- metadata_type: The metadata type can be one of the types returned by running without a type specified
**Success Return Value**
A JSON object representing the image metadata.
'''
return self._query_image(image, query_group='metadata', query_type=metadata_type) | **Description**
Find the image with the tag <image> and return its metadata.
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- metadata_type: The metadata type can be one of the types returned by running without a type specified
**Success Return Value**
A JSON object representing the image metadata. | Below is the the instruction that describes the task:
### Input:
**Description**
Find the image with the tag <image> and return its metadata.
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- metadata_type: The metadata type can be one of the types returned by running without a type specified
**Success Return Value**
A JSON object representing the image metadata.
### Response:
def query_image_metadata(self, image, metadata_type=""):
    '''**Description**
    Find the image with the tag <image> and return its metadata.

    **Arguments**
    - image: Input image can be in the following formats: registry/repo:tag
    - metadata_type: The metadata type can be one of the types returned by running without a type specified

    **Success Return Value**
    A JSON object representing the image metadata.
    '''
    # Delegate to the generic image-query helper under the metadata group.
    return self._query_image(image,
                             query_type=metadata_type,
                             query_group='metadata')
def _get_programs_dict(pkgname_only, flag_protected, flag_no_pfant=False):
"""Returns dictionary {(package description): [ExeInfo0, ...], ...}"""
allinfo = f311.get_programs_dict(pkgname_only, flag_protected)
if not flag_no_pfant and "pyfant" in allinfo:
_add_PFANT(allinfo)
return allinfo | Returns dictionary {(package description): [ExeInfo0, ...], ...} | Below is the the instruction that describes the task:
### Input:
Returns dictionary {(package description): [ExeInfo0, ...], ...}
### Response:
def _get_programs_dict(pkgname_only, flag_protected, flag_no_pfant=False):
    """Returns dictionary {(package description): [ExeInfo0, ...], ...}"""
    allinfo = f311.get_programs_dict(pkgname_only, flag_protected)
    # PFANT entries are appended unless explicitly suppressed.
    if "pyfant" in allinfo and not flag_no_pfant:
        _add_PFANT(allinfo)
    return allinfo
def raw_abundance(biomf, sampleIDs=None, sample_abd=True):
"""
Calculate the total number of sequences in each OTU or SampleID.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: List
:param sampleIDs: A list of column id's from BIOM format OTU table. By default, the
list has been set to None.
:type sample_abd: Boolean
:param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By
default, the output will be provided for SampleID's.
:rtype: dict
:return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
respective abundance as values.
"""
results = defaultdict(int)
if sampleIDs is None:
sampleIDs = biomf.ids()
else:
try:
for sid in sampleIDs:
assert sid in biomf.ids()
except AssertionError:
raise ValueError(
"\nError while calculating raw total abundances: The sampleIDs provided "
"do not match the sampleIDs in biom file. Please double check the "
"sampleIDs provided.\n")
otuIDs = biomf.ids(axis="observation")
for sampleID in sampleIDs:
for otuID in otuIDs:
abd = biomf.get_value_by_ids(otuID, sampleID)
if sample_abd:
results[sampleID] += abd
else:
results[otuID] += abd
return results | Calculate the total number of sequences in each OTU or SampleID.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: List
:param sampleIDs: A list of column id's from BIOM format OTU table. By default, the
list has been set to None.
:type sample_abd: Boolean
:param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By
default, the output will be provided for SampleID's.
:rtype: dict
:return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
respective abundance as values. | Below is the the instruction that describes the task:
### Input:
Calculate the total number of sequences in each OTU or SampleID.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: List
:param sampleIDs: A list of column id's from BIOM format OTU table. By default, the
list has been set to None.
:type sample_abd: Boolean
:param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By
default, the output will be provided for SampleID's.
:rtype: dict
:return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
respective abundance as values.
### Response:
def raw_abundance(biomf, sampleIDs=None, sample_abd=True):
    """
    Calculate the total number of sequences in each OTU or SampleID.

    :type biomf: A BIOM file.
    :param biomf: OTU table format.

    :type sampleIDs: List
    :param sampleIDs: A list of column id's from BIOM format OTU table. By default, the
                      list has been set to None.

    :type sample_abd: Boolean
    :param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By
                       default, the output will be provided for SampleID's.

    :rtype: dict
    :return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
             respective abundance as values.
    """
    results = defaultdict(int)
    if sampleIDs is None:
        sampleIDs = biomf.ids()
    else:
        # Validate explicitly rather than with `assert`, which is stripped
        # when Python runs with -O and would silently skip this check.
        # Building the id set once also avoids an O(n) scan per sample.
        valid_ids = set(biomf.ids())
        if any(sid not in valid_ids for sid in sampleIDs):
            raise ValueError(
                "\nError while calculating raw total abundances: The sampleIDs provided "
                "do not match the sampleIDs in biom file. Please double check the "
                "sampleIDs provided.\n")
    otuIDs = biomf.ids(axis="observation")
    for sampleID in sampleIDs:
        for otuID in otuIDs:
            abd = biomf.get_value_by_ids(otuID, sampleID)
            if sample_abd:
                results[sampleID] += abd
            else:
                results[otuID] += abd
    return results
def _setSpeed(self, speed, motor, device):
"""
Set motor speed. This method takes into consideration the PWM frequency
that the hardware is currently running at and limits the values passed
to the hardware accordingly.
:Parameters:
speed : `int`
Motor speed as an integer. Negative numbers indicate reverse
speeds.
motor : `str`
A string value indicating the motor to set the speed on.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
"""
reverse = False
if speed < 0:
speed = -speed
reverse = True
# 0 and 2 for Qik 2s9v1, 0, 2, and 4 for 2s12v10
if self._deviceConfig[device]['pwm'] in (0, 2, 4,) and speed > 127:
speed = 127
if speed > 127:
if speed > 255:
speed = 255
if reverse:
cmd = self._COMMAND.get('{}-reverse-8bit'.format(motor))
else:
cmd = self._COMMAND.get('{}-forward-8bit'.format(motor))
speed -= 128
else:
if reverse:
cmd = self._COMMAND.get('{}-reverse-7bit'.format(motor))
else:
cmd = self._COMMAND.get('{}-forward-7bit'.format(motor))
if not cmd:
msg = "Invalid motor specified: {}".format(motor)
self._log and self._log.error(msg)
raise ValueError(msg)
self._writeData(cmd, device, params=(speed,)) | Set motor speed. This method takes into consideration the PWM frequency
that the hardware is currently running at and limits the values passed
to the hardware accordingly.
:Parameters:
speed : `int`
Motor speed as an integer. Negative numbers indicate reverse
speeds.
motor : `str`
A string value indicating the motor to set the speed on.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. | Below is the the instruction that describes the task:
### Input:
Set motor speed. This method takes into consideration the PWM frequency
that the hardware is currently running at and limits the values passed
to the hardware accordingly.
:Parameters:
speed : `int`
Motor speed as an integer. Negative numbers indicate reverse
speeds.
motor : `str`
A string value indicating the motor to set the speed on.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
### Response:
def _setSpeed(self, speed, motor, device):
"""
Set motor speed. This method takes into consideration the PWM frequency
that the hardware is currently running at and limits the values passed
to the hardware accordingly.
:Parameters:
speed : `int`
Motor speed as an integer. Negative numbers indicate reverse
speeds.
motor : `str`
A string value indicating the motor to set the speed on.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
"""
reverse = False
if speed < 0:
speed = -speed
reverse = True
# 0 and 2 for Qik 2s9v1, 0, 2, and 4 for 2s12v10
if self._deviceConfig[device]['pwm'] in (0, 2, 4,) and speed > 127:
speed = 127
if speed > 127:
if speed > 255:
speed = 255
if reverse:
cmd = self._COMMAND.get('{}-reverse-8bit'.format(motor))
else:
cmd = self._COMMAND.get('{}-forward-8bit'.format(motor))
speed -= 128
else:
if reverse:
cmd = self._COMMAND.get('{}-reverse-7bit'.format(motor))
else:
cmd = self._COMMAND.get('{}-forward-7bit'.format(motor))
if not cmd:
msg = "Invalid motor specified: {}".format(motor)
self._log and self._log.error(msg)
raise ValueError(msg)
self._writeData(cmd, device, params=(speed,)) |
def upstream_structure(self):
"""Build dictionary with entire upstream pipeline structure
(with regard to the current Step).
Returns:
dict: dictionary describing the upstream pipeline structure. It has two keys:
``'edges'`` and ``'nodes'``, where:
- value of ``'edges'`` is set of tuples ``(input_step.name, self.name)``
- value of ``'nodes'`` is set of all step names upstream to this Step
"""
structure_dict = {'edges': set(),
'nodes': set()}
structure_dict = self._build_structure_dict(structure_dict)
return structure_dict | Build dictionary with entire upstream pipeline structure
(with regard to the current Step).
Returns:
dict: dictionary describing the upstream pipeline structure. It has two keys:
``'edges'`` and ``'nodes'``, where:
- value of ``'edges'`` is set of tuples ``(input_step.name, self.name)``
- value of ``'nodes'`` is set of all step names upstream to this Step | Below is the instruction that describes the task:
### Input:
Build dictionary with entire upstream pipeline structure
(with regard to the current Step).
Returns:
dict: dictionary describing the upstream pipeline structure. It has two keys:
``'edges'`` and ``'nodes'``, where:
- value of ``'edges'`` is set of tuples ``(input_step.name, self.name)``
- value of ``'nodes'`` is set of all step names upstream to this Step
### Response:
def upstream_structure(self):
    """Describe the entire pipeline upstream of this Step.

    Returns:
        dict: dictionary describing the upstream pipeline structure, with
            two keys:
            - ``'edges'``: set of ``(input_step.name, self.name)`` tuples
            - ``'nodes'``: set of names of all steps upstream of this Step
    """
    # Start from an empty graph and let the recursive walk fill it in.
    empty_structure = {'edges': set(), 'nodes': set()}
    return self._build_structure_dict(empty_structure)
def convert(self, blob, **kw):
"""Convert using unoconv converter."""
timeout = self.run_timeout
with make_temp_file(blob) as in_fn, make_temp_file(
prefix="tmp-unoconv-", suffix=".pdf"
) as out_fn:
args = ["-f", "pdf", "-o", out_fn, in_fn]
# Hack for my Mac, FIXME later
if Path("/Applications/LibreOffice.app/Contents/program/python").exists():
cmd = [
"/Applications/LibreOffice.app/Contents/program/python",
"/usr/local/bin/unoconv",
] + args
else:
cmd = [self.unoconv] + args
def run_uno():
try:
self._process = subprocess.Popen(
cmd, close_fds=True, cwd=bytes(self.tmp_dir)
)
self._process.communicate()
except Exception as e:
logger.error("run_uno error: %s", bytes(e), exc_info=True)
raise ConversionError("unoconv failed") from e
run_thread = threading.Thread(target=run_uno)
run_thread.start()
run_thread.join(timeout)
try:
if run_thread.is_alive():
# timeout reached
self._process.terminate()
if self._process.poll() is not None:
try:
self._process.kill()
except OSError:
logger.warning("Failed to kill process %s", self._process)
self._process = None
raise ConversionError(f"Conversion timeout ({timeout})")
converted = open(out_fn).read()
return converted
finally:
self._process = None | Convert using unoconv converter. | Below is the the instruction that describes the task:
### Input:
Convert using unoconv converter.
### Response:
def convert(self, blob, **kw):
    """Convert `blob` to PDF using the external unoconv converter.

    The conversion runs in a worker thread so it can be abandoned (and
    the subprocess terminated) once ``self.run_timeout`` elapses.

    Raises:
        ConversionError: if unoconv fails or the timeout is reached.
    """
    timeout = self.run_timeout
    with make_temp_file(blob) as in_fn, make_temp_file(
        prefix="tmp-unoconv-", suffix=".pdf"
    ) as out_fn:
        args = ["-f", "pdf", "-o", out_fn, in_fn]
        # Hack for my Mac, FIXME later
        if Path("/Applications/LibreOffice.app/Contents/program/python").exists():
            cmd = [
                "/Applications/LibreOffice.app/Contents/program/python",
                "/usr/local/bin/unoconv",
            ] + args
        else:
            cmd = [self.unoconv] + args

        def run_uno():
            try:
                self._process = subprocess.Popen(
                    cmd, close_fds=True, cwd=bytes(self.tmp_dir)
                )
                self._process.communicate()
            except Exception as e:
                # BUG FIX: bytes(e) raised TypeError on Python 3; log the
                # exception text instead.
                logger.error("run_uno error: %s", str(e), exc_info=True)
                raise ConversionError("unoconv failed") from e

        run_thread = threading.Thread(target=run_uno)
        run_thread.start()
        run_thread.join(timeout)
        try:
            if run_thread.is_alive():
                # Timeout reached: ask the subprocess to stop, then kill it
                # if it is still running.
                self._process.terminate()
                # BUG FIX: the original tested `poll() is not None`, which
                # only tried to kill processes that had ALREADY exited
                # (raising OSError) and never killed a still-live one.
                if self._process.poll() is None:
                    try:
                        self._process.kill()
                    except OSError:
                        logger.warning("Failed to kill process %s", self._process)
                self._process = None
                raise ConversionError(f"Conversion timeout ({timeout})")
            # BUG FIX: close the output file handle instead of leaking it.
            with open(out_fn) as result_file:
                return result_file.read()
        finally:
            self._process = None
def lib_dir(self):
"""Return standard library directory path used by RPM libs."""
if not self._lib_dir:
lib_files = glob.glob("/usr/lib/*/librpm.so*")
if not lib_files:
raise InstallError("Can not find lib directory.")
self._lib_dir = os.path.dirname(lib_files[0])
return self._lib_dir | Return standard library directory path used by RPM libs. | Below is the the instruction that describes the task:
### Input:
Return standard library directory path used by RPM libs.
### Response:
def lib_dir(self):
    """Return the standard library directory path used by RPM libs.

    The result is computed once by globbing for librpm and cached on the
    instance.
    """
    if self._lib_dir:
        return self._lib_dir
    candidates = glob.glob("/usr/lib/*/librpm.so*")
    if not candidates:
        raise InstallError("Can not find lib directory.")
    self._lib_dir = os.path.dirname(candidates[0])
    return self._lib_dir
def get_latex(ink_filename):
"""Get the LaTeX string from a file by the *.ink filename."""
tex_file = os.path.splitext(ink_filename)[0] + ".tex"
with open(tex_file) as f:
tex_content = f.read().strip()
pattern = re.compile(r"\\begin\{displaymath\}(.*?)\\end\{displaymath\}",
re.DOTALL)
matches = pattern.findall(tex_content)
if len(matches) == 0:
pattern = re.compile(r"$$(.*?)$$",
re.DOTALL)
matches = pattern.findall(tex_content)
if len(matches) != 1:
raise Exception("%s: Found not one match, but %i: %s" %
(ink_filename, len(matches), matches))
formula_in_latex = matches[0].strip()
formula_in_latex = remove_matching_braces(formula_in_latex)
# repl = []
# for letter in string.letters:
# repl.append(('\mbox{%s}' % letter, letter))
# for search, replace in repl:
# formula_in_latex = formula_in_latex.replace(search, replace)
return formula_in_latex | Get the LaTeX string from a file by the *.ink filename. | Below is the the instruction that describes the task:
### Input:
Get the LaTeX string from a file by the *.ink filename.
### Response:
def get_latex(ink_filename):
    """Get the LaTeX string from a file by the *.ink filename.

    The formula is read from the sibling ``.tex`` file and must appear
    exactly once, either inside a ``\\begin{displaymath}`` environment or
    between literal ``$$ ... $$`` delimiters.

    Raises:
        Exception: if not exactly one formula is found.
    """
    tex_file = os.path.splitext(ink_filename)[0] + ".tex"
    with open(tex_file) as f:
        tex_content = f.read().strip()
    pattern = re.compile(r"\\begin\{displaymath\}(.*?)\\end\{displaymath\}",
                         re.DOTALL)
    matches = pattern.findall(tex_content)
    if len(matches) == 0:
        # BUG FIX: '$' is a regex anchor, so the original pattern
        # r"$$(.*?)$$" could never match literal '$$' delimiters; the
        # dollar signs must be escaped.
        pattern = re.compile(r"\$\$(.*?)\$\$",
                             re.DOTALL)
        matches = pattern.findall(tex_content)
    if len(matches) != 1:
        raise Exception("%s: Found not one match, but %i: %s" %
                        (ink_filename, len(matches), matches))
    formula_in_latex = matches[0].strip()
    formula_in_latex = remove_matching_braces(formula_in_latex)
    return formula_in_latex
def _apply_single_step(dist, params_event_ndims, slices, params_overrides):
"""Applies a single slicing step to `dist`, returning a new instance."""
if len(slices) == 1 and slices[0] == Ellipsis:
# The path used by Distribution.copy: batch_slice(...args..., Ellipsis)
override_dict = {}
else:
override_dict = _slice_params_to_dict(dist, params_event_ndims, slices)
override_dict.update(params_overrides)
parameters = dict(dist.parameters, **override_dict)
new_dist = type(dist)(**parameters)
return new_dist | Applies a single slicing step to `dist`, returning a new instance. | Below is the the instruction that describes the task:
### Input:
Applies a single slicing step to `dist`, returning a new instance.
### Response:
def _apply_single_step(dist, params_event_ndims, slices, params_overrides):
"""Applies a single slicing step to `dist`, returning a new instance."""
if len(slices) == 1 and slices[0] == Ellipsis:
# The path used by Distribution.copy: batch_slice(...args..., Ellipsis)
override_dict = {}
else:
override_dict = _slice_params_to_dict(dist, params_event_ndims, slices)
override_dict.update(params_overrides)
parameters = dict(dist.parameters, **override_dict)
new_dist = type(dist)(**parameters)
return new_dist |
def evaluate_all_configs(
hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
"""Evaluate the agent with multiple eval configurations."""
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score
return metrics | Evaluate the agent with multiple eval configurations. | Below is the the instruction that describes the task:
### Input:
Evaluate the agent with multiple eval configurations.
### Response:
def evaluate_all_configs(
    hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
    """Evaluate the agent with multiple eval configurations."""
    metrics = {}
    # Evaluate under every sampling temperature, both with and without
    # initial no-ops; the set literal collapses the two no-op cases when
    # eval_max_num_noops == 0, giving a single iteration.
    for sampling_temp in hparams.eval_sampling_temps:
        for max_num_noops in {hparams.eval_max_num_noops, 0}:
            scores = evaluate_single_config(
                hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
            )
            # scores holds (clipped, unclipped) results, in that order.
            for score, clipped in zip(scores, (True, False)):
                name = get_metric_name(sampling_temp, max_num_noops, clipped)
                metrics[name] = score
    return metrics
def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1,
energy=0.1):
"""
Adjust the convergence criteria of geometry optimization.
Args:
gradient: the scale factor for gradient criteria. If less than
1.0, you are tightening the threshold. The base value is
300 × 10E−6
displacement: the scale factor for atomic displacement. If less
then 1.0, you are tightening the threshold. The base value is
1200 × 10E−6
energy: the scale factor for energy change between successive
iterations. If less than 1.0, you are tightening the
threshold. The base value is 100 × 10E−8.
"""
if gradient < 1.0/(300-1) or displacement < 1.0/(1200-1) or \
energy < 1.0/(100-1):
raise ValueError("The geometry optimization convergence criteria "
"is too tight")
self.params["rem"]["geom_opt_tol_gradient"] = int(gradient * 300)
self.params["rem"]["geom_opt_tol_displacement"] = int(displacement *
1200)
self.params["rem"]["geom_opt_tol_energy"] = int(energy * 100) | Adjust the convergence criteria of geometry optimization.
Args:
gradient: the scale factor for gradient criteria. If less than
1.0, you are tightening the threshold. The base value is
300 × 10E−6
displacement: the scale factor for atomic displacement. If less
then 1.0, you are tightening the threshold. The base value is
1200 × 10E−6
energy: the scale factor for energy change between successive
iterations. If less than 1.0, you are tightening the
threshold. The base value is 100 × 10E−8. | Below is the instruction that describes the task:
### Input:
Adjust the convergence criteria of geometry optimization.
Args:
gradient: the scale factor for gradient criteria. If less than
1.0, you are tightening the threshold. The base value is
300 × 10E−6
displacement: the scale factor for atomic displacement. If less
then 1.0, you are tightening the threshold. The base value is
1200 × 10E−6
energy: the scale factor for energy change between successive
iterations. If less than 1.0, you are tightening the
threshold. The base value is 100 × 10E−8.
### Response:
def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1,
                             energy=0.1):
    """
    Adjust the convergence criteria of geometry optimization.

    Each argument scales the corresponding Q-Chem base threshold; values
    below 1.0 tighten the criterion.

    Args:
        gradient: the scale factor for gradient criteria. The base value
            is 300 x 10E-6.
        displacement: the scale factor for atomic displacement. The base
            value is 1200 x 10E-6.
        energy: the scale factor for energy change between successive
            iterations. The base value is 100 x 10E-8.

    Raises:
        ValueError: if any scale factor would round its threshold to zero.
    """
    too_tight = (gradient < 1.0 / (300 - 1) or
                 displacement < 1.0 / (1200 - 1) or
                 energy < 1.0 / (100 - 1))
    if too_tight:
        raise ValueError("The geometry optimization convergence criteria "
                         "is too tight")
    rem = self.params["rem"]
    rem["geom_opt_tol_gradient"] = int(gradient * 300)
    rem["geom_opt_tol_displacement"] = int(displacement * 1200)
    rem["geom_opt_tol_energy"] = int(energy * 100)
def update_submit_s3_uri(estimator, job_name):
"""Updated the S3 URI of the framework source directory in given estimator.
Args:
estimator (sagemaker.estimator.Framework): The Framework estimator to update.
job_name (str): The new job name included in the submit S3 URI
Returns:
str: The updated S3 URI of framework source directory
"""
if estimator.uploaded_code is None:
return
pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'
# update the S3 URI with the latest training job.
# s3://path/old_job/source/sourcedir.tar.gz will become s3://path/new_job/source/sourcedir.tar.gz
submit_uri = estimator.uploaded_code.s3_prefix
submit_uri = re.sub(pattern, job_name, submit_uri)
script_name = estimator.uploaded_code.script_name
estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name) | Updated the S3 URI of the framework source directory in given estimator.
Args:
estimator (sagemaker.estimator.Framework): The Framework estimator to update.
job_name (str): The new job name included in the submit S3 URI
Returns:
str: The updated S3 URI of framework source directory | Below is the instruction that describes the task:
### Input:
Updated the S3 URI of the framework source directory in given estimator.
Args:
estimator (sagemaker.estimator.Framework): The Framework estimator to update.
job_name (str): The new job name included in the submit S3 URI
Returns:
str: The updated S3 URI of framework source directory
### Response:
def update_submit_s3_uri(estimator, job_name):
    """Point the estimator's framework source-directory S3 URI at a new job.

    s3://path/old_job/source/sourcedir.tar.gz becomes
    s3://path/new_job/source/sourcedir.tar.gz.

    Args:
        estimator (sagemaker.estimator.Framework): The Framework estimator
            to update.
        job_name (str): The new job name included in the submit S3 URI.
    """
    if estimator.uploaded_code is None:
        return
    # Swap the path component preceding /source/sourcedir.tar.gz (the old
    # training-job name) for the new job name.
    job_name_pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'
    new_prefix = re.sub(job_name_pattern, job_name,
                        estimator.uploaded_code.s3_prefix)
    estimator.uploaded_code = fw_utils.UploadedCode(
        new_prefix, estimator.uploaded_code.script_name)
def connect():
"""Try to connect to the router.
Returns:
u (miniupnc.UPnP): the connected upnp-instance
router (string): the connection information
"""
upnp = miniupnpc.UPnP()
upnp.discoverdelay = 200
providers = upnp.discover()
if providers > 1:
log.debug('multiple upnp providers found', num_providers=providers)
elif providers < 1:
log.error('no upnp providers found')
return None
try:
location = upnp.selectigd()
log.debug('connected', upnp=upnp)
except Exception as e:
log.error('Error when connecting to uPnP provider', exception_info=e)
return None
if not valid_mappable_ipv4(upnp.lanaddr):
log.error('could not query your lanaddr', reported=upnp.lanaddr)
return None
try: # this can fail if router advertises uPnP incorrectly
if not valid_mappable_ipv4(upnp.externalipaddress()):
log.error('could not query your externalipaddress', reported=upnp.externalipaddress())
return None
return upnp, location
except Exception:
log.error('error when connecting with uPnP provider', location=location)
return None | Try to connect to the router.
Returns:
u (miniupnc.UPnP): the connected upnp-instance
router (string): the connection information | Below is the instruction that describes the task:
### Input:
Try to connect to the router.
Returns:
u (miniupnc.UPnP): the connected upnp-instance
router (string): the connection information
### Response:
def connect():
    """Try to connect to the router.

    Returns:
        u (miniupnc.UPnP): the connected upnp-instance
        router (string): the connection information

    Returns ``None`` instead of the tuple when discovery, IGD selection or
    address validation fails.
    """
    upnp = miniupnpc.UPnP()
    # Milliseconds to wait for uPnP devices to answer the discovery probe.
    upnp.discoverdelay = 200
    providers = upnp.discover()
    if providers > 1:
        log.debug('multiple upnp providers found', num_providers=providers)
    elif providers < 1:
        log.error('no upnp providers found')
        return None
    try:
        # Select the Internet Gateway Device among the discovered providers.
        location = upnp.selectigd()
        log.debug('connected', upnp=upnp)
    except Exception as e:
        log.error('Error when connecting to uPnP provider', exception_info=e)
        return None
    # Both the LAN address and the external address must be mappable IPv4
    # for port forwarding to make sense.
    if not valid_mappable_ipv4(upnp.lanaddr):
        log.error('could not query your lanaddr', reported=upnp.lanaddr)
        return None
    try:  # this can fail if router advertises uPnP incorrectly
        if not valid_mappable_ipv4(upnp.externalipaddress()):
            log.error('could not query your externalipaddress', reported=upnp.externalipaddress())
            return None
        return upnp, location
    except Exception:
        log.error('error when connecting with uPnP provider', location=location)
        return None
def validate_args(args):
"""Validate provided arguments and act on --help."""
if args.list_embedding_sources:
print('Listing all sources for {} embeddings.'.format(
args.embedding_name))
print('Specify --embedding-name if you wish to '
'list sources of other embeddings')
print('')
if args.embedding_name not in nlp.embedding.list_sources().keys():
print('Invalid embedding name.')
print('Only {} are supported.'.format(', '.join(
nlp.embedding.list_sources().keys())))
sys.exit(1)
print(' '.join(nlp.embedding.list_sources()[args.embedding_name]))
sys.exit(0)
if not (args.embedding_path or args.embedding_name):
print('You must specify either --embedding-path or --embedding-name ')
print('Use --embedding-path to load and evaluate '
'word embeddings from a Word2Vec text format '
'or fastText binary format file')
print('Use --embedding-name or to download one of '
'the pre-trained embedding files included in GluonNLP.')
sys.exit(1)
if args.embedding_name and not args.embedding_source:
print('Please also specify --embedding-source'
' to select the version of the pre-trained embedding. '
'Use --list-embedding-sources to see all available sources')
sys.exit(1)
print(args) | Validate provided arguments and act on --help. | Below is the the instruction that describes the task:
### Input:
Validate provided arguments and act on --help.
### Response:
def validate_args(args):
    """Validate provided arguments and act on --help."""
    # --list-embedding-sources: print the available sources and exit
    # (successfully on a valid embedding name, with status 1 otherwise).
    if args.list_embedding_sources:
        print('Listing all sources for {} embeddings.'.format(
            args.embedding_name))
        print('Specify --embedding-name if you wish to '
              'list sources of other embeddings')
        print('')
        if args.embedding_name not in nlp.embedding.list_sources().keys():
            print('Invalid embedding name.')
            print('Only {} are supported.'.format(', '.join(
                nlp.embedding.list_sources().keys())))
            sys.exit(1)
        print(' '.join(nlp.embedding.list_sources()[args.embedding_name]))
        sys.exit(0)
    # At least one of --embedding-path / --embedding-name must be given.
    if not (args.embedding_path or args.embedding_name):
        print('You must specify either --embedding-path or --embedding-name ')
        print('Use --embedding-path to load and evaluate '
              'word embeddings from a Word2Vec text format '
              'or fastText binary format file')
        print('Use --embedding-name or to download one of '
              'the pre-trained embedding files included in GluonNLP.')
        sys.exit(1)
    # --embedding-name additionally requires --embedding-source.
    if args.embedding_name and not args.embedding_source:
        print('Please also specify --embedding-source'
              ' to select the version of the pre-trained embedding. '
              'Use --list-embedding-sources to see all available sources')
        sys.exit(1)
    # Echo the parsed arguments for reproducibility.
    print(args)
def get_att_mats(translate_model):
"""Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attetnion weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
enc_atts = []
dec_atts = []
encdec_atts = []
prefix = "transformer/body/"
postfix_self_attention = "/multihead_attention/dot_product_attention"
if translate_model.hparams.self_attention_type == "dot_product_relative":
postfix_self_attention = ("/multihead_attention/"
"dot_product_attention_relative")
postfix_encdec = "/multihead_attention/dot_product_attention"
for i in range(translate_model.hparams.num_hidden_layers):
enc_att = translate_model.attention_weights[
"%sencoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
dec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
encdec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)]
enc_atts.append(enc_att)
dec_atts.append(dec_att)
encdec_atts.append(encdec_att)
return enc_atts, dec_atts, encdec_atts | Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attetnion weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
) | Below is the the instruction that describes the task:
### Input:
Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attetnion weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
### Response:
def get_att_mats(translate_model):
    """Gets the tensors representing the attentions from a built model.

    The attentions are stored in a dict on the Transformer object while
    building the graph.

    Args:
        translate_model: Transformer object to fetch the attention weights
            from.

    Returns:
        Tuple of attention matrices; (
            enc_atts: Encoder self attention weights.
                A list of `num_layers` numpy arrays of size
                (batch_size, num_heads, inp_len, inp_len)
            dec_atts: Decoder self attention weights.
                A list of `num_layers` numpy arrays of size
                (batch_size, num_heads, out_len, out_len)
            encdec_atts: Encoder-Decoder attention weights.
                A list of `num_layers` numpy arrays of size
                (batch_size, num_heads, out_len, inp_len)
        )
    """
    enc_atts = []
    dec_atts = []
    encdec_atts = []
    # Keys into translate_model.attention_weights are variable-scope names
    # assembled from a fixed prefix, the layer index and the attention type.
    prefix = "transformer/body/"
    postfix_self_attention = "/multihead_attention/dot_product_attention"
    # Relative-position self-attention stores its weights under a different
    # scope suffix.
    if translate_model.hparams.self_attention_type == "dot_product_relative":
        postfix_self_attention = ("/multihead_attention/"
                                  "dot_product_attention_relative")
    postfix_encdec = "/multihead_attention/dot_product_attention"
    # Collect one weight tensor per hidden layer for each attention type.
    for i in range(translate_model.hparams.num_hidden_layers):
        enc_att = translate_model.attention_weights[
            "%sencoder/layer_%i/self_attention%s"
            % (prefix, i, postfix_self_attention)]
        dec_att = translate_model.attention_weights[
            "%sdecoder/layer_%i/self_attention%s"
            % (prefix, i, postfix_self_attention)]
        encdec_att = translate_model.attention_weights[
            "%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)]
        enc_atts.append(enc_att)
        dec_atts.append(dec_att)
        encdec_atts.append(encdec_att)
    return enc_atts, dec_atts, encdec_atts
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis | Map the axis to the block_manager axis. | Below is the the instruction that describes the task:
### Input:
Map the axis to the block_manager axis.
### Response:
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis |
def measure_int_put(self, measure, value):
"""associates the measure of type Int with the given value"""
if value < 0:
# Should be an error in a later release.
logger.warning("Cannot record negative values")
self._measurement_map[measure] = value | associates the measure of type Int with the given value | Below is the the instruction that describes the task:
### Input:
associates the measure of type Int with the given value
### Response:
def measure_int_put(self, measure, value):
    """Associate the measure of type Int with the given value."""
    negative = value < 0
    if negative:
        # Negative values are still recorded for now; this should become
        # an error in a later release.
        logger.warning("Cannot record negative values")
    self._measurement_map[measure] = value
def Process(self, parser_mediator, root_item=None, **kwargs):
"""Parses an OLECF file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root_item is not set.
"""
# This will raise if unhandled keyword arguments are passed.
super(AutomaticDestinationsOLECFPlugin, self).Process(
parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
for item in root_item.sub_items:
if item.name == 'DestList':
self.ParseDestList(parser_mediator, item)
elif self._RE_LNK_ITEM_NAME.match(item.name):
display_name = parser_mediator.GetDisplayName()
if display_name:
display_name = '{0:s} # {1:s}'.format(display_name, item.name)
else:
display_name = '# {0:s}'.format(item.name)
parser_mediator.AppendToParserChain(self._WINLNK_PARSER)
try:
item.seek(0, os.SEEK_SET)
self._WINLNK_PARSER.ParseFileLNKFile(
parser_mediator, item, display_name)
finally:
parser_mediator.PopFromParserChain() | Parses an OLECF file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root_item is not set. | Below is the instruction that describes the task:
### Input:
Parses an OLECF file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root_item is not set.
### Response:
def Process(self, parser_mediator, root_item=None, **kwargs):
    """Parses an OLECF file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        root_item (Optional[pyolecf.item]): root item of the OLECF file.

    Raises:
        ValueError: If the root_item is not set.
    """
    # This will raise if unhandled keyword arguments are passed.
    super(AutomaticDestinationsOLECFPlugin, self).Process(
        parser_mediator, **kwargs)
    if not root_item:
        raise ValueError('Root item not set.')
    for item in root_item.sub_items:
        if item.name == 'DestList':
            self.ParseDestList(parser_mediator, item)
        elif self._RE_LNK_ITEM_NAME.match(item.name):
            # Items whose names match _RE_LNK_ITEM_NAME are parsed as
            # embedded Windows Shortcut (LNK) streams; annotate the display
            # name with the stream name for traceability.
            display_name = parser_mediator.GetDisplayName()
            if display_name:
                display_name = '{0:s} # {1:s}'.format(display_name, item.name)
            else:
                display_name = '# {0:s}'.format(item.name)
            parser_mediator.AppendToParserChain(self._WINLNK_PARSER)
            try:
                # Rewind the stream before handing it to the LNK parser.
                item.seek(0, os.SEEK_SET)
                self._WINLNK_PARSER.ParseFileLNKFile(
                    parser_mediator, item, display_name)
            finally:
                # Always restore the parser chain, even on parse failure.
                parser_mediator.PopFromParserChain()
def flatten_txns_by_hash(tx_list, nesting=True):
'''
Flattens a response from querying a list of address (or wallet) transactions
If nesting==True then it will return an ordered dictionary where the keys are tranasaction hashes, otherwise it will be a list of dicts.
(nesting==False is good for django templates)
'''
nested_cleaned_txs = OrderedDict()
for tx in tx_list:
tx_hash = tx.get('tx_hash')
satoshis = tx.get('value', 0) # rare edge case where API returns 0
if tx.get('tx_input_n') >= 0:
satoshis *= -1
if tx_hash in nested_cleaned_txs:
nested_cleaned_txs[tx_hash]['txns_satoshis_list'].append(satoshis)
nested_cleaned_txs[tx_hash]['satoshis_net'] = sum(nested_cleaned_txs[tx_hash]['txns_satoshis_list'])
if tx.get('double_spend') and not nested_cleaned_txs[tx_hash]['double_spend']:
nested_cleaned_txs[tx_hash]['double_spend'] = True
else:
nested_cleaned_txs[tx_hash] = {
'txns_satoshis_list': [satoshis, ],
'satoshis_net': satoshis,
'received_at': tx.get('received'),
'confirmed_at': tx.get('confirmed'),
'confirmations': tx.get('confirmations', 0),
'block_height': tx.get('block_height'),
'double_spend': tx.get('double_spend', False),
}
if nesting:
return nested_cleaned_txs
else:
unnested_cleaned_txs = []
for tx_hash in nested_cleaned_txs:
tx_cleaned = nested_cleaned_txs[tx_hash]
tx_cleaned['tx_hash'] = tx_hash
unnested_cleaned_txs.append(tx_cleaned)
return unnested_cleaned_txs | Flattens a response from querying a list of address (or wallet) transactions
If nesting==True then it will return an ordered dictionary where the keys are tranasaction hashes, otherwise it will be a list of dicts.
(nesting==False is good for django templates) | Below is the instruction that describes the task:
### Input:
Flattens a response from querying a list of address (or wallet) transactions
If nesting==True then it will return an ordered dictionary where the keys are tranasaction hashes, otherwise it will be a list of dicts.
(nesting==False is good for django templates)
### Response:
def flatten_txns_by_hash(tx_list, nesting=True):
'''
Flattens a response from querying a list of address (or wallet) transactions
    If nesting==True then it will return an ordered dictionary where the keys are transaction hashes, otherwise it will be a list of dicts.
(nesting==False is good for django templates)
'''
nested_cleaned_txs = OrderedDict()
for tx in tx_list:
tx_hash = tx.get('tx_hash')
satoshis = tx.get('value', 0) # rare edge case where API returns 0
if tx.get('tx_input_n') >= 0:
satoshis *= -1
if tx_hash in nested_cleaned_txs:
nested_cleaned_txs[tx_hash]['txns_satoshis_list'].append(satoshis)
nested_cleaned_txs[tx_hash]['satoshis_net'] = sum(nested_cleaned_txs[tx_hash]['txns_satoshis_list'])
if tx.get('double_spend') and not nested_cleaned_txs[tx_hash]['double_spend']:
nested_cleaned_txs[tx_hash]['double_spend'] = True
else:
nested_cleaned_txs[tx_hash] = {
'txns_satoshis_list': [satoshis, ],
'satoshis_net': satoshis,
'received_at': tx.get('received'),
'confirmed_at': tx.get('confirmed'),
'confirmations': tx.get('confirmations', 0),
'block_height': tx.get('block_height'),
'double_spend': tx.get('double_spend', False),
}
if nesting:
return nested_cleaned_txs
else:
unnested_cleaned_txs = []
for tx_hash in nested_cleaned_txs:
tx_cleaned = nested_cleaned_txs[tx_hash]
tx_cleaned['tx_hash'] = tx_hash
unnested_cleaned_txs.append(tx_cleaned)
return unnested_cleaned_txs |
def draw_selection(self, surf):
"""Draw the selection rectange."""
select_start = self._select_start # Cache to avoid a race condition.
if select_start:
mouse_pos = self.get_mouse_pos()
if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and
mouse_pos.surf.surf_type == select_start.surf.surf_type):
rect = point.Rect(select_start.world_pos, mouse_pos.world_pos)
      surf.draw_rect(colors.green, rect, 1) | Draw the selection rectangle. | Below is the instruction that describes the task:
### Input:
Draw the selection rectangle.
### Response:
def draw_selection(self, surf):
"""Draw the selection rectange."""
select_start = self._select_start # Cache to avoid a race condition.
if select_start:
mouse_pos = self.get_mouse_pos()
if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and
mouse_pos.surf.surf_type == select_start.surf.surf_type):
rect = point.Rect(select_start.world_pos, mouse_pos.world_pos)
surf.draw_rect(colors.green, rect, 1) |
def parse_config_file():
"""
Find the .splunk_logger config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- project_id
- access_token
"""
for filename in ('.splunk_logger', os.path.expanduser('~/.splunk_logger')):
project_id, access_token, api_domain = _parse_config_file_impl(filename)
if project_id is not None\
and access_token is not None\
and api_domain is not None:
return project_id, access_token, api_domain
else:
return None, None, None | Find the .splunk_logger config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- project_id
- access_token | Below is the the instruction that describes the task:
### Input:
Find the .splunk_logger config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- project_id
- access_token
### Response:
def parse_config_file():
"""
Find the .splunk_logger config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- project_id
- access_token
"""
for filename in ('.splunk_logger', os.path.expanduser('~/.splunk_logger')):
project_id, access_token, api_domain = _parse_config_file_impl(filename)
if project_id is not None\
and access_token is not None\
and api_domain is not None:
return project_id, access_token, api_domain
else:
return None, None, None |
def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,
# but numpy-1.12.0 introduced the function `~numpy.geomspace`
logfmin = numpy.log10(self.plane.frange[0])
logfmax = numpy.log10(self.plane.frange[1])
outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new | Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest. | Below is the instruction that describes the task:
### Input:
Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
### Response:
def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,
# but numpy-1.12.0 introduced the function `~numpy.geomspace`
logfmin = numpy.log10(self.plane.frange[0])
logfmax = numpy.log10(self.plane.frange[1])
outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new |
def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Aborts the current request with a
400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
raise bottle.HTTPError(
status=status_code,
body=error.messages,
headers=error_headers,
exception=error,
) | Handles errors during parsing. Aborts the current request with a
400 error. | Below is the instruction that describes the task:
### Input:
Handles errors during parsing. Aborts the current request with a
400 error.
### Response:
def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Aborts the current request with a
400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
raise bottle.HTTPError(
status=status_code,
body=error.messages,
headers=error_headers,
exception=error,
) |
def path_to_url(path):
# type: (Union[str, Text]) -> str
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url | Convert a path to a file: URL. The path will be made absolute and have
quoted path parts. | Below is the instruction that describes the task:
### Input:
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
### Response:
def path_to_url(path):
# type: (Union[str, Text]) -> str
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url |
def encode(self, txt):
'''Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text.
'''
return list(self._fwd_index.get(c, 0) for c in txt) | Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text. | Below is the the instruction that describes the task:
### Input:
Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text.
### Response:
def encode(self, txt):
'''Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text.
'''
return list(self._fwd_index.get(c, 0) for c in txt) |
def toFloat(value):
""" Converts string or signed list to float. """
if isinstance(value, str):
return strFloat(value)
elif isinstance(value, list):
return slistFloat(value)
else:
        return value | Converts string or signed list to float. | Below is the instruction that describes the task:
### Input:
Converts string or signed list to float.
### Response:
def toFloat(value):
""" Converts string or signed list to float. """
if isinstance(value, str):
return strFloat(value)
elif isinstance(value, list):
return slistFloat(value)
else:
return value |
def connect(self, addr):
"""
Call the :meth:`connect` method of the underlying socket and set up SSL
on the socket, using the :class:`Context` object supplied to this
:class:`Connection` object at creation.
:param addr: A remote address
:return: What the socket's connect method returns
"""
_lib.SSL_set_connect_state(self._ssl)
return self._socket.connect(addr) | Call the :meth:`connect` method of the underlying socket and set up SSL
on the socket, using the :class:`Context` object supplied to this
:class:`Connection` object at creation.
:param addr: A remote address
:return: What the socket's connect method returns | Below is the instruction that describes the task:
### Input:
Call the :meth:`connect` method of the underlying socket and set up SSL
on the socket, using the :class:`Context` object supplied to this
:class:`Connection` object at creation.
:param addr: A remote address
:return: What the socket's connect method returns
### Response:
def connect(self, addr):
"""
Call the :meth:`connect` method of the underlying socket and set up SSL
on the socket, using the :class:`Context` object supplied to this
:class:`Connection` object at creation.
:param addr: A remote address
:return: What the socket's connect method returns
"""
_lib.SSL_set_connect_state(self._ssl)
return self._socket.connect(addr) |
def _get_min_val(spaceshift, *params):
r"""Calculate minimum resolved amplitude or maximum r."""
# Get parameters from tuples
spacing, shift = spaceshift
n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params
# Get filter for these parameters
dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')
# Calculate rhs-response with this filter
k = dlf.base/r[:, None]
# Loop over transforms
for i, f in enumerate(fC):
# Calculate lhs and rhs; rhs depends on ftype
lhs = f.lhs(k)
if f.name == 'j2':
rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
rhs = rhs0 + rhs1
else:
rhs = np.dot(lhs, getattr(dlf, f.name))/r
# Get relative error
rel_error = np.abs((rhs - f.rhs)/f.rhs)
# Get indices where relative error is bigger than error
imin0 = np.where(rel_error > error)[0]
# Find first occurrence of failure
if np.all(rhs == 0) or np.all(np.isnan(rhs)):
# if all rhs are zeros or nans, the filter is useless
imin0 = 0
elif imin0.size == 0:
# if imin0.size == 0: # empty array, all rel_error < error.
imin0 = rhs.size-1 # set to last r
if verb > 0 and log['warn-r'] == 0:
print('* WARNING :: all data have error < ' + str(error) +
'; choose larger r or set error-level higher.')
log['warn-r'] = 1 # Only do this once
else:
# Kind of a dirty hack: Permit to jump up to four bad values,
# resulting for instance from high rel_error from zero crossings
# of the transform pair. Should be made an input argument or
# generally improved.
if imin0.size > 4:
imin0 = np.max([0, imin0[4]-5])
else: # just take the first one (no jumping allowed; normal case)
imin0 = np.max([0, imin0[0]-1])
# Note that both version yield the same result if the failure is
# consistent.
# Depending on cvar, store minimum amplitude or 1/maxr
if cvar == 'amp':
min_val0 = np.abs(rhs[imin0])
else:
min_val0 = 1/r[imin0]
# Check if this inversion is better than previous ones
if i == 0: # First run, store these values
imin = dc(imin0)
min_val = dc(min_val0)
else: # Replace imin, min_val if this one is better
if min_val0 > min_val:
min_val = dc(min_val0)
imin = dc(imin0)
# QC plot
if plot > 2:
_plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)
# If verbose, print progress
if verb > 1:
log = _print_count(log)
# If there is no point with rel_error < error (imin=0) it returns np.inf.
    return np.where(imin == 0, np.inf, min_val) | r"""Calculate minimum resolved amplitude or maximum r. | Below is the instruction that describes the task:
### Input:
r"""Calculate minimum resolved amplitude or maximum r.
### Response:
def _get_min_val(spaceshift, *params):
r"""Calculate minimum resolved amplitude or maximum r."""
# Get parameters from tuples
spacing, shift = spaceshift
n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params
# Get filter for these parameters
dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')
# Calculate rhs-response with this filter
k = dlf.base/r[:, None]
# Loop over transforms
for i, f in enumerate(fC):
# Calculate lhs and rhs; rhs depends on ftype
lhs = f.lhs(k)
if f.name == 'j2':
rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
rhs = rhs0 + rhs1
else:
rhs = np.dot(lhs, getattr(dlf, f.name))/r
# Get relative error
rel_error = np.abs((rhs - f.rhs)/f.rhs)
# Get indices where relative error is bigger than error
imin0 = np.where(rel_error > error)[0]
# Find first occurrence of failure
if np.all(rhs == 0) or np.all(np.isnan(rhs)):
# if all rhs are zeros or nans, the filter is useless
imin0 = 0
elif imin0.size == 0:
# if imin0.size == 0: # empty array, all rel_error < error.
imin0 = rhs.size-1 # set to last r
if verb > 0 and log['warn-r'] == 0:
print('* WARNING :: all data have error < ' + str(error) +
'; choose larger r or set error-level higher.')
log['warn-r'] = 1 # Only do this once
else:
# Kind of a dirty hack: Permit to jump up to four bad values,
# resulting for instance from high rel_error from zero crossings
# of the transform pair. Should be made an input argument or
# generally improved.
if imin0.size > 4:
imin0 = np.max([0, imin0[4]-5])
else: # just take the first one (no jumping allowed; normal case)
imin0 = np.max([0, imin0[0]-1])
# Note that both version yield the same result if the failure is
# consistent.
# Depending on cvar, store minimum amplitude or 1/maxr
if cvar == 'amp':
min_val0 = np.abs(rhs[imin0])
else:
min_val0 = 1/r[imin0]
# Check if this inversion is better than previous ones
if i == 0: # First run, store these values
imin = dc(imin0)
min_val = dc(min_val0)
else: # Replace imin, min_val if this one is better
if min_val0 > min_val:
min_val = dc(min_val0)
imin = dc(imin0)
# QC plot
if plot > 2:
_plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)
# If verbose, print progress
if verb > 1:
log = _print_count(log)
# If there is no point with rel_error < error (imin=0) it returns np.inf.
return np.where(imin == 0, np.inf, min_val) |
def mcf(n,c):
"""mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved.
"""
model = Model("mcf")
x,f = {},{}
for i in range(1,n+1):
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
if i != j and j != 1:
for k in range(2,n+1):
if i != k:
f[i,j,k] = model.addVar(ub=1, vtype="C", name="f(%s,%s,%s)"%(i,j,k))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for k in range(2,n+1):
model.addCons(quicksum(f[1,i,k] for i in range(2,n+1) if (1,i,k) in f) == 1, "FlowOut(%s)"%k)
model.addCons(quicksum(f[i,k,k] for i in range(1,n+1) if (i,k,k) in f) == 1, "FlowIn(%s)"%k)
for i in range(2,n+1):
if i != k:
model.addCons(quicksum(f[j,i,k] for j in range(1,n+1) if (j,i,k) in f) == \
quicksum(f[i,j,k] for j in range(1,n+1) if (i,j,k) in f),
"FlowCons(%s,%s)"%(i,k))
for (i,j,k) in f:
model.addCons(f[i,j,k] <= x[i,j], "FlowUB(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,f
return model | mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved. | Below is the instruction that describes the task:
### Input:
mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved.
### Response:
def mcf(n,c):
"""mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved.
"""
model = Model("mcf")
x,f = {},{}
for i in range(1,n+1):
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
if i != j and j != 1:
for k in range(2,n+1):
if i != k:
f[i,j,k] = model.addVar(ub=1, vtype="C", name="f(%s,%s,%s)"%(i,j,k))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for k in range(2,n+1):
model.addCons(quicksum(f[1,i,k] for i in range(2,n+1) if (1,i,k) in f) == 1, "FlowOut(%s)"%k)
model.addCons(quicksum(f[i,k,k] for i in range(1,n+1) if (i,k,k) in f) == 1, "FlowIn(%s)"%k)
for i in range(2,n+1):
if i != k:
model.addCons(quicksum(f[j,i,k] for j in range(1,n+1) if (j,i,k) in f) == \
quicksum(f[i,j,k] for j in range(1,n+1) if (i,j,k) in f),
"FlowCons(%s,%s)"%(i,k))
for (i,j,k) in f:
model.addCons(f[i,j,k] <= x[i,j], "FlowUB(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,f
return model |
def extract_cycles(series, left=False, right=False):
"""Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles.
"""
points = deque()
for x in reversals(series, left=left, right=right):
points.append(x)
while len(points) >= 3:
# Form ranges X and Y from the three most recent points
X = abs(points[-2] - points[-1])
Y = abs(points[-3] - points[-2])
if X < Y:
# Read the next point
break
elif len(points) == 3:
# Y contains the starting point
# Count Y as one-half cycle and discard the first point
yield points[0], points[1], 0.5
points.popleft()
else:
# Count Y as one cycle and discard the peak and the valley of Y
yield points[-3], points[-2], 1.0
last = points.pop()
points.pop()
points.pop()
points.append(last)
else:
# Count the remaining ranges as one-half cycles
while len(points) > 1:
yield points[0], points[1], 0.5
points.popleft() | Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles. | Below is the the instruction that describes the task:
### Input:
Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles.
### Response:
def extract_cycles(series, left=False, right=False):
"""Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles.
"""
points = deque()
for x in reversals(series, left=left, right=right):
points.append(x)
while len(points) >= 3:
# Form ranges X and Y from the three most recent points
X = abs(points[-2] - points[-1])
Y = abs(points[-3] - points[-2])
if X < Y:
# Read the next point
break
elif len(points) == 3:
# Y contains the starting point
# Count Y as one-half cycle and discard the first point
yield points[0], points[1], 0.5
points.popleft()
else:
# Count Y as one cycle and discard the peak and the valley of Y
yield points[-3], points[-2], 1.0
last = points.pop()
points.pop()
points.pop()
points.append(last)
else:
# Count the remaining ranges as one-half cycles
while len(points) > 1:
yield points[0], points[1], 0.5
points.popleft() |
def load(self, definitions):
""" Load the object by opening the URL """
url = self.location
log.debug('importing (%s)', url)
if '://' not in url:
url = urljoin(definitions.url, url)
options = definitions.options
d = Definitions(url, options)
if d.root.match(Definitions.Tag, wsdlns):
self.import_definitions(definitions, d)
return
if d.root.match(Schema.Tag, Namespace.xsdns):
self.import_schema(definitions, d)
return
    raise Exception('document at "%s" is unknown' % url) | Load the object by opening the URL | Below is the instruction that describes the task:
### Input:
Load the object by opening the URL
### Response:
def load(self, definitions):
""" Load the object by opening the URL """
url = self.location
log.debug('importing (%s)', url)
if '://' not in url:
url = urljoin(definitions.url, url)
options = definitions.options
d = Definitions(url, options)
if d.root.match(Definitions.Tag, wsdlns):
self.import_definitions(definitions, d)
return
if d.root.match(Schema.Tag, Namespace.xsdns):
self.import_schema(definitions, d)
return
raise Exception('document at "%s" is unknown' % url) |
def optimize_thumbnail(thumbnail):
'''Optimize thumbnail images by removing unnecessary data'''
try:
optimize_command = settings.THUMBNAIL_OPTIMIZE_COMMAND[
determinetype(thumbnail.path)]
if not optimize_command:
return
except (TypeError, KeyError, NotImplementedError):
return
storage = thumbnail.storage
try:
with NamedTemporaryFile() as temp_file:
thumbnail.seek(0)
temp_file.write(thumbnail.read())
temp_file.flush()
optimize_command = optimize_command.format(filename=temp_file.name)
output = check_output(
optimize_command, stderr=subprocess.STDOUT, shell=True)
if output:
logger.warning(
'{0} returned {1}'.format(optimize_command, output))
else:
logger.info('{0} returned nothing'.format(optimize_command))
with open(temp_file.name, 'rb') as f:
thumbnail.file = ContentFile(f.read())
storage.delete(thumbnail.path)
storage.save(thumbnail.path, thumbnail)
except Exception as e:
logger.error(e) | Optimize thumbnail images by removing unnecessary data | Below is the the instruction that describes the task:
### Input:
Optimize thumbnail images by removing unnecessary data
### Response:
def optimize_thumbnail(thumbnail):
'''Optimize thumbnail images by removing unnecessary data'''
try:
optimize_command = settings.THUMBNAIL_OPTIMIZE_COMMAND[
determinetype(thumbnail.path)]
if not optimize_command:
return
except (TypeError, KeyError, NotImplementedError):
return
storage = thumbnail.storage
try:
with NamedTemporaryFile() as temp_file:
thumbnail.seek(0)
temp_file.write(thumbnail.read())
temp_file.flush()
optimize_command = optimize_command.format(filename=temp_file.name)
output = check_output(
optimize_command, stderr=subprocess.STDOUT, shell=True)
if output:
logger.warning(
'{0} returned {1}'.format(optimize_command, output))
else:
logger.info('{0} returned nothing'.format(optimize_command))
with open(temp_file.name, 'rb') as f:
thumbnail.file = ContentFile(f.read())
storage.delete(thumbnail.path)
storage.save(thumbnail.path, thumbnail)
except Exception as e:
logger.error(e) |
def getschemacls(resource, besteffort=False):
"""Get schema class related to input resource.
:param resource: resource from which get schema class.
:param bool besteffort: if True (default) try a best effort in parsing
the inheritance tree of resource if resource is a class.
:rtype: type
"""
return _SCHEMAFACTORY.getschemacls(
resource=resource, besteffort=besteffort
) | Get schema class related to input resource.
:param resource: resource from which get schema class.
:param bool besteffort: if True (default) try a best effort in parsing
the inheritance tree of resource if resource is a class.
:rtype: type | Below is the the instruction that describes the task:
### Input:
Get schema class related to input resource.
:param resource: resource from which get schema class.
:param bool besteffort: if True (default) try a best effort in parsing
the inheritance tree of resource if resource is a class.
:rtype: type
### Response:
def getschemacls(resource, besteffort=False):
"""Get schema class related to input resource.
:param resource: resource from which get schema class.
:param bool besteffort: if True (default) try a best effort in parsing
the inheritance tree of resource if resource is a class.
:rtype: type
"""
return _SCHEMAFACTORY.getschemacls(
resource=resource, besteffort=besteffort
) |
def preferred_change(data):
''' Determines preferred existing id based on curie prefix in the ranking list '''
ranking = [
'CHEBI',
'NCBITaxon',
'COGPO',
'CAO',
'DICOM',
'UBERON',
'NLX',
'NLXANAT',
'NLXCELL',
'NLXFUNC',
'NLXINV',
'NLXORG',
'NLXRES',
'NLXSUB'
'BIRNLEX',
'SAO',
'NDA.CDE',
'PR',
'IAO',
'NIFEXT',
'OEN',
'ILX',
]
mock_rank = ranking[::-1]
score = []
old_pref_index = None
for i, d in enumerate(data['existing_ids']):
if not d.get('preferred'): # db allows None or '' which will cause a problem
d['preferred'] = 0
if int(d['preferred']) == 1:
old_pref_index = i
if d.get('curie'):
pref = d['curie'].split(':')[0]
if pref in mock_rank:
score.append(mock_rank.index(pref))
else:
score.append(-1)
else:
score.append(-1)
new_pref_index = score.index(max(score))
new_pref_iri = data['existing_ids'][new_pref_index]['iri']
if new_pref_iri.rsplit('/', 1)[0] == 'http://uri.interlex.org/base':
if old_pref_index:
if old_pref_index != new_pref_index:
return data
for e in data['existing_ids']:
e['preferred'] = 0
data['existing_ids'][new_pref_index]['preferred'] = 1
return data | Determines preferred existing id based on curie prefix in the ranking list | Below is the the instruction that describes the task:
### Input:
Determines preferred existing id based on curie prefix in the ranking list
### Response:
def preferred_change(data):
''' Determines preferred existing id based on curie prefix in the ranking list '''
ranking = [
'CHEBI',
'NCBITaxon',
'COGPO',
'CAO',
'DICOM',
'UBERON',
'NLX',
'NLXANAT',
'NLXCELL',
'NLXFUNC',
'NLXINV',
'NLXORG',
'NLXRES',
'NLXSUB'
'BIRNLEX',
'SAO',
'NDA.CDE',
'PR',
'IAO',
'NIFEXT',
'OEN',
'ILX',
]
mock_rank = ranking[::-1]
score = []
old_pref_index = None
for i, d in enumerate(data['existing_ids']):
if not d.get('preferred'): # db allows None or '' which will cause a problem
d['preferred'] = 0
if int(d['preferred']) == 1:
old_pref_index = i
if d.get('curie'):
pref = d['curie'].split(':')[0]
if pref in mock_rank:
score.append(mock_rank.index(pref))
else:
score.append(-1)
else:
score.append(-1)
new_pref_index = score.index(max(score))
new_pref_iri = data['existing_ids'][new_pref_index]['iri']
if new_pref_iri.rsplit('/', 1)[0] == 'http://uri.interlex.org/base':
if old_pref_index:
if old_pref_index != new_pref_index:
return data
for e in data['existing_ids']:
e['preferred'] = 0
data['existing_ids'][new_pref_index]['preferred'] = 1
return data |
def update(self, name, rssi):
"""Update the device name and/or RSSI.
During an ongoing scan, multiple records from the same device can be
received during the scan. Each time that happens this method is
called to update the :attr:`name` and/or :attr:`rssi` attributes.
"""
self.name = name
self.rssi = rssi
self._age = time.time() | Update the device name and/or RSSI.
During an ongoing scan, multiple records from the same device can be
received during the scan. Each time that happens this method is
called to update the :attr:`name` and/or :attr:`rssi` attributes. | Below is the the instruction that describes the task:
### Input:
Update the device name and/or RSSI.
During an ongoing scan, multiple records from the same device can be
received during the scan. Each time that happens this method is
called to update the :attr:`name` and/or :attr:`rssi` attributes.
### Response:
def update(self, name, rssi):
"""Update the device name and/or RSSI.
During an ongoing scan, multiple records from the same device can be
received during the scan. Each time that happens this method is
called to update the :attr:`name` and/or :attr:`rssi` attributes.
"""
self.name = name
self.rssi = rssi
self._age = time.time() |
def arguments_from_optionable(parser, component, prefix=""):
""" Add argparse arguments from all options of one :class:`Optionable`
>>> # Let's build a dummy optionable component:
>>> comp = Optionable()
>>> comp.add_option("num", Numeric(default=1, max=12, help="An exemple of option"))
>>> comp.add_option("title", Text(help="The title of the title"))
>>> comp.add_option("ok", Boolean(help="is it ok ?", default=True))
>>> comp.add_option("cool", Boolean(help="is it cool ?", default=False))
>>>
>>> # one can then register all the options of this component to a arg parser
>>> parser = argparse.ArgumentParser(prog="PROG")
>>> arguments_from_optionable(parser, comp)
>>> parser.print_help()
usage: PROG [-h] [--num NUM] [--title TITLE] [--not-ok] [--cool]
<BLANKLINE>
optional arguments:
-h, --help show this help message and exit
--num NUM An exemple of option
--title TITLE The title of the title
--not-ok is it ok ?
--cool is it cool ?
The option values for a componant can then be retrieved with :func:`get_config_for`
.. doctest::
:hide:
>>> import argparse
>>> args = argparse.Namespace()
>>> args.num = 1
>>> args.title = "My title"
>>> args.ok = True
>>> args.cool = False
>>> args = parser.parse_args() # doctest: +SKIP
>>> config = get_config_for(args, comp)
>>> comp("input", **config) # doctest: +SKIP
"comp_result"
"""
for option in component.options:
if component.options[option].hidden:
continue
argument_from_option(parser, component, option, prefix=prefix) | Add argparse arguments from all options of one :class:`Optionable`
>>> # Let's build a dummy optionable component:
>>> comp = Optionable()
>>> comp.add_option("num", Numeric(default=1, max=12, help="An exemple of option"))
>>> comp.add_option("title", Text(help="The title of the title"))
>>> comp.add_option("ok", Boolean(help="is it ok ?", default=True))
>>> comp.add_option("cool", Boolean(help="is it cool ?", default=False))
>>>
>>> # one can then register all the options of this component to a arg parser
>>> parser = argparse.ArgumentParser(prog="PROG")
>>> arguments_from_optionable(parser, comp)
>>> parser.print_help()
usage: PROG [-h] [--num NUM] [--title TITLE] [--not-ok] [--cool]
<BLANKLINE>
optional arguments:
-h, --help show this help message and exit
--num NUM An exemple of option
--title TITLE The title of the title
--not-ok is it ok ?
--cool is it cool ?
The option values for a componant can then be retrieved with :func:`get_config_for`
.. doctest::
:hide:
>>> import argparse
>>> args = argparse.Namespace()
>>> args.num = 1
>>> args.title = "My title"
>>> args.ok = True
>>> args.cool = False
>>> args = parser.parse_args() # doctest: +SKIP
>>> config = get_config_for(args, comp)
>>> comp("input", **config) # doctest: +SKIP
"comp_result" | Below is the the instruction that describes the task:
### Input:
Add argparse arguments from all options of one :class:`Optionable`
>>> # Let's build a dummy optionable component:
>>> comp = Optionable()
>>> comp.add_option("num", Numeric(default=1, max=12, help="An exemple of option"))
>>> comp.add_option("title", Text(help="The title of the title"))
>>> comp.add_option("ok", Boolean(help="is it ok ?", default=True))
>>> comp.add_option("cool", Boolean(help="is it cool ?", default=False))
>>>
>>> # one can then register all the options of this component to a arg parser
>>> parser = argparse.ArgumentParser(prog="PROG")
>>> arguments_from_optionable(parser, comp)
>>> parser.print_help()
usage: PROG [-h] [--num NUM] [--title TITLE] [--not-ok] [--cool]
<BLANKLINE>
optional arguments:
-h, --help show this help message and exit
--num NUM An exemple of option
--title TITLE The title of the title
--not-ok is it ok ?
--cool is it cool ?
The option values for a componant can then be retrieved with :func:`get_config_for`
.. doctest::
:hide:
>>> import argparse
>>> args = argparse.Namespace()
>>> args.num = 1
>>> args.title = "My title"
>>> args.ok = True
>>> args.cool = False
>>> args = parser.parse_args() # doctest: +SKIP
>>> config = get_config_for(args, comp)
>>> comp("input", **config) # doctest: +SKIP
"comp_result"
### Response:
def arguments_from_optionable(parser, component, prefix=""):
""" Add argparse arguments from all options of one :class:`Optionable`
>>> # Let's build a dummy optionable component:
>>> comp = Optionable()
>>> comp.add_option("num", Numeric(default=1, max=12, help="An exemple of option"))
>>> comp.add_option("title", Text(help="The title of the title"))
>>> comp.add_option("ok", Boolean(help="is it ok ?", default=True))
>>> comp.add_option("cool", Boolean(help="is it cool ?", default=False))
>>>
>>> # one can then register all the options of this component to a arg parser
>>> parser = argparse.ArgumentParser(prog="PROG")
>>> arguments_from_optionable(parser, comp)
>>> parser.print_help()
usage: PROG [-h] [--num NUM] [--title TITLE] [--not-ok] [--cool]
<BLANKLINE>
optional arguments:
-h, --help show this help message and exit
--num NUM An exemple of option
--title TITLE The title of the title
--not-ok is it ok ?
--cool is it cool ?
The option values for a componant can then be retrieved with :func:`get_config_for`
.. doctest::
:hide:
>>> import argparse
>>> args = argparse.Namespace()
>>> args.num = 1
>>> args.title = "My title"
>>> args.ok = True
>>> args.cool = False
>>> args = parser.parse_args() # doctest: +SKIP
>>> config = get_config_for(args, comp)
>>> comp("input", **config) # doctest: +SKIP
"comp_result"
"""
for option in component.options:
if component.options[option].hidden:
continue
argument_from_option(parser, component, option, prefix=prefix) |
def get_imported_data(csv_file, **kwargs):
"""Reads the content of the Polarion exported csv file and returns imported data."""
open_args = []
open_kwargs = {}
try:
# pylint: disable=pointless-statement
unicode
open_args.append("rb")
except NameError:
open_kwargs["encoding"] = "utf-8"
with open(os.path.expanduser(csv_file), *open_args, **open_kwargs) as input_file:
reader = _get_csv_reader(input_file)
fieldnames = _get_csv_fieldnames(reader)
if not fieldnames:
raise Dump2PolarionException(
"Cannot find field names in CSV file '{}'".format(csv_file)
)
results = _get_results(reader, fieldnames)
if not results:
raise Dump2PolarionException("No results read from CSV file '{}'".format(csv_file))
testrun = _get_testrun_from_csv(input_file, reader)
return xunit_exporter.ImportedData(results=results, testrun=testrun) | Reads the content of the Polarion exported csv file and returns imported data. | Below is the the instruction that describes the task:
### Input:
Reads the content of the Polarion exported csv file and returns imported data.
### Response:
def get_imported_data(csv_file, **kwargs):
"""Reads the content of the Polarion exported csv file and returns imported data."""
open_args = []
open_kwargs = {}
try:
# pylint: disable=pointless-statement
unicode
open_args.append("rb")
except NameError:
open_kwargs["encoding"] = "utf-8"
with open(os.path.expanduser(csv_file), *open_args, **open_kwargs) as input_file:
reader = _get_csv_reader(input_file)
fieldnames = _get_csv_fieldnames(reader)
if not fieldnames:
raise Dump2PolarionException(
"Cannot find field names in CSV file '{}'".format(csv_file)
)
results = _get_results(reader, fieldnames)
if not results:
raise Dump2PolarionException("No results read from CSV file '{}'".format(csv_file))
testrun = _get_testrun_from_csv(input_file, reader)
return xunit_exporter.ImportedData(results=results, testrun=testrun) |
def get_agent_ids_by_resource(self, resource_id):
"""Gets the list of ``Agent`` ``Ids`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.id.IdList) - list of agent ``Ids``
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('resource',
collection='Resource',
runtime=self._runtime)
resource = collection.find_one(
dict({'_id': ObjectId(resource_id.get_identifier())},
**self._view_filter()))
if 'agentIds' not in resource:
result = IdList([])
else:
result = IdList(resource['agentIds'])
return result | Gets the list of ``Agent`` ``Ids`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.id.IdList) - list of agent ``Ids``
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the list of ``Agent`` ``Ids`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.id.IdList) - list of agent ``Ids``
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_agent_ids_by_resource(self, resource_id):
"""Gets the list of ``Agent`` ``Ids`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.id.IdList) - list of agent ``Ids``
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('resource',
collection='Resource',
runtime=self._runtime)
resource = collection.find_one(
dict({'_id': ObjectId(resource_id.get_identifier())},
**self._view_filter()))
if 'agentIds' not in resource:
result = IdList([])
else:
result = IdList(resource['agentIds'])
return result |
def directive(apply_globally=False, api=None):
"""A decorator that registers a single hug directive"""
def decorator(directive_method):
if apply_globally:
hug.defaults.directives[underscore(directive_method.__name__)] = directive_method
else:
apply_to_api = hug.API(api) if api else hug.api.from_object(directive_method)
apply_to_api.add_directive(directive_method)
directive_method.directive = True
return directive_method
return decorator | A decorator that registers a single hug directive | Below is the the instruction that describes the task:
### Input:
A decorator that registers a single hug directive
### Response:
def directive(apply_globally=False, api=None):
"""A decorator that registers a single hug directive"""
def decorator(directive_method):
if apply_globally:
hug.defaults.directives[underscore(directive_method.__name__)] = directive_method
else:
apply_to_api = hug.API(api) if api else hug.api.from_object(directive_method)
apply_to_api.add_directive(directive_method)
directive_method.directive = True
return directive_method
return decorator |
def post_process_get(self, result):
"""
Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data
"""
if not hasattr(result, 'items'):
# If it's not a dict, give up & just return whatever you get.
return result
# We need to possibly drill into the response & get out the data here.
# Check for a result key.
result_key = self._details.result_key_for('get')
if not result_key:
# There's no result_key. Just use the top-level data.
data = result
else:
data = result[result_key]
for key, value in data.items():
self._data[to_snake_case(key)] = value
return result | Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data | Below is the the instruction that describes the task:
### Input:
Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data
### Response:
def post_process_get(self, result):
"""
Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data
"""
if not hasattr(result, 'items'):
# If it's not a dict, give up & just return whatever you get.
return result
# We need to possibly drill into the response & get out the data here.
# Check for a result key.
result_key = self._details.result_key_for('get')
if not result_key:
# There's no result_key. Just use the top-level data.
data = result
else:
data = result[result_key]
for key, value in data.items():
self._data[to_snake_case(key)] = value
return result |
def get_user_profile_photos(self, *args, **kwargs):
"""See :func:`get_user_profile_photos`"""
return get_user_profile_photos(*args, **self._merge_overrides(**kwargs)).run() | See :func:`get_user_profile_photos` | Below is the the instruction that describes the task:
### Input:
See :func:`get_user_profile_photos`
### Response:
def get_user_profile_photos(self, *args, **kwargs):
"""See :func:`get_user_profile_photos`"""
return get_user_profile_photos(*args, **self._merge_overrides(**kwargs)).run() |
def get_block_type(values, dtype=None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
dtype = dtype or values.dtype
vtype = dtype.type
if is_sparse(dtype):
# Need this first(ish) so that Sparse[datetime] is sparse
cls = ExtensionBlock
elif is_categorical(values):
cls = CategoricalBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetime64tz_dtype(values)
cls = DatetimeBlock
elif is_datetime64tz_dtype(values):
cls = DatetimeTZBlock
elif is_interval_dtype(dtype) or is_period_dtype(dtype):
cls = ObjectValuesExtensionBlock
elif is_extension_array_dtype(values):
cls = ExtensionBlock
elif issubclass(vtype, np.floating):
cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
assert issubclass(vtype, np.integer)
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
elif issubclass(vtype, np.integer):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
else:
cls = ObjectBlock
return cls | Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block | Below is the the instruction that describes the task:
### Input:
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
### Response:
def get_block_type(values, dtype=None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
dtype = dtype or values.dtype
vtype = dtype.type
if is_sparse(dtype):
# Need this first(ish) so that Sparse[datetime] is sparse
cls = ExtensionBlock
elif is_categorical(values):
cls = CategoricalBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetime64tz_dtype(values)
cls = DatetimeBlock
elif is_datetime64tz_dtype(values):
cls = DatetimeTZBlock
elif is_interval_dtype(dtype) or is_period_dtype(dtype):
cls = ObjectValuesExtensionBlock
elif is_extension_array_dtype(values):
cls = ExtensionBlock
elif issubclass(vtype, np.floating):
cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
assert issubclass(vtype, np.integer)
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
elif issubclass(vtype, np.integer):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
else:
cls = ObjectBlock
return cls |
def set_focus_to_state_model(self, state_m, ratio_requested=0.8):
""" Focus a state view of respective state model
:param rafcon.gui.model.state state_m: Respective state model of state view to be focused
:param ratio_requested: Minimum ratio of the screen which is requested, so can be more
:return:
"""
state_machine_m = self.model
state_v = self.canvas.get_view_for_model(state_m)
if state_v is None:
logger.warning('There is no view for state model {0}'.format(state_m))
self.move_item_into_viewport(state_v)
# check_relative size in view and call it again if the state is still very small
state_v = self.canvas.get_view_for_model(state_machine_m.root_state)
state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(state_v.width, state_v.height)
viewport_size = self.view.editor.get_allocation().width, self.view.editor.get_allocation().height
if state_size[0] < ratio_requested*viewport_size[0] and state_size[1] < ratio_requested*viewport_size[1]:
self.set_focus_to_state_model(state_m, ratio_requested) | Focus a state view of respective state model
:param rafcon.gui.model.state state_m: Respective state model of state view to be focused
:param ratio_requested: Minimum ratio of the screen which is requested, so can be more
:return: | Below is the the instruction that describes the task:
### Input:
Focus a state view of respective state model
:param rafcon.gui.model.state state_m: Respective state model of state view to be focused
:param ratio_requested: Minimum ratio of the screen which is requested, so can be more
:return:
### Response:
def set_focus_to_state_model(self, state_m, ratio_requested=0.8):
""" Focus a state view of respective state model
:param rafcon.gui.model.state state_m: Respective state model of state view to be focused
:param ratio_requested: Minimum ratio of the screen which is requested, so can be more
:return:
"""
state_machine_m = self.model
state_v = self.canvas.get_view_for_model(state_m)
if state_v is None:
logger.warning('There is no view for state model {0}'.format(state_m))
self.move_item_into_viewport(state_v)
# check_relative size in view and call it again if the state is still very small
state_v = self.canvas.get_view_for_model(state_machine_m.root_state)
state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(state_v.width, state_v.height)
viewport_size = self.view.editor.get_allocation().width, self.view.editor.get_allocation().height
if state_size[0] < ratio_requested*viewport_size[0] and state_size[1] < ratio_requested*viewport_size[1]:
self.set_focus_to_state_model(state_m, ratio_requested) |
def cmvn(vec, variance_normalization=False):
""" This function is aimed to perform global cepstral mean and
variance normalization (CMVN) on input feature vector "vec".
The code assumes that there is one observation per row.
Args:
vec (array): input feature matrix
(size:(num_observation,num_features))
variance_normalization (bool): If the variance
normilization should be performed or not.
Return:
array: The mean(or mean+variance) normalized feature vector.
"""
eps = 2**-30
rows, cols = vec.shape
# Mean calculation
norm = np.mean(vec, axis=0)
norm_vec = np.tile(norm, (rows, 1))
# Mean subtraction
mean_subtracted = vec - norm_vec
# Variance normalization
if variance_normalization:
stdev = np.std(mean_subtracted, axis=0)
stdev_vec = np.tile(stdev, (rows, 1))
output = mean_subtracted / (stdev_vec + eps)
else:
output = mean_subtracted
return output | This function is aimed to perform global cepstral mean and
variance normalization (CMVN) on input feature vector "vec".
The code assumes that there is one observation per row.
Args:
vec (array): input feature matrix
(size:(num_observation,num_features))
variance_normalization (bool): If the variance
normilization should be performed or not.
Return:
array: The mean(or mean+variance) normalized feature vector. | Below is the the instruction that describes the task:
### Input:
This function is aimed to perform global cepstral mean and
variance normalization (CMVN) on input feature vector "vec".
The code assumes that there is one observation per row.
Args:
vec (array): input feature matrix
(size:(num_observation,num_features))
variance_normalization (bool): If the variance
normilization should be performed or not.
Return:
array: The mean(or mean+variance) normalized feature vector.
### Response:
def cmvn(vec, variance_normalization=False):
""" This function is aimed to perform global cepstral mean and
variance normalization (CMVN) on input feature vector "vec".
The code assumes that there is one observation per row.
Args:
vec (array): input feature matrix
(size:(num_observation,num_features))
variance_normalization (bool): If the variance
normilization should be performed or not.
Return:
array: The mean(or mean+variance) normalized feature vector.
"""
eps = 2**-30
rows, cols = vec.shape
# Mean calculation
norm = np.mean(vec, axis=0)
norm_vec = np.tile(norm, (rows, 1))
# Mean subtraction
mean_subtracted = vec - norm_vec
# Variance normalization
if variance_normalization:
stdev = np.std(mean_subtracted, axis=0)
stdev_vec = np.tile(stdev, (rows, 1))
output = mean_subtracted / (stdev_vec + eps)
else:
output = mean_subtracted
return output |
def pick(choices, default=None, str_choices=None, prompt=None, allow_mult=False, more_choices=False):
'''
:param choices: Strings between which the user will make a choice
:type choices: list of strings
:param default: Number the index to be used as the default
:type default: int or None
:param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique
:type str_choices: list of strings
:param prompt: A custom prompt to be used
:type prompt: string
:param allow_mult: Whether "*" is a valid option to select all choices
:type allow_mult: boolean
:param more_choices: Whether "m" is a valid option to ask for more options
:type more_choices: boolean
:returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True)
:rtype: int or string
:raises: :exc:`EOFError` to signify quitting the process
At most one of allow_mult and more_choices should be set to True.
'''
for i in range(len(choices)):
prefix = str(i) + ') '
lines = choices[i].split("\n")
joiner = "\n" + " " * len(prefix)
print(prefix + joiner.join(lines))
if more_choices:
print('m) More options not shown...')
print('')
if prompt is None:
prompt = 'Pick a numbered choice'
if allow_mult:
prompt += ' or "*" for all'
elif more_choices:
prompt += ' or "m" for more options'
if default is not None:
prompt += ' [' + str(default) + ']'
prompt += ': '
while True:
try:
value = input(prompt)
except KeyboardInterrupt:
print('')
raise
except EOFError:
print('')
raise
if default is not None and value == '':
return default
if allow_mult and value == '*':
return value
if more_choices and value == 'm':
return value
try:
choice = str_choices.index(value)
return choice
except:
pass
try:
choice = int(value)
if choice not in range(len(choices)):
raise IndexError()
return choice
except Exception:
print('Not a valid selection') | :param choices: Strings between which the user will make a choice
:type choices: list of strings
:param default: Number the index to be used as the default
:type default: int or None
:param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique
:type str_choices: list of strings
:param prompt: A custom prompt to be used
:type prompt: string
:param allow_mult: Whether "*" is a valid option to select all choices
:type allow_mult: boolean
:param more_choices: Whether "m" is a valid option to ask for more options
:type more_choices: boolean
:returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True)
:rtype: int or string
:raises: :exc:`EOFError` to signify quitting the process
At most one of allow_mult and more_choices should be set to True. | Below is the the instruction that describes the task:
### Input:
:param choices: Strings between which the user will make a choice
:type choices: list of strings
:param default: Number the index to be used as the default
:type default: int or None
:param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique
:type str_choices: list of strings
:param prompt: A custom prompt to be used
:type prompt: string
:param allow_mult: Whether "*" is a valid option to select all choices
:type allow_mult: boolean
:param more_choices: Whether "m" is a valid option to ask for more options
:type more_choices: boolean
:returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True)
:rtype: int or string
:raises: :exc:`EOFError` to signify quitting the process
At most one of allow_mult and more_choices should be set to True.
### Response:
def pick(choices, default=None, str_choices=None, prompt=None, allow_mult=False, more_choices=False):
'''
:param choices: Strings between which the user will make a choice
:type choices: list of strings
:param default: Number the index to be used as the default
:type default: int or None
:param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique
:type str_choices: list of strings
:param prompt: A custom prompt to be used
:type prompt: string
:param allow_mult: Whether "*" is a valid option to select all choices
:type allow_mult: boolean
:param more_choices: Whether "m" is a valid option to ask for more options
:type more_choices: boolean
:returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True)
:rtype: int or string
:raises: :exc:`EOFError` to signify quitting the process
At most one of allow_mult and more_choices should be set to True.
'''
for i in range(len(choices)):
prefix = str(i) + ') '
lines = choices[i].split("\n")
joiner = "\n" + " " * len(prefix)
print(prefix + joiner.join(lines))
if more_choices:
print('m) More options not shown...')
print('')
if prompt is None:
prompt = 'Pick a numbered choice'
if allow_mult:
prompt += ' or "*" for all'
elif more_choices:
prompt += ' or "m" for more options'
if default is not None:
prompt += ' [' + str(default) + ']'
prompt += ': '
while True:
try:
value = input(prompt)
except KeyboardInterrupt:
print('')
raise
except EOFError:
print('')
raise
if default is not None and value == '':
return default
if allow_mult and value == '*':
return value
if more_choices and value == 'm':
return value
try:
choice = str_choices.index(value)
return choice
except:
pass
try:
choice = int(value)
if choice not in range(len(choices)):
raise IndexError()
return choice
except Exception:
print('Not a valid selection') |
def cleanup_nodes(doc):
"""
Remove text nodes containing only whitespace
"""
for node in doc.documentElement.childNodes:
if node.nodeType == Node.TEXT_NODE and node.nodeValue.isspace():
doc.documentElement.removeChild(node)
return doc | Remove text nodes containing only whitespace | Below is the the instruction that describes the task:
### Input:
Remove text nodes containing only whitespace
### Response:
def cleanup_nodes(doc):
"""
Remove text nodes containing only whitespace
"""
for node in doc.documentElement.childNodes:
if node.nodeType == Node.TEXT_NODE and node.nodeValue.isspace():
doc.documentElement.removeChild(node)
return doc |
def build(self, recipe):
"""
Builds a recipe
:param recipe: Name of the recipe to build.
"""
return self.__app.recipes.build(recipe, self._plugin) | Builds a recipe
:param recipe: Name of the recipe to build. | Below is the the instruction that describes the task:
### Input:
Builds a recipe
:param recipe: Name of the recipe to build.
### Response:
def build(self, recipe):
"""
Builds a recipe
:param recipe: Name of the recipe to build.
"""
return self.__app.recipes.build(recipe, self._plugin) |
def can_connect_to(self, other):
"""Whether a connection can be established between those two meshes."""
assert other.is_mesh()
disconnected = not other.is_connected() and not self.is_connected()
types_differ = self._is_consumed_mesh() != other._is_consumed_mesh()
return disconnected and types_differ | Whether a connection can be established between those two meshes. | Below is the the instruction that describes the task:
### Input:
Whether a connection can be established between those two meshes.
### Response:
def can_connect_to(self, other):
"""Whether a connection can be established between those two meshes."""
assert other.is_mesh()
disconnected = not other.is_connected() and not self.is_connected()
types_differ = self._is_consumed_mesh() != other._is_consumed_mesh()
return disconnected and types_differ |
def generate_dc_json(dc_dict):
"""Generate DC JSON data.
Returns data as a JSON formatted string.
"""
formatted_dict = formatted_dc_dict(dc_dict)
return json.dumps(formatted_dict, sort_keys=True, indent=4) | Generate DC JSON data.
Returns data as a JSON formatted string. | Below is the the instruction that describes the task:
### Input:
Generate DC JSON data.
Returns data as a JSON formatted string.
### Response:
def generate_dc_json(dc_dict):
"""Generate DC JSON data.
Returns data as a JSON formatted string.
"""
formatted_dict = formatted_dc_dict(dc_dict)
return json.dumps(formatted_dict, sort_keys=True, indent=4) |
def make(base_classes=(), have_mt=False):
"""Use this static method to build a model class that
possibly derives from other classes. If have_mt is True,
then returned class will take into account multi-threading
issues when dealing with observable properties."""
good_bc = ModelFactory.__fix_bases(base_classes, have_mt)
print "Base classes are:", good_bc
key = "".join(map(str, good_bc))
if key in ModelFactory.__memoized:
return ModelFactory.__memoized[key]
cls = new.classobj('', good_bc, {'__module__': '__main__', '__doc__': None})
ModelFactory.__memoized[key] = cls
return cls | Use this static method to build a model class that
possibly derives from other classes. If have_mt is True,
then returned class will take into account multi-threading
issues when dealing with observable properties. | Below is the the instruction that describes the task:
### Input:
Use this static method to build a model class that
possibly derives from other classes. If have_mt is True,
then returned class will take into account multi-threading
issues when dealing with observable properties.
### Response:
def make(base_classes=(), have_mt=False):
"""Use this static method to build a model class that
possibly derives from other classes. If have_mt is True,
then returned class will take into account multi-threading
issues when dealing with observable properties."""
good_bc = ModelFactory.__fix_bases(base_classes, have_mt)
print "Base classes are:", good_bc
key = "".join(map(str, good_bc))
if key in ModelFactory.__memoized:
return ModelFactory.__memoized[key]
cls = new.classobj('', good_bc, {'__module__': '__main__', '__doc__': None})
ModelFactory.__memoized[key] = cls
return cls |
def default_error_handler(exception):
"""
Default error handler
Will display an error page with the corresponding error code from template
directory, for example, a not found will load a 404.html etc.
Will first look in userland app templates and if not found, fallback to
boiler templates to display a default page.
:param exception: Exception
:return: string
"""
http_exception = isinstance(exception, exceptions.HTTPException)
code = exception.code if http_exception else 500
# log exceptions only (app debug should be off)
if code == 500:
current_app.logger.error(exception)
# jsonify error if json requested via accept header
if has_app_context() and has_request_context():
headers = request.headers
if 'Accept' in headers and headers['Accept'] == 'application/json':
return json_error_handler(exception)
# otherwise render template
return template_error_handler(exception) | Default error handler
Will display an error page with the corresponding error code from template
directory, for example, a not found will load a 404.html etc.
Will first look in userland app templates and if not found, fallback to
boiler templates to display a default page.
:param exception: Exception
:return: string | Below is the the instruction that describes the task:
### Input:
Default error handler
Will display an error page with the corresponding error code from template
directory, for example, a not found will load a 404.html etc.
Will first look in userland app templates and if not found, fallback to
boiler templates to display a default page.
:param exception: Exception
:return: string
### Response:
def default_error_handler(exception):
"""
Default error handler
Will display an error page with the corresponding error code from template
directory, for example, a not found will load a 404.html etc.
Will first look in userland app templates and if not found, fallback to
boiler templates to display a default page.
:param exception: Exception
:return: string
"""
http_exception = isinstance(exception, exceptions.HTTPException)
code = exception.code if http_exception else 500
# log exceptions only (app debug should be off)
if code == 500:
current_app.logger.error(exception)
# jsonify error if json requested via accept header
if has_app_context() and has_request_context():
headers = request.headers
if 'Accept' in headers and headers['Accept'] == 'application/json':
return json_error_handler(exception)
# otherwise render template
return template_error_handler(exception) |
def get_xsession(self, item):
"""
Returns the XNAT session and cache dir corresponding to the
item.
"""
subj_label, sess_label = self._get_item_labels(item)
with self:
xproject = self._login.projects[self.project_id]
try:
xsubject = xproject.subjects[subj_label]
except KeyError:
xsubject = self._login.classes.SubjectData(
label=subj_label, parent=xproject)
try:
xsession = xsubject.experiments[sess_label]
except KeyError:
xsession = self._login.classes.MrSessionData(
label=sess_label, parent=xsubject)
if item.derived:
xsession.fields[
self.DERIVED_FROM_FIELD] = self._get_item_labels(
item, no_from_study=True)[1]
return xsession | Returns the XNAT session and cache dir corresponding to the
item. | Below is the the instruction that describes the task:
### Input:
Returns the XNAT session and cache dir corresponding to the
item.
### Response:
def get_xsession(self, item):
"""
Returns the XNAT session and cache dir corresponding to the
item.
"""
subj_label, sess_label = self._get_item_labels(item)
with self:
xproject = self._login.projects[self.project_id]
try:
xsubject = xproject.subjects[subj_label]
except KeyError:
xsubject = self._login.classes.SubjectData(
label=subj_label, parent=xproject)
try:
xsession = xsubject.experiments[sess_label]
except KeyError:
xsession = self._login.classes.MrSessionData(
label=sess_label, parent=xsubject)
if item.derived:
xsession.fields[
self.DERIVED_FROM_FIELD] = self._get_item_labels(
item, no_from_study=True)[1]
return xsession |
def head(self, n=5):
"""
Return first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask] | Return first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6 | Below is the the instruction that describes the task:
### Input:
Return first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
### Response:
def head(self, n=5):
"""
Return first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask] |
def remote_port_uneq_store(self, remote_port):
"""This function saves the port, if different from stored. """
if remote_port != self.remote_port:
self.remote_port = remote_port
return True
return False | This function saves the port, if different from stored. | Below is the the instruction that describes the task:
### Input:
This function saves the port, if different from stored.
### Response:
def remote_port_uneq_store(self, remote_port):
"""This function saves the port, if different from stored. """
if remote_port != self.remote_port:
self.remote_port = remote_port
return True
return False |
def preview(request, content_type_id, object_id):
"""
This is an override for django.views.default.shortcut.
It assumes that get_absolute_url returns an absolute url, so
it does not do any of the very elaborate link checking that shortcut does.
This version adds the language code to the url. (/en/blaat/).
"""
try:
content_type = ContentType.objects.get(pk=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id))
try:
absolute_url = obj.get_absolute_url()
except AttributeError:
raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name)
if absolute_url.startswith('http://') or absolute_url.startswith('https://'):
http.HttpResponseRedirect(absolute_url)
else:
absolute_url = fix_language_code(absolute_url, request.LANGUAGE_CODE)
return http.HttpResponseRedirect(absolute_url) | This is an override for django.views.default.shortcut.
It assumes that get_absolute_url returns an absolute url, so
it does not do any of the very elaborate link checking that shortcut does.
This version adds the language code to the url. (/en/blaat/). | Below is the the instruction that describes the task:
### Input:
This is an override for django.views.default.shortcut.
It assumes that get_absolute_url returns an absolute url, so
it does not do any of the very elaborate link checking that shortcut does.
This version adds the language code to the url. (/en/blaat/).
### Response:
def preview(request, content_type_id, object_id):
"""
This is an override for django.views.default.shortcut.
It assumes that get_absolute_url returns an absolute url, so
it does not do any of the very elaborate link checking that shortcut does.
This version adds the language code to the url. (/en/blaat/).
"""
try:
content_type = ContentType.objects.get(pk=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id))
try:
absolute_url = obj.get_absolute_url()
except AttributeError:
raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name)
if absolute_url.startswith('http://') or absolute_url.startswith('https://'):
http.HttpResponseRedirect(absolute_url)
else:
absolute_url = fix_language_code(absolute_url, request.LANGUAGE_CODE)
return http.HttpResponseRedirect(absolute_url) |
def username_user_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
user_password = ET.SubElement(username, "user-password")
user_password.text = kwargs.pop('user_password')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def username_user_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
user_password = ET.SubElement(username, "user-password")
user_password.text = kwargs.pop('user_password')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _basic_return(self, args, msg):
"""
return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
self.returned_messages.put(
(reply_code, reply_text, exchange, routing_key, msg)
) | return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published. | Below is the the instruction that describes the task:
### Input:
return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
### Response:
def _basic_return(self, args, msg):
"""
return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
self.returned_messages.put(
(reply_code, reply_text, exchange, routing_key, msg)
) |
def neighbors(*asns, **kwargs):
'''
Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function.
Arguments:
asns
A list of AS numbers to search for.
The runner will return only the neighbors of these AS numbers.
device
Filter by device name (minion ID).
ip
Search BGP neighbor using the IP address.
In multi-VRF environments, the same IP address could be used by
more than one neighbors, in different routing tables.
network
Search neighbors within a certain IP network.
title
Custom title.
display: ``True``
Display on the screen or return structured object? Default: ``True`` (return on the CLI).
outputter: ``table``
Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`.
In addition, any field from the output of the ``neighbors`` function
from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter.
CLI Example:
.. code-block:: bash
salt-run bgp.neighbors 13335 15169
salt-run bgp.neighbors 13335 ip=172.17.19.1
salt-run bgp.neighbors multipath=True
salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False
salt-run bgp.neighbors network=192.168.0.0/16
Output example:
.. code-block:: text
BGP Neighbors for 13335, 15169
________________________________________________________________________________________________________________________________________________________________
| Device | AS Number | Neighbor Address | State|#Active/Received/Accepted/Damped | Policy IN | Policy OUT |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.11 | Established 0/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.12 | Established 397/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.flw01 | 13335 | 192.168.172.11 | Established 1/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.oua01 | 13335 | 172.17.109.17 | Established 0/0/0/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::1 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::2 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.tbg01 | 13335 | 192.168.172.17 | Established 0/1/1/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
'''
opts = _get_bgp_runner_opts()
title = kwargs.pop('title', None)
display = kwargs.pop('display', opts['display'])
outputter = kwargs.pop('outputter', opts['outputter'])
# cleaning up the kwargs
# __pub args not used in this runner (yet)
kwargs_copy = {}
kwargs_copy.update(kwargs)
for karg, _ in six.iteritems(kwargs_copy):
if karg.startswith('__pub'):
kwargs.pop(karg)
if not asns and not kwargs:
if display:
print('Please specify at least an AS Number or an output filter')
return []
device = kwargs.pop('device', None)
neighbor_ip = kwargs.pop('ip', None)
ipnet = kwargs.pop('network', None)
ipnet_obj = IPNetwork(ipnet) if ipnet else None
# any other key passed on the CLI can be used as a filter
rows = []
# building the labels
labels = {}
for field in opts['return_fields']:
if field in _DEFAULT_LABELS_MAPPING:
labels[field] = _DEFAULT_LABELS_MAPPING[field]
else:
# transform from 'previous_connection_state' to 'Previous Connection State'
labels[field] = ' '.join(map(lambda word: word.title(), field.split('_')))
display_fields = list(set(opts['return_fields']) - set(_DEFAULT_INCLUDED_FIELDS))
get_bgp_neighbors_all = _get_mine(opts=opts)
if not title:
title_parts = []
if asns:
title_parts.append('BGP Neighbors for {asns}'.format(
asns=', '.join([six.text_type(asn) for asn in asns])
))
if neighbor_ip:
title_parts.append('Selecting neighbors having the remote IP address: {ipaddr}'.format(ipaddr=neighbor_ip))
if ipnet:
title_parts.append('Selecting neighbors within the IP network: {ipnet}'.format(ipnet=ipnet))
if kwargs:
title_parts.append('Searching for BGP neighbors having the attributes: {attrmap}'.format(
attrmap=', '.join(map(lambda key: '{key}={value}'.format(key=key, value=kwargs[key]), kwargs))
))
title = '\n'.join(title_parts)
for minion, get_bgp_neighbors_minion in six.iteritems(get_bgp_neighbors_all): # pylint: disable=too-many-nested-blocks
if not get_bgp_neighbors_minion.get('result'):
continue # ignore empty or failed mines
if device and minion != device:
# when requested to display only the neighbors on a certain device
continue
get_bgp_neighbors_minion_out = get_bgp_neighbors_minion.get('out', {})
for vrf, vrf_bgp_neighbors in six.iteritems(get_bgp_neighbors_minion_out): # pylint: disable=unused-variable
for asn, get_bgp_neighbors_minion_asn in six.iteritems(vrf_bgp_neighbors):
if asns and asn not in asns:
# if filtering by AS number(s),
# will ignore if this AS number key not in that list
# and continue the search
continue
for neighbor in get_bgp_neighbors_minion_asn:
if kwargs and not _compare_match(kwargs, neighbor):
# requested filtering by neighbors stats
# but this one does not correspond
continue
if neighbor_ip and neighbor_ip != neighbor.get('remote_address'):
# requested filtering by neighbors IP addr
continue
if ipnet_obj and neighbor.get('remote_address'):
neighbor_ip_obj = IPAddress(neighbor.get('remote_address'))
if neighbor_ip_obj not in ipnet_obj:
# Neighbor not in this network
continue
row = {
'device': minion,
'neighbor_address': neighbor.get('remote_address'),
'as_number': asn
}
if 'vrf' in display_fields:
row['vrf'] = vrf
if 'connection_stats' in display_fields:
connection_stats = '{state} {active}/{received}/{accepted}/{damped}'.format(
state=neighbor.get('connection_state', -1),
active=neighbor.get('active_prefix_count', -1),
received=neighbor.get('received_prefix_count', -1),
accepted=neighbor.get('accepted_prefix_count', -1),
damped=neighbor.get('suppressed_prefix_count', -1),
)
row['connection_stats'] = connection_stats
if 'interface_description' in display_fields or 'interface_name' in display_fields:
net_find = __salt__['net.interfaces'](device=minion,
ipnet=neighbor.get('remote_address'),
display=False)
if net_find:
if 'interface_description' in display_fields:
row['interface_description'] = net_find[0]['interface_description']
if 'interface_name' in display_fields:
row['interface_name'] = net_find[0]['interface']
else:
# if unable to find anything, leave blank
if 'interface_description' in display_fields:
row['interface_description'] = ''
if 'interface_name' in display_fields:
row['interface_name'] = ''
for field in display_fields:
if field in neighbor:
row[field] = neighbor[field]
rows.append(row)
return _display_runner(rows, labels, title, display=display, outputter=outputter) | Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function.
Arguments:
asns
A list of AS numbers to search for.
The runner will return only the neighbors of these AS numbers.
device
Filter by device name (minion ID).
ip
Search BGP neighbor using the IP address.
In multi-VRF environments, the same IP address could be used by
more than one neighbors, in different routing tables.
network
Search neighbors within a certain IP network.
title
Custom title.
display: ``True``
Display on the screen or return structured object? Default: ``True`` (return on the CLI).
outputter: ``table``
Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`.
In addition, any field from the output of the ``neighbors`` function
from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter.
CLI Example:
.. code-block:: bash
salt-run bgp.neighbors 13335 15169
salt-run bgp.neighbors 13335 ip=172.17.19.1
salt-run bgp.neighbors multipath=True
salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False
salt-run bgp.neighbors network=192.168.0.0/16
Output example:
.. code-block:: text
BGP Neighbors for 13335, 15169
________________________________________________________________________________________________________________________________________________________________
| Device | AS Number | Neighbor Address | State|#Active/Received/Accepted/Damped | Policy IN | Policy OUT |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.11 | Established 0/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.12 | Established 397/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.flw01 | 13335 | 192.168.172.11 | Established 1/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.oua01 | 13335 | 172.17.109.17 | Established 0/0/0/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::1 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::2 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.tbg01 | 13335 | 192.168.172.17 | Established 0/1/1/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________ | Below is the the instruction that describes the task:
### Input:
Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function.
Arguments:
asns
A list of AS numbers to search for.
The runner will return only the neighbors of these AS numbers.
device
Filter by device name (minion ID).
ip
Search BGP neighbor using the IP address.
In multi-VRF environments, the same IP address could be used by
more than one neighbors, in different routing tables.
network
Search neighbors within a certain IP network.
title
Custom title.
display: ``True``
Display on the screen or return structured object? Default: ``True`` (return on the CLI).
outputter: ``table``
Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`.
In addition, any field from the output of the ``neighbors`` function
from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter.
CLI Example:
.. code-block:: bash
salt-run bgp.neighbors 13335 15169
salt-run bgp.neighbors 13335 ip=172.17.19.1
salt-run bgp.neighbors multipath=True
salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False
salt-run bgp.neighbors network=192.168.0.0/16
Output example:
.. code-block:: text
BGP Neighbors for 13335, 15169
________________________________________________________________________________________________________________________________________________________________
| Device | AS Number | Neighbor Address | State|#Active/Received/Accepted/Damped | Policy IN | Policy OUT |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.11 | Established 0/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.12 | Established 397/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.flw01 | 13335 | 192.168.172.11 | Established 1/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.oua01 | 13335 | 172.17.109.17 | Established 0/0/0/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::1 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::2 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.tbg01 | 13335 | 192.168.172.17 | Established 0/1/1/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
### Response:
def neighbors(*asns, **kwargs):
'''
Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function.
Arguments:
asns
A list of AS numbers to search for.
The runner will return only the neighbors of these AS numbers.
device
Filter by device name (minion ID).
ip
Search BGP neighbor using the IP address.
In multi-VRF environments, the same IP address could be used by
more than one neighbors, in different routing tables.
network
Search neighbors within a certain IP network.
title
Custom title.
display: ``True``
Display on the screen or return structured object? Default: ``True`` (return on the CLI).
outputter: ``table``
Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`.
In addition, any field from the output of the ``neighbors`` function
from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter.
CLI Example:
.. code-block:: bash
salt-run bgp.neighbors 13335 15169
salt-run bgp.neighbors 13335 ip=172.17.19.1
salt-run bgp.neighbors multipath=True
salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False
salt-run bgp.neighbors network=192.168.0.0/16
Output example:
.. code-block:: text
BGP Neighbors for 13335, 15169
________________________________________________________________________________________________________________________________________________________________
| Device | AS Number | Neighbor Address | State|#Active/Received/Accepted/Damped | Policy IN | Policy OUT |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.11 | Established 0/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 13335 | 172.17.109.12 | Established 397/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.flw01 | 13335 | 192.168.172.11 | Established 1/398/398/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.oua01 | 13335 | 172.17.109.17 | Established 0/0/0/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::1 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.bjm01 | 15169 | 2001::2 | Established 102/102/102/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
| edge01.tbg01 | 13335 | 192.168.172.17 | Established 0/1/1/0 | import-policy | export-policy |
________________________________________________________________________________________________________________________________________________________________
'''
opts = _get_bgp_runner_opts()
title = kwargs.pop('title', None)
display = kwargs.pop('display', opts['display'])
outputter = kwargs.pop('outputter', opts['outputter'])
# cleaning up the kwargs
# __pub args not used in this runner (yet)
kwargs_copy = {}
kwargs_copy.update(kwargs)
for karg, _ in six.iteritems(kwargs_copy):
if karg.startswith('__pub'):
kwargs.pop(karg)
if not asns and not kwargs:
if display:
print('Please specify at least an AS Number or an output filter')
return []
device = kwargs.pop('device', None)
neighbor_ip = kwargs.pop('ip', None)
ipnet = kwargs.pop('network', None)
ipnet_obj = IPNetwork(ipnet) if ipnet else None
# any other key passed on the CLI can be used as a filter
rows = []
# building the labels
labels = {}
for field in opts['return_fields']:
if field in _DEFAULT_LABELS_MAPPING:
labels[field] = _DEFAULT_LABELS_MAPPING[field]
else:
# transform from 'previous_connection_state' to 'Previous Connection State'
labels[field] = ' '.join(map(lambda word: word.title(), field.split('_')))
display_fields = list(set(opts['return_fields']) - set(_DEFAULT_INCLUDED_FIELDS))
get_bgp_neighbors_all = _get_mine(opts=opts)
if not title:
title_parts = []
if asns:
title_parts.append('BGP Neighbors for {asns}'.format(
asns=', '.join([six.text_type(asn) for asn in asns])
))
if neighbor_ip:
title_parts.append('Selecting neighbors having the remote IP address: {ipaddr}'.format(ipaddr=neighbor_ip))
if ipnet:
title_parts.append('Selecting neighbors within the IP network: {ipnet}'.format(ipnet=ipnet))
if kwargs:
title_parts.append('Searching for BGP neighbors having the attributes: {attrmap}'.format(
attrmap=', '.join(map(lambda key: '{key}={value}'.format(key=key, value=kwargs[key]), kwargs))
))
title = '\n'.join(title_parts)
for minion, get_bgp_neighbors_minion in six.iteritems(get_bgp_neighbors_all): # pylint: disable=too-many-nested-blocks
if not get_bgp_neighbors_minion.get('result'):
continue # ignore empty or failed mines
if device and minion != device:
# when requested to display only the neighbors on a certain device
continue
get_bgp_neighbors_minion_out = get_bgp_neighbors_minion.get('out', {})
for vrf, vrf_bgp_neighbors in six.iteritems(get_bgp_neighbors_minion_out): # pylint: disable=unused-variable
for asn, get_bgp_neighbors_minion_asn in six.iteritems(vrf_bgp_neighbors):
if asns and asn not in asns:
# if filtering by AS number(s),
# will ignore if this AS number key not in that list
# and continue the search
continue
for neighbor in get_bgp_neighbors_minion_asn:
if kwargs and not _compare_match(kwargs, neighbor):
# requested filtering by neighbors stats
# but this one does not correspond
continue
if neighbor_ip and neighbor_ip != neighbor.get('remote_address'):
# requested filtering by neighbors IP addr
continue
if ipnet_obj and neighbor.get('remote_address'):
neighbor_ip_obj = IPAddress(neighbor.get('remote_address'))
if neighbor_ip_obj not in ipnet_obj:
# Neighbor not in this network
continue
row = {
'device': minion,
'neighbor_address': neighbor.get('remote_address'),
'as_number': asn
}
if 'vrf' in display_fields:
row['vrf'] = vrf
if 'connection_stats' in display_fields:
connection_stats = '{state} {active}/{received}/{accepted}/{damped}'.format(
state=neighbor.get('connection_state', -1),
active=neighbor.get('active_prefix_count', -1),
received=neighbor.get('received_prefix_count', -1),
accepted=neighbor.get('accepted_prefix_count', -1),
damped=neighbor.get('suppressed_prefix_count', -1),
)
row['connection_stats'] = connection_stats
if 'interface_description' in display_fields or 'interface_name' in display_fields:
net_find = __salt__['net.interfaces'](device=minion,
ipnet=neighbor.get('remote_address'),
display=False)
if net_find:
if 'interface_description' in display_fields:
row['interface_description'] = net_find[0]['interface_description']
if 'interface_name' in display_fields:
row['interface_name'] = net_find[0]['interface']
else:
# if unable to find anything, leave blank
if 'interface_description' in display_fields:
row['interface_description'] = ''
if 'interface_name' in display_fields:
row['interface_name'] = ''
for field in display_fields:
if field in neighbor:
row[field] = neighbor[field]
rows.append(row)
return _display_runner(rows, labels, title, display=display, outputter=outputter) |
def launch_process(self, command):
# type: (Union[bytes,text_type])->None
"""* What you can do
- It starts process and keep it.
"""
if not self.option is None:
command_plus_option = self.command + " " + self.option
else:
command_plus_option = self.command
if six.PY3:
if shutil.which(command) is None:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
else:
doc_command_string = "echo '' | {}".format(command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid | * What you can do
- It starts process and keep it. | Below is the instruction that describes the task:
### Input:
* What you can do
- It starts process and keep it.
### Response:
def launch_process(self, command):
# type: (Union[bytes,text_type])->None
"""* What you can do
- It starts process and keep it.
"""
if not self.option is None:
command_plus_option = self.command + " " + self.option
else:
command_plus_option = self.command
if six.PY3:
if shutil.which(command) is None:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
else:
doc_command_string = "echo '' | {}".format(command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid |
def sort_queryset(queryset, request, context=None):
""" Returns a sorted queryset
The context argument is only used in the template tag
"""
sort_by = request.GET.get('sort_by')
if sort_by:
if sort_by in [el.name for el in queryset.model._meta.fields]:
queryset = queryset.order_by(sort_by)
else:
if sort_by in request.session:
sort_by = request.session[sort_by]
try:
queryset = queryset.order_by(sort_by)
except:
raise
# added else to fix a bug when using changelist
# TODO: use less ifs and more standard sorting
elif context is not None:
# sorted ascending
if sort_by[0] != '-':
sort_by = context['cl'].list_display[int(sort_by) - 1]
# sorted descending
else:
sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1]
queryset = queryset.order_by(sort_by)
return queryset | Returns a sorted queryset
The context argument is only used in the template tag | Below is the instruction that describes the task:
### Input:
Returns a sorted queryset
The context argument is only used in the template tag
### Response:
def sort_queryset(queryset, request, context=None):
""" Returns a sorted queryset
The context argument is only used in the template tag
"""
sort_by = request.GET.get('sort_by')
if sort_by:
if sort_by in [el.name for el in queryset.model._meta.fields]:
queryset = queryset.order_by(sort_by)
else:
if sort_by in request.session:
sort_by = request.session[sort_by]
try:
queryset = queryset.order_by(sort_by)
except:
raise
# added else to fix a bug when using changelist
# TODO: use less ifs and more standard sorting
elif context is not None:
# sorted ascending
if sort_by[0] != '-':
sort_by = context['cl'].list_display[int(sort_by) - 1]
# sorted descending
else:
sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1]
queryset = queryset.order_by(sort_by)
return queryset |
def deposit(self, amount):
"""
:param amount: (int +/-) amount to be deposited or withdrawn in cents
"""
_json = {"amount": amount}
self.api_interface.piggy_bank_deposit(self, _json) | :param amount: (int +/-) amount to be deposited or withdrawn in cents | Below is the instruction that describes the task:
### Input:
:param amount: (int +/-) amount to be deposited or withdrawn in cents
### Response:
def deposit(self, amount):
"""
:param amount: (int +/-) amount to be deposited or withdrawn in cents
"""
_json = {"amount": amount}
self.api_interface.piggy_bank_deposit(self, _json) |
def compare(optimizers, problems, runs=20, all_kwargs={}):
"""Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
"""
if not (isinstance(optimizers, collections.Iterable)
or isinstance(problems, collections.Iterable)):
raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
if not isinstance(optimizers, collections.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
if not isinstance(problems, collections.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
elif not isinstance(all_kwargs, collections.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
# For nice human readable dictionaries, extract useful names from
# optimizer
class_name = optimizer.__class__.__name__
fitness_func_name = problem._fitness_function.__name__
key_name = '{} {}'.format(class_name, fitness_func_name)
# Keep track of how many optimizers of each class / fitness func
# for better keys in stats dict
try:
key_counts[key_name] += 1
except KeyError:
key_counts[key_name] = 1
# Foo 1, Foo 2, Bar 1, etc.
key = '{} {}'.format(key_name, key_counts[key_name])
print key + ': ',
# Finally, get the actual stats
stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
print
return stats | Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats. | Below is the instruction that describes the task:
### Input:
Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
### Response:
def compare(optimizers, problems, runs=20, all_kwargs={}):
"""Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
"""
if not (isinstance(optimizers, collections.Iterable)
or isinstance(problems, collections.Iterable)):
raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
if not isinstance(optimizers, collections.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
if not isinstance(problems, collections.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
elif not isinstance(all_kwargs, collections.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
# For nice human readable dictionaries, extract useful names from
# optimizer
class_name = optimizer.__class__.__name__
fitness_func_name = problem._fitness_function.__name__
key_name = '{} {}'.format(class_name, fitness_func_name)
# Keep track of how many optimizers of each class / fitness func
# for better keys in stats dict
try:
key_counts[key_name] += 1
except KeyError:
key_counts[key_name] = 1
# Foo 1, Foo 2, Bar 1, etc.
key = '{} {}'.format(key_name, key_counts[key_name])
print key + ': ',
# Finally, get the actual stats
stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
print
return stats |
def from_dict(cls, D, is_json=False):
'''This factory for :class:`Model`
takes either a native Python dictionary or a JSON dictionary/object
if ``is_json`` is ``True``. The dictionary passed does not need to
contain all of the values that the Model declares.
'''
instance = cls()
instance.set_data(D, is_json=is_json)
return instance | This factory for :class:`Model`
takes either a native Python dictionary or a JSON dictionary/object
if ``is_json`` is ``True``. The dictionary passed does not need to
contain all of the values that the Model declares. | Below is the instruction that describes the task:
### Input:
This factory for :class:`Model`
takes either a native Python dictionary or a JSON dictionary/object
if ``is_json`` is ``True``. The dictionary passed does not need to
contain all of the values that the Model declares.
### Response:
def from_dict(cls, D, is_json=False):
'''This factory for :class:`Model`
takes either a native Python dictionary or a JSON dictionary/object
if ``is_json`` is ``True``. The dictionary passed does not need to
contain all of the values that the Model declares.
'''
instance = cls()
instance.set_data(D, is_json=is_json)
return instance |
def main():
""" Send data """
try:
args = get_config()
result = webpush(
args.sub_info,
data=args.data,
vapid_private_key=args.key,
vapid_claims=args.claims,
curl=args.curl,
content_encoding=args.encoding)
print(result)
except Exception as ex:
print("ERROR: {}".format(ex)) | Send data | Below is the instruction that describes the task:
### Input:
Send data
### Response:
def main():
""" Send data """
try:
args = get_config()
result = webpush(
args.sub_info,
data=args.data,
vapid_private_key=args.key,
vapid_claims=args.claims,
curl=args.curl,
content_encoding=args.encoding)
print(result)
except Exception as ex:
print("ERROR: {}".format(ex)) |
def post_event(self, id, **data):
"""
POST /events/:id/
Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
series parent (see POST /series/:id/).
"""
return self.post("/events/{0}/".format(id), data=data) | POST /events/:id/
Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
series parent (see POST /series/:id/). | Below is the instruction that describes the task:
### Input:
POST /events/:id/
Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
series parent (see POST /series/:id/).
### Response:
def post_event(self, id, **data):
"""
POST /events/:id/
Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
series parent (see POST /series/:id/).
"""
return self.post("/events/{0}/".format(id), data=data) |
def visitIriRange(self, ctx: ShExDocParser.IriRangeContext):
""" iriRange: iri (STEM_MARK iriExclusion*)? """
baseiri = self.context.iri_to_iriref(ctx.iri())
if not ctx.STEM_MARK():
vsvalue = baseiri # valueSetValue = objectValue / objectValue = IRI
else:
if ctx.iriExclusion(): # valueSetValue = IriStemRange / iriStemRange = stem + exclusions
vsvalue = IriStemRange(baseiri, exclusions=[])
self._iri_exclusions(vsvalue, ctx.iriExclusion())
else:
vsvalue = IriStem(baseiri) # valueSetValue = IriStem / IriStem: {stem:IRI}
self.nodeconstraint.values.append(vsvalue) | iriRange: iri (STEM_MARK iriExclusion*)? | Below is the instruction that describes the task:
### Input:
iriRange: iri (STEM_MARK iriExclusion*)?
### Response:
def visitIriRange(self, ctx: ShExDocParser.IriRangeContext):
""" iriRange: iri (STEM_MARK iriExclusion*)? """
baseiri = self.context.iri_to_iriref(ctx.iri())
if not ctx.STEM_MARK():
vsvalue = baseiri # valueSetValue = objectValue / objectValue = IRI
else:
if ctx.iriExclusion(): # valueSetValue = IriStemRange / iriStemRange = stem + exclusions
vsvalue = IriStemRange(baseiri, exclusions=[])
self._iri_exclusions(vsvalue, ctx.iriExclusion())
else:
vsvalue = IriStem(baseiri) # valueSetValue = IriStem / IriStem: {stem:IRI}
self.nodeconstraint.values.append(vsvalue) |
def getATR(reader):
"""Return the ATR of the card inserted into the reader."""
connection = reader.createConnection()
atr = ""
try:
connection.connect()
atr = smartcard.util.toHexString(connection.getATR())
connection.disconnect()
except smartcard.Exceptions.NoCardException:
atr = "no card inserted"
return atr | Return the ATR of the card inserted into the reader. | Below is the instruction that describes the task:
### Input:
Return the ATR of the card inserted into the reader.
### Response:
def getATR(reader):
"""Return the ATR of the card inserted into the reader."""
connection = reader.createConnection()
atr = ""
try:
connection.connect()
atr = smartcard.util.toHexString(connection.getATR())
connection.disconnect()
except smartcard.Exceptions.NoCardException:
atr = "no card inserted"
return atr |
def create_case_observable(self, case_id, case_observable):
"""
:param case_id: Case identifier
:param case_observable: TheHive observable
:type case_observable: CaseObservable defined in models.py
:return: TheHive observable
:rtype: json
"""
req = self.url + "/api/case/{}/artifact".format(case_id)
if case_observable.dataType == 'file':
try:
mesg = json.dumps({ "dataType": case_observable.dataType,
"message": case_observable.message,
"tlp": case_observable.tlp,
"tags": case_observable.tags,
"ioc": case_observable.ioc
})
data = {"_json": mesg}
return requests.post(req, data=data, files=case_observable.data[0], proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseObservableException("Case observable create error: {}".format(e))
else:
try:
return requests.post(req, headers={'Content-Type': 'application/json'}, data=case_observable.jsonify(), proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseObservableException("Case observable create error: {}".format(e)) | :param case_id: Case identifier
:param case_observable: TheHive observable
:type case_observable: CaseObservable defined in models.py
:return: TheHive observable
:rtype: json | Below is the instruction that describes the task:
### Input:
:param case_id: Case identifier
:param case_observable: TheHive observable
:type case_observable: CaseObservable defined in models.py
:return: TheHive observable
:rtype: json
### Response:
def create_case_observable(self, case_id, case_observable):
"""
:param case_id: Case identifier
:param case_observable: TheHive observable
:type case_observable: CaseObservable defined in models.py
:return: TheHive observable
:rtype: json
"""
req = self.url + "/api/case/{}/artifact".format(case_id)
if case_observable.dataType == 'file':
try:
mesg = json.dumps({ "dataType": case_observable.dataType,
"message": case_observable.message,
"tlp": case_observable.tlp,
"tags": case_observable.tags,
"ioc": case_observable.ioc
})
data = {"_json": mesg}
return requests.post(req, data=data, files=case_observable.data[0], proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseObservableException("Case observable create error: {}".format(e))
else:
try:
return requests.post(req, headers={'Content-Type': 'application/json'}, data=case_observable.jsonify(), proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseObservableException("Case observable create error: {}".format(e)) |
def combinations(iterable, r):
"""Calculate combinations
>>> list(combinations('ABCD',2))
[['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']]
>>> list(combinations(range(4), 3))
[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
Args:
iterable: Any iterable object.
r: Size of combination.
Yields:
list: Combination of size r.
"""
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield list(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield list(pool[i] for i in indices) | Calculate combinations
>>> list(combinations('ABCD',2))
[['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']]
>>> list(combinations(range(4), 3))
[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
Args:
iterable: Any iterable object.
r: Size of combination.
Yields:
list: Combination of size r. | Below is the instruction that describes the task:
### Input:
Calculate combinations
>>> list(combinations('ABCD',2))
[['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']]
>>> list(combinations(range(4), 3))
[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
Args:
iterable: Any iterable object.
r: Size of combination.
Yields:
list: Combination of size r.
### Response:
def combinations(iterable, r):
"""Calculate combinations
>>> list(combinations('ABCD',2))
[['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']]
>>> list(combinations(range(4), 3))
[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
Args:
iterable: Any iterable object.
r: Size of combination.
Yields:
list: Combination of size r.
"""
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield list(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield list(pool[i] for i in indices) |
def emit(self, item, defaults=None, stencil=None, to_log=False, item_formatter=None):
""" Print an item to stdout, or the log on INFO level.
"""
item_text = self.format_item(item, defaults, stencil)
# Post-process line?
if item_formatter:
item_text = item_formatter(item_text)
# For a header, use configured escape codes on a terminal
if item is None and os.isatty(sys.stdout.fileno()):
item_text = ''.join((config.output_header_ecma48, item_text, "\x1B[0m"))
# Dump to selected target
if to_log:
if callable(to_log):
to_log(item_text)
else:
self.LOG.info(item_text)
elif self.options.nul:
sys.stdout.write(item_text + '\0')
sys.stdout.flush()
else:
print(item_text)
return item_text.count('\n') + 1 | Print an item to stdout, or the log on INFO level. | Below is the instruction that describes the task:
### Input:
Print an item to stdout, or the log on INFO level.
### Response:
def emit(self, item, defaults=None, stencil=None, to_log=False, item_formatter=None):
""" Print an item to stdout, or the log on INFO level.
"""
item_text = self.format_item(item, defaults, stencil)
# Post-process line?
if item_formatter:
item_text = item_formatter(item_text)
# For a header, use configured escape codes on a terminal
if item is None and os.isatty(sys.stdout.fileno()):
item_text = ''.join((config.output_header_ecma48, item_text, "\x1B[0m"))
# Dump to selected target
if to_log:
if callable(to_log):
to_log(item_text)
else:
self.LOG.info(item_text)
elif self.options.nul:
sys.stdout.write(item_text + '\0')
sys.stdout.flush()
else:
print(item_text)
return item_text.count('\n') + 1 |
def patch_cluster_custom_object_scale(self, group, version, plural, name, body, **kwargs): # noqa: E501
"""patch_cluster_custom_object_scale # noqa: E501
partially update scale of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
return data | patch_cluster_custom_object_scale # noqa: E501
partially update scale of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
patch_cluster_custom_object_scale # noqa: E501
partially update scale of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_cluster_custom_object_scale(self, group, version, plural, name, body, **kwargs):  # noqa: E501
    """patch_cluster_custom_object_scale  # noqa: E501

    Partially update scale of the specified cluster scoped custom object.
    Synchronous by default; pass ``async_req=True`` for an asynchronous call:
    >>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str name: the custom object's name (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :return: object, or the request thread when called asynchronously
    """
    # Callers of this wrapper want only the payload, not the HTTP metadata.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async variants are served by the *_with_http_info
    # call; in the async case its return value is the request thread.
    return self.patch_cluster_custom_object_scale_with_http_info(
        group, version, plural, name, body, **kwargs)  # noqa: E501
def set(self, stats, value):
"""
Log set
>>> client = StatsdClient()
>>> client.set('example.set', "set")
>>> client.set(('example.set61', 'example.set67'), "2701")
"""
self.update_stats(stats, value, self.SC_SET) | Log set
>>> client = StatsdClient()
>>> client.set('example.set', "set")
>>> client.set(('example.set61', 'example.set67'), "2701") | Below is the instruction that describes the task:
### Input:
Log set
>>> client = StatsdClient()
>>> client.set('example.set', "set")
>>> client.set(('example.set61', 'example.set67'), "2701")
### Response:
def set(self, stats, value):
    """
    Record a "set" metric (type ``self.SC_SET``) for one or more stat names.

    stats -- a single stat name, or a tuple/iterable of stat names
    value -- the value to add to the set(s)

    >>> client = StatsdClient()
    >>> client.set('example.set', "set")
    >>> client.set(('example.set61', 'example.set67'), "2701")
    """
    # Delegate to the generic updater with the "set" metric-type marker.
    self.update_stats(stats, value, self.SC_SET)
def connected(self, *, presence=structs.PresenceState(False), **kwargs):
"""
Return a :class:`.node.UseConnected` context manager which does not
modify the presence settings.
The keyword arguments are passed to the :class:`.node.UseConnected`
context manager constructor.
.. versionadded:: 0.8
"""
return UseConnected(self, presence=presence, **kwargs) | Return a :class:`.node.UseConnected` context manager which does not
modify the presence settings.
The keyword arguments are passed to the :class:`.node.UseConnected`
context manager constructor.
.. versionadded:: 0.8 | Below is the instruction that describes the task:
### Input:
Return a :class:`.node.UseConnected` context manager which does not
modify the presence settings.
The keyword arguments are passed to the :class:`.node.UseConnected`
context manager constructor.
.. versionadded:: 0.8
### Response:
def connected(self, *, presence=structs.PresenceState(False), **kwargs):
    """
    Return a :class:`.node.UseConnected` context manager which does not
    modify the presence settings.
    The keyword arguments are passed to the :class:`.node.UseConnected`
    context manager constructor.
    .. versionadded:: 0.8
    """
    # NOTE(review): the default PresenceState(False) is built once at
    # definition time and shared across all calls -- presumably an
    # immutable "unavailable" marker; confirm PresenceState is immutable.
    return UseConnected(self, presence=presence, **kwargs)
def process(self, article):
"""
Ingests an Article to create navigation structures and parse global
metadata.
"""
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Navigation only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Navigation cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
if self.collection:
pass
else:
self.title = self.article.publisher.nav_title()
for author in self.article.publisher.nav_contributors():
self.contributors.add(author)
#Analyze the structure of the article to create internal mapping
self.map_navigation() | Ingests an Article to create navigation structures and parse global
metadata. | Below is the instruction that describes the task:
### Input:
Ingests an Article to create navigation structures and parse global
metadata.
### Response:
def process(self, article):
    """
    Ingests an Article to create navigation structures and parse global
    metadata.

    Returns False when an additional article is rejected outside
    collection mode; returns None early when the article lacks a publisher.
    """
    # Only one article is accepted unless collection mode is enabled.
    if self.article is not None and not self.collection:
        log.warning('Could not process additional article. Navigation only \
handles one article unless collection mode is set.')
        return False
    # A publisher is required to derive the title/contributors below.
    if article.publisher is None:
        log.error('''Navigation cannot be generated for an Article \
without a publisher!''')
        return
    self.article = article
    # Keep the DOI suffix (the part after the registrant prefix).
    self.article_doi = self.article.doi.split('/')[1]
    self.all_dois.append(self.article.doi)
    if self.collection:
        pass
    else:
        # Single-article mode: take title and contributors from the publisher.
        self.title = self.article.publisher.nav_title()
        for author in self.article.publisher.nav_contributors():
            self.contributors.add(author)
    #Analyze the structure of the article to create internal mapping
    self.map_navigation()
def __place_order(self, token):
"""
Use the struts token to place the order.
Parameters
----------
token : string
The struts token received from the place order page
Returns
-------
int
The completed order ID.
"""
order_id = 0
response = None
if not token or token['value'] == '':
raise LendingClubError('The token parameter is False, None or unknown.')
# Process order confirmation page
try:
# Place the order
payload = {}
if token:
payload['struts.token.name'] = token['name']
payload[token['name']] = token['value']
response = self.lc.session.post('/portfolio/orderConfirmed.action', data=payload)
# Process HTML for the order ID
html = response.text
soup = BeautifulSoup(html, 'html5lib')
# Order num
order_field = soup.find(id='order_id')
if order_field:
order_id = int(order_field['value'])
# Did not find an ID
if order_id == 0:
self.__log('An investment order was submitted, but a confirmation ID could not be determined')
raise LendingClubError('No order ID was found when placing the order.', response)
else:
return order_id
except Exception as e:
raise LendingClubError('Could not place the order: {0}'.format(str(e)), response) | Use the struts token to place the order.
Parameters
----------
token : string
The struts token received from the place order page
Returns
-------
int
The completed order ID. | Below is the instruction that describes the task:
### Input:
Use the struts token to place the order.
Parameters
----------
token : string
The struts token received from the place order page
Returns
-------
int
The completed order ID.
### Response:
def __place_order(self, token):
    """
    Use the struts token to place the order.
    Parameters
    ----------
    token : string
        The struts token received from the place order page
    Returns
    -------
    int
        The completed order ID.
    Raises
    ------
    LendingClubError
        If the token is missing/empty, no order ID is found on the
        confirmation page, or the request itself fails.
    """
    order_id = 0
    response = None
    # Without a token value the order cannot be authorized.
    if not token or token['value'] == '':
        raise LendingClubError('The token parameter is False, None or unknown.')
    # Process order confirmation page
    try:
        # Place the order
        payload = {}
        if token:
            # Struts CSRF token: both the field name and its value are sent.
            payload['struts.token.name'] = token['name']
            payload[token['name']] = token['value']
        response = self.lc.session.post('/portfolio/orderConfirmed.action', data=payload)
        # Process HTML for the order ID
        html = response.text
        soup = BeautifulSoup(html, 'html5lib')
        # Order num
        order_field = soup.find(id='order_id')
        if order_field:
            order_id = int(order_field['value'])
        # Did not find an ID
        # NOTE(review): this raise is caught by the except below and
        # re-wrapped into a 'Could not place the order' error.
        if order_id == 0:
            self.__log('An investment order was submitted, but a confirmation ID could not be determined')
            raise LendingClubError('No order ID was found when placing the order.', response)
        else:
            return order_id
    except Exception as e:
        raise LendingClubError('Could not place the order: {0}'.format(str(e)), response)
def post_data(self, path, data, content_type, **params):
"""
Make a POST request to the given path, with `data` in its body.
Return the JSON-decoded result.
The content_type must be set to reflect the kind of data being sent,
which is often `application/json`.
Keyword parameters will be converted to URL parameters. This is unlike
other POST requests which encode those parameters in the body, because
the body is already being used.
This is used by the Luminoso API to upload new documents in JSON
format.
"""
params = jsonify_parameters(params)
url = ensure_trailing_slash(self.url + path.lstrip('/'))
return self._json_request('post', url,
params=params,
data=data,
headers={'Content-Type': content_type}
) | Make a POST request to the given path, with `data` in its body.
Return the JSON-decoded result.
The content_type must be set to reflect the kind of data being sent,
which is often `application/json`.
Keyword parameters will be converted to URL parameters. This is unlike
other POST requests which encode those parameters in the body, because
the body is already being used.
This is used by the Luminoso API to upload new documents in JSON
format. | Below is the instruction that describes the task:
### Input:
Make a POST request to the given path, with `data` in its body.
Return the JSON-decoded result.
The content_type must be set to reflect the kind of data being sent,
which is often `application/json`.
Keyword parameters will be converted to URL parameters. This is unlike
other POST requests which encode those parameters in the body, because
the body is already being used.
This is used by the Luminoso API to upload new documents in JSON
format.
### Response:
def post_data(self, path, data, content_type, **params):
    """POST `data` as the raw request body to `path`; return the decoded JSON.

    `content_type` must describe the body being sent, which is often
    `application/json`.  Because the body slot is occupied by `data`, any
    keyword arguments become URL query parameters rather than form fields.
    This is used by the Luminoso API to upload new documents in JSON format.
    """
    query = jsonify_parameters(params)
    endpoint = ensure_trailing_slash(self.url + path.lstrip('/'))
    headers = {'Content-Type': content_type}
    return self._json_request('post', endpoint,
                              params=query,
                              data=data,
                              headers=headers)
def object_to_dict(cls, obj):
"""
This function converts Objects into Dictionary
"""
dict_obj = dict()
if obj is not None:
if type(obj) == list:
dict_list = []
for inst in obj:
dict_list.append(cls.object_to_dict(inst))
dict_obj["list"] = dict_list
elif not cls.is_primitive(obj):
for key in obj.__dict__:
# is an object
if type(obj.__dict__[key]) == list:
dict_list = []
for inst in obj.__dict__[key]:
dict_list.append(cls.object_to_dict(inst))
dict_obj[key] = dict_list
elif not cls.is_primitive(obj.__dict__[key]):
temp_dict = cls.object_to_dict(obj.__dict__[key])
dict_obj[key] = temp_dict
else:
dict_obj[key] = obj.__dict__[key]
elif cls.is_primitive(obj):
return obj
return dict_obj | This function converts Objects into Dictionary | Below is the instruction that describes the task:
### Input:
This function converts Objects into Dictionary
### Response:
def object_to_dict(cls, obj):
    """
    Recursively convert an object graph into plain dicts.

    ``None`` becomes ``{}``, a list becomes ``{"list": [...]}``,
    primitives are returned unchanged, and any other object is expanded
    attribute-by-attribute via its ``__dict__``.
    """
    if obj is None:
        return {}
    if type(obj) == list:
        return {"list": [cls.object_to_dict(element) for element in obj]}
    if cls.is_primitive(obj):
        return obj
    converted = dict()
    for attr, val in obj.__dict__.items():
        if type(val) == list:
            converted[attr] = [cls.object_to_dict(element) for element in val]
        elif not cls.is_primitive(val):
            converted[attr] = cls.object_to_dict(val)
        else:
            converted[attr] = val
    return converted
def draw(self, can=None):
"Draw the charts."
if can == None:
can = canvas.default_canvas()
assert self.check_integrity()
for plot in self.__plots:
plot.check_integrity()
self.x_range, self.x_grid_interval = \
self.__get_data_range(self.x_range, 'X',
self.x_coord,
self.x_grid_interval)
self.y_range, self.y_grid_interval = \
self.__get_data_range(self.y_range, 'Y',
self.y_coord,
self.y_grid_interval)
can.rectangle(self.border_line_style, self.bg_style,
self.loc[0], self.loc[1],
self.loc[0] + self.size[0], self.loc[1] + self.size[1])
if not self.x_grid_over_plot:
self.__draw_x_grid_and_axis(can)
if not self.y_grid_over_plot:
self.__draw_y_grid_and_axis(can)
clipbox = theme.adjust_bounding_box([self.loc[0], self.loc[1],
self.loc[0] + self.size[0],
self.loc[1] + self.size[1]])
can.clip(clipbox[0], clipbox[1],
clipbox[2], clipbox[3])
for plot in self.__plots:
plot.draw(self, can)
can.endclip()
if self.x_grid_over_plot:
self.__draw_x_grid_and_axis(can)
if self.y_grid_over_plot:
self.__draw_y_grid_and_axis(can)
if self.legend == _dummy_legend:
self.legend = legend.T()
if self.legend:
legends = []
for plot in self.__plots:
entry = plot.get_legend_entry()
if entry == None:
pass
elif type(entry) != ListType:
legends.append(entry)
else:
for e in entry:
legends.append(e)
self.legend.draw(self, legends, can) | Draw the charts. | Below is the instruction that describes the task:
### Input:
Draw the charts.
### Response:
def draw(self, can=None):
    "Draw the charts."
    # Default to the module-wide canvas when none is supplied.
    if can == None:
        can = canvas.default_canvas()
    assert self.check_integrity()
    for plot in self.__plots:
        plot.check_integrity()
    # Resolve axis ranges and grid intervals (computed when unset).
    self.x_range, self.x_grid_interval = \
        self.__get_data_range(self.x_range, 'X',
                              self.x_coord,
                              self.x_grid_interval)
    self.y_range, self.y_grid_interval = \
        self.__get_data_range(self.y_range, 'Y',
                              self.y_coord,
                              self.y_grid_interval)
    # Background rectangle covering the plotting area.
    can.rectangle(self.border_line_style, self.bg_style,
                  self.loc[0], self.loc[1],
                  self.loc[0] + self.size[0], self.loc[1] + self.size[1])
    # Grids go *under* the plots unless configured to overlay them.
    if not self.x_grid_over_plot:
        self.__draw_x_grid_and_axis(can)
    if not self.y_grid_over_plot:
        self.__draw_y_grid_and_axis(can)
    # Clip plot drawing to a (theme-adjusted) bounding box of the area.
    clipbox = theme.adjust_bounding_box([self.loc[0], self.loc[1],
                                         self.loc[0] + self.size[0],
                                         self.loc[1] + self.size[1]])
    can.clip(clipbox[0], clipbox[1],
             clipbox[2], clipbox[3])
    for plot in self.__plots:
        plot.draw(self, can)
    can.endclip()
    # Overlay grids, when requested, after the plots are drawn.
    if self.x_grid_over_plot:
        self.__draw_x_grid_and_axis(can)
    if self.y_grid_over_plot:
        self.__draw_y_grid_and_axis(can)
    # Sentinel means "no legend configured": substitute the default legend.
    if self.legend == _dummy_legend:
        self.legend = legend.T()
    if self.legend:
        # Collect legend entries; a plot may contribute none, one, or many.
        legends = []
        for plot in self.__plots:
            entry = plot.get_legend_entry()
            if entry == None:
                pass
            elif type(entry) != ListType:
                legends.append(entry)
            else:
                for e in entry:
                    legends.append(e)
        self.legend.draw(self, legends, can)
def std_filter(array, n_std=2.0, return_index=False):
"""Standard deviation outlier detector.
:param array: array of data.
:param n_std: default 2.0, exclude data out of ``n_std`` standard deviation.
:param return_index: boolean, default False, if True, only returns index.
"""
if not isinstance(array, np.ndarray):
array = np.array(array)
mean, std = array.mean(), array.std()
good_index = np.where(abs(array - mean) <= n_std * std)
bad_index = np.where(abs(array - mean) > n_std * std)
if return_index:
return good_index[0], bad_index[0]
else:
return array[good_index], array[bad_index] | Standard deviation outlier detector.
:param array: array of data.
:param n_std: default 2.0, exclude data out of ``n_std`` standard deviation.
:param return_index: boolean, default False, if True, only returns index. | Below is the instruction that describes the task:
### Input:
Standard deviation outlier detector.
:param array: array of data.
:param n_std: default 2.0, exclude data out of ``n_std`` standard deviation.
:param return_index: boolean, default False, if True, only returns index.
### Response:
def std_filter(array, n_std=2.0, return_index=False):
    """Split data into inliers and outliers by distance from the mean.

    :param array: array of data.
    :param n_std: default 2.0; points farther than ``n_std`` standard
        deviations from the mean count as outliers.
    :param return_index: boolean, default False; when True, return index
        arrays instead of value arrays.
    :returns: ``(good, bad)`` value arrays, or index arrays when
        ``return_index`` is True.
    """
    data = np.asarray(array)
    deviation = np.abs(data - data.mean())
    cutoff = n_std * data.std()
    inliers = np.where(deviation <= cutoff)
    outliers = np.where(deviation > cutoff)
    if return_index:
        return inliers[0], outliers[0]
    return data[inliers], data[outliers]
def exit(self, code=None, message=None, perfdata=None, extdata=None):
"""
manual exit from the plugin
arguments:
code: exit status code
message: a short, one-line message to display
perfdata: perfdata, if any
extdata: multi-line message to give more details
"""
code = UNKNOWN if code is None else int(code)
message = "" if message is None else str(message)
perfdata = "" if perfdata is None else str(perfdata)
extdata = "" if extdata is None else str(extdata)
print("{0} {1} - {2} | {3}".format(self.name.upper(),
_CODES_STR[code],
message, perfdata))
if extdata:
print(extdata)
sys.exit(code) | manual exit from the plugin
arguments:
code: exit status code
message: a short, one-line message to display
perfdata: perfdata, if any
extdata: multi-line message to give more details | Below is the instruction that describes the task:
### Input:
manual exit from the plugin
arguments:
code: exit status code
message: a short, one-line message to display
perfdata: perfdata, if any
extdata: multi-line message to give more details
### Response:
def exit(self, code=None, message=None, perfdata=None, extdata=None):
    """
    manual exit from the plugin
    arguments:
        code: exit status code (defaults to UNKNOWN)
        message: a short, one-line message to display
        perfdata: perfdata, if any
        extdata: multi-line message to give more details
    """
    # Normalize every argument: None becomes the neutral default.
    if code is None:
        code = UNKNOWN
    else:
        code = int(code)
    message = str(message) if message is not None else ""
    perfdata = str(perfdata) if perfdata is not None else ""
    extdata = str(extdata) if extdata is not None else ""
    status_line = "{0} {1} - {2} | {3}".format(self.name.upper(),
                                               _CODES_STR[code],
                                               message, perfdata)
    print(status_line)
    if extdata:
        print(extdata)
    sys.exit(code)
def hashify_targets(targets: list, build_context) -> list:
"""Return sorted hashes of `targets`."""
return sorted(build_context.targets[target_name].hash(build_context)
for target_name in listify(targets)) | Return sorted hashes of `targets`. | Below is the instruction that describes the task:
### Input:
Return sorted hashes of `targets`.
### Response:
def hashify_targets(targets: list, build_context) -> list:
    """Return the hashes of `targets`, sorted."""
    hashes = [build_context.targets[name].hash(build_context)
              for name in listify(targets)]
    hashes.sort()
    return hashes
def _child_inst_names(self) -> Set[InstanceName]:
"""Return the set of instance names under the receiver."""
return frozenset([c.iname() for c in self.data_children()]) | Return the set of instance names under the receiver. | Below is the instruction that describes the task:
### Input:
Return the set of instance names under the receiver.
### Response:
def _child_inst_names(self) -> Set[InstanceName]:
    """Return the set of instance names under the receiver.

    Built from ``iname()`` of every data child.
    """
    # frozenset: callers receive an immutable, hashable collection.
    return frozenset([c.iname() for c in self.data_children()])
def execute(self, command):
"""Start a new MIP run."""
process = subprocess.Popen(
command,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
)
return process | Start a new MIP run. | Below is the instruction that describes the task:
### Input:
Start a new MIP run.
### Response:
def execute(self, command):
    """Launch `command` as a child process to start a new MIP run.

    The child restores SIGPIPE to its default disposition before exec
    (CPython ignores SIGPIPE by default); the Popen handle is returned.
    """
    def _default_sigpipe():
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    return subprocess.Popen(command, preexec_fn=_default_sigpipe)
def recommend_get(self, adgroup_id, **kwargs):
'''xxxxx.xxxxx.keywords.recommend.get
===================================
取得一个推广组的推荐关键词列表'''
request = TOPRequest('xxxxx.xxxxx.keywords.recommend.get')
request['adgroup_id'] = adgroup_id
for k, v in kwargs.iteritems():
if k not in ('nick', 'order_by', 'search', 'pertinence', 'page_size', 'page_no') and v==None: continue
request[k] = v
self.create(self.execute(request), models = {'result':RecommendWordPage})
return self.result | xxxxx.xxxxx.keywords.recommend.get
===================================
取得一个推广组的推荐关键词列表 | Below is the instruction that describes the task:
### Input:
xxxxx.xxxxx.keywords.recommend.get
===================================
取得一个推广组的推荐关键词列表
### Response:
def recommend_get(self, adgroup_id, **kwargs):
    '''xxxxx.xxxxx.keywords.recommend.get
    ===================================
    Fetch the list of recommended keywords for an ad group.

    Optional kwargs: nick, order_by, search, pertinence, page_size, page_no.
    '''
    request = TOPRequest('xxxxx.xxxxx.keywords.recommend.get')
    request['adgroup_id'] = adgroup_id
    # Python 2 code (dict.iteritems).  NOTE(review): the condition skips a
    # parameter only when it is BOTH outside the whitelist AND None; a
    # non-None unknown key is still forwarded.  Looks like the intent was
    # `k in (...) and v is None` -- confirm before changing.
    for k, v in kwargs.iteritems():
        if k not in ('nick', 'order_by', 'search', 'pertinence', 'page_size', 'page_no') and v==None: continue
        request[k] = v
    # Execute the TOP request and map the 'result' field onto the model.
    self.create(self.execute(request), models = {'result':RecommendWordPage})
    return self.result
def deploy_from_image(self, context, deploy_action, cancellation_context):
"""
Deploy From Image Command, will deploy vm from ovf image
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return str deploy results
"""
deploy_action.actionParams.deployment.attributes['vCenter Name'] = context.resource.name
deploy_from_image_model = self.resource_model_parser.convert_to_resource_model(
attributes=deploy_action.actionParams.deployment.attributes,
resource_model_type=vCenterVMFromImageResourceModel)
data_holder = DeployFromImageDetails(deploy_from_image_model, deploy_action.actionParams.appName)
# execute command
deploy_result_action = self.command_wrapper.execute_command_with_connection(
context,
self.deploy_command.execute_deploy_from_image,
data_holder,
context.resource,
cancellation_context,
self.folder_manager)
deploy_result_action.actionId = deploy_action.actionId
return deploy_result_action | Deploy From Image Command, will deploy vm from ovf image
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return str deploy results | Below is the the instruction that describes the task:
### Input:
Deploy From Image Command, will deploy vm from ovf image
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return str deploy results
### Response:
def deploy_from_image(self, context, deploy_action, cancellation_context):
"""
Deploy From Image Command, will deploy vm from ovf image
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return str deploy results
"""
deploy_action.actionParams.deployment.attributes['vCenter Name'] = context.resource.name
deploy_from_image_model = self.resource_model_parser.convert_to_resource_model(
attributes=deploy_action.actionParams.deployment.attributes,
resource_model_type=vCenterVMFromImageResourceModel)
data_holder = DeployFromImageDetails(deploy_from_image_model, deploy_action.actionParams.appName)
# execute command
deploy_result_action = self.command_wrapper.execute_command_with_connection(
context,
self.deploy_command.execute_deploy_from_image,
data_holder,
context.resource,
cancellation_context,
self.folder_manager)
deploy_result_action.actionId = deploy_action.actionId
return deploy_result_action |
def walk(
self,
path="/", # type: Text
namespaces=None, # type: Optional[Collection[Text]]
**kwargs # type: Any
):
# type: (...) -> Iterator[Step]
"""Walk the directory structure of a filesystem.
Arguments:
path (str):
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator of ``(<path>, <dirs>, <files>)``
named tuples, where ``<path>`` is an absolute path to a
directory, and ``<dirs>`` and ``<files>`` are a list of
`~fs.info.Info` objects for directories and files in ``<path>``.
Example:
>>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
>>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
... print("{} bytes {}".format(total))
This method invokes `Walker.walk` with bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.walk(self.fs, path=path, namespaces=namespaces) | Walk the directory structure of a filesystem.
Arguments:
path (str):
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator of ``(<path>, <dirs>, <files>)``
named tuples, where ``<path>`` is an absolute path to a
directory, and ``<dirs>`` and ``<files>`` are a list of
`~fs.info.Info` objects for directories and files in ``<path>``.
Example:
>>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
>>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
... print("{} bytes {}".format(total))
This method invokes `Walker.walk` with bound `FS` object. | Below is the instruction that describes the task:
### Input:
Walk the directory structure of a filesystem.
Arguments:
path (str):
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator of ``(<path>, <dirs>, <files>)``
named tuples, where ``<path>`` is an absolute path to a
directory, and ``<dirs>`` and ``<files>`` are a list of
`~fs.info.Info` objects for directories and files in ``<path>``.
Example:
>>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
>>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
... print("{} bytes {}".format(total))
This method invokes `Walker.walk` with bound `FS` object.
### Response:
def walk(
    self,
    path="/",  # type: Text
    namespaces=None,  # type: Optional[Collection[Text]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterator[Step]
    """Walk the directory structure of a filesystem.

    This method invokes `Walker.walk` with the bound `FS` object.

    Arguments:
        path (str): A path to a directory on the filesystem from which
            to start walking (defaults to the root, ``'/'``).
        namespaces (list, optional): A list of namespaces to include
            in the resource information, e.g. ``['basic', 'access']``
            (defaults to ``['basic']``).

    Keyword Arguments:
        ignore_errors (bool): If `True`, any errors reading a
            directory will be ignored, otherwise exceptions will be
            raised.
        on_error (callable): If ``ignore_errors`` is `False`, then
            this callable will be invoked with a path and the exception
            object. It should return `True` to ignore the error, or
            `False` to re-raise it.
        search (str): If ``'breadth'`` then the directory will be
            walked *top down*. Set to ``'depth'`` to walk *bottom up*.
        filter (list): If supplied, this parameter should be a list
            of file name patterns, e.g. ``['*.py']``. Files will only be
            returned if the final component matches one of the
            patterns.
        exclude (list, optional): If supplied, this parameter should be
            a list of filename patterns, e.g. ``['~*', '.*']``. Files
            matching any of these patterns will be removed from the walk.
        filter_dirs (list, optional): A list of patterns that will be used
            to match directories paths. The walk will only open directories
            that match at least one of these patterns.
        exclude_dirs (list): A list of patterns that will be used
            to filter out directories from the walk, e.g. ``['*.svn',
            '*.git']``.
        max_depth (int, optional): Maximum directory depth to walk.

    Returns:
        collections.abc.Iterator: an iterator of ``(<path>, <dirs>, <files>)``
        named tuples, where ``<path>`` is an absolute path to a
        directory, and ``<dirs>`` and ``<files>`` are a list of
        `~fs.info.Info` objects for directories and files in ``<path>``.

    Example:
        >>> home_fs = open_fs('~/')
        >>> walker = Walker(filter=['*.py'])
        >>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
        ...     print("[{}]".format(path))
        ...     print("{} directories".format(len(dirs)))
        ...     total = sum(info.size for info in files)
        ...     print("{} bytes".format(total))
    """
    # Build a Walker from the keyword options, then delegate the actual
    # traversal to it using the filesystem this object is bound to.
    walker = self._make_walker(**kwargs)
    return walker.walk(self.fs, path=path, namespaces=namespaces)
def put(self, src, dst):
    """Upload a file to HDFS.

    The source file is resolved relative to the ``testfiles_path``
    supplied in the constructor; the destination is expanded to a
    full HDFS path.
    """
    # Resolve the local test-file path, then shell out to `hadoop fs -put`.
    local_path = "%s%s" % (self._testfiles_path, src)
    command = [self._hadoop_cmd, 'fs', '-put', local_path, self._full_hdfs_path(dst)]
    return self._getStdOutCmd(command, True)
This will take a file from the ``testfiles_path`` supplied in the constructor. | Below is the instruction that describes the task:
### Input:
Upload a file to HDFS
This will take a file from the ``testfiles_path`` supplied in the constructor.
### Response:
def put(self, src, dst):
    """Upload a file to HDFS.

    ``src`` is taken relative to the ``testfiles_path`` supplied in
    the constructor; ``dst`` is expanded to a full HDFS path.
    """
    # Invoke `hadoop fs -put <local> <hdfs>` and return its stdout.
    return self._getStdOutCmd(
        [
            self._hadoop_cmd,
            'fs',
            '-put',
            "%s%s" % (self._testfiles_path, src),
            self._full_hdfs_path(dst),
        ],
        True,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.