code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
async def serialize_properties(inputs: 'Inputs',
property_deps: Dict[str, List['Resource']],
input_transformer: Optional[Callable[[str], str]] = None) -> struct_pb2.Struct:
"""
Serializes an arbitrary Input bag into a Protobuf structure, keeping track of the list
of dependent resources in the `deps` list. Serializing properties is inherently async
because it awaits any futures that are contained transitively within the input bag.
"""
struct = struct_pb2.Struct()
for k, v in inputs.items():
deps = []
result = await serialize_property(v, deps, input_transformer)
# We treat properties that serialize to None as if they don't exist.
if result is not None:
# While serializing to a pb struct, we must "translate" all key names to be what the engine is going to
# expect. Resources provide the "transform" function for doing this.
translated_name = k
if input_transformer is not None:
translated_name = input_transformer(k)
log.debug(f"top-level input property translated: {k} -> {translated_name}")
# pylint: disable=unsupported-assignment-operation
struct[translated_name] = result
property_deps[translated_name] = deps
return struct | Serializes an arbitrary Input bag into a Protobuf structure, keeping track of the list
of dependent resources in the `deps` list. Serializing properties is inherently async
because it awaits any futures that are contained transitively within the input bag. | Below is the the instruction that describes the task:
### Input:
Serializes an arbitrary Input bag into a Protobuf structure, keeping track of the list
of dependent resources in the `deps` list. Serializing properties is inherently async
because it awaits any futures that are contained transitively within the input bag.
### Response:
async def serialize_properties(inputs: 'Inputs',
                               property_deps: Dict[str, List['Resource']],
                               input_transformer: Optional[Callable[[str], str]] = None) -> struct_pb2.Struct:
    """
    Serializes an arbitrary Input bag into a Protobuf structure, keeping track of the list
    of dependent resources in the `deps` list. Serializing properties is inherently async
    because it awaits any futures that are contained transitively within the input bag.
    """
    result = struct_pb2.Struct()
    for key, value in inputs.items():
        value_deps: List['Resource'] = []
        serialized = await serialize_property(value, value_deps, input_transformer)
        # Properties that serialize to None are treated as if they don't exist.
        if serialized is None:
            continue
        # While serializing to a pb struct, every key must be "translated" into
        # the name the engine expects; resources supply the transform function.
        engine_key = input_transformer(key) if input_transformer is not None else key
        log.debug(f"top-level input property translated: {key} -> {engine_key}")
        # pylint: disable=unsupported-assignment-operation
        result[engine_key] = serialized
        property_deps[engine_key] = value_deps
    return result
def JSON_NumpyArrayEncoder(obj):
'''Define Specialize JSON encoder for numpy array'''
if isinstance(obj, np.ndarray):
return {'numpyArray': obj.tolist(),
'dtype': obj.dtype.__str__()}
elif isinstance(obj, np.generic):
return np.asscalar(obj)
else:
print type(obj)
raise TypeError(repr(obj) + " is not JSON serializable") | Define Specialize JSON encoder for numpy array | Below is the the instruction that describes the task:
### Input:
Define Specialize JSON encoder for numpy array
### Response:
def JSON_NumpyArrayEncoder(obj):
    '''Define Specialize JSON encoder for numpy array.

    Intended for use as the ``default=`` hook of :func:`json.dump`/``dumps``.

    :param obj: object the stdlib JSON encoder could not serialize
    :return: a JSON-serializable representation: ndarrays become
        ``{'numpyArray': <nested lists>, 'dtype': <dtype name>}`` and numpy
        scalars become the equivalent native Python scalar
    :raises TypeError: for any other object type
    '''
    if isinstance(obj, np.ndarray):
        return {'numpyArray': obj.tolist(),
                'dtype': str(obj.dtype)}
    if isinstance(obj, np.generic):
        # np.asscalar() was removed in NumPy 1.23; .item() is the
        # documented replacement and returns a native Python scalar.
        return obj.item()
    # Original had Python 2 `print type(obj)` here (a SyntaxError on
    # Python 3); the repr in the exception message carries the same info.
    raise TypeError(repr(obj) + " is not JSON serializable")
def find_intersections(x, a, b, direction='all'):
"""Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
"""
# Find the index of the points just before the intersection(s)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = nearest_idx + 1
# Determine the sign of the change
sign_change = np.sign(a[next_idx] - b[next_idx])
# x-values around each intersection
_, x0 = _next_non_masked_element(x, nearest_idx)
_, x1 = _next_non_masked_element(x, next_idx)
# y-values around each intersection for the first line
_, a0 = _next_non_masked_element(a, nearest_idx)
_, a1 = _next_non_masked_element(a, next_idx)
# y-values around each intersection for the second line
_, b0 = _next_non_masked_element(b, nearest_idx)
_, b1 = _next_non_masked_element(b, next_idx)
# Calculate the x-intersection. This comes from finding the equations of the two lines,
# one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
# finding their intersection, and reducing with a bunch of algebra.
delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
# Calculate the y-intersection of the lines. Just plug the x above into the equation
# for the line through the a points. One could solve for y like x above, but this
# causes weirder unit behavior and seems a little less good numerically.
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
# If there's no intersections, return
if len(intersect_x) == 0:
return intersect_x, intersect_y
# Check for duplicates
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
# Make a mask based on the direction of sign change desired
if direction == 'increasing':
mask = sign_change > 0
elif direction == 'decreasing':
mask = sign_change < 0
elif direction == 'all':
return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask] | Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines. | Below is the the instruction that describes the task:
### Input:
Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
### Response:
def find_intersections(x, a, b, direction='all'):
    """Calculate the best estimate of intersection.
    Calculates the best estimates of the intersection of two y-value
    data sets that share a common x-value set.
    Parameters
    ----------
    x : array-like
        1-dimensional array of numeric x-values
    a : array-like
        1-dimensional array of y-values for line 1
    b : array-like
        1-dimensional array of y-values for line 2
    direction : string, optional
        specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
        or 'decreasing' (b becoming greater than a). Defaults to 'all'.
    Returns
    -------
    A tuple (x, y) of array-like with the x and y coordinates of the
    intersections of the lines.
    Notes
    -----
    Relies on the module helpers `nearest_intersection_idx` and
    `_next_non_masked_element` (defined elsewhere) to locate candidate
    crossings and skip masked elements.
    """
    # Find the index of the points just before the intersection(s)
    nearest_idx = nearest_intersection_idx(a, b)
    next_idx = nearest_idx + 1
    # Determine the sign of the change: positive where a ends up above b
    # just after the crossing (an "increasing" crossing), negative where
    # it ends up below ("decreasing").
    sign_change = np.sign(a[next_idx] - b[next_idx])
    # x-values around each intersection
    _, x0 = _next_non_masked_element(x, nearest_idx)
    _, x1 = _next_non_masked_element(x, next_idx)
    # y-values around each intersection for the first line
    _, a0 = _next_non_masked_element(a, nearest_idx)
    _, a1 = _next_non_masked_element(a, next_idx)
    # y-values around each intersection for the second line
    _, b0 = _next_non_masked_element(b, nearest_idx)
    _, b1 = _next_non_masked_element(b, next_idx)
    # Calculate the x-intersection. This comes from finding the equations of the two lines,
    # one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
    # finding their intersection, and reducing with a bunch of algebra.
    delta_y0 = a0 - b0
    delta_y1 = a1 - b1
    intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
    # Calculate the y-intersection of the lines. Just plug the x above into the equation
    # for the line through the a points. One could solve for y like x above, but this
    # causes weirder unit behavior and seems a little less good numerically.
    intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
    # If there's no intersections, return
    if len(intersect_x) == 0:
        return intersect_x, intersect_y
    # Check for duplicates: ediff1d with to_end=1 marks an element True when
    # the following x differs (the appended 1 always keeps the last point),
    # so runs of identical intersection x-values collapse to one entry.
    duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
    # Make a mask based on the direction of sign change desired
    if direction == 'increasing':
        mask = sign_change > 0
    elif direction == 'decreasing':
        mask = sign_change < 0
    elif direction == 'all':
        return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
    else:
        raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
    return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
def fold_text_string(self, prefix, hidden, **kwargs):
"""
:param str prefix:
:param str hidden:
:param kwargs: passed to :func:`fold_text`
:rtype: str
"""
import io
output_buf = io.StringIO()
self.fold_text(prefix=prefix, hidden=hidden, file=output_buf, **kwargs)
return output_buf.getvalue() | :param str prefix:
:param str hidden:
:param kwargs: passed to :func:`fold_text`
:rtype: str | Below is the the instruction that describes the task:
### Input:
:param str prefix:
:param str hidden:
:param kwargs: passed to :func:`fold_text`
:rtype: str
### Response:
def fold_text_string(self, prefix, hidden, **kwargs):
    """
    Render the output of :func:`fold_text` into a string instead of a stream.

    :param str prefix:
    :param str hidden:
    :param kwargs: passed to :func:`fold_text`
    :rtype: str
    """
    import io
    buffer = io.StringIO()
    self.fold_text(prefix=prefix, hidden=hidden, file=buffer, **kwargs)
    return buffer.getvalue()
def CO_ratio(self,ifig,ixaxis):
"""
plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis
Parameters
----------
ifig : integer
Figure number in which to plot
ixaxis : string
what quantity is to be on the x-axis, either 'time' or 'model'
The default is 'model'
"""
def C_O(model):
surface_c12=model.get('surface_c12')
surface_o16=model.get('surface_o16')
CORatio=old_div((surface_c12*4.),(surface_o16*3.))
return CORatio
if ixaxis=='time':
xax=self.get('star_age')
elif ixaxis=='model':
xax=self.get('model_number')
else:
raise IOError("ixaxis not recognised")
pl.figure(ifig)
pl.plot(xax,C_O(self)) | plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis
Parameters
----------
ifig : integer
Figure number in which to plot
ixaxis : string
what quantity is to be on the x-axis, either 'time' or 'model'
The default is 'model' | Below is the the instruction that describes the task:
### Input:
plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis
Parameters
----------
ifig : integer
Figure number in which to plot
ixaxis : string
what quantity is to be on the x-axis, either 'time' or 'model'
The default is 'model'
### Response:
def CO_ratio(self,ifig,ixaxis):
    """
    plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis

    Parameters
    ----------
    ifig : integer
        Figure number in which to plot
    ixaxis : string
        what quantity is to be on the x-axis, either 'time' or 'model'
        The default is 'model'
    """
    # Resolve the x-axis quantity first so a bad option fails fast.
    if ixaxis == 'time':
        xvals = self.get('star_age')
    elif ixaxis == 'model':
        xvals = self.get('model_number')
    else:
        raise IOError("ixaxis not recognised")
    # Number ratio from surface mass fractions: the 4/3 factor is the
    # 16/12 atomic-mass ratio of O to C.
    c12_surface = self.get('surface_c12')
    o16_surface = self.get('surface_o16')
    co_number_ratio = old_div(c12_surface * 4., o16_surface * 3.)
    pl.figure(ifig)
    pl.plot(xvals, co_number_ratio)
def _calc(count,
last_count,
start_time,
max_count,
speed_calc_cycles,
q,
last_speed,
lock):
"""do the pre calculations in order to get TET, speed, TTG
:param count: count
:param last_count: count at the last call, allows to treat the case of no progress
between sequential calls
:param start_time: the time when start was triggered
:param max_count: the maximal value count
:type max_count:
:param speed_calc_cycles:
:type speed_calc_cycles:
:param q:
:type q:
:param last_speed:
:type last_speed:
:param lock:
:type lock:
"""
count_value = count.value
start_time_value = start_time.value
current_time = time.time()
if last_count.value != count_value:
# some progress happened
with lock:
# save current state (count, time) to queue
q.put((count_value, current_time))
# get older state from queue (or initial state)
# to to speed estimation
if q.qsize() > speed_calc_cycles:
old_count_value, old_time = q.get()
else:
old_count_value, old_time = 0, start_time_value
last_count.value = count_value
#last_old_count.value = old_count_value
#last_old_time.value = old_time
speed = (count_value - old_count_value) / (current_time - old_time)
last_speed.value = speed
else:
# progress has not changed since last call
# use also old (cached) data from the queue
#old_count_value, old_time = last_old_count.value, last_old_time.value
speed = last_speed.value
if (max_count is None):
max_count_value = None
else:
max_count_value = max_count.value
tet = (current_time - start_time_value)
if (speed == 0) or (max_count_value is None) or (max_count_value == 0):
ttg = None
else:
ttg = math.ceil((max_count_value - count_value) / speed)
return count_value, max_count_value, speed, tet, ttg | do the pre calculations in order to get TET, speed, TTG
:param count: count
:param last_count: count at the last call, allows to treat the case of no progress
between sequential calls
:param start_time: the time when start was triggered
:param max_count: the maximal value count
:type max_count:
:param speed_calc_cycles:
:type speed_calc_cycles:
:param q:
:type q:
:param last_speed:
:type last_speed:
:param lock:
:type lock: | Below is the the instruction that describes the task:
### Input:
do the pre calculations in order to get TET, speed, TTG
:param count: count
:param last_count: count at the last call, allows to treat the case of no progress
between sequential calls
:param start_time: the time when start was triggered
:param max_count: the maximal value count
:type max_count:
:param speed_calc_cycles:
:type speed_calc_cycles:
:param q:
:type q:
:param last_speed:
:type last_speed:
:param lock:
:type lock:
### Response:
def _calc(count,
          last_count,
          start_time,
          max_count,
          speed_calc_cycles,
          q,
          last_speed,
          lock):
    """do the pre calculations in order to get TET, speed, TTG

    TET = total elapsed time, TTG = (estimated) time to go.

    :param count: count (shared ``.value`` holder of items processed so far)
    :param last_count: count at the last call, allows to treat the case of no progress
        between sequential calls
    :param start_time: the time when start was triggered (shared ``.value``, seconds)
    :param max_count: the maximal value count, or None when unknown
    :type max_count: shared ``.value`` holder or None
    :param speed_calc_cycles: number of (count, time) samples retained in ``q``
        for the moving-window speed estimate
    :type speed_calc_cycles: int
    :param q: queue of (count, time) samples acting as the sliding window
    :type q: queue-like with put/get/qsize
    :param last_speed: shared ``.value`` caching the most recent speed estimate
    :type last_speed:
    :param lock: guards access to ``q`` and the cached values
    :type lock: context-manager lock
    :return: tuple ``(count_value, max_count_value, speed, tet, ttg)``;
        ``ttg`` is None when it cannot be estimated
    """
    count_value = count.value
    start_time_value = start_time.value
    current_time = time.time()
    if last_count.value != count_value:
        # some progress happened
        with lock:
            # save current state (count, time) to queue
            q.put((count_value, current_time))
            # get older state from queue (or initial state)
            # to do speed estimation
            if q.qsize() > speed_calc_cycles:
                old_count_value, old_time = q.get()
            else:
                old_count_value, old_time = 0, start_time_value
            last_count.value = count_value
            #last_old_count.value = old_count_value
            #last_old_time.value = old_time
            # average speed over the retained window; cache for later calls
            speed = (count_value - old_count_value) / (current_time - old_time)
            last_speed.value = speed
    else:
        # progress has not changed since last call
        # use also old (cached) data from the queue
        #old_count_value, old_time = last_old_count.value, last_old_time.value
        speed = last_speed.value
    if (max_count is None):
        max_count_value = None
    else:
        max_count_value = max_count.value
    tet = (current_time - start_time_value)
    # TTG is undefined with zero speed or without a known total.
    if (speed == 0) or (max_count_value is None) or (max_count_value == 0):
        ttg = None
    else:
        ttg = math.ceil((max_count_value - count_value) / speed)
    return count_value, max_count_value, speed, tet, ttg
def check_forbidden_filename(filename,
destiny_os=os.name,
restricted_names=restricted_names):
'''
Get if given filename is forbidden for current OS or filesystem.
:param filename:
:param destiny_os: destination operative system
:param fs_encoding: destination filesystem filename encoding
:return: wether is forbidden on given OS (or filesystem) or not
:rtype: bool
'''
return (
filename in restricted_names or
destiny_os == 'nt' and
filename.split('.', 1)[0].upper() in nt_device_names
) | Get if given filename is forbidden for current OS or filesystem.
:param filename:
:param destiny_os: destination operative system
:param fs_encoding: destination filesystem filename encoding
:return: wether is forbidden on given OS (or filesystem) or not
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Get if given filename is forbidden for current OS or filesystem.
:param filename:
:param destiny_os: destination operative system
:param fs_encoding: destination filesystem filename encoding
:return: whether is forbidden on given OS (or filesystem) or not
:rtype: bool
### Response:
def check_forbidden_filename(filename,
                             destiny_os=os.name,
                             restricted_names=restricted_names):
    '''
    Get if given filename is forbidden for current OS or filesystem.

    :param filename: filename to check
    :param destiny_os: destination operative system (``os.name`` style)
    :param restricted_names: collection of names disallowed on the destination
    :return: whether is forbidden on given OS (or filesystem) or not
    :rtype: bool
    '''
    if filename in restricted_names:
        return True
    # On Windows ('nt'), device names (CON, NUL, COM1, ...) are reserved
    # even when followed by an extension, so compare the uppercased stem.
    stem = filename.split('.', 1)[0].upper()
    return destiny_os == 'nt' and stem in nt_device_names
def handle_combined_input(args):
"""Check for cases where we have a combined input nested list.
In these cases the CWL will be double nested:
[[[rec_a], [rec_b]]]
and we remove the outer nesting.
"""
cur_args = args[:]
while len(cur_args) == 1 and isinstance(cur_args[0], (list, tuple)):
cur_args = cur_args[0]
return cur_args | Check for cases where we have a combined input nested list.
In these cases the CWL will be double nested:
[[[rec_a], [rec_b]]]
and we remove the outer nesting. | Below is the the instruction that describes the task:
### Input:
Check for cases where we have a combined input nested list.
In these cases the CWL will be double nested:
[[[rec_a], [rec_b]]]
and we remove the outer nesting.
### Response:
def handle_combined_input(args):
    """Check for cases where we have a combined input nested list.

    In these cases the CWL will be double nested:
        [[[rec_a], [rec_b]]]
    and we remove the outer nesting.
    """
    current = args[:]
    # Peel off single-element wrapper lists/tuples until more than one
    # element (or a non-sequence element) is exposed.
    while True:
        if len(current) != 1 or not isinstance(current[0], (list, tuple)):
            return current
        current = current[0]
def get_filetypes(filelist, path=None, size=os.path.getsize):
""" Get a sorted list of file types and their weight in percent
from an iterable of file names.
@return: List of weighted file extensions (no '.'), sorted in descending order
@rtype: list of (weight, filetype)
"""
path = path or (lambda _: _)
# Get total size for each file extension
histo = defaultdict(int)
for entry in filelist:
ext = os.path.splitext(path(entry))[1].lstrip('.').lower()
if ext and ext[0] == 'r' and ext[1:].isdigit():
ext = "rar"
elif ext == "jpeg":
ext = "jpg"
elif ext == "mpeg":
ext = "mpg"
histo[ext] += size(entry)
# Normalize values to integer percent
total = sum(histo.values())
if total:
for ext, val in histo.items():
histo[ext] = int(val * 100.0 / total + .499)
return sorted(zip(histo.values(), histo.keys()), reverse=True) | Get a sorted list of file types and their weight in percent
from an iterable of file names.
@return: List of weighted file extensions (no '.'), sorted in descending order
@rtype: list of (weight, filetype) | Below is the the instruction that describes the task:
### Input:
Get a sorted list of file types and their weight in percent
from an iterable of file names.
@return: List of weighted file extensions (no '.'), sorted in descending order
@rtype: list of (weight, filetype)
### Response:
def get_filetypes(filelist, path=None, size=os.path.getsize):
    """ Get a sorted list of file types and their weight in percent
        from an iterable of file names.

        @param filelist: iterable of entries (file names, or anything
            mapped to a name by ``path``)
        @param path: optional callable mapping an entry to its file name
        @param size: callable mapping an entry to its size in bytes
        @return: List of weighted file extensions (no '.'), sorted in descending order
        @rtype: list of (weight, filetype)
    """
    if not path:
        path = lambda entry: entry
    # Accumulate the total byte size per normalized extension.
    weights = defaultdict(int)
    for entry in filelist:
        suffix = os.path.splitext(path(entry))[1].lstrip('.').lower()
        if suffix and suffix[0] == 'r' and suffix[1:].isdigit():
            # split RAR volumes (.r00, .r01, ...) all count as "rar"
            suffix = "rar"
        elif suffix == "jpeg":
            suffix = "jpg"
        elif suffix == "mpeg":
            suffix = "mpg"
        weights[suffix] += size(entry)
    # Convert byte totals to integer percentages (+.499 rounds near-halves).
    grand_total = sum(weights.values())
    if grand_total:
        for suffix in weights:
            weights[suffix] = int(weights[suffix] * 100.0 / grand_total + .499)
    return sorted(((weight, suffix) for suffix, weight in weights.items()),
                  reverse=True)
def sanitize_excel_sheet_name(sheet_name, replacement_text=""):
"""
Replace invalid characters for an Excel sheet name within
the ``sheet_name`` with the ``replacement_text``.
Invalid characters are as follows:
|invalid_excel_sheet_chars|.
The ``sheet_name`` truncate to 31 characters
(max sheet name length of Excel) from the head, if the length
of the name is exceed 31 characters.
:param str sheet_name: Excel sheet name to sanitize.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
:raises ValueError: If the ``sheet_name`` is an invalid sheet name.
"""
try:
unicode_sheet_name = _preprocess(sheet_name)
except AttributeError as e:
raise ValueError(e)
modify_sheet_name = __RE_INVALID_EXCEL_SHEET_NAME.sub(replacement_text, unicode_sheet_name)
return modify_sheet_name[:__MAX_SHEET_NAME_LEN] | Replace invalid characters for an Excel sheet name within
the ``sheet_name`` with the ``replacement_text``.
Invalid characters are as follows:
|invalid_excel_sheet_chars|.
The ``sheet_name`` truncate to 31 characters
(max sheet name length of Excel) from the head, if the length
of the name is exceed 31 characters.
:param str sheet_name: Excel sheet name to sanitize.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
:raises ValueError: If the ``sheet_name`` is an invalid sheet name. | Below is the the instruction that describes the task:
### Input:
Replace invalid characters for an Excel sheet name within
the ``sheet_name`` with the ``replacement_text``.
Invalid characters are as follows:
|invalid_excel_sheet_chars|.
The ``sheet_name`` truncate to 31 characters
(max sheet name length of Excel) from the head, if the length
of the name is exceed 31 characters.
:param str sheet_name: Excel sheet name to sanitize.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
:raises ValueError: If the ``sheet_name`` is an invalid sheet name.
### Response:
def sanitize_excel_sheet_name(sheet_name, replacement_text=""):
    """
    Replace invalid characters for an Excel sheet name within
    the ``sheet_name`` with the ``replacement_text``.
    Invalid characters are as follows:
    |invalid_excel_sheet_chars|.
    The name is also truncated to 31 characters (Excel's maximum sheet
    name length) if it is longer than that.
    :param str sheet_name: Excel sheet name to sanitize.
    :param str replacement_text: Replacement text.
    :return: A replacement string.
    :rtype: str
    :raises ValueError: If the ``sheet_name`` is an invalid sheet name.
    """
    try:
        normalized = _preprocess(sheet_name)
    except AttributeError as e:
        # non string-like inputs lack the attributes _preprocess requires
        raise ValueError(e)
    cleaned = __RE_INVALID_EXCEL_SHEET_NAME.sub(replacement_text, normalized)
    return cleaned[:__MAX_SHEET_NAME_LEN]
def run(self, event, lambda_context):
"""Run policy in push mode against given event.
Lambda automatically generates cloud watch logs, and metrics
for us, albeit with some deficienies, metrics no longer count
against valid resources matches, but against execution.
If metrics execution option is enabled, custodian will generate
metrics per normal.
"""
from c7n.actions import EventAction
mode = self.policy.data.get('mode', {})
if not bool(mode.get("log", True)):
root = logging.getLogger()
map(root.removeHandler, root.handlers[:])
root.handlers = [logging.NullHandler()]
resources = self.resolve_resources(event)
if not resources:
return resources
resources = self.policy.resource_manager.filter_resources(
resources, event)
if 'debug' in event:
self.policy.log.info("Filtered resources %d" % len(resources))
if not resources:
self.policy.log.info(
"policy: %s resources: %s no resources matched" % (
self.policy.name, self.policy.resource_type))
return
with self.policy.ctx:
self.policy.ctx.metrics.put_metric(
'ResourceCount', len(resources), 'Count', Scope="Policy",
buffer=False)
if 'debug' in event:
self.policy.log.info(
"Invoking actions %s", self.policy.resource_manager.actions)
self.policy._write_file(
'resources.json', utils.dumps(resources, indent=2))
for action in self.policy.resource_manager.actions:
self.policy.log.info(
"policy: %s invoking action: %s resources: %d",
self.policy.name, action.name, len(resources))
if isinstance(action, EventAction):
results = action.process(resources, event)
else:
results = action.process(resources)
self.policy._write_file(
"action-%s" % action.name, utils.dumps(results))
return resources | Run policy in push mode against given event.
Lambda automatically generates cloud watch logs, and metrics
for us, albeit with some deficienies, metrics no longer count
against valid resources matches, but against execution.
If metrics execution option is enabled, custodian will generate
metrics per normal. | Below is the the instruction that describes the task:
### Input:
Run policy in push mode against given event.
Lambda automatically generates cloud watch logs, and metrics
for us, albeit with some deficienies, metrics no longer count
against valid resources matches, but against execution.
If metrics execution option is enabled, custodian will generate
metrics per normal.
### Response:
def run(self, event, lambda_context):
    """Run policy in push mode against given event.

    Lambda automatically generates cloud watch logs, and metrics
    for us, albeit with some deficienies, metrics no longer count
    against valid resources matches, but against execution.
    If metrics execution option is enabled, custodian will generate
    metrics per normal.

    :param event: the triggering event payload (dict)
    :param lambda_context: AWS Lambda context object (unused here)
    :return: the list of matched resources, or None when no resource
        survived filtering
    """
    from c7n.actions import EventAction
    mode = self.policy.data.get('mode', {})
    if not bool(mode.get("log", True)):
        root = logging.getLogger()
        # BUG FIX: map() is lazy on Python 3, so the previous
        # map(root.removeHandler, root.handlers[:]) never executed.
        # Iterate over a copy so handlers are actually removed.
        for handler in root.handlers[:]:
            root.removeHandler(handler)
        root.handlers = [logging.NullHandler()]
    resources = self.resolve_resources(event)
    if not resources:
        return resources
    resources = self.policy.resource_manager.filter_resources(
        resources, event)
    if 'debug' in event:
        self.policy.log.info("Filtered resources %d" % len(resources))
    if not resources:
        self.policy.log.info(
            "policy: %s resources: %s no resources matched" % (
                self.policy.name, self.policy.resource_type))
        return
    with self.policy.ctx:
        self.policy.ctx.metrics.put_metric(
            'ResourceCount', len(resources), 'Count', Scope="Policy",
            buffer=False)
        if 'debug' in event:
            self.policy.log.info(
                "Invoking actions %s", self.policy.resource_manager.actions)
        # Persist matched resources for auditability before acting on them.
        self.policy._write_file(
            'resources.json', utils.dumps(resources, indent=2))
        for action in self.policy.resource_manager.actions:
            self.policy.log.info(
                "policy: %s invoking action: %s resources: %d",
                self.policy.name, action.name, len(resources))
            # EventActions additionally receive the triggering event.
            if isinstance(action, EventAction):
                results = action.process(resources, event)
            else:
                results = action.process(resources)
            self.policy._write_file(
                "action-%s" % action.name, utils.dumps(results))
    return resources
def _filter_matrix_rows(cls, matrix):
'''matrix = output from _to_matrix'''
indexes_to_keep = []
for i in range(len(matrix)):
keep_row = False
for element in matrix[i]:
if element not in {'NA', 'no'}:
keep_row = True
break
if keep_row:
indexes_to_keep.append(i)
return [matrix[i] for i in indexes_to_keep] | matrix = output from _to_matrix | Below is the the instruction that describes the task:
### Input:
matrix = output from _to_matrix
### Response:
def _filter_matrix_rows(cls, matrix):
'''matrix = output from _to_matrix'''
indexes_to_keep = []
for i in range(len(matrix)):
keep_row = False
for element in matrix[i]:
if element not in {'NA', 'no'}:
keep_row = True
break
if keep_row:
indexes_to_keep.append(i)
return [matrix[i] for i in indexes_to_keep] |
def getL4PredictedActiveCells(self):
"""
Returns the predicted active cells in each column in L4.
"""
predictedActive = []
for i in xrange(self.numColumns):
region = self.network.regions["L4Column_" + str(i)]
predictedActive.append(
region.getOutputData("predictedActiveCells").nonzero()[0])
return predictedActive | Returns the predicted active cells in each column in L4. | Below is the the instruction that describes the task:
### Input:
Returns the predicted active cells in each column in L4.
### Response:
def getL4PredictedActiveCells(self):
    """
    Returns the predicted active cells in each column in L4.

    :return: list with one entry per cortical column, each entry being the
        array of indices where the column's "predictedActiveCells" output
        is nonzero.
    """
    predictedActive = []
    # BUG FIX: `xrange` is Python 2 only (NameError on Python 3);
    # `range` behaves identically here on both versions.
    for i in range(self.numColumns):
        region = self.network.regions["L4Column_" + str(i)]
        predictedActive.append(
            region.getOutputData("predictedActiveCells").nonzero()[0])
    return predictedActive
def add_neighbours(self):
"""
Add all the pixels at max order in the neighbourhood of the moc
"""
time_delta = 1 << (2*(IntervalSet.HPY_MAX_ORDER - self.max_order))
intervals_arr = self._interval_set._intervals
intervals_arr[:, 0] = np.maximum(intervals_arr[:, 0] - time_delta, 0)
intervals_arr[:, 1] = np.minimum(intervals_arr[:, 1] + time_delta, (1 << 58) - 1)
self._interval_set = IntervalSet(intervals_arr) | Add all the pixels at max order in the neighbourhood of the moc | Below is the the instruction that describes the task:
### Input:
Add all the pixels at max order in the neighbourhood of the moc
### Response:
def add_neighbours(self):
    """
    Add all the pixels at max order in the neighbourhood of the moc

    Widens every interval by the size of one cell at ``self.max_order``:
    each lower bound is decreased and each upper bound increased by that
    amount, clamped to the valid encoding domain [0, 2**58 - 1].
    """
    # Size of a single max-order cell expressed at HPY_MAX_ORDER
    # resolution: each order refines a cell by a factor 4, hence the
    # shift of 2 bits per order of difference.
    time_delta = 1 << (2*(IntervalSet.HPY_MAX_ORDER - self.max_order))
    intervals_arr = self._interval_set._intervals
    # Expand in place, clamping bounds to [0, 2**58 - 1].
    intervals_arr[:, 0] = np.maximum(intervals_arr[:, 0] - time_delta, 0)
    intervals_arr[:, 1] = np.minimum(intervals_arr[:, 1] + time_delta, (1 << 58) - 1)
    # Rebuild the IntervalSet from the widened array.
    # NOTE(review): presumably IntervalSet.__init__ merges the now
    # possibly-overlapping intervals -- confirm against its implementation.
    self._interval_set = IntervalSet(intervals_arr)
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response | Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
            returns: requests object | Below is the instruction that describes the task:
### Input:
Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
### Response:
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response |
def compute_and_cache_missing_buckets(self, start_time, end_time,
untrusted_time, force_recompute=False):
"""
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation. For any buckets
where all events would have occurred before `untrusted_time`,
cache the results.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param untrusted_time: A datetime after which to not trust that
computed data is stable. Any buckets that overlap with or follow
this untrusted_time will not be cached.
:param force_recompute: A boolean that, if True, will force
recompute and recaching of even previously cached data.
"""
if untrusted_time and not untrusted_time.tzinfo:
untrusted_time = untrusted_time.replace(tzinfo=tzutc())
events = self._compute_buckets(start_time, end_time, compute_missing=True,
cache=True, untrusted_time=untrusted_time,
force_recompute=force_recompute)
for event in events:
yield event | Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation. For any buckets
where all events would have occurred before `untrusted_time`,
cache the results.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param untrusted_time: A datetime after which to not trust that
computed data is stable. Any buckets that overlap with or follow
this untrusted_time will not be cached.
:param force_recompute: A boolean that, if True, will force
        recompute and recaching of even previously cached data. | Below is the instruction that describes the task:
### Input:
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation. For any buckets
where all events would have occurred before `untrusted_time`,
cache the results.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param untrusted_time: A datetime after which to not trust that
computed data is stable. Any buckets that overlap with or follow
this untrusted_time will not be cached.
:param force_recompute: A boolean that, if True, will force
recompute and recaching of even previously cached data.
### Response:
def compute_and_cache_missing_buckets(self, start_time, end_time,
untrusted_time, force_recompute=False):
"""
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation. For any buckets
where all events would have occurred before `untrusted_time`,
cache the results.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param untrusted_time: A datetime after which to not trust that
computed data is stable. Any buckets that overlap with or follow
this untrusted_time will not be cached.
:param force_recompute: A boolean that, if True, will force
recompute and recaching of even previously cached data.
"""
if untrusted_time and not untrusted_time.tzinfo:
untrusted_time = untrusted_time.replace(tzinfo=tzutc())
events = self._compute_buckets(start_time, end_time, compute_missing=True,
cache=True, untrusted_time=untrusted_time,
force_recompute=force_recompute)
for event in events:
yield event |
def main():
"""Entrypoint for ``lander`` executable."""
args = parse_args()
config_logger(args)
logger = structlog.get_logger(__name__)
if args.show_version:
# only print the version
print_version()
sys.exit(0)
version = pkg_resources.get_distribution('lander').version
logger.info('Lander version {0}'.format(version))
config = Configuration(args=args)
# disable any build confirmed to be a PR with Travis
if config['is_travis_pull_request']:
logger.info('Skipping build from PR.')
sys.exit(0)
lander = Lander(config)
lander.build_site()
logger.info('Build complete')
if config['upload']:
lander.upload_site()
logger.info('Upload complete')
    logger.info('Lander complete') | Entrypoint for ``lander`` executable. | Below is the instruction that describes the task:
### Input:
Entrypoint for ``lander`` executable.
### Response:
def main():
"""Entrypoint for ``lander`` executable."""
args = parse_args()
config_logger(args)
logger = structlog.get_logger(__name__)
if args.show_version:
# only print the version
print_version()
sys.exit(0)
version = pkg_resources.get_distribution('lander').version
logger.info('Lander version {0}'.format(version))
config = Configuration(args=args)
# disable any build confirmed to be a PR with Travis
if config['is_travis_pull_request']:
logger.info('Skipping build from PR.')
sys.exit(0)
lander = Lander(config)
lander.build_site()
logger.info('Build complete')
if config['upload']:
lander.upload_site()
logger.info('Upload complete')
logger.info('Lander complete') |
def resolve_command(self, excmd):
"""Parse command and dispatch it (to schedulers for example) if necessary
If the command is not global it will be executed.
:param excmd: external command to handle
:type excmd: alignak.external_command.ExternalCommand
:return: result of command parsing. None for an invalid command.
"""
# Maybe the command is invalid. Bailout
try:
command = excmd.cmd_line
except AttributeError as exp: # pragma: no cover, simple protection
logger.warning("resolve_command, error with command %s", excmd)
logger.exception("Exception: %s", exp)
return None
# Parse command
command = command.strip()
cmd = self.get_command_and_args(command, excmd)
if cmd is None:
return cmd
# If we are a receiver, bail out here... do not try to execute the command
if self.mode == 'receiver' and not cmd.get('internal', False):
return cmd
if self.mode == 'applyer' and self.log_external_commands:
make_a_log = True
# #912: only log an external command if it is not a passive check
if self.my_conf.log_passive_checks and cmd['c_name'] \
in ['process_host_check_result', 'process_service_check_result']:
# Do not log the command
make_a_log = False
if make_a_log:
# I am a command dispatcher, notifies to my arbiter
self.send_an_element(make_monitoring_log('info', 'EXTERNAL COMMAND: ' + command))
if not cmd['global']:
# Execute the command
c_name = cmd['c_name']
args = cmd['args']
logger.debug("Execute command: %s %s", c_name, str(args))
logger.debug("Command time measurement: %s (%d s)",
excmd.creation_timestamp, time.time() - excmd.creation_timestamp)
statsmgr.timer('external-commands.latency', time.time() - excmd.creation_timestamp)
getattr(self, c_name)(*args)
else:
# Send command to all our schedulers
for scheduler_link in self.my_conf.schedulers:
logger.debug("Preparing an external command '%s' for the scheduler %s",
excmd, scheduler_link.name)
scheduler_link.pushed_commands.append(excmd.cmd_line)
return cmd | Parse command and dispatch it (to schedulers for example) if necessary
If the command is not global it will be executed.
:param excmd: external command to handle
:type excmd: alignak.external_command.ExternalCommand
        :return: result of command parsing. None for an invalid command. | Below is the instruction that describes the task:
### Input:
Parse command and dispatch it (to schedulers for example) if necessary
If the command is not global it will be executed.
:param excmd: external command to handle
:type excmd: alignak.external_command.ExternalCommand
:return: result of command parsing. None for an invalid command.
### Response:
def resolve_command(self, excmd):
"""Parse command and dispatch it (to schedulers for example) if necessary
If the command is not global it will be executed.
:param excmd: external command to handle
:type excmd: alignak.external_command.ExternalCommand
:return: result of command parsing. None for an invalid command.
"""
# Maybe the command is invalid. Bailout
try:
command = excmd.cmd_line
except AttributeError as exp: # pragma: no cover, simple protection
logger.warning("resolve_command, error with command %s", excmd)
logger.exception("Exception: %s", exp)
return None
# Parse command
command = command.strip()
cmd = self.get_command_and_args(command, excmd)
if cmd is None:
return cmd
# If we are a receiver, bail out here... do not try to execute the command
if self.mode == 'receiver' and not cmd.get('internal', False):
return cmd
if self.mode == 'applyer' and self.log_external_commands:
make_a_log = True
# #912: only log an external command if it is not a passive check
if self.my_conf.log_passive_checks and cmd['c_name'] \
in ['process_host_check_result', 'process_service_check_result']:
# Do not log the command
make_a_log = False
if make_a_log:
# I am a command dispatcher, notifies to my arbiter
self.send_an_element(make_monitoring_log('info', 'EXTERNAL COMMAND: ' + command))
if not cmd['global']:
# Execute the command
c_name = cmd['c_name']
args = cmd['args']
logger.debug("Execute command: %s %s", c_name, str(args))
logger.debug("Command time measurement: %s (%d s)",
excmd.creation_timestamp, time.time() - excmd.creation_timestamp)
statsmgr.timer('external-commands.latency', time.time() - excmd.creation_timestamp)
getattr(self, c_name)(*args)
else:
# Send command to all our schedulers
for scheduler_link in self.my_conf.schedulers:
logger.debug("Preparing an external command '%s' for the scheduler %s",
excmd, scheduler_link.name)
scheduler_link.pushed_commands.append(excmd.cmd_line)
return cmd |
def scaled_pressure2_send(self, time_boot_ms, press_abs, press_diff, temperature, force_mavlink1=False):
'''
Barometer readings for 2nd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
'''
return self.send(self.scaled_pressure2_encode(time_boot_ms, press_abs, press_diff, temperature), force_mavlink1=force_mavlink1) | Barometer readings for 2nd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
                temperature : Temperature measurement (0.01 degrees celsius) (int16_t) | Below is the instruction that describes the task:
### Input:
Barometer readings for 2nd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
### Response:
def scaled_pressure2_send(self, time_boot_ms, press_abs, press_diff, temperature, force_mavlink1=False):
'''
Barometer readings for 2nd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
'''
return self.send(self.scaled_pressure2_encode(time_boot_ms, press_abs, press_diff, temperature), force_mavlink1=force_mavlink1) |
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or "=" in line or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue() | used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
    filter only leading lines since multiline JSON is valid. | Below is the instruction that describes the task:
### Input:
used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
filter only leading lines since multiline JSON is valid.
### Response:
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or "=" in line or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue() |
def json_to_class(cls, json_str):
"""
:type cls: T|type
:type json_str: str
:rtype: T
"""
obj_raw = json.loads(json_str)
return deserialize(cls, obj_raw) | :type cls: T|type
:type json_str: str
        :rtype: T | Below is the instruction that describes the task:
### Input:
:type cls: T|type
:type json_str: str
:rtype: T
### Response:
def json_to_class(cls, json_str):
"""
:type cls: T|type
:type json_str: str
:rtype: T
"""
obj_raw = json.loads(json_str)
return deserialize(cls, obj_raw) |
def parse_options():
"""Specify the command line options to parse.
Returns
-------
opts : optparse.Values instance
Contains the option values in its 'dict' member variable.
args[0] : string or file-handler
The name of the file storing the data-set submitted
for Affinity Propagation clustering.
"""
parser = optparse.OptionParser(
usage = "Usage: %prog [options] file_name\n\n"
"file_name denotes the path where the data to be "
"processed by affinity propagation clustering is stored"
)
parser.add_option('-m', '--multiprocessing', dest = 'count',
default = multiprocessing.cpu_count(), type = 'int',
help = ("The number of processes to use (1..20) "
"[default %default]"))
parser.add_option('-f', '--file', dest = 'hdf5_file', default = None,
type = 'str',
help = ("File name or file handle of the HDF5 "
"data structure holding the matrices involved in "
"affinity propagation clustering "
"[default %default]"))
parser.add_option('-s', '--similarities', dest = 'similarities',
default = False, action = 'store_true',
help = ("Specifies if a matrix of similarities "
"has already been computed; only makes sense "
"with -f or --file in effect [default %default]"))
parser.add_option('-i', '--iterations', dest = 'max_iter',
default = 200, type = 'int',
help = ("The maximum number of message passing "
"iterations undergone before affinity "
"propagation returns, having reached "
"convergence or not [default %default]"))
parser.add_option('-c', '--convergence', dest = 'convergence_iter',
default = 15, type = 'int',
help = ("Specifies the number of consecutive "
"iterations without change in the number "
"of clusters that signals convergence "
"[default %default]") )
parser.add_option('-p', '--preference', dest = 'preference',
default = None, type = 'float',
help = ("The preference parameter of affinity "
"propagation [default %default]"))
parser.add_option('-d', '--damping', dest = 'damping',
default = 0.5, type = 'float',
help = ("The damping parameter of affinity "
"propagation; must be within 0.5 and 1.0 "
"[default %default]"))
parser.add_option('-v', '--verbose', dest = 'verbose',
default = False, action = 'store_true',
help = ("Turns on the display of messaging regarding "
"the status of the various stages of affinity "
"propagation clustering currently ongoing "
"on the user-specified data-set "
"[default %default]"))
opts, args = parser.parse_args()
if len(args) == 0:
parser.error('A data file must be specified')
if opts.similarities and (opts.hdf5_file is None):
parser.error("Option -s is conditional on -f")
if not (1 <= opts.count <= 20):
parser.error("The number of processes must range "
"from 1 to 20, inclusive")
if opts.max_iter <= 0:
parser.error("The number of iterations must be "
"a non-negative integer")
if opts.convergence_iter >= opts.max_iter:
parser.error("The number of iterations signalling convergence "
"cannot exceed the maximum number of iterations possibly "
"required")
if not (0.5 <= opts.damping <= 1.0):
parser.error("The damping parameter is restricted to values "
"between 0.5 and 1.0")
return opts, args[0] | Specify the command line options to parse.
Returns
-------
opts : optparse.Values instance
Contains the option values in its 'dict' member variable.
args[0] : string or file-handler
The name of the file storing the data-set submitted
        for Affinity Propagation clustering. | Below is the instruction that describes the task:
### Input:
Specify the command line options to parse.
Returns
-------
opts : optparse.Values instance
Contains the option values in its 'dict' member variable.
args[0] : string or file-handler
The name of the file storing the data-set submitted
for Affinity Propagation clustering.
### Response:
def parse_options():
"""Specify the command line options to parse.
Returns
-------
opts : optparse.Values instance
Contains the option values in its 'dict' member variable.
args[0] : string or file-handler
The name of the file storing the data-set submitted
for Affinity Propagation clustering.
"""
parser = optparse.OptionParser(
usage = "Usage: %prog [options] file_name\n\n"
"file_name denotes the path where the data to be "
"processed by affinity propagation clustering is stored"
)
parser.add_option('-m', '--multiprocessing', dest = 'count',
default = multiprocessing.cpu_count(), type = 'int',
help = ("The number of processes to use (1..20) "
"[default %default]"))
parser.add_option('-f', '--file', dest = 'hdf5_file', default = None,
type = 'str',
help = ("File name or file handle of the HDF5 "
"data structure holding the matrices involved in "
"affinity propagation clustering "
"[default %default]"))
parser.add_option('-s', '--similarities', dest = 'similarities',
default = False, action = 'store_true',
help = ("Specifies if a matrix of similarities "
"has already been computed; only makes sense "
"with -f or --file in effect [default %default]"))
parser.add_option('-i', '--iterations', dest = 'max_iter',
default = 200, type = 'int',
help = ("The maximum number of message passing "
"iterations undergone before affinity "
"propagation returns, having reached "
"convergence or not [default %default]"))
parser.add_option('-c', '--convergence', dest = 'convergence_iter',
default = 15, type = 'int',
help = ("Specifies the number of consecutive "
"iterations without change in the number "
"of clusters that signals convergence "
"[default %default]") )
parser.add_option('-p', '--preference', dest = 'preference',
default = None, type = 'float',
help = ("The preference parameter of affinity "
"propagation [default %default]"))
parser.add_option('-d', '--damping', dest = 'damping',
default = 0.5, type = 'float',
help = ("The damping parameter of affinity "
"propagation; must be within 0.5 and 1.0 "
"[default %default]"))
parser.add_option('-v', '--verbose', dest = 'verbose',
default = False, action = 'store_true',
help = ("Turns on the display of messaging regarding "
"the status of the various stages of affinity "
"propagation clustering currently ongoing "
"on the user-specified data-set "
"[default %default]"))
opts, args = parser.parse_args()
if len(args) == 0:
parser.error('A data file must be specified')
if opts.similarities and (opts.hdf5_file is None):
parser.error("Option -s is conditional on -f")
if not (1 <= opts.count <= 20):
parser.error("The number of processes must range "
"from 1 to 20, inclusive")
if opts.max_iter <= 0:
parser.error("The number of iterations must be "
"a non-negative integer")
if opts.convergence_iter >= opts.max_iter:
parser.error("The number of iterations signalling convergence "
"cannot exceed the maximum number of iterations possibly "
"required")
if not (0.5 <= opts.damping <= 1.0):
parser.error("The damping parameter is restricted to values "
"between 0.5 and 1.0")
return opts, args[0] |
def process_config(config, config_data):
""" Populates config with data from the configuration data dict. It handles
components, data, log, management and session sections from the
configuration data.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration
file.
"""
if 'components' in config_data:
process_components_config_section(config, config_data['components'])
if 'data' in config_data:
process_data_config_section(config, config_data['data'])
if 'log' in config_data:
process_log_config_section(config, config_data['log'])
if 'management' in config_data:
process_management_config_section(config, config_data['management'])
if 'session' in config_data:
process_session_config_section(config, config_data['session']) | Populates config with data from the configuration data dict. It handles
components, data, log, management and session sections from the
configuration data.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration
        file. | Below is the instruction that describes the task:
### Input:
Populates config with data from the configuration data dict. It handles
components, data, log, management and session sections from the
configuration data.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration
file.
### Response:
def process_config(config, config_data):
""" Populates config with data from the configuration data dict. It handles
components, data, log, management and session sections from the
configuration data.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration
file.
"""
if 'components' in config_data:
process_components_config_section(config, config_data['components'])
if 'data' in config_data:
process_data_config_section(config, config_data['data'])
if 'log' in config_data:
process_log_config_section(config, config_data['log'])
if 'management' in config_data:
process_management_config_section(config, config_data['management'])
if 'session' in config_data:
process_session_config_section(config, config_data['session']) |
def schema(self, shex: Optional[Union[str, ShExJ.Schema]]) -> None:
""" Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema.
:param shex: Schema
"""
self.pfx = None
if shex is not None:
if isinstance(shex, ShExJ.Schema):
self._schema = shex
else:
shext = shex.strip()
loader = SchemaLoader()
if ('\n' in shex or '\r' in shex) or shext[0] in '#<_: ':
self._schema = loader.loads(shex)
else:
self._schema = loader.load(shex) if isinstance(shex, str) else shex
if self._schema is None:
raise ValueError("Unable to parse shex file")
self.pfx = PrefixLibrary(loader.schema_text) | Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema.
        :param shex: Schema | Below is the instruction that describes the task:
### Input:
Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema.
:param shex: Schema
### Response:
def schema(self, shex: Optional[Union[str, ShExJ.Schema]]) -> None:
""" Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema.
:param shex: Schema
"""
self.pfx = None
if shex is not None:
if isinstance(shex, ShExJ.Schema):
self._schema = shex
else:
shext = shex.strip()
loader = SchemaLoader()
if ('\n' in shex or '\r' in shex) or shext[0] in '#<_: ':
self._schema = loader.loads(shex)
else:
self._schema = loader.load(shex) if isinstance(shex, str) else shex
if self._schema is None:
raise ValueError("Unable to parse shex file")
self.pfx = PrefixLibrary(loader.schema_text) |
def load_default_templates(self):
"""Load the default templates"""
for importer, modname, is_pkg in pkgutil.iter_modules(templates.__path__):
            self.register_template('.'.join((templates.__name__, modname))) | Load the default templates | Below is the instruction that describes the task:
### Input:
Load the default templates
### Response:
def load_default_templates(self):
"""Load the default templates"""
for importer, modname, is_pkg in pkgutil.iter_modules(templates.__path__):
self.register_template('.'.join((templates.__name__, modname))) |
def iter_stack_frames(frames=None):
"""
Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
local variable.
"""
if not frames:
frames = inspect.stack()[1:]
for frame, lineno in ((f[0], f[2]) for f in reversed(frames)):
f_locals = getattr(frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield frame, lineno | Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
    local variable. | Below is the instruction that describes the task:
### Input:
Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
local variable.
### Response:
def iter_stack_frames(frames=None):
"""
Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
local variable.
"""
if not frames:
frames = inspect.stack()[1:]
for frame, lineno in ((f[0], f[2]) for f in reversed(frames)):
f_locals = getattr(frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield frame, lineno |
def get_ws_subscriptions(self, apiname):
"""Returns the websocket subscriptions"""
try:
return self.services_by_name\
.get(apiname)\
.get("subscriptions")\
.copy()
except AttributeError:
            raise Exception(f"Couldn't find the websocket subscriptions") | Returns the websocket subscriptions | Below is the instruction that describes the task:
### Input:
Returns the websocket subscriptions
### Response:
def get_ws_subscriptions(self, apiname):
"""Returns the websocket subscriptions"""
try:
return self.services_by_name\
.get(apiname)\
.get("subscriptions")\
.copy()
except AttributeError:
raise Exception(f"Couldn't find the websocket subscriptions") |
def points(self):
""" returns a pointer to the points as a numpy object """
vtk_data = self.GetPoints().GetData()
arr = vtk_to_numpy(vtk_data)
        return vtki_ndarray(arr, vtk_data) | returns a pointer to the points as a numpy object | Below is the instruction that describes the task:
### Input:
returns a pointer to the points as a numpy object
### Response:
def points(self):
""" returns a pointer to the points as a numpy object """
vtk_data = self.GetPoints().GetData()
arr = vtk_to_numpy(vtk_data)
return vtki_ndarray(arr, vtk_data) |
def addch(window, y, x, ch, attr):
"""
Curses addch() method that fixes a major bug in python 3.4.
See http://bugs.python.org/issue21088
"""
if sys.version_info[:3] == (3, 4, 0):
y, x = x, y
window.addch(y, x, ch, attr) | Curses addch() method that fixes a major bug in python 3.4.
    See http://bugs.python.org/issue21088 | Below is the instruction that describes the task:
### Input:
Curses addch() method that fixes a major bug in python 3.4.
See http://bugs.python.org/issue21088
### Response:
def addch(window, y, x, ch, attr):
"""
Curses addch() method that fixes a major bug in python 3.4.
See http://bugs.python.org/issue21088
"""
if sys.version_info[:3] == (3, 4, 0):
y, x = x, y
window.addch(y, x, ch, attr) |
def impersonate(self, user, enterprise):
""" Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
"""
if not user or not enterprise:
raise ValueError('You must set a user name and an enterprise name to begin impersonification')
self._is_impersonating = True
self._impersonation = "%s@%s" % (user, enterprise) | Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation | Below is the the instruction that describes the task:
### Input:
Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
### Response:
def impersonate(self, user, enterprise):
""" Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
"""
if not user or not enterprise:
raise ValueError('You must set a user name and an enterprise name to begin impersonification')
self._is_impersonating = True
self._impersonation = "%s@%s" % (user, enterprise) |
def generate_token(self, *args):
""" Convert a list of integers or strings, specified by ``*args``, into an encrypted, timestamped, and signed token.
Note: strings may not contain any ``'|'`` characters, nor start with a ``'~'`` character
as these are used as separators and integer indicators for encoding.
Example:
::
# Combine User ID with last 8 bytes of their password
# to invalidate tokens when passwords change.
user_id = user.id
password_ends_with = user.password[-8:0]
token = token_manager.generate_token(user_id, password_ends_with)
"""
concatenated_str = self.encode_data_items(*args)
token = self.encrypt_string(concatenated_str)
return token | Convert a list of integers or strings, specified by ``*args``, into an encrypted, timestamped, and signed token.
Note: strings may not contain any ``'|'`` characters, nor start with a ``'~'`` character
as these are used as separators and integer indicators for encoding.
Example:
::
# Combine User ID with last 8 bytes of their password
# to invalidate tokens when passwords change.
user_id = user.id
password_ends_with = user.password[-8:0]
token = token_manager.generate_token(user_id, password_ends_with) | Below is the the instruction that describes the task:
### Input:
Convert a list of integers or strings, specified by ``*args``, into an encrypted, timestamped, and signed token.
Note: strings may not contain any ``'|'`` characters, nor start with a ``'~'`` character
as these are used as separators and integer indicators for encoding.
Example:
::
# Combine User ID with last 8 bytes of their password
# to invalidate tokens when passwords change.
user_id = user.id
password_ends_with = user.password[-8:0]
token = token_manager.generate_token(user_id, password_ends_with)
### Response:
def generate_token(self, *args):
""" Convert a list of integers or strings, specified by ``*args``, into an encrypted, timestamped, and signed token.
Note: strings may not contain any ``'|'`` characters, nor start with a ``'~'`` character
as these are used as separators and integer indicators for encoding.
Example:
::
# Combine User ID with last 8 bytes of their password
# to invalidate tokens when passwords change.
user_id = user.id
password_ends_with = user.password[-8:0]
token = token_manager.generate_token(user_id, password_ends_with)
"""
concatenated_str = self.encode_data_items(*args)
token = self.encrypt_string(concatenated_str)
return token |
def _begin(self, connection, filterargs=(), escape=True):
"""
Begins an asynchronous search and returns the message id to retrieve
the results.
filterargs is an object that will be used for expansion of the filter
string. If escape is True, values in filterargs will be escaped.
"""
if escape:
filterargs = self._escape_filterargs(filterargs)
try:
filterstr = self.filterstr % filterargs
msgid = connection.search(force_str(self.base_dn),
self.scope, force_str(filterstr))
except ldap.LDAPError as e:
msgid = None
logger.error(u"search('%s', %d, '%s') raised %s" %
(self.base_dn, self.scope, filterstr, pprint.pformat(e)))
return msgid | Begins an asynchronous search and returns the message id to retrieve
the results.
filterargs is an object that will be used for expansion of the filter
string. If escape is True, values in filterargs will be escaped. | Below is the the instruction that describes the task:
### Input:
Begins an asynchronous search and returns the message id to retrieve
the results.
filterargs is an object that will be used for expansion of the filter
string. If escape is True, values in filterargs will be escaped.
### Response:
def _begin(self, connection, filterargs=(), escape=True):
"""
Begins an asynchronous search and returns the message id to retrieve
the results.
filterargs is an object that will be used for expansion of the filter
string. If escape is True, values in filterargs will be escaped.
"""
if escape:
filterargs = self._escape_filterargs(filterargs)
try:
filterstr = self.filterstr % filterargs
msgid = connection.search(force_str(self.base_dn),
self.scope, force_str(filterstr))
except ldap.LDAPError as e:
msgid = None
logger.error(u"search('%s', %d, '%s') raised %s" %
(self.base_dn, self.scope, filterstr, pprint.pformat(e)))
return msgid |
def multi_segment(annotation, sr=22050, length=None, **kwargs):
'''Sonify multi-level segmentations'''
# Pentatonic scale, because why not
PENT = [1, 32./27, 4./3, 3./2, 16./9]
DURATION = 0.1
h_int, _ = hierarchy_flatten(annotation)
if length is None:
length = int(sr * (max(np.max(_) for _ in h_int) + 1. / DURATION) + 1)
y = 0.0
for ints, (oc, scale) in zip(h_int, product(range(3, 3 + len(h_int)),
PENT)):
click = mkclick(440.0 * scale * oc, sr=sr, duration=DURATION)
y = y + filter_kwargs(mir_eval.sonify.clicks,
np.unique(ints),
fs=sr, length=length,
click=click)
return y | Sonify multi-level segmentations | Below is the the instruction that describes the task:
### Input:
Sonify multi-level segmentations
### Response:
def multi_segment(annotation, sr=22050, length=None, **kwargs):
'''Sonify multi-level segmentations'''
# Pentatonic scale, because why not
PENT = [1, 32./27, 4./3, 3./2, 16./9]
DURATION = 0.1
h_int, _ = hierarchy_flatten(annotation)
if length is None:
length = int(sr * (max(np.max(_) for _ in h_int) + 1. / DURATION) + 1)
y = 0.0
for ints, (oc, scale) in zip(h_int, product(range(3, 3 + len(h_int)),
PENT)):
click = mkclick(440.0 * scale * oc, sr=sr, duration=DURATION)
y = y + filter_kwargs(mir_eval.sonify.clicks,
np.unique(ints),
fs=sr, length=length,
click=click)
return y |
def merge(config):
"""Merge the current branch into master."""
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == "master":
error_out("You're already on the master branch.")
if repo.is_dirty():
error_out(
'Repo is "dirty". ({})'.format(
", ".join([repr(x.b_path) for x in repo.index.diff(None)])
)
)
branch_name = active_branch.name
state = read(config.configfile)
origin_name = state.get("ORIGIN_NAME", "origin")
upstream_remote = None
for remote in repo.remotes:
if remote.name == origin_name:
upstream_remote = remote
break
if not upstream_remote:
error_out("No remote called {!r} found".format(origin_name))
repo.heads.master.checkout()
upstream_remote.pull(repo.heads.master)
repo.git.merge(branch_name)
repo.git.branch("-d", branch_name)
success_out("Branch {!r} deleted.".format(branch_name))
info_out("NOW, you might want to run:\n")
info_out("git push origin master\n\n")
push_for_you = input("Run that push? [Y/n] ").lower().strip() != "n"
if push_for_you:
upstream_remote.push("master")
success_out("Current master pushed to {}".format(upstream_remote.name)) | Merge the current branch into master. | Below is the the instruction that describes the task:
### Input:
Merge the current branch into master.
### Response:
def merge(config):
"""Merge the current branch into master."""
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == "master":
error_out("You're already on the master branch.")
if repo.is_dirty():
error_out(
'Repo is "dirty". ({})'.format(
", ".join([repr(x.b_path) for x in repo.index.diff(None)])
)
)
branch_name = active_branch.name
state = read(config.configfile)
origin_name = state.get("ORIGIN_NAME", "origin")
upstream_remote = None
for remote in repo.remotes:
if remote.name == origin_name:
upstream_remote = remote
break
if not upstream_remote:
error_out("No remote called {!r} found".format(origin_name))
repo.heads.master.checkout()
upstream_remote.pull(repo.heads.master)
repo.git.merge(branch_name)
repo.git.branch("-d", branch_name)
success_out("Branch {!r} deleted.".format(branch_name))
info_out("NOW, you might want to run:\n")
info_out("git push origin master\n\n")
push_for_you = input("Run that push? [Y/n] ").lower().strip() != "n"
if push_for_you:
upstream_remote.push("master")
success_out("Current master pushed to {}".format(upstream_remote.name)) |
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object.
"""
stats = self._list_fields()
options = self._get_current_options()
section_titles = []
sections = []
observation_columns = set(self.observation_data_column_names)
not_needed = set([self.user_id,
self.item_id,
self.target])
num_obs_fields = len(observation_columns.difference(not_needed))
user_features = self.user_side_data_column_names
item_features = self.item_side_data_column_names
section_titles.append("Schema")
schema_fields = [
('User ID', 'user_id'),
('Item ID', 'item_id'),
('Target', 'target'),
('Additional observation features', _precomputed_field(num_obs_fields)),
('User side features', _precomputed_field(user_features)),
('Item side features', _precomputed_field(item_features))]
sections.append(schema_fields)
data_fields = [
('Number of observations', 'num_observations'),
('Number of users', 'num_users'),
('Number of items', 'num_items')]
section_titles.append("Statistics")
sections.append(data_fields)
training_fields = [
('Training time', 'training_time')]
if 'data_load_elapsed_time' in stats:
training_fields.append(('Data load time',
'data_load_elapsed_time'))
if 'validation_metrics_elapsed_time' in stats:
training_fields.append(('Validation metrics time',
'validation_metrics_elapsed_time'))
section_titles.append("Training summary")
sections.append(training_fields)
# Remove any options that should not be shown under "Settings"
to_ignore = ['random_seed',
'user_id',
'item_id',
'target']
for k in to_ignore:
if k in options:
del options[k]
def add_ordered_options(name, ordered_options, additional = []):
option_fields = []
for k, v in additional:
option_fields.append((k, _precomputed_field(v)))
for k in ordered_options:
if k in options:
option_fields.append((k, _precomputed_field(options[k])))
del options[k]
if option_fields:
section_titles.append(name)
sections.append(option_fields)
# Put in a number of things in order, if applicable.
# Model parameters
model_parameter_options = [
"only_top_k",
"threshold",
"num_factors",
"binary_target",
"side_data_factorization",
"solver",
"nmf",
"max_iterations",
"similarity_type",
"training_method"]
add_ordered_options("Model Parameters", model_parameter_options,
[("Model class", self.__class__.__name__)])
# Regularization type options
regularization_options = [
"regularization",
"regularization_type",
"linear_regularization",
"ranking_regularization",
"unobserved_rating_value",
"num_sampled_negative_examples",
"ials_confidence_scaling_type",
"ials_confidence_scaling_factor"]
add_ordered_options("Regularization Settings", regularization_options)
# Optimization stuff
optimization_settings = [
"init_random_sigma",
"sgd_convergence_interval",
"sgd_convergence_threshold",
"sgd_max_trial_iterations",
"sgd_sampling_block_size",
"sgd_step_adjustment_interval",
"sgd_step_size",
"sgd_trial_sample_minimum_size",
"sgd_trial_sample_proportion",
"step_size_decrease_rate",
"additional_iterations_if_unhealthy",
"adagrad_momentum_weighting",
"num_tempering_iterations",
"tempering_regularization_start_value",
"track_exact_loss"]
add_ordered_options("Optimization Settings", optimization_settings)
# clean up
option_fields = []
for k, v in _six.iteritems(options):
option_fields.append((k, _precomputed_field(v)))
if option_fields:
section_titles.append("Other Settings")
sections.append(option_fields)
return (sections, section_titles) | Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object. | Below is the the instruction that describes the task:
### Input:
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object.
### Response:
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object.
"""
stats = self._list_fields()
options = self._get_current_options()
section_titles = []
sections = []
observation_columns = set(self.observation_data_column_names)
not_needed = set([self.user_id,
self.item_id,
self.target])
num_obs_fields = len(observation_columns.difference(not_needed))
user_features = self.user_side_data_column_names
item_features = self.item_side_data_column_names
section_titles.append("Schema")
schema_fields = [
('User ID', 'user_id'),
('Item ID', 'item_id'),
('Target', 'target'),
('Additional observation features', _precomputed_field(num_obs_fields)),
('User side features', _precomputed_field(user_features)),
('Item side features', _precomputed_field(item_features))]
sections.append(schema_fields)
data_fields = [
('Number of observations', 'num_observations'),
('Number of users', 'num_users'),
('Number of items', 'num_items')]
section_titles.append("Statistics")
sections.append(data_fields)
training_fields = [
('Training time', 'training_time')]
if 'data_load_elapsed_time' in stats:
training_fields.append(('Data load time',
'data_load_elapsed_time'))
if 'validation_metrics_elapsed_time' in stats:
training_fields.append(('Validation metrics time',
'validation_metrics_elapsed_time'))
section_titles.append("Training summary")
sections.append(training_fields)
# Remove any options that should not be shown under "Settings"
to_ignore = ['random_seed',
'user_id',
'item_id',
'target']
for k in to_ignore:
if k in options:
del options[k]
def add_ordered_options(name, ordered_options, additional = []):
option_fields = []
for k, v in additional:
option_fields.append((k, _precomputed_field(v)))
for k in ordered_options:
if k in options:
option_fields.append((k, _precomputed_field(options[k])))
del options[k]
if option_fields:
section_titles.append(name)
sections.append(option_fields)
# Put in a number of things in order, if applicable.
# Model parameters
model_parameter_options = [
"only_top_k",
"threshold",
"num_factors",
"binary_target",
"side_data_factorization",
"solver",
"nmf",
"max_iterations",
"similarity_type",
"training_method"]
add_ordered_options("Model Parameters", model_parameter_options,
[("Model class", self.__class__.__name__)])
# Regularization type options
regularization_options = [
"regularization",
"regularization_type",
"linear_regularization",
"ranking_regularization",
"unobserved_rating_value",
"num_sampled_negative_examples",
"ials_confidence_scaling_type",
"ials_confidence_scaling_factor"]
add_ordered_options("Regularization Settings", regularization_options)
# Optimization stuff
optimization_settings = [
"init_random_sigma",
"sgd_convergence_interval",
"sgd_convergence_threshold",
"sgd_max_trial_iterations",
"sgd_sampling_block_size",
"sgd_step_adjustment_interval",
"sgd_step_size",
"sgd_trial_sample_minimum_size",
"sgd_trial_sample_proportion",
"step_size_decrease_rate",
"additional_iterations_if_unhealthy",
"adagrad_momentum_weighting",
"num_tempering_iterations",
"tempering_regularization_start_value",
"track_exact_loss"]
add_ordered_options("Optimization Settings", optimization_settings)
# clean up
option_fields = []
for k, v in _six.iteritems(options):
option_fields.append((k, _precomputed_field(v)))
if option_fields:
section_titles.append("Other Settings")
sections.append(option_fields)
return (sections, section_titles) |
def offset(self, position):
"""Offset of given position from stranded start of this locus.
For example, if a Locus goes from 10..20 and is on the negative strand,
then the offset of position 13 is 7, whereas if the Locus is on the
positive strand, then the offset is 3.
"""
if position > self.end or position < self.start:
raise ValueError(
"Position %d outside valid range %d..%d of %s" % (
position, self.start, self.end, self))
elif self.on_forward_strand:
return position - self.start
else:
return self.end - position | Offset of given position from stranded start of this locus.
For example, if a Locus goes from 10..20 and is on the negative strand,
then the offset of position 13 is 7, whereas if the Locus is on the
positive strand, then the offset is 3. | Below is the the instruction that describes the task:
### Input:
Offset of given position from stranded start of this locus.
For example, if a Locus goes from 10..20 and is on the negative strand,
then the offset of position 13 is 7, whereas if the Locus is on the
positive strand, then the offset is 3.
### Response:
def offset(self, position):
"""Offset of given position from stranded start of this locus.
For example, if a Locus goes from 10..20 and is on the negative strand,
then the offset of position 13 is 7, whereas if the Locus is on the
positive strand, then the offset is 3.
"""
if position > self.end or position < self.start:
raise ValueError(
"Position %d outside valid range %d..%d of %s" % (
position, self.start, self.end, self))
elif self.on_forward_strand:
return position - self.start
else:
return self.end - position |
def _ConsumeAnyTypeUrl(self, tokenizer):
"""Consumes a google.protobuf.Any type URL and returns the type name."""
# Consume "type.googleapis.com/".
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('/')
# Consume the fully-qualified type name.
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
return '.'.join(name) | Consumes a google.protobuf.Any type URL and returns the type name. | Below is the the instruction that describes the task:
### Input:
Consumes a google.protobuf.Any type URL and returns the type name.
### Response:
def _ConsumeAnyTypeUrl(self, tokenizer):
"""Consumes a google.protobuf.Any type URL and returns the type name."""
# Consume "type.googleapis.com/".
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('/')
# Consume the fully-qualified type name.
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
return '.'.join(name) |
def flattened_labels(self):
"""
Return a sequence of tuples, each containing the flattened hierarchy
of category labels for a leaf category. Each tuple is in parent ->
child order, e.g. ``('US', 'CA', 'San Francisco')``, with the leaf
category appearing last. If this categories collection is
non-hierarchical, each tuple will contain only a leaf category label.
If the plot has no series (and therefore no categories), an empty
tuple is returned.
"""
cat = self._xChart.cat
if cat is None:
return ()
if cat.multiLvlStrRef is None:
return tuple([(category.label,) for category in self])
return tuple(
[tuple([category.label for category in reversed(flat_cat)])
for flat_cat in self._iter_flattened_categories()]
) | Return a sequence of tuples, each containing the flattened hierarchy
of category labels for a leaf category. Each tuple is in parent ->
child order, e.g. ``('US', 'CA', 'San Francisco')``, with the leaf
category appearing last. If this categories collection is
non-hierarchical, each tuple will contain only a leaf category label.
If the plot has no series (and therefore no categories), an empty
tuple is returned. | Below is the the instruction that describes the task:
### Input:
Return a sequence of tuples, each containing the flattened hierarchy
of category labels for a leaf category. Each tuple is in parent ->
child order, e.g. ``('US', 'CA', 'San Francisco')``, with the leaf
category appearing last. If this categories collection is
non-hierarchical, each tuple will contain only a leaf category label.
If the plot has no series (and therefore no categories), an empty
tuple is returned.
### Response:
def flattened_labels(self):
"""
Return a sequence of tuples, each containing the flattened hierarchy
of category labels for a leaf category. Each tuple is in parent ->
child order, e.g. ``('US', 'CA', 'San Francisco')``, with the leaf
category appearing last. If this categories collection is
non-hierarchical, each tuple will contain only a leaf category label.
If the plot has no series (and therefore no categories), an empty
tuple is returned.
"""
cat = self._xChart.cat
if cat is None:
return ()
if cat.multiLvlStrRef is None:
return tuple([(category.label,) for category in self])
return tuple(
[tuple([category.label for category in reversed(flat_cat)])
for flat_cat in self._iter_flattened_categories()]
) |
def get_max_ballcount(cls, ball_diam, rolling_radius, min_gap=0.):
"""
The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int`
"""
min_arc = asin(((ball_diam + min_gap) / 2) / rolling_radius) * 2
return int((2 * pi) / min_arc) | The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int` | Below is the the instruction that describes the task:
### Input:
The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int`
### Response:
def get_max_ballcount(cls, ball_diam, rolling_radius, min_gap=0.):
"""
The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int`
"""
min_arc = asin(((ball_diam + min_gap) / 2) / rolling_radius) * 2
return int((2 * pi) / min_arc) |
def get_event_sort_key(self, event_type):
"""Return the process sort key"""
# Process sort depending on alert type
if event_type.startswith("MEM"):
# Sort TOP process by memory_percent
ret = 'memory_percent'
elif event_type.startswith("CPU_IOWAIT"):
# Sort TOP process by io_counters (only for Linux OS)
ret = 'io_counters'
else:
# Default sort is...
ret = 'cpu_percent'
return ret | Return the process sort key | Below is the the instruction that describes the task:
### Input:
Return the process sort key
### Response:
def get_event_sort_key(self, event_type):
"""Return the process sort key"""
# Process sort depending on alert type
if event_type.startswith("MEM"):
# Sort TOP process by memory_percent
ret = 'memory_percent'
elif event_type.startswith("CPU_IOWAIT"):
# Sort TOP process by io_counters (only for Linux OS)
ret = 'io_counters'
else:
# Default sort is...
ret = 'cpu_percent'
return ret |
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti | r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index | Below is the the instruction that describes the task:
### Input:
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
### Response:
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti |
def _handle(self, msg):
"""
Pass a received message to the registered handlers.
:param msg: received message
:type msg: :class:`fatbotslim.irc.Message`
"""
def handler_yielder():
for handler in self.handlers:
yield handler
def handler_callback(_):
if msg.propagate:
try:
h = hyielder.next()
g = self._pool.spawn(handler_runner, h)
g.link(handler_callback)
except StopIteration:
pass
def handler_runner(h):
for command in h.commands:
if command == msg.command:
method = getattr(h, h.commands[command])
method(msg)
hyielder = handler_yielder()
try:
next_handler = hyielder.next()
g = self._pool.spawn(handler_runner, next_handler)
g.link(handler_callback)
except StopIteration:
pass | Pass a received message to the registered handlers.
:param msg: received message
:type msg: :class:`fatbotslim.irc.Message` | Below is the the instruction that describes the task:
### Input:
Pass a received message to the registered handlers.
:param msg: received message
:type msg: :class:`fatbotslim.irc.Message`
### Response:
def _handle(self, msg):
    """
    Pass a received message to the registered handlers.

    Handlers run one at a time on the greenlet pool; the next handler is only
    scheduled (via the link callback) while ``msg.propagate`` is still true,
    so any handler can stop the chain by clearing that flag.

    :param msg: received message
    :type msg: :class:`fatbotslim.irc.Message`
    """
    def handler_yielder():
        # Yields the registered handlers in registration order.
        for handler in self.handlers:
            yield handler

    def handler_callback(_):
        # Called when the previous handler's greenlet finishes: schedule the
        # next handler unless propagation was stopped.
        if msg.propagate:
            try:
                # Fixed: use the builtin next() instead of the Python-2-only
                # generator .next() method (compatible with 2.6+ and 3.x).
                h = next(hyielder)
                g = self._pool.spawn(handler_runner, h)
                g.link(handler_callback)
            except StopIteration:
                pass

    def handler_runner(h):
        # Dispatch the message to the handler method mapped to its command.
        for command in h.commands:
            if command == msg.command:
                method = getattr(h, h.commands[command])
                method(msg)

    hyielder = handler_yielder()
    try:
        next_handler = next(hyielder)
        g = self._pool.spawn(handler_runner, next_handler)
        g.link(handler_callback)
    except StopIteration:
        pass
def shrink(script, iterations=1):
""" Shrink (erode, reduce) the current set of selected faces
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): the number of times to shrink the selection.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ' <filter name="Erode Selection"/>\n'
for _ in range(iterations):
util.write_filter(script, filter_xml)
return None | Shrink (erode, reduce) the current set of selected faces
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): the number of times to shrink the selection.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA | Below is the instruction that describes the task:
### Input:
Shrink (erode, reduce) the current set of selected faces
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): the number of times to shrink the selection.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
### Response:
def shrink(script, iterations=1):
    """ Shrink (erode, reduce) the current set of selected faces

    Applies the MeshLab "Erode Selection" filter the requested number of
    times.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        iterations (int): the number of times to shrink the selection.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ' <filter name="Erode Selection"/>\n'
    count = 0
    while count < iterations:
        util.write_filter(script, filter_xml)
        count += 1
    return None
def scrub(data, units=False):
"""
For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
(and corresponding wavelengths and errors) removed.
"""
units = [i.unit if hasattr(i, 'unit') else 1 for i in data]
data = [np.asarray(i.value if hasattr(i, 'unit') else i, dtype=np.float32) for i in data if
isinstance(i, np.ndarray)]
data = [i[np.where(~np.isinf(data[1]))] for i in data]
data = [i[np.where(np.logical_and(data[1] > 0, ~np.isnan(data[1])))] for i in data]
data = [i[np.unique(data[0], return_index=True)[1]] for i in data]
return [i[np.lexsort([data[0]])] * Q for i, Q in zip(data, units)] if units else [i[np.lexsort([data[0]])] for i in
data] | For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
(and corresponding wavelengths and errors) removed. | Below is the instruction that describes the task:
### Input:
For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
(and corresponding wavelengths and errors) removed.
### Response:
def scrub(data, units=False):
    """
    For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
    (and corresponding wavelengths and errors) removed.
    """
    # NOTE(review): the *units* keyword is immediately shadowed by the list
    # built below, so the flag has no effect -- the units branch of the final
    # expression is taken whenever this list is non-empty. Confirm intent.
    units = [i.unit if hasattr(i, 'unit') else 1 for i in data]
    # Strip pint units (capturing them above) and coerce arrays to float32.
    data = [np.asarray(i.value if hasattr(i, 'unit') else i, dtype=np.float32) for i in data if
            isinstance(i, np.ndarray)]
    # Drop elements where the flux column (data[1]) is infinite.
    data = [i[np.where(~np.isinf(data[1]))] for i in data]
    # Keep only positive, non-NaN flux values.
    data = [i[np.where(np.logical_and(data[1] > 0, ~np.isnan(data[1])))] for i in data]
    # Remove duplicate wavelengths (data[0]), keeping first occurrences.
    data = [i[np.unique(data[0], return_index=True)[1]] for i in data]
    # Sort all arrays by wavelength and re-attach the captured units.
    return [i[np.lexsort([data[0]])] * Q for i, Q in zip(data, units)] if units else [i[np.lexsort([data[0]])] for i in
                                                                                     data]
def _as_dict(self, r):
"""Convert the record to a dictionary using field names as keys."""
d = dict()
for i, f in enumerate(self._field_names):
d[f] = r[i] if i < len(r) else None
return d | Convert the record to a dictionary using field names as keys. | Below is the the instruction that describes the task:
### Input:
Convert the record to a dictionary using field names as keys.
### Response:
def _as_dict(self, r):
"""Convert the record to a dictionary using field names as keys."""
d = dict()
for i, f in enumerate(self._field_names):
d[f] = r[i] if i < len(r) else None
return d |
def init():
'''
Get an sqlite3 connection, and initialize the package database if necessary
'''
if not os.path.exists(__opts__['spm_cache_dir']):
log.debug('Creating SPM cache directory at %s', __opts__['spm_db'])
os.makedirs(__opts__['spm_cache_dir'])
if not os.path.exists(__opts__['spm_db']):
log.debug('Creating new package database at %s', __opts__['spm_db'])
sqlite3.enable_callback_tracebacks(True)
conn = sqlite3.connect(__opts__['spm_db'], isolation_level=None)
try:
conn.execute('SELECT count(*) FROM packages')
except OperationalError:
conn.execute('''CREATE TABLE packages (
package text,
version text,
release text,
installed text,
os text,
os_family text,
dependencies text,
os_dependencies text,
os_family_dependencies text,
summary text,
description text
)''')
try:
conn.execute('SELECT count(*) FROM files')
except OperationalError:
conn.execute('''CREATE TABLE files (
package text,
path text,
size real,
mode text,
sum text,
major text,
minor text,
linkname text,
linkpath text,
uname text,
gname text,
mtime text
)''')
return conn | Get an sqlite3 connection, and initialize the package database if necessary | Below is the the instruction that describes the task:
### Input:
Get an sqlite3 connection, and initialize the package database if necessary
### Response:
def init():
    '''
    Get an sqlite3 connection, and initialize the package database if necessary
    '''
    if not os.path.exists(__opts__['spm_cache_dir']):
        # Bug fix: this previously logged __opts__['spm_db'] even though it is
        # the cache directory that is being created here.
        log.debug('Creating SPM cache directory at %s', __opts__['spm_cache_dir'])
        os.makedirs(__opts__['spm_cache_dir'])
    if not os.path.exists(__opts__['spm_db']):
        log.debug('Creating new package database at %s', __opts__['spm_db'])
    sqlite3.enable_callback_tracebacks(True)
    conn = sqlite3.connect(__opts__['spm_db'], isolation_level=None)
    # Create the schema lazily: probe each table with a SELECT and fall back
    # to CREATE TABLE, which stays compatible with databases from older runs.
    try:
        conn.execute('SELECT count(*) FROM packages')
    except OperationalError:
        conn.execute('''CREATE TABLE packages (
                      package text,
                      version text,
                      release text,
                      installed text,
                      os text,
                      os_family text,
                      dependencies text,
                      os_dependencies text,
                      os_family_dependencies text,
                      summary text,
                      description text
                  )''')
    try:
        conn.execute('SELECT count(*) FROM files')
    except OperationalError:
        conn.execute('''CREATE TABLE files (
                      package text,
                      path text,
                      size real,
                      mode text,
                      sum text,
                      major text,
                      minor text,
                      linkname text,
                      linkpath text,
                      uname text,
                      gname text,
                      mtime text
                  )''')
    return conn
def get_row_dict(self, row_idx):
""" Return a dictionary representation for a matrix row
:param row_idx: which row
:return: a dict of feature keys/values, not including ones which are the default value
"""
try:
row = self._rows[row_idx]
except TypeError:
row = self._rows[self._row_name_idx[row_idx]]
if isinstance(row, dict):
return row
else:
if row_idx not in self._row_memo:
self._row_memo[row_idx] = dict((self._column_name_list[idx], v) for idx, v in enumerate(row) if v != self._default_value)
return self._row_memo[row_idx] | Return a dictionary representation for a matrix row
:param row_idx: which row
:return: a dict of feature keys/values, not including ones which are the default value | Below is the instruction that describes the task:
### Input:
Return a dictionary representation for a matrix row
:param row_idx: which row
:return: a dict of feature keys/values, not including ones which are the default value
### Response:
def get_row_dict(self, row_idx):
    """ Return a dictionary representation for a matrix row

    :param row_idx: which row
    :return: a dict of feature keys/values, not including ones which are the default value
    """
    # Accept either a positional index or a row name: indexing the row list
    # with a name raises TypeError, in which case we resolve via the name map.
    try:
        row = self._rows[row_idx]
    except TypeError:
        row = self._rows[self._row_name_idx[row_idx]]
    # Rows stored as dicts are already in the sparse form we want.
    if isinstance(row, dict):
        return row
    # Dense rows are converted lazily and the result cached per index.
    if row_idx not in self._row_memo:
        names = self._column_name_list
        self._row_memo[row_idx] = {
            names[col]: value
            for col, value in enumerate(row)
            if value != self._default_value
        }
    return self._row_memo[row_idx]
def fdot(x, y):
"""Algorithm 5.10. Dot product algorithm in K-fold working precision,
K >= 3.
"""
xx = x.reshape(-1, x.shape[-1])
yy = y.reshape(y.shape[0], -1)
xx = numpy.ascontiguousarray(xx)
yy = numpy.ascontiguousarray(yy)
r = _accupy.kdot_helper(xx, yy).reshape((-1,) + x.shape[:-1] + y.shape[1:])
return fsum(r) | Algorithm 5.10. Dot product algorithm in K-fold working precision,
K >= 3. | Below is the the instruction that describes the task:
### Input:
Algorithm 5.10. Dot product algorithm in K-fold working precision,
K >= 3.
### Response:
def fdot(x, y):
    """Algorithm 5.10. Dot product algorithm in K-fold working precision,
    K >= 3.
    """
    # Collapse both operands to 2-D matrices sharing the contracted axis.
    lhs = numpy.ascontiguousarray(x.reshape(-1, x.shape[-1]))
    rhs = numpy.ascontiguousarray(y.reshape(y.shape[0], -1))
    # The helper yields the error-free transformation terms; summing them
    # with fsum produces the compensated dot product.
    terms = _accupy.kdot_helper(lhs, rhs).reshape((-1,) + x.shape[:-1] + y.shape[1:])
    return fsum(terms)
def getBriefAndDetailedRST(textRoot, node):
'''
Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this
'''
node_xml_contents = utils.nodeCompoundXMLContents(node)
if not node_xml_contents:
return "", ""
try:
node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Unable to parse [{0}] xml using BeautifulSoup".format(node.name))
try:
# In the file xml definitions, things such as enums or defines are listed inside
# of <sectiondef> tags, which may have some nested <briefdescription> or
# <detaileddescription> tags. So as long as we make sure not to search
# recursively, then the following will extract the file descriptions only
# process the brief description if provided
brief = node_soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
brief_desc = ""
if len(brief) == 1:
brief = brief[0]
# Empty descriptions will usually get parsed as a single newline, which we
# want to ignore ;)
if not brief.get_text().isspace():
brief_desc = convertDescriptionToRST(textRoot, node, brief, None)
# process the detailed description if provided
detailed = node_soup.doxygen.compounddef.find_all("detaileddescription", recursive=False)
detailed_desc = ""
if len(detailed) == 1:
detailed = detailed[0]
if not detailed.get_text().isspace():
detailed_desc = convertDescriptionToRST(textRoot, node, detailed, "Detailed Description")
return brief_desc, detailed_desc
except:
utils.fancyError(
"Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file."
) | Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this | Below is the the instruction that describes the task:
### Input:
Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this
### Response:
def getBriefAndDetailedRST(textRoot, node):
    '''
    Given an input ``node``, return a tuple of strings where the first element of
    the return is the ``brief`` description and the second is the ``detailed``
    description.
    .. todo:: actually document this
    '''
    node_xml_contents = utils.nodeCompoundXMLContents(node)
    if not node_xml_contents:
        return "", ""
    try:
        node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
    # Narrowed from a bare `except`: a bare clause also swallows
    # KeyboardInterrupt / SystemExit, which should always propagate.
    except Exception:
        utils.fancyError("Unable to parse [{0}] xml using BeautifulSoup".format(node.name))
    try:
        # In the file xml definitions, things such as enums or defines are listed inside
        # of <sectiondef> tags, which may have some nested <briefdescription> or
        # <detaileddescription> tags. So as long as we make sure not to search
        # recursively, then the following will extract the file descriptions only
        # process the brief description if provided
        brief = node_soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
        brief_desc = ""
        if len(brief) == 1:
            brief = brief[0]
            # Empty descriptions will usually get parsed as a single newline, which we
            # want to ignore ;)
            if not brief.get_text().isspace():
                brief_desc = convertDescriptionToRST(textRoot, node, brief, None)
        # process the detailed description if provided
        detailed = node_soup.doxygen.compounddef.find_all("detaileddescription", recursive=False)
        detailed_desc = ""
        if len(detailed) == 1:
            detailed = detailed[0]
            if not detailed.get_text().isspace():
                detailed_desc = convertDescriptionToRST(textRoot, node, detailed, "Detailed Description")
        return brief_desc, detailed_desc
    # Narrowed from a bare `except` for the same reason as above.
    except Exception:
        utils.fancyError(
            "Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file."
        )
def store_state(node, reaching, defined, stack):
"""Push the final state of the primal onto the stack for the adjoint.
Python's scoping rules make it possible for variables to not be defined in
certain blocks based on the control flow path taken at runtime. In order to
make sure we don't try to push non-existing variables onto the stack, we
defined these variables explicitly (by assigning `None` to them) at the
beginning of the function.
All the variables that reach the return statement are pushed onto the
stack, and in the adjoint they are popped off in reverse order.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
reaching: The variable definitions that reach the end of the primal.
defined: The variables defined at the end of the primal.
stack: The stack node to use for storing and restoring state.
Returns:
node: A node with the requisite pushes and pops added to make sure that
state is transferred between primal and adjoint split motion calls.
"""
defs = [def_ for def_ in reaching if not isinstance(def_[1], gast.arguments)]
if not len(defs):
return node
reaching, original_defs = zip(*defs)
# Explicitly define variables that might or might not be in scope at the end
assignments = []
for id_ in set(reaching) - defined:
assignments.append(quoting.quote('{} = None'.format(id_)))
# Store variables at the end of the function and restore them
store = []
load = []
for id_, def_ in zip(reaching, original_defs):
# If the original definition of a value that we need to store
# was an initialization as a stack, then we should be using `push_stack`
# to store its state, and `pop_stack` to restore it. This allows
# us to avoid doing any `add_grad` calls on the stack, which result
# in type errors in unoptimized mode (they are usually elided
# after calling `dead_code_elimination`).
if isinstance(
def_, gast.Assign) and 'tangent.Stack()' in quoting.unquote(def_.value):
push, pop, op_id = get_push_pop_stack()
else:
push, pop, op_id = get_push_pop()
store.append(
template.replace(
'push(_stack, val, op_id)',
push=push,
val=id_,
_stack=stack,
op_id=op_id))
load.append(
template.replace(
'val = pop(_stack, op_id)',
pop=pop,
val=id_,
_stack=stack,
op_id=op_id))
body, return_ = node.body[0].body[:-1], node.body[0].body[-1]
node.body[0].body = assignments + body + store + [return_]
node.body[1].body = load[::-1] + node.body[1].body
return node | Push the final state of the primal onto the stack for the adjoint.
Python's scoping rules make it possible for variables to not be defined in
certain blocks based on the control flow path taken at runtime. In order to
make sure we don't try to push non-existing variables onto the stack, we
defined these variables explicitly (by assigning `None` to them) at the
beginning of the function.
All the variables that reach the return statement are pushed onto the
stack, and in the adjoint they are popped off in reverse order.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
reaching: The variable definitions that reach the end of the primal.
defined: The variables defined at the end of the primal.
stack: The stack node to use for storing and restoring state.
Returns:
node: A node with the requisite pushes and pops added to make sure that
state is transferred between primal and adjoint split motion calls. | Below is the the instruction that describes the task:
### Input:
Push the final state of the primal onto the stack for the adjoint.
Python's scoping rules make it possible for variables to not be defined in
certain blocks based on the control flow path taken at runtime. In order to
make sure we don't try to push non-existing variables onto the stack, we
defined these variables explicitly (by assigning `None` to them) at the
beginning of the function.
All the variables that reach the return statement are pushed onto the
stack, and in the adjoint they are popped off in reverse order.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
reaching: The variable definitions that reach the end of the primal.
defined: The variables defined at the end of the primal.
stack: The stack node to use for storing and restoring state.
Returns:
node: A node with the requisite pushes and pops added to make sure that
state is transferred between primal and adjoint split motion calls.
### Response:
def store_state(node, reaching, defined, stack):
  """Push the final state of the primal onto the stack for the adjoint.
  Python's scoping rules make it possible for variables to not be defined in
  certain blocks based on the control flow path taken at runtime. In order to
  make sure we don't try to push non-existing variables onto the stack, we
  defined these variables explicitly (by assigning `None` to them) at the
  beginning of the function.
  All the variables that reach the return statement are pushed onto the
  stack, and in the adjoint they are popped off in reverse order.
  Args:
    node: A module with the primal and adjoint function definitions as returned
      by `reverse_ad`.
    reaching: The variable definitions that reach the end of the primal.
    defined: The variables defined at the end of the primal.
    stack: The stack node to use for storing and restoring state.
  Returns:
    node: A node with the requisite pushes and pops added to make sure that
      state is transferred between primal and adjoint split motion calls.
  """
  # Function arguments are rebound by the adjoint's own signature, so only
  # non-argument definitions need to be saved.
  defs = [def_ for def_ in reaching if not isinstance(def_[1], gast.arguments)]
  if not len(defs):
    return node
  reaching, original_defs = zip(*defs)
  # Explicitly define variables that might or might not be in scope at the end
  assignments = []
  for id_ in set(reaching) - defined:
    assignments.append(quoting.quote('{} = None'.format(id_)))
  # Store variables at the end of the function and restore them
  store = []
  load = []
  for id_, def_ in zip(reaching, original_defs):
    # If the original definition of a value that we need to store
    # was an initialization as a stack, then we should be using `push_stack`
    # to store its state, and `pop_stack` to restore it. This allows
    # us to avoid doing any `add_grad` calls on the stack, which result
    # in type errors in unoptimized mode (they are usually elided
    # after calling `dead_code_elimination`).
    if isinstance(
        def_, gast.Assign) and 'tangent.Stack()' in quoting.unquote(def_.value):
      push, pop, op_id = get_push_pop_stack()
    else:
      push, pop, op_id = get_push_pop()
    store.append(
        template.replace(
            'push(_stack, val, op_id)',
            push=push,
            val=id_,
            _stack=stack,
            op_id=op_id))
    load.append(
        template.replace(
            'val = pop(_stack, op_id)',
            pop=pop,
            val=id_,
            _stack=stack,
            op_id=op_id))
  # node.body[0] is the primal, node.body[1] the adjoint: splice the stores
  # just before the primal's return statement and prepend the loads --
  # reversed, so pops mirror push order -- to the adjoint body.
  body, return_ = node.body[0].body[:-1], node.body[0].body[-1]
  node.body[0].body = assignments + body + store + [return_]
  node.body[1].body = load[::-1] + node.body[1].body
  return node
def suspend_to_background(self, suspend_group=True):
"""
(Not thread safe -- to be called from inside the key bindings.)
Suspend process.
:param suspend_group: When true, suspend the whole process group.
(This is the default, and probably what you want.)
"""
# Only suspend when the opperating system supports it.
# (Not on Windows.)
if hasattr(signal, 'SIGTSTP'):
def run():
# Send `SIGSTP` to own process.
# This will cause it to suspend.
# Usually we want the whole process group to be suspended. This
# handles the case when input is piped from another process.
if suspend_group:
os.kill(0, signal.SIGTSTP)
else:
os.kill(os.getpid(), signal.SIGTSTP)
self.run_in_terminal(run) | (Not thread safe -- to be called from inside the key bindings.)
Suspend process.
:param suspend_group: When true, suspend the whole process group.
(This is the default, and probably what you want.) | Below is the the instruction that describes the task:
### Input:
(Not thread safe -- to be called from inside the key bindings.)
Suspend process.
:param suspend_group: When true, suspend the whole process group.
(This is the default, and probably what you want.)
### Response:
def suspend_to_background(self, suspend_group=True):
    """
    (Not thread safe -- to be called from inside the key bindings.)
    Suspend process.
    :param suspend_group: When true, suspend the whole process group.
        (This is the default, and probably what you want.)
    """
    # SIGTSTP is not available on every operating system (e.g. Windows);
    # in that case suspending is silently skipped.
    if not hasattr(signal, 'SIGTSTP'):
        return

    def run():
        # Signalling pid 0 delivers SIGTSTP to the whole process group,
        # which also covers processes piping input into us; otherwise only
        # this process is suspended.
        pid = 0 if suspend_group else os.getpid()
        os.kill(pid, signal.SIGTSTP)

    self.run_in_terminal(run)
def load_hdf(cls, filename, path='', name=None):
"""
A class method to load a saved StarModel from an HDF5 file.
File must have been created by a call to :func:`StarModel.save_hdf`.
:param filename:
H5 file to load.
:param path: (optional)
Path within HDF file.
:return:
:class:`StarModel` object.
"""
if not os.path.exists(filename):
raise IOError('{} does not exist.'.format(filename))
store = pd.HDFStore(filename)
try:
samples = store[path+'/samples']
attrs = store.get_storer(path+'/samples').attrs
except:
store.close()
raise
try:
ic = attrs.ic_type(attrs.ic_bands)
except AttributeError:
ic = attrs.ic_type
use_emcee = attrs.use_emcee
mnest = True
try:
basename = attrs._mnest_basename
except AttributeError:
mnest = False
bounds = attrs._bounds
priors = attrs._priors
if name is None:
try:
name = attrs.name
except:
name = ''
store.close()
obs = ObservationTree.load_hdf(filename, path+'/obs', ic=ic)
mod = cls(ic, obs=obs,
use_emcee=use_emcee, name=name)
mod._samples = samples
if mnest:
mod._mnest_basename = basename
mod._directory = os.path.dirname(filename)
return mod | A class method to load a saved StarModel from an HDF5 file.
File must have been created by a call to :func:`StarModel.save_hdf`.
:param filename:
H5 file to load.
:param path: (optional)
Path within HDF file.
:return:
:class:`StarModel` object. | Below is the the instruction that describes the task:
### Input:
A class method to load a saved StarModel from an HDF5 file.
File must have been created by a call to :func:`StarModel.save_hdf`.
:param filename:
H5 file to load.
:param path: (optional)
Path within HDF file.
:return:
:class:`StarModel` object.
### Response:
def load_hdf(cls, filename, path='', name=None):
    """
    A class method to load a saved StarModel from an HDF5 file.
    File must have been created by a call to :func:`StarModel.save_hdf`.
    :param filename:
        H5 file to load.
    :param path: (optional)
        Path within HDF file.
    :param name: (optional)
        Name for the model; falls back to the stored name, then ''.
    :return:
        :class:`StarModel` object.
    """
    if not os.path.exists(filename):
        raise IOError('{} does not exist.'.format(filename))
    store = pd.HDFStore(filename)
    try:
        samples = store[path+'/samples']
        attrs = store.get_storer(path+'/samples').attrs
    except:
        # Release the HDF store handle before propagating any failure.
        store.close()
        raise
    # ic_type may be a class (instantiate with the stored bands) or an
    # already-constructed object saved by other code paths.
    try:
        ic = attrs.ic_type(attrs.ic_bands)
    except AttributeError:
        ic = attrs.ic_type
    use_emcee = attrs.use_emcee
    mnest = True
    try:
        basename = attrs._mnest_basename
    except AttributeError:
        # No MultiNest metadata stored: sampled without MultiNest.
        mnest = False
    # NOTE(review): bounds/priors are read here but never used below; this
    # may be dead code or an implicit presence check -- confirm upstream.
    bounds = attrs._bounds
    priors = attrs._priors
    if name is None:
        try:
            name = attrs.name
        except:
            name = ''
    store.close()
    obs = ObservationTree.load_hdf(filename, path+'/obs', ic=ic)
    mod = cls(ic, obs=obs,
              use_emcee=use_emcee, name=name)
    mod._samples = samples
    if mnest:
        mod._mnest_basename = basename
    mod._directory = os.path.dirname(filename)
    return mod
async def profile(self):
"""|coro|
Gets the user's profile.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to fetch profiles.
HTTPException
Fetching the profile failed.
Returns
--------
:class:`Profile`
The profile of the user.
"""
state = self._state
data = await state.http.get_user_profile(self.id)
def transform(d):
return state._get_guild(int(d['id']))
since = data.get('premium_since')
mutual_guilds = list(filter(None, map(transform, data.get('mutual_guilds', []))))
return Profile(flags=data['user'].get('flags', 0),
premium_since=parse_time(since),
mutual_guilds=mutual_guilds,
user=self,
connected_accounts=data['connected_accounts']) | |coro|
Gets the user's profile.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to fetch profiles.
HTTPException
Fetching the profile failed.
Returns
--------
:class:`Profile`
The profile of the user. | Below is the the instruction that describes the task:
### Input:
|coro|
Gets the user's profile.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to fetch profiles.
HTTPException
Fetching the profile failed.
Returns
--------
:class:`Profile`
The profile of the user.
### Response:
async def profile(self):
    """|coro|
    Gets the user's profile.
    .. note::
        This only applies to non-bot accounts.
    Raises
    -------
    Forbidden
        Not allowed to fetch profiles.
    HTTPException
        Fetching the profile failed.
    Returns
    --------
    :class:`Profile`
        The profile of the user.
    """
    state = self._state
    data = await state.http.get_user_profile(self.id)

    def resolve_guild(payload):
        # Returns None for guilds this client cannot see.
        return state._get_guild(int(payload['id']))

    guilds = [g for g in map(resolve_guild, data.get('mutual_guilds', [])) if g]
    return Profile(flags=data['user'].get('flags', 0),
                   premium_since=parse_time(data.get('premium_since')),
                   mutual_guilds=guilds,
                   user=self,
                   connected_accounts=data['connected_accounts'])
def fire_mixing(ys=None, FLs=None): # pragma: no cover
'''
Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
>>> fire_mixing(ys=normalize([0.0024, 0.0061, 0.0015]), FLs=[.012, .053, .031])
0.02751172136637643
>>> fire_mixing(ys=normalize([0.0024, 0.0061, 0.0015]), FLs=[.075, .15, .32])
0.12927551844869378
'''
return 1./sum([yi/FLi for yi, FLi in zip(ys, FLs)]) | Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
>>> fire_mixing(ys=normalize([0.0024, 0.0061, 0.0015]), FLs=[.012, .053, .031])
0.02751172136637643
>>> fire_mixing(ys=normalize([0.0024, 0.0061, 0.0015]), FLs=[.075, .15, .32])
0.12927551844869378 | Below is the the instruction that describes the task:
### Input:
Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
>>> fire_mixing(ys=normalize([0.0024, 0.0061, 0.0015]), FLs=[.012, .053, .031])
0.02751172136637643
>>> fire_mixing(ys=normalize([0.0024, 0.0061, 0.0015]), FLs=[.075, .15, .32])
0.12927551844869378
### Response:
def fire_mixing(ys=None, FLs=None): # pragma: no cover
    '''
    Mixing rule for flammability limits of the form 1/sum(y_i/FL_i),
    given mole fractions `ys` and per-component limits `FLs`.

    Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
    Fundamentals with Applications. 2E. Upper Saddle River, N.J:
    Prentice Hall, 2001.
    '''
    total = 0.0
    for yi, FLi in zip(ys, FLs):
        total += yi/FLi
    return 1./total
def create_factor(self, tool, sources, sink, alignment_node=None):
    """
    Creates a factor. Instantiates a single tool for all of the plates, and connects the source and sink nodes with
    that tool.
    Note that the tool parameters these are currently fixed over a plate. For parameters that vary over a plate,
    an extra input stream should be used
    :param alignment_node:
    :param tool: The tool to use. This is either an instantiated Tool object or a dict with "name" and "parameters"
    :param sources: The source nodes
    :param sink: The sink node
    :return: The factor object
    :type tool: Tool | dict
    :type sources: list[Node] | tuple[Node] | None
    :type sink: Node
    :type alignment_node: Node | None
    :rtype: Factor
    """
    # if isinstance(tool, dict):
    #     tool = self.channels.get_tool(**tool)
    if not isinstance(tool, BaseTool):
        raise ValueError("Expected Tool, got {}".format(type(tool)))
    if sink.plates:
        # Aggregate/selector tools have special plate rules: the last source
        # carries the plates; an optional first source must be plate-less.
        if isinstance(tool, (AggregateTool, SelectorTool)):
            if not sources or len(sources) > 2:
                raise FactorDefinitionError("{} requires one or two source nodes".format(type(tool)))
            if len(sources) == 2 and sources[0].plates:
                raise FactorDefinitionError("{} requires the first source to have no plates".format(type(tool)))
            if not sources[-1].plates:
                raise FactorDefinitionError("{} source must live on a plate".format(type(tool)))
            if len(sources[-1].plates) != 1:
                # Make sure that there are exactly two plates that don't match: one from each side
                diff, counts, is_sub_plate = sources[-1].difference(sink)
                if counts == [1, 1]:
                    # TODO: This sub-plate selection is deprecated
                    if not is_sub_plate:
                        raise IncompatiblePlatesError("Sink plate is not a simplification of source plate")
                else:
                    # If there are two plates, and one (or both) of them is a root plate, than assume that we are
                    # simplifying by removing that plate
                    if next(p.is_root for p in sources[-1].plates):
                        if len(sink.plates) != 1:
                            raise IncompatiblePlatesError(
                                "Multiple sink plates defined. "
                                "Did you intend a simplification of 2 source plates to a sink plate?")
                        if sink.plates[0] not in sources[-1].plates:
                            raise IncompatiblePlatesError(
                                "Source and sink plates do not match. "
                                "Did you intend a simplification of 2 source plates to a sink plate?")
                    else:
                        if len(sink.plates) > 1:
                            raise NotImplementedError
                        source_plates = sources[-1].plates
                        sink_plate = sink.plates[0]
                        if len(source_plates) != 2:
                            raise IncompatiblePlatesError(
                                "Sink plate is not a simplification of source plate (source must be 2 plates)")
                        plate_diff = set(source_plates).difference({sink_plate, })
                        if len(plate_diff) != 1:
                            raise IncompatiblePlatesError(
                                "Sink plate is not a simplification of source plate "
                                "(the number of plates in the set difference of source and sink is not 1")
                        plate_diff = list(plate_diff)[0]
                        if plate_diff.parent != sink_plate.parent:
                            raise IncompatiblePlatesError(
                                "Sink plate is not a simplification of source plate (parents do not match)")
            else:
                # Check if the parent plate is valid instead
                source_plate = sources[-1].plates[0]
                sink_plate = sink.plates[0]
                error = self.check_plate_compatibility(tool, source_plate, sink_plate)
                if error is not None:
                    raise IncompatiblePlatesError(error)
    else:
        if sources:
            # Check that the plates are compatible
            source_plates = list(itertools.chain(*(source.plate_ids for source in sources)))
            for p in sink.plate_ids:
                if p not in set(source_plates):
                    raise IncompatiblePlatesError("{} not in source plates".format(p))
            for p in source_plates:
                if p not in set(sink.plate_ids):
                    raise IncompatiblePlatesError("{} not in sink plates".format(p))
            plates = sink.plates
        else:
            plates = None
    # NOTE(review): no assignment to `plates` is visible on the sink.plates
    # branch above, so Factor(...) below would see an unbound local on that
    # path -- verify nesting/content against the upstream source.
    factor = Factor(tool=tool, source_nodes=sources,
                    sink_node=sink, alignment_node=alignment_node,
                    plates=plates)
    self._add_factor(factor)
return factor | Creates a factor. Instantiates a single tool for all of the plates, and connects the source and sink nodes with
that tool.
Note that the tool parameters these are currently fixed over a plate. For parameters that vary over a plate,
an extra input stream should be used
:param alignment_node:
:param tool: The tool to use. This is either an instantiated Tool object or a dict with "name" and "parameters"
:param sources: The source nodes
:param sink: The sink node
:return: The factor object
:type tool: Tool | dict
:type sources: list[Node] | tuple[Node] | None
:type sink: Node
:type alignment_node: Node | None
:rtype: Factor | Below is the the instruction that describes the task:
### Input:
Creates a factor. Instantiates a single tool for all of the plates, and connects the source and sink nodes with
that tool.
Note that the tool parameters these are currently fixed over a plate. For parameters that vary over a plate,
an extra input stream should be used
:param alignment_node:
:param tool: The tool to use. This is either an instantiated Tool object or a dict with "name" and "parameters"
:param sources: The source nodes
:param sink: The sink node
:return: The factor object
:type tool: Tool | dict
:type sources: list[Node] | tuple[Node] | None
:type sink: Node
:type alignment_node: Node | None
:rtype: Factor
### Response:
def create_factor(self, tool, sources, sink, alignment_node=None):
    """
    Creates a factor. Instantiates a single tool for all of the plates, and connects the source and sink nodes with
    that tool.
    Note that the tool parameters these are currently fixed over a plate. For parameters that vary over a plate,
    an extra input stream should be used
    :param alignment_node:
    :param tool: The tool to use. This is either an instantiated Tool object or a dict with "name" and "parameters"
    :param sources: The source nodes
    :param sink: The sink node
    :return: The factor object
    :type tool: Tool | dict
    :type sources: list[Node] | tuple[Node] | None
    :type sink: Node
    :type alignment_node: Node | None
    :rtype: Factor
    """
    # if isinstance(tool, dict):
    # tool = self.channels.get_tool(**tool)
    if not isinstance(tool, BaseTool):
        raise ValueError("Expected Tool, got {}".format(type(tool)))
    if sink.plates:
        # Aggregate/selector tools collapse a plate, so they get dedicated
        # plate-compatibility validation; every other tool is checked via
        # check_plate_compatibility below.
        if isinstance(tool, (AggregateTool, SelectorTool)):
            if not sources or len(sources) > 2:
                raise FactorDefinitionError("{} requires one or two source nodes".format(type(tool)))
            if len(sources) == 2 and sources[0].plates:
                raise FactorDefinitionError("{} requires the first source to have no plates".format(type(tool)))
            if not sources[-1].plates:
                raise FactorDefinitionError("{} source must live on a plate".format(type(tool)))
            if len(sources[-1].plates) != 1:
                # Make sure that there are exactly two plates that don't match: one from each side
                diff, counts, is_sub_plate = sources[-1].difference(sink)
                if counts == [1, 1]:
                    # TODO: This sub-plate selection is deprecated
                    if not is_sub_plate:
                        raise IncompatiblePlatesError("Sink plate is not a simplification of source plate")
                else:
                    # If there are two plates, and one (or both) of them is a root plate, than assume that we are
                    # simplifying by removing that plate
                    # NOTE(review): next(...) only inspects the first plate's
                    # is_root flag, despite the comment above -- confirm intent.
                    if next(p.is_root for p in sources[-1].plates):
                        if len(sink.plates) != 1:
                            raise IncompatiblePlatesError(
                                "Multiple sink plates defined. "
                                "Did you intend a simplification of 2 source plates to a sink plate?")
                        if sink.plates[0] not in sources[-1].plates:
                            raise IncompatiblePlatesError(
                                "Source and sink plates do not match. "
                                "Did you intend a simplification of 2 source plates to a sink plate?")
                    else:
                        if len(sink.plates) > 1:
                            raise NotImplementedError
                        source_plates = sources[-1].plates
                        sink_plate = sink.plates[0]
                        if len(source_plates) != 2:
                            raise IncompatiblePlatesError(
                                "Sink plate is not a simplification of source plate (source must be 2 plates)")
                        plate_diff = set(source_plates).difference({sink_plate, })
                        if len(plate_diff) != 1:
                            raise IncompatiblePlatesError(
                                "Sink plate is not a simplification of source plate "
                                "(the number of plates in the set difference of source and sink is not 1")
                        plate_diff = list(plate_diff)[0]
                        # The removed plate must share a parent with the sink
                        # plate for this to be a legal simplification.
                        if plate_diff.parent != sink_plate.parent:
                            raise IncompatiblePlatesError(
                                "Sink plate is not a simplification of source plate (parents do not match)")
        else:
            # Check if the parent plate is valid instead
            source_plate = sources[-1].plates[0]
            sink_plate = sink.plates[0]
            error = self.check_plate_compatibility(tool, source_plate, sink_plate)
            if error is not None:
                raise IncompatiblePlatesError(error)
        # NOTE(review): on this (sink.plates truthy) branch, the local name
        # 'plates' is never assigned, so the Factor(...) call below will raise
        # NameError -- likely a dropped 'plates = sink.plates'; confirm
        # against the upstream source before relying on this path.
    else:
        if sources:
            # Check that the plates are compatible
            source_plates = list(itertools.chain(*(source.plate_ids for source in sources)))
            for p in sink.plate_ids:
                if p not in set(source_plates):
                    raise IncompatiblePlatesError("{} not in source plates".format(p))
            for p in source_plates:
                if p not in set(sink.plate_ids):
                    raise IncompatiblePlatesError("{} not in sink plates".format(p))
            plates = sink.plates
        else:
            plates = None
    factor = Factor(tool=tool, source_nodes=sources,
                    sink_node=sink, alignment_node=alignment_node,
                    plates=plates)
    self._add_factor(factor)
    return factor
def _set_pwm(self, raw_values):
"""
Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-4095).
"""
for i in range(len(self._pins)):
self._device.set_pwm(self._pins[i], 0, raw_values[i]) | Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-4095). | Below is the instruction that describes the task:
### Input:
Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-4095).
### Response:
def _set_pwm(self, raw_values):
"""
Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-4095).
"""
for i in range(len(self._pins)):
self._device.set_pwm(self._pins[i], 0, raw_values[i]) |
def ratio_and_percentage_with_time_remaining(current, total, time_remaining):
"""Returns the progress ratio, percentage and time remaining."""
return "{} / {} ({}% completed) (~{} remaining)".format(
current,
total,
int(current / total * 100),
time_remaining) | Returns the progress ratio, percentage and time remaining. | Below is the the instruction that describes the task:
### Input:
Returns the progress ratio, percentage and time remaining.
### Response:
def ratio_and_percentage_with_time_remaining(current, total, time_remaining):
    """Format progress as "current / total (pct% completed) (~time remaining)"."""
    percent_done = int(current / total * 100)
    return "%s / %s (%s%% completed) (~%s remaining)" % (
        current, total, percent_done, time_remaining)
def _set_trigger_mode(self, v, load=False):
"""
Setter method for trigger_mode, mapped from YANG variable /rbridge_id/event_handler/activate/name/trigger_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_trigger_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trigger_mode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'each-instance': {'value': 1}, u'only-once': {'value': 3}, u'on-first-instance': {'value': 2}},), default=unicode("each-instance"), is_leaf=True, yang_name="trigger-mode", rest_name="trigger-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Trigger-mode controls how the action is launched with the configured event trigger (default = each-instance).'}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trigger_mode must be of a type compatible with enumeration""",
'defined-type': "brocade-event-handler:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'each-instance': {'value': 1}, u'only-once': {'value': 3}, u'on-first-instance': {'value': 2}},), default=unicode("each-instance"), is_leaf=True, yang_name="trigger-mode", rest_name="trigger-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Trigger-mode controls how the action is launched with the configured event trigger (default = each-instance).'}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='enumeration', is_config=True)""",
})
self.__trigger_mode = t
if hasattr(self, '_set'):
self._set() | Setter method for trigger_mode, mapped from YANG variable /rbridge_id/event_handler/activate/name/trigger_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_trigger_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trigger_mode() directly. | Below is the instruction that describes the task:
### Input:
Setter method for trigger_mode, mapped from YANG variable /rbridge_id/event_handler/activate/name/trigger_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_trigger_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trigger_mode() directly.
### Response:
def _set_trigger_mode(self, v, load=False):
  """
  Setter method for trigger_mode, mapped from YANG variable /rbridge_id/event_handler/activate/name/trigger_mode (enumeration)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_trigger_mode is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_trigger_mode() directly.
  """
  # Union-typed values carry a _utype constructor; convert to the concrete
  # member type before validating.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  # Wrap the value in the generated YANG type: an enumeration restricted to
  # each-instance / only-once / on-first-instance (default "each-instance").
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'each-instance': {'value': 1}, u'only-once': {'value': 3}, u'on-first-instance': {'value': 2}},), default=unicode("each-instance"), is_leaf=True, yang_name="trigger-mode", rest_name="trigger-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Trigger-mode controls how the action is launched with the configured event trigger (default = each-instance).'}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='enumeration', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """trigger_mode must be of a type compatible with enumeration""",
      'defined-type': "brocade-event-handler:enumeration",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'each-instance': {'value': 1}, u'only-once': {'value': 3}, u'on-first-instance': {'value': 2}},), default=unicode("each-instance"), is_leaf=True, yang_name="trigger-mode", rest_name="trigger-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Trigger-mode controls how the action is launched with the configured event trigger (default = each-instance).'}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='enumeration', is_config=True)""",
      })
  self.__trigger_mode = t
  # Notify the parent object (if it supports change notification).
  if hasattr(self, '_set'):
    self._set()
def _deconstruct_url(self, url: str) -> List[str]:
"""
Split a regular URL into parts
:param url: A normalized URL
:return: Parts of the URL
:raises kua.routes.RouteError: \
If the depth of the URL exceeds\
the max depth of the deepest\
registered pattern
:private:
"""
parts = url.split('/', self._max_depth + 1)
if depth_of(parts) > self._max_depth:
raise RouteError('No match')
return parts | Split a regular URL into parts
:param url: A normalized URL
:return: Parts of the URL
:raises kua.routes.RouteError: \
If the depth of the URL exceeds\
the max depth of the deepest\
registered pattern
:private: | Below is the instruction that describes the task:
### Input:
Split a regular URL into parts
:param url: A normalized URL
:return: Parts of the URL
:raises kua.routes.RouteError: \
If the depth of the URL exceeds\
the max depth of the deepest\
registered pattern
:private:
### Response:
def _deconstruct_url(self, url: str) -> List[str]:
    """
    Break a normalized URL into its slash-separated segments.

    :param url: A normalized URL
    :return: Parts of the URL
    :raises kua.routes.RouteError: \
        If the depth of the URL exceeds\
        the max depth of the deepest\
        registered pattern
    :private:
    """
    segments = url.split('/', self._max_depth + 1)

    if depth_of(segments) > self._max_depth:
        raise RouteError('No match')

    return segments
def clearScreen(cls):
"""Clear the screen"""
if "win32" in sys.platform:
os.system('cls')
elif "linux" in sys.platform:
os.system('clear')
elif 'darwin' in sys.platform:
os.system('clear')
else:
cit.err("No clearScreen for " + sys.platform) | Clear the screen | Below is the the instruction that describes the task:
### Input:
Clear the screen
### Response:
def clearScreen(cls):
    """Clear the terminal screen using the platform's native command.

    Windows uses ``cls``; Linux and macOS both use ``clear``.  On any other
    platform an error is reported through ``cit.err`` instead of raising.
    """
    if "win32" in sys.platform:
        os.system('cls')
    elif "linux" in sys.platform or "darwin" in sys.platform:
        # Linux and macOS branches were duplicated in the original; both
        # run the same command, so they are merged here.
        os.system('clear')
    else:
        cit.err("No clearScreen for " + sys.platform)
def save_freesurfer_geometry(filename, obj, volume_info=None, create_stamp=None):
'''
save_mgh(filename, obj) saves the given object to the given filename in the mgh format and
returns the filename.
All options that can be given to the to_mgh function can also be passed to this function; they
are used to modify the object prior to exporting it.
'''
obj = geo.to_mesh(obj)
fsio.write_geometry(filename, obj.coordinates.T, obj.tess.faces.T,
volume_info=volume_info, create_stamp=create_stamp)
return filename | save_mgh(filename, obj) saves the given object to the given filename in the mgh format and
returns the filename.
All options that can be given to the to_mgh function can also be passed to this function; they
are used to modify the object prior to exporting it. | Below is the the instruction that describes the task:
### Input:
save_mgh(filename, obj) saves the given object to the given filename in the mgh format and
returns the filename.
All options that can be given to the to_mgh function can also be passed to this function; they
are used to modify the object prior to exporting it.
### Response:
def save_freesurfer_geometry(filename, obj, volume_info=None, create_stamp=None):
    '''
    save_freesurfer_geometry(filename, obj) saves the given object to the given filename in
    FreeSurfer geometry (surface) format and returns the filename.

    The object is first coerced to a mesh via geo.to_mesh, so anything accepted by to_mesh
    may be passed.  The optional volume_info and create_stamp arguments are forwarded
    unchanged to fsio.write_geometry (presumably nibabel.freesurfer.io.write_geometry --
    confirm against the fsio import).

    NOTE(review): the original docstring described save_mgh / the mgh format and appeared
    to be copy-pasted from a sibling function; it has been corrected here.
    '''
    obj = geo.to_mesh(obj)
    # The transposes suggest coordinates/faces are stored column-major locally while
    # write_geometry wants one row per vertex/face -- confirm the local convention.
    fsio.write_geometry(filename, obj.coordinates.T, obj.tess.faces.T,
                        volume_info=volume_info, create_stamp=create_stamp)
    return filename
def compile_graphql_to_gremlin(schema, graphql_string, type_equivalence_hints=None):
"""Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_string: the GraphQL query to compile to Gremlin, as a string
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object
"""
lowering_func = ir_lowering_gremlin.lower_ir
query_emitter_func = emit_gremlin.emit_code_from_ir
return _compile_graphql_generic(
GREMLIN_LANGUAGE, lowering_func, query_emitter_func,
schema, graphql_string, type_equivalence_hints, None) | Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_string: the GraphQL query to compile to Gremlin, as a string
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object | Below is the the instruction that describes the task:
### Input:
Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_string: the GraphQL query to compile to Gremlin, as a string
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object
### Response:
def compile_graphql_to_gremlin(schema, graphql_string, type_equivalence_hints=None):
    """Compile a GraphQL query against the schema into a Gremlin query plus metadata.

    Args:
        schema: GraphQL schema object describing the schema of the graph to be queried
        graphql_string: the GraphQL query to compile to Gremlin, as a string
        type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
                                Used as a workaround for GraphQL's lack of support for
                                inheritance across "types" (i.e. non-interfaces), as well as a
                                workaround for Gremlin's total lack of inheritance-awareness.
                                The key-value pairs in the dict specify that the "key" type
                                is equivalent to the "value" type, i.e. that the GraphQL type or
                                interface in the key is the most-derived common supertype
                                of every GraphQL type in the "value" GraphQL union.
                                Recursive expansion of type equivalence hints is not performed,
                                and only type-level correctness of this argument is enforced.
                                See README.md for more details on everything this parameter does.
                                *****
                                Be very careful with this option, as bad input here will
                                lead to incorrect output queries being generated.
                                *****

    Returns:
        a CompilationResult object
    """
    ir_lowering = ir_lowering_gremlin.lower_ir
    gremlin_emitter = emit_gremlin.emit_code_from_ir
    return _compile_graphql_generic(
        GREMLIN_LANGUAGE, ir_lowering, gremlin_emitter,
        schema, graphql_string, type_equivalence_hints, None)
schema, graphql_string, type_equivalence_hints, None) |
def nms(dets, thresh):
"""
greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
:param dets: [[x1, y1, x2, y2 score]]
:param thresh: retain overlap < thresh
:return: indexes to keep
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep | greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
:param dets: [[x1, y1, x2, y2 score]]
:param thresh: retain overlap < thresh
:return: indexes to keep | Below is the instruction that describes the task:
### Input:
greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
:param dets: [[x1, y1, x2, y2 score]]
:param thresh: retain overlap < thresh
:return: indexes to keep
### Response:
def nms(dets, thresh):
    """
    Greedy non-maximum suppression: repeatedly keep the highest-scoring box
    and drop every remaining box whose overlap with it exceeds the threshold.

    :param dets: [[x1, y1, x2, y2, score]] array of detections
    :param thresh: boxes with overlap <= thresh survive each round
    :return: indexes to keep
    """
    x1, y1 = dets[:, 0], dets[:, 1]
    x2, y2 = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]

    # Box areas use the +1 pixel-inclusive convention.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]

        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih

        overlap = inter / (areas[best] + areas[rest] - inter)
        order = rest[np.where(overlap <= thresh)[0]]
    return keep
def external_dependencies(self):
"""
Return all the external images this Dockerfile will depend on
These are images from self.dependent_images that aren't defined in this configuration.
"""
found = []
for dep in self.dependent_images:
if isinstance(dep, six.string_types):
if dep not in found:
yield dep
found.append(dep) | Return all the external images this Dockerfile will depend on
These are images from self.dependent_images that aren't defined in this configuration. | Below is the the instruction that describes the task:
### Input:
Return all the external images this Dockerfile will depend on
These are images from self.dependent_images that aren't defined in this configuration.
### Response:
def external_dependencies(self):
    """
    Return all the external images this Dockerfile will depend on

    These are images from self.dependent_images that aren't defined in this
    configuration.  Only string entries are considered external; each name is
    yielded at most once, in first-encountered order.
    """
    # A set gives O(1) de-duplication; the original list membership test
    # was O(n) per dependency.
    seen = set()
    for dep in self.dependent_images:
        if isinstance(dep, six.string_types) and dep not in seen:
            seen.add(dep)
            yield dep
def var(nums, mean_func=amean, ddof=0):
r"""Calculate the variance.
The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
mean :math:`\mu` and population :math:`N` is:
:math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
Cf. https://en.wikipedia.org/wiki/Variance
Parameters
----------
nums : list
A series of numbers
mean_func : function
A mean function (amean by default)
ddof : int
The degrees of freedom (0 by default)
Returns
-------
float
The variance of the values in the series
Examples
--------
>>> var([1, 1, 1, 1])
0.0
>>> var([1, 2, 3, 4])
1.25
>>> round(var([1, 2, 3, 4], ddof=1), 12)
1.666666666667
"""
x_bar = mean_func(nums)
return sum((x - x_bar) ** 2 for x in nums) / (len(nums) - ddof) | r"""Calculate the variance.
The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
mean :math:`\mu` and population :math:`N` is:
:math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
Cf. https://en.wikipedia.org/wiki/Variance
Parameters
----------
nums : list
A series of numbers
mean_func : function
A mean function (amean by default)
ddof : int
The degrees of freedom (0 by default)
Returns
-------
float
The variance of the values in the series
Examples
--------
>>> var([1, 1, 1, 1])
0.0
>>> var([1, 2, 3, 4])
1.25
>>> round(var([1, 2, 3, 4], ddof=1), 12)
1.666666666667 | Below is the the instruction that describes the task:
### Input:
r"""Calculate the variance.
The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
mean :math:`\mu` and population :math:`N` is:
:math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
Cf. https://en.wikipedia.org/wiki/Variance
Parameters
----------
nums : list
A series of numbers
mean_func : function
A mean function (amean by default)
ddof : int
The degrees of freedom (0 by default)
Returns
-------
float
The variance of the values in the series
Examples
--------
>>> var([1, 1, 1, 1])
0.0
>>> var([1, 2, 3, 4])
1.25
>>> round(var([1, 2, 3, 4], ddof=1), 12)
1.666666666667
### Response:
def var(nums, mean_func=amean, ddof=0):
    r"""Calculate the variance.

    The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
    mean :math:`\mu` and population :math:`N` is:
    :math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
    Cf. https://en.wikipedia.org/wiki/Variance

    Parameters
    ----------
    nums : list
        A series of numbers
    mean_func : function
        A mean function (amean by default)
    ddof : int
        The degrees of freedom (0 by default)

    Returns
    -------
    float
        The variance of the values in the series

    Examples
    --------
    >>> var([1, 1, 1, 1])
    0.0
    >>> var([1, 2, 3, 4])
    1.25
    >>> round(var([1, 2, 3, 4], ddof=1), 12)
    1.666666666667
    """
    mu = mean_func(nums)
    squared_deviations = [(num - mu) ** 2 for num in nums]
    return sum(squared_deviations) / (len(nums) - ddof)
def get_security_repository(self):
""" Security repository """
from .repositories import SecurityRepository
if not self.security_repo:
self.security_repo = SecurityRepository(self.session)
return self.security_repo | Security repository | Below is the the instruction that describes the task:
### Input:
Security repository
### Response:
def get_security_repository(self):
    """ Security repository (created lazily on first access) """
    from .repositories import SecurityRepository
    if self.security_repo:
        return self.security_repo
    self.security_repo = SecurityRepository(self.session)
    return self.security_repo
def update_args(self, override_args):
"""Update the argument used to invoke the application
Note that this will also update the dictionary of input and output files.
Parameters
-----------
override_args : dict
Dictionary of arguments to override the current values
"""
self.args = extract_arguments(override_args, self.args)
self._latch_file_info()
scratch_dir = self.args.get('scratch', None)
if is_not_null(scratch_dir):
self._file_stage = FileStageManager(scratch_dir, '.') | Update the argument used to invoke the application
Note that this will also update the dictionary of input and output files.
Parameters
-----------
override_args : dict
Dictionary of arguments to override the current values | Below is the the instruction that describes the task:
### Input:
Update the argument used to invoke the application
Note that this will also update the dictionary of input and output files.
Parameters
-----------
override_args : dict
Dictionary of arguments to override the current values
### Response:
def update_args(self, override_args):
    """Update the argument used to invoke the application

    Note that this will also update the dictionary of input and output files.

    Parameters
    -----------
    override_args : dict
        Dictionary of arguments to override the current values
    """
    # Merge the overrides into the current argument dict and refresh the
    # cached input/output file information derived from it.
    self.args = extract_arguments(override_args, self.args)
    self._latch_file_info()
    # Rebuild the file staging manager when a scratch area is configured.
    # is_not_null presumably treats None/empty as "not set" -- confirm.
    scratch_dir = self.args.get('scratch', None)
    if is_not_null(scratch_dir):
        self._file_stage = FileStageManager(scratch_dir, '.')
self._file_stage = FileStageManager(scratch_dir, '.') |
def log_critical(msg, logger="TaskLogger"):
"""Log a CRITICAL message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
name : `str`, optional (default: "TaskLogger")
Name used to retrieve the unique TaskLogger
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.critical(msg)
return tasklogger | Log a CRITICAL message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
name : `str`, optional (default: "TaskLogger")
Name used to retrieve the unique TaskLogger
Returns
-------
logger : TaskLogger | Below is the the instruction that describes the task:
### Input:
Log a CRITICAL message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
name : `str`, optional (default: "TaskLogger")
Name used to retrieve the unique TaskLogger
Returns
-------
logger : TaskLogger
### Response:
def log_critical(msg, logger="TaskLogger"):
    """Log a CRITICAL message

    Convenience function to log a message to the default Logger

    Parameters
    ----------
    msg : str
        Message to be logged
    name : `str`, optional (default: "TaskLogger")
        Name used to retrieve the unique TaskLogger

    Returns
    -------
    logger : TaskLogger
    """
    instance = get_tasklogger(logger)
    instance.critical(msg)
    return instance
return tasklogger |
def delete_model(self, model):
"""Ran when a model is being deleted."""
for mixin in self.post_processing_mixins:
mixin.delete_model(model)
super().delete_model(model) | Ran when a model is being deleted. | Below is the the instruction that describes the task:
### Input:
Ran when a model is being deleted.
### Response:
def delete_model(self, model):
    """Run when a model is being deleted.

    Gives every post-processing mixin a chance to react before delegating
    to the parent implementation via super().
    """
    for mixin in self.post_processing_mixins:
        mixin.delete_model(model)
    super().delete_model(model)
def read_csv(filename):
"""Pull locations from a user's CSV file.
Read gpsbabel_'s CSV output format
.. _gpsbabel: http://www.gpsbabel.org/
Args:
filename (str): CSV file to parse
Returns:
tuple of dict and list: List of locations as ``str`` objects
"""
field_names = ('latitude', 'longitude', 'name')
data = utils.prepare_csv_read(filename, field_names, skipinitialspace=True)
locations = {}
args = []
for index, row in enumerate(data, 1):
name = '%02i:%s' % (index, row['name'])
locations[name] = (row['latitude'], row['longitude'])
args.append(name)
return locations, args | Pull locations from a user's CSV file.
Read gpsbabel_'s CSV output format
.. _gpsbabel: http://www.gpsbabel.org/
Args:
filename (str): CSV file to parse
Returns:
tuple of dict and list: List of locations as ``str`` objects | Below is the the instruction that describes the task:
### Input:
Pull locations from a user's CSV file.
Read gpsbabel_'s CSV output format
.. _gpsbabel: http://www.gpsbabel.org/
Args:
filename (str): CSV file to parse
Returns:
tuple of dict and list: List of locations as ``str`` objects
### Response:
def read_csv(filename):
    """Pull locations from a user's CSV file.

    Read gpsbabel_'s CSV output format

    .. _gpsbabel: http://www.gpsbabel.org/

    Args:
        filename (str): CSV file to parse
    Returns:
        tuple of dict and list: List of locations as ``str`` objects
    """
    reader = utils.prepare_csv_read(filename,
                                    ('latitude', 'longitude', 'name'),
                                    skipinitialspace=True)
    locations = {}
    args = []
    for number, record in enumerate(reader, 1):
        key = '%02i:%s' % (number, record['name'])
        locations[key] = (record['latitude'], record['longitude'])
        args.append(key)
    return locations, args
return locations, args |
def idx_name_to_num(L):
"""
Switch from index-by-name to index-by-number.
:param dict L: Metadata
:return dict: Modified metadata
"""
logger_jsons.info("enter idx_name_to_num")
# Process the paleoData section
if "paleoData" in L:
L["paleoData"] = _export_section(L["paleoData"], "paleo")
# Process the chronData section
if "chronData" in L:
L["chronData"] = _export_section(L["chronData"], "chron")
logger_jsons.info("exit idx_name_to_num")
return L | Switch from index-by-name to index-by-number.
:param dict L: Metadata
:return dict: Modified metadata | Below is the the instruction that describes the task:
### Input:
Switch from index-by-name to index-by-number.
:param dict L: Metadata
:return dict: Modified metadata
### Response:
def idx_name_to_num(L):
    """
    Switch from index-by-name to index-by-number.

    :param dict L: Metadata
    :return dict: Modified metadata
    """
    logger_jsons.info("enter idx_name_to_num")
    # Convert each data section that is present, in a fixed order.
    for section, mode in (("paleoData", "paleo"), ("chronData", "chron")):
        if section in L:
            L[section] = _export_section(L[section], mode)
    logger_jsons.info("exit idx_name_to_num")
    return L
return L |
def _attach_handler_events(self, handler, events=None):
"""
Search handler for methods named after events, attaching to event handlers as applicable.
:param object handler: Handler instance
:param list events: List of event names to look for. If not specified, will do all known event names.
"""
if not events:
events = self
for name in events:
meth = getattr(handler, name, None)
if meth:
self.events[name] += meth | Search handler for methods named after events, attaching to event handlers as applicable.
:param object handler: Handler instance
:param list events: List of event names to look for. If not specified, will do all known event names. | Below is the the instruction that describes the task:
### Input:
Search handler for methods named after events, attaching to event handlers as applicable.
:param object handler: Handler instance
:param list events: List of event names to look for. If not specified, will do all known event names.
### Response:
def _attach_handler_events(self, handler, events=None):
"""
Search handler for methods named after events, attaching to event handlers as applicable.
:param object handler: Handler instance
:param list events: List of event names to look for. If not specified, will do all known event names.
"""
if not events:
events = self
for name in events:
meth = getattr(handler, name, None)
if meth:
self.events[name] += meth |
def filesize(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._filesize:
self._calculate_bounds()
return self._filesize | Lazy evaluation of start and end of logfile.
Returns None for stdin input currently. | Below is the instruction that describes the task:
### Input:
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
### Response:
def filesize(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._filesize:
self._calculate_bounds()
return self._filesize |
def make_dirs_if_dont_exist(path):
""" Create directories in path if they do not exist """
if path[-1] not in ['/']: path += '/'
path = os.path.dirname(path)
if path != '':
try: os.makedirs(path)
except OSError: pass | Create directories in path if they do not exist | Below is the instruction that describes the task:
### Input:
Create directories in path if they do not exist
### Response:
def make_dirs_if_dont_exist(path):
""" Create directories in path if they do not exist """
if path[-1] not in ['/']: path += '/'
path = os.path.dirname(path)
if path != '':
try: os.makedirs(path)
except OSError: pass |
def roman_to_int(roman_string):
"""
Converts a string of roman numbers into an integer.
.. code: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer
"""
roman_string = roman_string.upper().strip()
if "IIII" in roman_string:
raise ValueError("Malformed roman string")
value = 0
skip_one = False
last_number = None
for i, letter in enumerate(roman_string):
if letter not in _roman_dict:
raise ValueError("Malformed roman string")
if skip_one:
skip_one = False
continue
if i < (len(roman_string) - 1):
double_check = letter + roman_string[i + 1]
if double_check in _roman_dict:
if last_number and _roman_dict[double_check] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[double_check]
value += _roman_dict[double_check]
skip_one = True
continue
if last_number and _roman_dict[letter] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[letter]
value += _roman_dict[letter]
return value | Converts a string of roman numbers into an integer.
.. code: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer | Below is the instruction that describes the task:
### Input:
Converts a string of roman numbers into an integer.
.. code: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer
### Response:
def roman_to_int(roman_string):
"""
Converts a string of roman numbers into an integer.
.. code: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer
"""
roman_string = roman_string.upper().strip()
if "IIII" in roman_string:
raise ValueError("Malformed roman string")
value = 0
skip_one = False
last_number = None
for i, letter in enumerate(roman_string):
if letter not in _roman_dict:
raise ValueError("Malformed roman string")
if skip_one:
skip_one = False
continue
if i < (len(roman_string) - 1):
double_check = letter + roman_string[i + 1]
if double_check in _roman_dict:
if last_number and _roman_dict[double_check] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[double_check]
value += _roman_dict[double_check]
skip_one = True
continue
if last_number and _roman_dict[letter] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[letter]
value += _roman_dict[letter]
return value |
def fill_heatmap(self):
"""Fills code heatmap and execution count dictionaries."""
for module_path, lineno, runtime in self.lines_without_stdlib:
self._execution_count[module_path][lineno] += 1
self._heatmap[module_path][lineno] += runtime | Fills code heatmap and execution count dictionaries. | Below is the instruction that describes the task:
### Input:
Fills code heatmap and execution count dictionaries.
### Response:
def fill_heatmap(self):
"""Fills code heatmap and execution count dictionaries."""
for module_path, lineno, runtime in self.lines_without_stdlib:
self._execution_count[module_path][lineno] += 1
self._heatmap[module_path][lineno] += runtime |
def entry_point():
"""
External entry point which calls main() and
if Stop is raised, calls sys.exit()
"""
try:
main("omego", items=[
(InstallCommand.NAME, InstallCommand),
(UpgradeCommand.NAME, UpgradeCommand),
(ConvertCommand.NAME, ConvertCommand),
(DownloadCommand.NAME, DownloadCommand),
(DbCommand.NAME, DbCommand),
(Version.NAME, Version)])
except Stop, stop:
if stop.rc != 0:
print "ERROR:", stop
else:
print stop
sys.exit(stop.rc) | External entry point which calls main() and
if Stop is raised, calls sys.exit() | Below is the instruction that describes the task:
### Input:
External entry point which calls main() and
if Stop is raised, calls sys.exit()
### Response:
def entry_point():
"""
External entry point which calls main() and
if Stop is raised, calls sys.exit()
"""
try:
main("omego", items=[
(InstallCommand.NAME, InstallCommand),
(UpgradeCommand.NAME, UpgradeCommand),
(ConvertCommand.NAME, ConvertCommand),
(DownloadCommand.NAME, DownloadCommand),
(DbCommand.NAME, DbCommand),
(Version.NAME, Version)])
except Stop, stop:
if stop.rc != 0:
print "ERROR:", stop
else:
print stop
sys.exit(stop.rc) |
def __set_status(self, value):
'''
Sets the status of the invoice.
@param value:str
'''
if value not in [INVOICE_DUE, INVOICE_PAID, INVOICE_CANCELED,
INVOICE_IRRECOVERABLE]:
raise ValueError("Invalid invoice status")
self.__status = value | Sets the status of the invoice.
@param value:str | Below is the instruction that describes the task:
### Input:
Sets the status of the invoice.
@param value:str
### Response:
def __set_status(self, value):
'''
Sets the status of the invoice.
@param value:str
'''
if value not in [INVOICE_DUE, INVOICE_PAID, INVOICE_CANCELED,
INVOICE_IRRECOVERABLE]:
raise ValueError("Invalid invoice status")
self.__status = value |
def _build_late_dispatcher(func_name):
"""Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
"""
def _late_dynamic_dispatcher(obj, *args):
method = getattr(obj, func_name, None)
if not callable(method):
raise NotImplementedError(
"Instance method %r is not implemented by %r." % (
func_name, obj))
return method(*args)
return _late_dynamic_dispatcher | Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args. | Below is the instruction that describes the task:
### Input:
Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
### Response:
def _build_late_dispatcher(func_name):
"""Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
"""
def _late_dynamic_dispatcher(obj, *args):
method = getattr(obj, func_name, None)
if not callable(method):
raise NotImplementedError(
"Instance method %r is not implemented by %r." % (
func_name, obj))
return method(*args)
return _late_dynamic_dispatcher |
def get_pull_requests_by_project(self, project, search_criteria, max_comment_length=None, skip=None, top=None):
"""GetPullRequestsByProject.
[Preview API] Retrieve all pull requests matching a specified criteria.
:param str project: Project ID or project name
:param :class:`<GitPullRequestSearchCriteria> <azure.devops.v5_1.git.models.GitPullRequestSearchCriteria>` search_criteria: Pull requests will be returned that match this search criteria.
:param int max_comment_length: Not used.
:param int skip: The number of pull requests to ignore. For example, to retrieve results 101-150, set top to 50 and skip to 100.
:param int top: The number of pull requests to retrieve.
:rtype: [GitPullRequest]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if search_criteria is not None:
if search_criteria.repository_id is not None:
query_parameters['searchCriteria.repositoryId'] = search_criteria.repository_id
if search_criteria.creator_id is not None:
query_parameters['searchCriteria.creatorId'] = search_criteria.creator_id
if search_criteria.reviewer_id is not None:
query_parameters['searchCriteria.reviewerId'] = search_criteria.reviewer_id
if search_criteria.status is not None:
query_parameters['searchCriteria.status'] = search_criteria.status
if search_criteria.target_ref_name is not None:
query_parameters['searchCriteria.targetRefName'] = search_criteria.target_ref_name
if search_criteria.source_repository_id is not None:
query_parameters['searchCriteria.sourceRepositoryId'] = search_criteria.source_repository_id
if search_criteria.source_ref_name is not None:
query_parameters['searchCriteria.sourceRefName'] = search_criteria.source_ref_name
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
if max_comment_length is not None:
query_parameters['maxCommentLength'] = self._serialize.query('max_comment_length', max_comment_length, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='a5d28130-9cd2-40fa-9f08-902e7daa9efb',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitPullRequest]', self._unwrap_collection(response)) | GetPullRequestsByProject.
[Preview API] Retrieve all pull requests matching a specified criteria.
:param str project: Project ID or project name
:param :class:`<GitPullRequestSearchCriteria> <azure.devops.v5_1.git.models.GitPullRequestSearchCriteria>` search_criteria: Pull requests will be returned that match this search criteria.
:param int max_comment_length: Not used.
:param int skip: The number of pull requests to ignore. For example, to retrieve results 101-150, set top to 50 and skip to 100.
:param int top: The number of pull requests to retrieve.
:rtype: [GitPullRequest] | Below is the instruction that describes the task:
### Input:
GetPullRequestsByProject.
[Preview API] Retrieve all pull requests matching a specified criteria.
:param str project: Project ID or project name
:param :class:`<GitPullRequestSearchCriteria> <azure.devops.v5_1.git.models.GitPullRequestSearchCriteria>` search_criteria: Pull requests will be returned that match this search criteria.
:param int max_comment_length: Not used.
:param int skip: The number of pull requests to ignore. For example, to retrieve results 101-150, set top to 50 and skip to 100.
:param int top: The number of pull requests to retrieve.
:rtype: [GitPullRequest]
### Response:
def get_pull_requests_by_project(self, project, search_criteria, max_comment_length=None, skip=None, top=None):
"""GetPullRequestsByProject.
[Preview API] Retrieve all pull requests matching a specified criteria.
:param str project: Project ID or project name
:param :class:`<GitPullRequestSearchCriteria> <azure.devops.v5_1.git.models.GitPullRequestSearchCriteria>` search_criteria: Pull requests will be returned that match this search criteria.
:param int max_comment_length: Not used.
:param int skip: The number of pull requests to ignore. For example, to retrieve results 101-150, set top to 50 and skip to 100.
:param int top: The number of pull requests to retrieve.
:rtype: [GitPullRequest]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if search_criteria is not None:
if search_criteria.repository_id is not None:
query_parameters['searchCriteria.repositoryId'] = search_criteria.repository_id
if search_criteria.creator_id is not None:
query_parameters['searchCriteria.creatorId'] = search_criteria.creator_id
if search_criteria.reviewer_id is not None:
query_parameters['searchCriteria.reviewerId'] = search_criteria.reviewer_id
if search_criteria.status is not None:
query_parameters['searchCriteria.status'] = search_criteria.status
if search_criteria.target_ref_name is not None:
query_parameters['searchCriteria.targetRefName'] = search_criteria.target_ref_name
if search_criteria.source_repository_id is not None:
query_parameters['searchCriteria.sourceRepositoryId'] = search_criteria.source_repository_id
if search_criteria.source_ref_name is not None:
query_parameters['searchCriteria.sourceRefName'] = search_criteria.source_ref_name
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
if max_comment_length is not None:
query_parameters['maxCommentLength'] = self._serialize.query('max_comment_length', max_comment_length, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='a5d28130-9cd2-40fa-9f08-902e7daa9efb',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitPullRequest]', self._unwrap_collection(response)) |
def write_files(dos, pdos, prefix=None, directory=None, zero_to_efermi=True):
"""Write the density of states data to disk.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \
:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total
density of states.
pdos (dict): The projected density of states. Formatted as a
:obj:`dict` of :obj:`dict` mapping the elements and their orbitals
to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For
example::
{
'Bi': {'s': Dos, 'p': Dos},
'S': {'s': Dos}
}
prefix (:obj:`str`, optional): A prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
zero_to_efermi (:obj:`bool`, optional): Normalise the energy such
that the Fermi level is set as 0 eV.
"""
# defining these cryptic lists makes formatting the data much easier later
if len(dos.densities) == 1:
sdata = [[Spin.up, 1, '']]
else:
sdata = [[Spin.up, 1, '(up)'], [Spin.down, -1, '(down)']]
header = ['energy']
eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies
tdos_data = [eners]
for spin, sign, label in sdata:
header.append('dos{}'.format(label))
tdos_data.append(dos.densities[spin] * sign)
tdos_data = np.stack(tdos_data, axis=1)
filename = "{}_total_dos.dat".format(prefix) if prefix else 'total_dos.dat'
if directory:
filename = os.path.join(directory, filename)
np.savetxt(filename, tdos_data, header=" ".join(header))
spin = len(dos.densities)
for el, el_pdos in pdos.items():
header = ['energy']
pdos_data = [eners]
for orb in sort_orbitals(el_pdos):
for spin, sign, label in sdata:
header.append('{}{}'.format(orb, label))
pdos_data.append(el_pdos[orb].densities[spin] * sign)
pdos_data = np.stack(pdos_data, axis=1)
if prefix:
filename = '{}_{}_dos.dat'.format(prefix, el)
else:
filename = '{}_dos.dat'.format(el)
if directory:
filename = os.path.join(directory, filename)
np.savetxt(filename, pdos_data, header=" ".join(header)) | Write the density of states data to disk.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \
:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total
density of states.
pdos (dict): The projected density of states. Formatted as a
:obj:`dict` of :obj:`dict` mapping the elements and their orbitals
to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For
example::
{
'Bi': {'s': Dos, 'p': Dos},
'S': {'s': Dos}
}
prefix (:obj:`str`, optional): A prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
zero_to_efermi (:obj:`bool`, optional): Normalise the energy such
that the Fermi level is set as 0 eV. | Below is the instruction that describes the task:
### Input:
Write the density of states data to disk.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \
:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total
density of states.
pdos (dict): The projected density of states. Formatted as a
:obj:`dict` of :obj:`dict` mapping the elements and their orbitals
to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For
example::
{
'Bi': {'s': Dos, 'p': Dos},
'S': {'s': Dos}
}
prefix (:obj:`str`, optional): A prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
zero_to_efermi (:obj:`bool`, optional): Normalise the energy such
that the Fermi level is set as 0 eV.
### Response:
def write_files(dos, pdos, prefix=None, directory=None, zero_to_efermi=True):
"""Write the density of states data to disk.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \
:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total
density of states.
pdos (dict): The projected density of states. Formatted as a
:obj:`dict` of :obj:`dict` mapping the elements and their orbitals
to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For
example::
{
'Bi': {'s': Dos, 'p': Dos},
'S': {'s': Dos}
}
prefix (:obj:`str`, optional): A prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
zero_to_efermi (:obj:`bool`, optional): Normalise the energy such
that the Fermi level is set as 0 eV.
"""
# defining these cryptic lists makes formatting the data much easier later
if len(dos.densities) == 1:
sdata = [[Spin.up, 1, '']]
else:
sdata = [[Spin.up, 1, '(up)'], [Spin.down, -1, '(down)']]
header = ['energy']
eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies
tdos_data = [eners]
for spin, sign, label in sdata:
header.append('dos{}'.format(label))
tdos_data.append(dos.densities[spin] * sign)
tdos_data = np.stack(tdos_data, axis=1)
filename = "{}_total_dos.dat".format(prefix) if prefix else 'total_dos.dat'
if directory:
filename = os.path.join(directory, filename)
np.savetxt(filename, tdos_data, header=" ".join(header))
spin = len(dos.densities)
for el, el_pdos in pdos.items():
header = ['energy']
pdos_data = [eners]
for orb in sort_orbitals(el_pdos):
for spin, sign, label in sdata:
header.append('{}{}'.format(orb, label))
pdos_data.append(el_pdos[orb].densities[spin] * sign)
pdos_data = np.stack(pdos_data, axis=1)
if prefix:
filename = '{}_{}_dos.dat'.format(prefix, el)
else:
filename = '{}_dos.dat'.format(el)
if directory:
filename = os.path.join(directory, filename)
np.savetxt(filename, pdos_data, header=" ".join(header)) |
def check_publish_block(self, block_header):
"""Check if a candidate block is ready to be claimed.
block_header (BlockHeader): the block_header to be checked if it
should be claimed
Returns:
Boolean: True if the candidate block_header should be claimed.
"""
if any(publisher_key != block_header.signer_public_key
for publisher_key in self._valid_block_publishers):
return False
if self._min_wait_time == 0:
return True
if self._min_wait_time < 0:
return False
assert self._min_wait_time > 0
if self._max_wait_time <= 0:
return self._start_time + self._min_wait_time <= time.time()
assert self._max_wait_time > 0
if self._max_wait_time <= self._min_wait_time:
return False
assert 0 < self._min_wait_time < self._max_wait_time
return self._start_time + self._wait_time <= time.time() | Check if a candidate block is ready to be claimed.
block_header (BlockHeader): the block_header to be checked if it
should be claimed
Returns:
Boolean: True if the candidate block_header should be claimed. | Below is the instruction that describes the task:
### Input:
Check if a candidate block is ready to be claimed.
block_header (BlockHeader): the block_header to be checked if it
should be claimed
Returns:
Boolean: True if the candidate block_header should be claimed.
### Response:
def check_publish_block(self, block_header):
"""Check if a candidate block is ready to be claimed.
block_header (BlockHeader): the block_header to be checked if it
should be claimed
Returns:
Boolean: True if the candidate block_header should be claimed.
"""
if any(publisher_key != block_header.signer_public_key
for publisher_key in self._valid_block_publishers):
return False
if self._min_wait_time == 0:
return True
if self._min_wait_time < 0:
return False
assert self._min_wait_time > 0
if self._max_wait_time <= 0:
return self._start_time + self._min_wait_time <= time.time()
assert self._max_wait_time > 0
if self._max_wait_time <= self._min_wait_time:
return False
assert 0 < self._min_wait_time < self._max_wait_time
return self._start_time + self._wait_time <= time.time() |
def _idForObject(self, defaultObject):
"""
Generate an opaque identifier which can be used to talk about
C{defaultObject}.
@rtype: C{int}
"""
identifier = self._allocateID()
self._idsToObjects[identifier] = defaultObject
return identifier | Generate an opaque identifier which can be used to talk about
C{defaultObject}.
@rtype: C{int} | Below is the instruction that describes the task:
### Input:
Generate an opaque identifier which can be used to talk about
C{defaultObject}.
@rtype: C{int}
### Response:
def _idForObject(self, defaultObject):
"""
Generate an opaque identifier which can be used to talk about
C{defaultObject}.
@rtype: C{int}
"""
identifier = self._allocateID()
self._idsToObjects[identifier] = defaultObject
return identifier |
def nextSunrise(date, pos):
""" Returns the date of the next sunrise. """
jd = eph.nextSunrise(date.jd, pos.lat, pos.lon)
return Datetime.fromJD(jd, date.utcoffset) | Returns the date of the next sunrise. | Below is the instruction that describes the task:
### Input:
Returns the date of the next sunrise.
### Response:
def nextSunrise(date, pos):
""" Returns the date of the next sunrise. """
jd = eph.nextSunrise(date.jd, pos.lat, pos.lon)
return Datetime.fromJD(jd, date.utcoffset) |
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False | Check if a file exists and is non-empty. | Below is the instruction that describes the task:
### Input:
Check if a file exists and is non-empty.
### Response:
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False |
def _get_row_sparse(self, arr_list, ctx, row_id):
""" Get row_sparse data from row_sparse parameters based on row_id. """
# get row sparse params based on row ids
if not isinstance(row_id, ndarray.NDArray):
raise TypeError("row_id must have NDArray type, but %s is given"%(type(row_id)))
if not self._trainer:
raise RuntimeError("Cannot get row_sparse data for Parameter '%s' when no " \
"Trainer is created with it."%self.name)
results = self._check_and_get(arr_list, ctx)
# fetch row sparse params from the trainer
self._trainer._row_sparse_pull(self, results, row_id)
return results | Get row_sparse data from row_sparse parameters based on row_id. | Below is the instruction that describes the task:
### Input:
Get row_sparse data from row_sparse parameters based on row_id.
### Response:
def _get_row_sparse(self, arr_list, ctx, row_id):
""" Get row_sparse data from row_sparse parameters based on row_id. """
# get row sparse params based on row ids
if not isinstance(row_id, ndarray.NDArray):
raise TypeError("row_id must have NDArray type, but %s is given"%(type(row_id)))
if not self._trainer:
raise RuntimeError("Cannot get row_sparse data for Parameter '%s' when no " \
"Trainer is created with it."%self.name)
results = self._check_and_get(arr_list, ctx)
# fetch row sparse params from the trainer
self._trainer._row_sparse_pull(self, results, row_id)
return results |
def set_primary_mgt(self, interface_id, auth_request=None,
address=None):
"""
Specifies the Primary Control IP address for Management Server
contact. For single FW and cluster FW's, this will enable 'Outgoing',
'Auth Request' and the 'Primary Control' interface. For clusters, the
primary heartbeat will NOT follow this change and should be set
separately using :meth:`.set_primary_heartbeat`.
For virtual FW engines, only auth_request and outgoing will be set.
For master engines, only primary control and outgoing will be set.
Primary management can be set on an interface with single IP's,
multiple IP's or VLANs.
::
engine.interface_options.set_primary_mgt(1)
Set primary management on a VLAN interface::
engine.interface_options.set_primary_mgt('1.100')
Set primary management and different interface for auth_request::
engine.interface_options.set_primary_mgt(
interface_id='1.100', auth_request=0)
Set on specific IP address of interface VLAN with multiple addresses::
engine.interface_options.set_primary_mgt(
interface_id='3.100', address='50.50.50.1')
:param str,int interface_id: interface id to make management
:param str address: if the interface for management has more than
one ip address, this specifies which IP to bind to.
:param str,int auth_request: if setting primary mgt on a cluster
interface with no CVI, you must pick another interface to set
the auth_request field to (default: None)
:raises InterfaceNotFound: specified interface is not found
:raises UpdateElementFailed: updating management fails
:return: None
.. note:: Setting primary management on a cluster interface with no
CVI requires you to set the interface for auth_request.
"""
intfattr = ['primary_mgt', 'outgoing']
if self.interface.engine.type in ('virtual_fw',):
intfattr.remove('primary_mgt')
for attribute in intfattr:
self.interface.set_unset(interface_id, attribute, address)
if auth_request is not None:
self.interface.set_auth_request(auth_request)
else:
self.interface.set_auth_request(interface_id, address)
self._engine.update() | Specifies the Primary Control IP address for Management Server
contact. For single FW and cluster FW's, this will enable 'Outgoing',
'Auth Request' and the 'Primary Control' interface. For clusters, the
primary heartbeat will NOT follow this change and should be set
separately using :meth:`.set_primary_heartbeat`.
For virtual FW engines, only auth_request and outgoing will be set.
For master engines, only primary control and outgoing will be set.
Primary management can be set on an interface with single IP's,
multiple IP's or VLANs.
::
engine.interface_options.set_primary_mgt(1)
Set primary management on a VLAN interface::
engine.interface_options.set_primary_mgt('1.100')
Set primary management and different interface for auth_request::
engine.interface_options.set_primary_mgt(
interface_id='1.100', auth_request=0)
Set on specific IP address of interface VLAN with multiple addresses::
engine.interface_options.set_primary_mgt(
interface_id='3.100', address='50.50.50.1')
:param str,int interface_id: interface id to make management
:param str address: if the interface for management has more than
one ip address, this specifies which IP to bind to.
:param str,int auth_request: if setting primary mgt on a cluster
interface with no CVI, you must pick another interface to set
the auth_request field to (default: None)
:raises InterfaceNotFound: specified interface is not found
:raises UpdateElementFailed: updating management fails
:return: None
.. note:: Setting primary management on a cluster interface with no
CVI requires you to set the interface for auth_request. | Below is the instruction that describes the task:
### Input:
Specifies the Primary Control IP address for Management Server
contact. For single FW and cluster FW's, this will enable 'Outgoing',
'Auth Request' and the 'Primary Control' interface. For clusters, the
primary heartbeat will NOT follow this change and should be set
separately using :meth:`.set_primary_heartbeat`.
For virtual FW engines, only auth_request and outgoing will be set.
For master engines, only primary control and outgoing will be set.
Primary management can be set on an interface with single IP's,
multiple IP's or VLANs.
::
engine.interface_options.set_primary_mgt(1)
Set primary management on a VLAN interface::
engine.interface_options.set_primary_mgt('1.100')
Set primary management and different interface for auth_request::
engine.interface_options.set_primary_mgt(
interface_id='1.100', auth_request=0)
Set on specific IP address of interface VLAN with multiple addresses::
engine.interface_options.set_primary_mgt(
interface_id='3.100', address='50.50.50.1')
:param str,int interface_id: interface id to make management
:param str address: if the interface for management has more than
one ip address, this specifies which IP to bind to.
:param str,int auth_request: if setting primary mgt on a cluster
interface with no CVI, you must pick another interface to set
the auth_request field to (default: None)
:raises InterfaceNotFound: specified interface is not found
:raises UpdateElementFailed: updating management fails
:return: None
.. note:: Setting primary management on a cluster interface with no
CVI requires you to set the interface for auth_request.
### Response:
def set_primary_mgt(self, interface_id, auth_request=None,
address=None):
"""
Specifies the Primary Control IP address for Management Server
contact. For single FW and cluster FW's, this will enable 'Outgoing',
'Auth Request' and the 'Primary Control' interface. For clusters, the
primary heartbeat will NOT follow this change and should be set
separately using :meth:`.set_primary_heartbeat`.
For virtual FW engines, only auth_request and outgoing will be set.
For master engines, only primary control and outgoing will be set.
Primary management can be set on an interface with single IP's,
multiple IP's or VLANs.
::
engine.interface_options.set_primary_mgt(1)
Set primary management on a VLAN interface::
engine.interface_options.set_primary_mgt('1.100')
Set primary management and different interface for auth_request::
engine.interface_options.set_primary_mgt(
interface_id='1.100', auth_request=0)
Set on specific IP address of interface VLAN with multiple addresses::
engine.interface_options.set_primary_mgt(
interface_id='3.100', address='50.50.50.1')
:param str,int interface_id: interface id to make management
:param str address: if the interface for management has more than
one ip address, this specifies which IP to bind to.
:param str,int auth_request: if setting primary mgt on a cluster
interface with no CVI, you must pick another interface to set
the auth_request field to (default: None)
:raises InterfaceNotFound: specified interface is not found
:raises UpdateElementFailed: updating management fails
:return: None
.. note:: Setting primary management on a cluster interface with no
CVI requires you to set the interface for auth_request.
"""
intfattr = ['primary_mgt', 'outgoing']
if self.interface.engine.type in ('virtual_fw',):
intfattr.remove('primary_mgt')
for attribute in intfattr:
self.interface.set_unset(interface_id, attribute, address)
if auth_request is not None:
self.interface.set_auth_request(auth_request)
else:
self.interface.set_auth_request(interface_id, address)
self._engine.update() |
def user_exists(self, name):
"""Check if a given user exists."""
users = self.data['users']
for user in users:
if user['name'] == name:
return True
return False | Check if a given user exists. | Below is the the instruction that describes the task:
### Input:
Check if a given user exists.
### Response:
def user_exists(self, name):
"""Check if a given user exists."""
users = self.data['users']
for user in users:
if user['name'] == name:
return True
return False |
def set_attr(self, **kwargs):
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value)) | Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute. | Below is the the instruction that describes the task:
### Input:
Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
### Response:
def set_attr(self, **kwargs):
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value)) |
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode) | write bytes into the file. | Below is the the instruction that describes the task:
### Input:
write bytes into the file.
### Response:
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode) |
def list_items(queue):
'''
List contents of a queue
'''
itemstuple = _list_items(queue)
items = [item[0] for item in itemstuple]
return items | List contents of a queue | Below is the the instruction that describes the task:
### Input:
List contents of a queue
### Response:
def list_items(queue):
'''
List contents of a queue
'''
itemstuple = _list_items(queue)
items = [item[0] for item in itemstuple]
return items |
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m:
raise KeyError(name)
return unichr(int(m.group(0))) | Converts Adobe glyph names to Unicode numbers. | Below is the the instruction that describes the task:
### Input:
Converts Adobe glyph names to Unicode numbers.
### Response:
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m:
raise KeyError(name)
return unichr(int(m.group(0))) |
def __set_cache(self, tokens):
"""
Sets the tokens cache.
:param tokens: Completer tokens list.
:type tokens: tuple or list
"""
if DefaultCompleter._DefaultCompleter__tokens.get(self.__language):
return
DefaultCompleter._DefaultCompleter__tokens[self.__language] = tokens | Sets the tokens cache.
:param tokens: Completer tokens list.
:type tokens: tuple or list | Below is the the instruction that describes the task:
### Input:
Sets the tokens cache.
:param tokens: Completer tokens list.
:type tokens: tuple or list
### Response:
def __set_cache(self, tokens):
"""
Sets the tokens cache.
:param tokens: Completer tokens list.
:type tokens: tuple or list
"""
if DefaultCompleter._DefaultCompleter__tokens.get(self.__language):
return
DefaultCompleter._DefaultCompleter__tokens[self.__language] = tokens |
def best_buy_3(self):
"""三日均價由下往上
"""
return self.data.continuous(self.data.moving_average(self.data.price, 3)) == 1 | 三日均價由下往上 | Below is the the instruction that describes the task:
### Input:
三日均價由下往上
### Response:
def best_buy_3(self):
"""三日均價由下往上
"""
return self.data.continuous(self.data.moving_average(self.data.price, 3)) == 1 |
def highest(self):
"""Return the items with the higest score.
If this ScoreSet is empty, returns None.
"""
scores = self.scores()
if not scores:
return None
maxscore = max(map(score, scores))
return filter(lambda x: score(x) == maxscore, scores) | Return the items with the higest score.
If this ScoreSet is empty, returns None. | Below is the the instruction that describes the task:
### Input:
Return the items with the higest score.
If this ScoreSet is empty, returns None.
### Response:
def highest(self):
"""Return the items with the higest score.
If this ScoreSet is empty, returns None.
"""
scores = self.scores()
if not scores:
return None
maxscore = max(map(score, scores))
return filter(lambda x: score(x) == maxscore, scores) |
def _activate_thin_device(name, dm_id, size, pool):
"""
Provisions an LVM device-mapper thin device reflecting,
DM device id 'dm_id' in the docker pool.
"""
table = '0 %d thin /dev/mapper/%s %s' % (int(size) // 512, pool, dm_id)
cmd = ['dmsetup', 'create', name, '--table', table]
r = util.subp(cmd)
if r.return_code != 0:
raise MountError('Failed to create thin device: %s' %
r.stderr.decode(sys.getdefaultencoding())) | Provisions an LVM device-mapper thin device reflecting,
DM device id 'dm_id' in the docker pool. | Below is the the instruction that describes the task:
### Input:
Provisions an LVM device-mapper thin device reflecting,
DM device id 'dm_id' in the docker pool.
### Response:
def _activate_thin_device(name, dm_id, size, pool):
"""
Provisions an LVM device-mapper thin device reflecting,
DM device id 'dm_id' in the docker pool.
"""
table = '0 %d thin /dev/mapper/%s %s' % (int(size) // 512, pool, dm_id)
cmd = ['dmsetup', 'create', name, '--table', table]
r = util.subp(cmd)
if r.return_code != 0:
raise MountError('Failed to create thin device: %s' %
r.stderr.decode(sys.getdefaultencoding())) |
def _mock_imethodcall(self, methodname, namespace, response_params_rqd=None,
**params): # pylint: disable=unused-argument
"""
Mocks the WBEMConnection._imethodcall() method.
This mock calls methods within this class that fake the processing
in a WBEM server (at the CIM Object level) for the varisous CIM/XML
methods and return.
Each function is named with the lower case method namd prepended with
'_fake_'.
"""
method_name = '_fake_' + methodname.lower()
method_name = getattr(self, method_name)
result = method_name(namespace, **params)
# sleep for defined number of seconds
if self._response_delay:
time.sleep(self._response_delay)
return result | Mocks the WBEMConnection._imethodcall() method.
This mock calls methods within this class that fake the processing
in a WBEM server (at the CIM Object level) for the varisous CIM/XML
methods and return.
Each function is named with the lower case method namd prepended with
'_fake_'. | Below is the the instruction that describes the task:
### Input:
Mocks the WBEMConnection._imethodcall() method.
This mock calls methods within this class that fake the processing
in a WBEM server (at the CIM Object level) for the varisous CIM/XML
methods and return.
Each function is named with the lower case method namd prepended with
'_fake_'.
### Response:
def _mock_imethodcall(self, methodname, namespace, response_params_rqd=None,
**params): # pylint: disable=unused-argument
"""
Mocks the WBEMConnection._imethodcall() method.
This mock calls methods within this class that fake the processing
in a WBEM server (at the CIM Object level) for the varisous CIM/XML
methods and return.
Each function is named with the lower case method namd prepended with
'_fake_'.
"""
method_name = '_fake_' + methodname.lower()
method_name = getattr(self, method_name)
result = method_name(namespace, **params)
# sleep for defined number of seconds
if self._response_delay:
time.sleep(self._response_delay)
return result |
def branches():
# type: () -> List[str]
""" Return a list of branches in the current repo.
Returns:
list[str]: A list of branches in the current repo.
"""
out = shell.run(
'git branch',
capture=True,
never_pretend=True
).stdout.strip()
return [x.strip('* \t\n') for x in out.splitlines()] | Return a list of branches in the current repo.
Returns:
list[str]: A list of branches in the current repo. | Below is the the instruction that describes the task:
### Input:
Return a list of branches in the current repo.
Returns:
list[str]: A list of branches in the current repo.
### Response:
def branches():
# type: () -> List[str]
""" Return a list of branches in the current repo.
Returns:
list[str]: A list of branches in the current repo.
"""
out = shell.run(
'git branch',
capture=True,
never_pretend=True
).stdout.strip()
return [x.strip('* \t\n') for x in out.splitlines()] |
def _cleanup_label(label):
"""
Reformat the ALL CAPS OMIM labels to something more pleasant to read.
This will:
1. remove the abbreviation suffixes
2. convert the roman numerals to integer numbers
3. make the text title case,
except for suplied conjunctions/prepositions/articles
:param label:
:return:
"""
conjunctions = ['and', 'but', 'yet', 'for', 'nor', 'so']
little_preps = [
'at', 'by', 'in', 'of', 'on', 'to', 'up', 'as', 'it', 'or']
articles = ['a', 'an', 'the']
# remove the abbreviation
lbl = label.split(r';')[0]
fixedwords = []
i = 0
for wrd in lbl.split():
i += 1
# convert the roman numerals to numbers,
# but assume that the first word is not
# a roman numeral (this permits things like "X inactivation"
if i > 1 and re.match(romanNumeralPattern, wrd):
n = fromRoman(wrd)
# make the assumption that the number of syndromes are <100
# this allows me to retain "SYNDROME C"
# and not convert it to "SYNDROME 100"
if 0 < n < 100:
# get the non-roman suffix, if present.
# for example, IIIB or IVA
suffix = wrd.replace(toRoman(n), '', 1)
fixed = ''.join((str(n), suffix))
wrd = fixed
# capitalize first letter
wrd = wrd.title()
# replace interior conjunctions, prepositions,
# and articles with lowercase
if wrd.lower() in (conjunctions+little_preps+articles) and i != 1:
wrd = wrd.lower()
fixedwords.append(wrd)
lbl = ' '.join(fixedwords)
# print (label, '-->', lbl)
return lbl | Reformat the ALL CAPS OMIM labels to something more pleasant to read.
This will:
1. remove the abbreviation suffixes
2. convert the roman numerals to integer numbers
3. make the text title case,
except for suplied conjunctions/prepositions/articles
:param label:
:return: | Below is the the instruction that describes the task:
### Input:
Reformat the ALL CAPS OMIM labels to something more pleasant to read.
This will:
1. remove the abbreviation suffixes
2. convert the roman numerals to integer numbers
3. make the text title case,
except for suplied conjunctions/prepositions/articles
:param label:
:return:
### Response:
def _cleanup_label(label):
"""
Reformat the ALL CAPS OMIM labels to something more pleasant to read.
This will:
1. remove the abbreviation suffixes
2. convert the roman numerals to integer numbers
3. make the text title case,
except for suplied conjunctions/prepositions/articles
:param label:
:return:
"""
conjunctions = ['and', 'but', 'yet', 'for', 'nor', 'so']
little_preps = [
'at', 'by', 'in', 'of', 'on', 'to', 'up', 'as', 'it', 'or']
articles = ['a', 'an', 'the']
# remove the abbreviation
lbl = label.split(r';')[0]
fixedwords = []
i = 0
for wrd in lbl.split():
i += 1
# convert the roman numerals to numbers,
# but assume that the first word is not
# a roman numeral (this permits things like "X inactivation"
if i > 1 and re.match(romanNumeralPattern, wrd):
n = fromRoman(wrd)
# make the assumption that the number of syndromes are <100
# this allows me to retain "SYNDROME C"
# and not convert it to "SYNDROME 100"
if 0 < n < 100:
# get the non-roman suffix, if present.
# for example, IIIB or IVA
suffix = wrd.replace(toRoman(n), '', 1)
fixed = ''.join((str(n), suffix))
wrd = fixed
# capitalize first letter
wrd = wrd.title()
# replace interior conjunctions, prepositions,
# and articles with lowercase
if wrd.lower() in (conjunctions+little_preps+articles) and i != 1:
wrd = wrd.lower()
fixedwords.append(wrd)
lbl = ' '.join(fixedwords)
# print (label, '-->', lbl)
return lbl |
def tracktype(self, tracktype):
"""
When setting the track type, the valid parameters for this track type
need to be set as well.
"""
self._tracktype = tracktype
if tracktype is not None:
if 'bed' in tracktype.lower():
tracktype = 'bigBed'
elif 'wig' in tracktype.lower():
tracktype = 'bigWig'
self.params.update(constants.track_typespecific_fields[tracktype]) | When setting the track type, the valid parameters for this track type
need to be set as well. | Below is the the instruction that describes the task:
### Input:
When setting the track type, the valid parameters for this track type
need to be set as well.
### Response:
def tracktype(self, tracktype):
"""
When setting the track type, the valid parameters for this track type
need to be set as well.
"""
self._tracktype = tracktype
if tracktype is not None:
if 'bed' in tracktype.lower():
tracktype = 'bigBed'
elif 'wig' in tracktype.lower():
tracktype = 'bigWig'
self.params.update(constants.track_typespecific_fields[tracktype]) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.