code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def block_html(self, html):
    """Render a block-level raw HTML snippet.

    :param html: text content of the html snippet.
    :return: '' when style tags are skipped, the escaped text when
        escaping is enabled, otherwise the snippet unchanged.
    """
    is_style = html.lower().startswith('<style')
    if is_style and self.options.get('skip_style'):
        return ''
    return escape(html) if self.options.get('escape') else html
|
Rendering block level pure html content.
:param html: text content of the html snippet.
|
def run_single_with_display(wf, display):
    """Run a workflow in a single thread while reporting to a display.

    Every time a job is pulled by the worker, a message goes to the
    display routine; when the job is finished the result is sent to the
    display routine as well.

    :param wf: the workflow to run.
    :param display: display routine; also supplies the error handler.
    """
    scheduler = Scheduler(error_handler=display.error_handler)
    pipeline = (Queue()
                >> branch(log_job_start.to(sink_map(display)))
                >> worker
                >> branch(sink_map(display)))
    return scheduler.run(pipeline, get_workflow(wf))
|
Adds a display to the single runner. Everything still runs in a single
thread. Every time a job is pulled by the worker, a message goes to the
display routine; when the job is finished the result is sent to the display
routine.
|
def uncrop(data, crinfo, orig_shape, resize=False, outside_mode="constant", cval=0):
    """
    Put a cropped 3D image back into a volume of the original shape.

    :param data: input data (3D array)
    :param crinfo: array with minimum and maximum index along each axis
        [[minX, maxX], [minY, maxY], [minZ, maxZ]]. If crinfo is None, the
        whole input image is placed into [0, 0, 0]. If crinfo is just a
        series of three numbers, it is used as an initial point for input
        image placement.
    :param orig_shape: shape of uncropped image
    :param resize: True or False (default). Useful if the data.shape does
        not fit to crinfo shape.
    :param outside_mode: 'constant' fills outside voxels with `cval`;
        'nearest' replicates the border slices outward along each axis.
    :param cval: fill value used by the 'constant' mode.
    :return: array of shape `orig_shape` with `data` placed per crinfo
    """
    if crinfo is None:
        # Place the whole image at the origin.
        crinfo = list(zip([0] * data.ndim, orig_shape))
    elif np.asarray(crinfo).size == data.ndim:
        # crinfo given as a start point only: derive the stop from data.shape.
        crinfo = list(zip(crinfo, np.asarray(crinfo) + data.shape))
    crinfo = fix_crinfo(crinfo)
    data_out = np.ones(orig_shape, dtype=data.dtype) * cval
    if resize:
        data = resize_to_shape(data, crinfo[:, 1] - crinfo[:, 0])
    startx = np.round(crinfo[0][0]).astype(int)
    starty = np.round(crinfo[1][0]).astype(int)
    startz = np.round(crinfo[2][0]).astype(int)
    data_out[
        startx : startx + data.shape[0],
        starty : starty + data.shape[1],
        startz : startz + data.shape[2],
    ] = data
    if outside_mode == "nearest":
        for ax in range(data.ndim):
            start = np.round(crinfo[ax][0]).astype(int)
            # --- replicate the border slice into the region below the crop ---
            slices = [slice(None)] * 3
            slices[ax] = start
            # BUGFIX: index with a tuple; indexing an ndarray with a *list*
            # of slices is deprecated since NumPy 1.15 and an error later.
            repeated_slice = np.expand_dims(data_out[tuple(slices)], ax)
            append_sz = start
            if append_sz > 0:
                tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
                slices = [slice(None)] * 3
                slices[ax] = slice(None, start)
                data_out[tuple(slices)] = tile0
            # --- replicate the border slice into the region above the crop ---
            slices = [slice(None)] * 3
            slices[ax] = start + data.shape[ax] - 1
            repeated_slice = np.expand_dims(data_out[tuple(slices)], ax)
            append_sz = data_out.shape[ax] - (start + data.shape[ax])
            if append_sz > 0:
                tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
                slices = [slice(None)] * 3
                slices[ax] = slice(start + data.shape[ax], None)
                data_out[tuple(slices)] = tile0
    return data_out
|
Put some boundary to input image.
:param data: input data
:param crinfo: array with minimum and maximum index along each axis
[[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed into [0, 0, 0].
If crinfo is just series of three numbers, it is used as an initial point for input image placement.
:param orig_shape: shape of uncropped image
:param resize: True or False (default). Usefull if the data.shape does not fit to crinfo shape.
:param outside_mode: 'constant', 'nearest'
:return:
|
def create_regular_expression(self, regexp):
    """Create a regular expression for this inspection situation
    context. The inspection situation must be using an inspection
    context that supports regex.

    :param str regexp: regular expression string
    :raises CreateElementFailed: failed to modify the situation
    """
    match = next(
        (p for p in self.situation_context.situation_parameters
         if p.type == 'regexp'),
        None)
    if match is None:
        raise CreateElementFailed('The situation does not support a regular '
                                  'expression as a context value.')
    # Treat as raw string
    return self.add_parameter_value(
        'reg_exp_situation_parameter_values',
        parameter_ref=match.href,
        reg_exp=regexp)
|
Create a regular expression for this inspection situation
context. The inspection situation must be using an inspection
context that supports regex.
:param str regexp: regular expression string
:raises CreateElementFailed: failed to modify the situation
|
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.
    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases. In case no FQDN is available, hostname
    from gethostname() is returned.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, _ipaddrs = gethostbyaddr(name)
    except error:
        # Lookup failed: fall back to whatever name we started with.
        return name
    # Prefer the first dotted candidate, primary hostname first.
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
|
Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
|
def object(self, o_type, o_name=None):
    """Get an object from the scheduler.

    The result is a serialized object: a Json structure containing
    - content: the serialized object content
    - __sys_python_module__: the python class of the returned object

    The Alignak unserialize function of the alignak.misc.serialization
    package allows to restore the initial object, e.g.::

        from alignak.misc.serialization import unserialize
        raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
        group = unserialize(raw_data.json(), True)

    On failure a dict with '_status': 'ERR' and a '_message' is returned.

    :param o_type: searched object type
    :type o_type: str
    :param o_name: searched object name (or uuid)
    :type o_name: str
    :return: serialized object information
    :rtype: str
    """
    found = self._get_object(o_type=o_type, o_name=o_name)
    if found:
        return found
    return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type}
|
Get an object from the scheduler.
The result is a serialized object which is a Json structure containing:
- content: the serialized object content
- __sys_python_module__: the python class of the returned object
The Alignak unserialize function of the alignak.misc.serialization package allows
to restore the initial object.
.. code-block:: python
from alignak.misc.serialization import unserialize
from alignak.objects.hostgroup import Hostgroup
raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
print("Got: %s / %s" % (raw_data.status_code, raw_data.content))
assert raw_data.status_code == 200
object = raw_data.json()
group = unserialize(object, True)
assert group.__class__ == Hostgroup
assert group.get_name() == 'allhosts'
As an example:
{
"__sys_python_module__": "alignak.objects.hostgroup.Hostgroup",
"content": {
"uuid": "32248642-97dd-4f39-aaa2-5120112a765d",
"name": "",
"hostgroup_name": "allhosts",
"use": [],
"tags": [],
"alias": "All Hosts",
"notes": "",
"definition_order": 100,
"register": true,
"unknown_members": [],
"notes_url": "",
"action_url": "",
"imported_from": "unknown",
"conf_is_correct": true,
"configuration_errors": [],
"configuration_warnings": [],
"realm": "",
"downtimes": {},
"hostgroup_members": [],
"members": [
"553d47bc-27aa-426c-a664-49c4c0c4a249",
"f88093ca-e61b-43ff-a41e-613f7ad2cea2",
"df1e2e13-552d-43de-ad2a-fe80ad4ba979",
"d3d667dd-f583-4668-9f44-22ef3dcb53ad"
]
}
}
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name (or uuid)
:type o_name: str
:return: serialized object information
:rtype: str
|
def add_mod(self, seq, mod):
    """Wrap seq.parser_tree in a tree.{Complement, LookAhead, Neg, Until}
    modifier, chosen by the modifier token; unknown tokens leave the
    tree untouched. Always returns True.
    """
    modstr = self.value(mod)
    # Map the modifier token to the wrapper class name; attribute lookup
    # stays lazy so `parsing` is only touched when a token matches.
    wrapper_name = {
        '~': 'Complement',
        '!!': 'LookAhead',
        '!': 'Neg',
        '->': 'Until',
    }.get(modstr)
    if wrapper_name is not None:
        seq.parser_tree = getattr(parsing, wrapper_name)(seq.parser_tree)
    return True
|
Create a tree.{Complement, LookAhead, Neg, Until}
|
def _setup_metric_group_values(self):
    """
    Return the list of MetricGroupValues objects for this metrics response,
    by processing its metrics response string.

    The lines in the metrics response string are::

        MetricsResponse: MetricsGroup{0,*}
                         <emptyline>  a third empty line at the end
        MetricsGroup:    MetricsGroupName
                         ObjectValues{0,*}
                         <emptyline>  a second empty line after each MG
        ObjectValues:    ObjectURI
                         Timestamp
                         ValueRow{1,*}
                         <emptyline>  a first empty line after this blk

    Parsed with a small state machine:
      state 0 - expecting a metrics group name (or skipping empty lines)
      state 1 - expecting an object URI (or end of the group)
      state 2 - expecting the timestamp line
      state 3 - expecting value rows (or end of this object's block)
    A pending group is flushed when a line is processed in state 0, so the
    trailing empty line at the end of the response is what flushes the
    final group.
    """
    mg_defs = self._metrics_context.metric_group_definitions
    metric_group_name = None
    resource_uri = None
    dt_timestamp = None
    object_values = None  # None means "no group currently open"
    metric_group_values = list()
    state = 0
    for mr_line in self._metrics_response_str.splitlines():
        if state == 0:
            if object_values is not None:
                # Store the result from the previous metric group
                mgv = MetricGroupValues(metric_group_name, object_values)
                metric_group_values.append(mgv)
                object_values = None
            if mr_line == '':
                # Skip initial (or trailing) empty lines
                pass
            else:
                # Process the next metrics group
                metric_group_name = mr_line.strip('"')  # No " or \ inside
                assert metric_group_name in mg_defs
                m_defs = mg_defs[metric_group_name].metric_definitions
                object_values = list()
                state = 1
        elif state == 1:
            if mr_line == '':
                # There are no (or no more) ObjectValues items in this
                # metrics group
                state = 0
            else:
                # There are ObjectValues items
                resource_uri = mr_line.strip('"')  # No " or \ inside
                state = 2
        elif state == 2:
            # Process the timestamp
            assert mr_line != ''
            try:
                dt_timestamp = datetime_from_timestamp(int(mr_line))
            except ValueError:
                # Sometimes, the returned epoch timestamp values are way
                # too large, e.g. 3651584404810066 (which would translate
                # to the year 115791 A.D.). Python datetime supports
                # up to the year 9999. We circumvent this issue by
                # simply using the current date&time.
                # TODO: Remove the circumvention for too large timestamps.
                dt_timestamp = datetime.now(pytz.utc)
            state = 3
        elif state == 3:
            if mr_line != '':
                # Process the metric values in the ValueRow line
                str_values = mr_line.split(',')
                metrics = dict()
                for m_name in m_defs:
                    m_def = m_defs[m_name]
                    m_type = m_def.type
                    # m_def.index selects this metric's column in the row
                    m_value_str = str_values[m_def.index]
                    m_value = _metric_value(m_value_str, m_type)
                    metrics[m_name] = m_value
                ov = MetricObjectValues(
                    self._client, mg_defs[metric_group_name], resource_uri,
                    dt_timestamp, metrics)
                object_values.append(ov)
                # stay in this state, for more ValueRow lines
            else:
                # On the empty line after the last ValueRow line
                state = 1
    return metric_group_values
|
Return the list of MetricGroupValues objects for this metrics response,
by processing its metrics response string.
The lines in the metrics response string are::
MetricsResponse: MetricsGroup{0,*}
<emptyline> a third empty line at the end
MetricsGroup: MetricsGroupName
ObjectValues{0,*}
<emptyline> a second empty line after each MG
ObjectValues: ObjectURI
Timestamp
ValueRow{1,*}
<emptyline> a first empty line after this blk
|
def project_from_files(
    files, func_wrapper=_astroid_wrapper, project_name="no name", black_list=("CVS",)
):
    """Return a Project built from a list of files or dotted module names.

    Non-existing paths are resolved as module names; directories are read
    via their ``__init__.py``. Packages are recursed into unless an
    ``__init__`` file was given explicitly.
    """
    astroid_manager = manager.AstroidManager()
    project = Project(project_name)
    for item in files:
        if not os.path.exists(item):
            # Treat it as a dotted module name and resolve to a path.
            fpath = modutils.file_from_modpath(item.split("."))
        elif os.path.isdir(item):
            fpath = os.path.join(item, "__init__.py")
        else:
            fpath = item
        module_ast = func_wrapper(astroid_manager.ast_from_file, fpath)
        if module_ast is None:
            continue
        # XXX why is first file defining the project.path ?
        project.path = project.path or module_ast.file
        project.add_module(module_ast)
        base_name = module_ast.name
        # recurse in package except if __init__ was explicitly given
        if module_ast.package and "__init__" not in item:
            # recurse on others packages / modules if this is a package
            for sub_path in modutils.get_module_files(
                os.path.dirname(module_ast.file), black_list
            ):
                sub_ast = func_wrapper(astroid_manager.ast_from_file, sub_path)
                if sub_ast is None or sub_ast.name == base_name:
                    continue
                project.add_module(sub_ast)
    return project
|
return a Project from a list of files or modules
|
def upload_file_handle(
        self,
        bucket: str,
        key: str,
        src_file_handle: typing.BinaryIO,
        content_type: str=None,
        metadata: dict=None):
    """
    Saves the contents of a file handle as the contents of an object in a bucket.

    Abstract hook: concrete storage backends must override this.
    """
    raise NotImplementedError()
|
Saves the contents of a file handle as the contents of an object in a bucket.
|
def uninstall(self, package):
    """Uninstall the given package (pip requirement string or a
    ('name', 'ver') tuple) from this virtual environment.

    Skips (with a log message) when the package is not installed.
    :raises PackageRemovalException: when pip exits with an error.
    """
    spec = '=='.join(package) if isinstance(package, tuple) else package
    if not self.is_installed(spec):
        self._write_to_log('%s is not installed, skipping' % spec)
        return
    try:
        self._execute_pip(['uninstall', '-y', spec])
    except subprocess.CalledProcessError as e:
        raise PackageRemovalException((e.returncode, e.output, spec))
|
Uninstalls the given package (given in pip's package syntax or a tuple of
('name', 'ver')) from this virtual environment.
|
def delete(args):
    """
    Delete a template by name.

    :param args: parsed CLI args providing `hosts` and the template `name`.
    """
    TemplateManager(args.hosts).delete(args.name)
|
Delete a template by name
|
def _scatter_obs(
        adata,
        x=None,
        y=None,
        color=None,
        use_raw=None,
        layers='X',
        sort_order=True,
        alpha=None,
        basis=None,
        groups=None,
        components=None,
        projection='2d',
        legend_loc='right margin',
        legend_fontsize=None,
        legend_fontweight=None,
        color_map=None,
        palette=None,
        frameon=None,
        right_margin=None,
        left_margin=None,
        size=None,
        title=None,
        show=None,
        save=None,
        ax=None):
    """See docstring of scatter.

    Internal worker: scatters observations either in an embedding
    (`basis`) or against two observation arrays (`x`, `y`), coloring by
    annotations, gene expression or flat colors.
    """
    sanitize_anndata(adata)
    from scipy.sparse import issparse
    if use_raw is None and adata.raw is not None:
        use_raw = True
    # --- normalize `layers` to a 3-tuple (x source, y source, color source) ---
    if layers is None:
        layers = 'X'
    if isinstance(layers, str) and (layers == 'X' or layers in adata.layers.keys()):
        layers = (layers, layers, layers)
    elif isinstance(layers, (tuple, list)) and len(layers) == 3:
        for layer in layers:
            if layer not in adata.layers.keys() and layer != 'X':
                raise ValueError(
                    '`layers` should have elements that are either \'X\' or in adata.layers.keys().')
    else:
        raise ValueError('`layers` should be a string or a list/tuple of length 3.')
    # BUGFIX: the original condition `layers != (...) or layers != [...]` was
    # always True, and the ValueError was constructed but never raised.
    if use_raw and tuple(layers) != ('X', 'X', 'X'):
        raise ValueError('`use_raw` must be `False` if layers other than \'X\' are used.')
    if legend_loc not in VALID_LEGENDLOCS:
        raise ValueError(
            'Invalid `legend_loc`, need to be one of: {}.'.format(VALID_LEGENDLOCS))
    if components is None:
        components = '1,2' if '2d' in projection else '1,2,3'
    if isinstance(components, str):
        components = components.split(',')
    components = np.array(components).astype(int) - 1
    keys = ['grey'] if color is None else [color] if isinstance(color, str) else color
    if title is not None and isinstance(title, str):
        title = [title]
    highlights = adata.uns['highlights'] if 'highlights' in adata.uns else []
    # --- determine the coordinates Y to plot ---
    if basis is not None:
        try:
            # ignore the '0th' diffusion component
            if basis == 'diffmap':
                components += 1
            Y = adata.obsm['X_' + basis][:, components]
            # correct the component vector for use in labeling etc.
            if basis == 'diffmap':
                components -= 1
        except KeyError:
            raise KeyError('compute coordinates using visualization tool {} first'
                           .format(basis))
    elif x is not None and y is not None:
        x_arr = adata._get_obs_array(x, use_raw=use_raw, layer=layers[0])
        y_arr = adata._get_obs_array(y, use_raw=use_raw, layer=layers[1])
        x_arr = x_arr.toarray().flatten() if issparse(x_arr) else x_arr
        y_arr = y_arr.toarray().flatten() if issparse(y_arr) else y_arr
        Y = np.c_[x_arr[:, None], y_arr[:, None]]
    else:
        raise ValueError('Either provide a `basis` or `x` and `y`.')
    if size is None:
        n = Y.shape[0]
        size = 120000 / n
    # SIMPLIFIED: both branches of the original if/elif assigned the same value.
    if legend_fontsize is None:
        legend_fontsize = rcParams['legend.fontsize']
    palette_was_none = palette is None
    if isinstance(palette, list):
        if not is_color_like(palette[0]):
            palettes = palette
        else:
            palettes = [palette]
    else:
        palettes = [palette for i in range(len(keys))]
    for i, palette in enumerate(palettes):
        palettes[i] = utils.default_palette(palette)
    if basis is not None:
        component_name = (
            'DC' if basis == 'diffmap'
            else 'tSNE' if basis == 'tsne'
            else 'UMAP' if basis == 'umap'
            else 'PC' if basis == 'pca'
            else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
            else basis)
    else:
        component_name = None
    axis_labels = (x, y) if component_name is None else None
    show_ticks = True if component_name is None else False
    # --- generate the colors for each key ---
    color_ids = []
    categoricals = []
    colorbars = []
    for ikey, key in enumerate(keys):
        c = 'white'
        categorical = False  # by default, assume continuous or flat color
        colorbar = None
        # test whether we have categorial or continuous annotation
        if key in adata.obs_keys():
            if is_categorical_dtype(adata.obs[key]):
                categorical = True
            else:
                c = adata.obs[key]
        # coloring according to gene expression
        elif (use_raw
              and adata.raw is not None
              and key in adata.raw.var_names):
            c = adata.raw[:, key].X
        elif key in adata.var_names:
            c = adata[:, key].X if layers[2] == 'X' else adata[:, key].layers[layers[2]]
            c = c.toarray().flatten() if issparse(c) else c
        elif is_color_like(key):  # a flat color
            c = key
            colorbar = False
        else:
            raise ValueError(
                'key \'{}\' is invalid! pass valid observation annotation, '
                'one of {} or a gene name {}'
                .format(key, adata.obs_keys(), adata.var_names))
        if colorbar is None:
            colorbar = not categorical
        colorbars.append(colorbar)
        if categorical:
            categoricals.append(ikey)
        color_ids.append(c)
    if right_margin is None and len(categoricals) > 0:
        if legend_loc == 'right margin':
            right_margin = 0.5
    if title is None and keys[0] is not None:
        title = [key.replace('_', ' ') if not is_color_like(key) else '' for key in keys]
    axs = scatter_base(Y,
                       title=title,
                       alpha=alpha,
                       component_name=component_name,
                       axis_labels=axis_labels,
                       component_indexnames=components + 1,
                       projection=projection,
                       colors=color_ids,
                       highlights=highlights,
                       colorbars=colorbars,
                       right_margin=right_margin,
                       left_margin=left_margin,
                       sizes=[size for c in keys],
                       color_map=color_map,
                       show_ticks=show_ticks,
                       ax=ax)

    def add_centroid(centroids, name, Y, mask):
        # Label position: the data point closest (L1) to the group median.
        Y_mask = Y[mask]
        if Y_mask.shape[0] == 0:
            return
        median = np.median(Y_mask, axis=0)
        i = np.argmin(np.sum(np.abs(Y_mask - median), axis=1))
        centroids[name] = Y_mask[i]

    # loop over all categorical annotation and plot it
    for i, ikey in enumerate(categoricals):
        palette = palettes[i]
        key = keys[ikey]
        utils.add_colors_for_categorical_sample_annotation(
            adata, key, palette, force_update_colors=not palette_was_none)
        # actually plot the groups
        mask_remaining = np.ones(Y.shape[0], dtype=bool)
        centroids = {}
        if groups is None:
            for iname, name in enumerate(adata.obs[key].cat.categories):
                if name not in settings.categories_to_ignore:
                    mask = scatter_group(axs[ikey], key, iname,
                                         adata, Y, projection, size=size, alpha=alpha)
                    mask_remaining[mask] = False
                    if legend_loc.startswith('on data'):
                        add_centroid(centroids, name, Y, mask)
        else:
            groups = [groups] if isinstance(groups, str) else groups
            for name in groups:
                if name not in set(adata.obs[key].cat.categories):
                    raise ValueError('"' + name + '" is invalid!'
                                     + ' specify valid name, one of '
                                     + str(adata.obs[key].cat.categories))
                else:
                    iname = np.flatnonzero(adata.obs[key].cat.categories.values == name)[0]
                    mask = scatter_group(axs[ikey], key, iname,
                                         adata, Y, projection, size=size, alpha=alpha)
                    if legend_loc.startswith('on data'):
                        add_centroid(centroids, name, Y, mask)
                    mask_remaining[mask] = False
        if mask_remaining.sum() > 0:
            # plot points not covered by any (plotted) category in grey
            data = [Y[mask_remaining, 0], Y[mask_remaining, 1]]
            if projection == '3d':
                data.append(Y[mask_remaining, 2])
            axs[ikey].scatter(*data, marker='.', c='lightgrey', s=size,
                              edgecolors='none', zorder=-1)
        legend = None
        if legend_loc.startswith('on data'):
            if legend_fontweight is None:
                legend_fontweight = 'bold'
            for name, pos in centroids.items():
                axs[ikey].text(pos[0], pos[1], name,
                               weight=legend_fontweight,
                               verticalalignment='center',
                               horizontalalignment='center',
                               fontsize=legend_fontsize)
            all_pos = np.zeros((len(adata.obs[key].cat.categories), 2))
            for iname, name in enumerate(adata.obs[key].cat.categories):
                if name in centroids:
                    all_pos[iname] = centroids[name]
                else:
                    all_pos[iname] = [np.nan, np.nan]
            utils._tmp_cluster_pos = all_pos
            if legend_loc == 'on data export':
                filename = settings.writedir + 'pos.csv'
                logg.msg('exporting label positions to {}'.format(filename), v=1)
                if settings.writedir != '' and not os.path.exists(settings.writedir):
                    os.makedirs(settings.writedir)
                np.savetxt(filename, all_pos, delimiter=',')
        elif legend_loc == 'right margin':
            legend = axs[ikey].legend(
                frameon=False, loc='center left',
                bbox_to_anchor=(1, 0.5),
                ncol=(1 if len(adata.obs[key].cat.categories) <= 14
                      else 2 if len(adata.obs[key].cat.categories) <= 30 else 3),
                fontsize=legend_fontsize)
        elif legend_loc != 'none':
            legend = axs[ikey].legend(
                frameon=False, loc=legend_loc, fontsize=legend_fontsize)
        if legend is not None:
            for handle in legend.legendHandles:
                handle.set_sizes([300.0])
    # draw a frame around the scatter
    frameon = settings._frameon if frameon is None else frameon
    if not frameon and x is None and y is None:
        for ax in axs:
            ax.set_xlabel('')
            ax.set_ylabel('')
            ax.set_frame_on(False)
    utils.savefig_or_show('scatter' if basis is None else basis, show=show, save=save)
    if show == False:
        return axs if len(keys) > 1 else axs[0]
|
See docstring of scatter.
|
def pkg_config_libdirs(packages):
    """
    Returns a list of all library paths that pkg-config says should be included when
    linking against the list of packages given as 'packages'. An empty return list means
    that the package may be found in the standard system locations, irrespective of
    pkg-config.

    :param packages: list of pkg-config package names
    :raises ValueError: when a package is not on the pkg-config search path
    """
    # don't try calling pkg-config if NO_PKGCONFIG is set in environment
    if os.environ.get("NO_PKGCONFIG", None):
        return []
    # if calling pkg-config fails, don't continue and don't try again.
    # BUGFIX: narrow the bare `except:` (it swallowed KeyboardInterrupt and
    # SystemExit) and use subprocess.DEVNULL instead of a leaked open()
    # handle on os.devnull.
    try:
        subprocess.check_call(["pkg-config", "--version"],
                              stdout=subprocess.DEVNULL, close_fds=True)
    except (OSError, subprocess.CalledProcessError):
        print("PyCBC.libutils: pkg-config call failed, setting NO_PKGCONFIG=1",
              file=sys.stderr)
        os.environ['NO_PKGCONFIG'] = "1"
        return []
    # First, check that we can call pkg-config on each package in the list
    for pkg in packages:
        if not pkg_config_check_exists(pkg):
            raise ValueError("Package {0} cannot be found on the pkg-config search path".format(pkg))
    libdirs = []
    for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs-only-L {0}".format(' '.join(packages))).split():
        if token.startswith("-L"):
            libdirs.append(token[2:])
    return libdirs
|
Returns a list of all library paths that pkg-config says should be included when
linking against the list of packages given as 'packages'. An empty return list means
that the package may be found in the standard system locations, irrespective of
pkg-config.
|
def children(self):
""""Return the table's other column that have this column as a parent, excluding labels"""
for c in self.table.columns:
if c.parent == self.name and not c.valuetype_class.is_label():
yield c
|
Return the table's other column that have this column as a parent, excluding labels
|
def format_lines(statements, lines):
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
"""
pairs = []
i = 0
j = 0
start = None
statements = sorted(statements)
lines = sorted(lines)
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
start = lines[j]
end = lines[j]
j += 1
elif start:
pairs.append((start, end))
start = None
i += 1
if start:
pairs.append((start, end))
ret = ', '.join(map(nice_pair, pairs))
return ret
|
Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
|
def stage_config(self):
    """
    A shortcut property for settings of a stage.

    Resolves the `extends` chain of the settings file (child keys win)
    and applies any per-run stage-config overrides.
    """
    def resolve(stage, seen=None):
        # Depth-first resolution of the `extends` chain, with cycle detection.
        seen = [] if seen is None else seen
        if stage in seen:
            raise RuntimeError(stage + " has already been extended to these settings. "
                               "There is a circular extends within the settings file.")
        seen.append(stage)
        try:
            own_settings = dict(self.zappa_settings[stage].copy())
        except KeyError:
            raise ClickException("Cannot extend settings for undefined stage '" + stage + "'.")
        parent_stage = self.zappa_settings[stage].get('extends', None)
        if not parent_stage:
            return own_settings
        merged = resolve(stage=parent_stage, seen=seen)
        merged.update(own_settings)  # the child stage wins on conflicts
        return merged
    settings = resolve(stage=self.api_stage)
    # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
    if u'delete_zip' in settings:
        settings[u'delete_local_zip'] = settings.get(u'delete_zip')
    settings.update(self.stage_config_overrides)
    return settings
|
A shortcut property for settings of a stage.
|
def _len_objid(self):
'''Get the actual size of the content, as some attributes have variable sizes'''
try:
return self._size
except AttributeError:
temp = (self.object_id, self.birth_vol_id, self.birth_object_id, self.birth_domain_id)
self._size = sum([ObjectID._UUID_SIZE for data in temp if data is not None])
return self._size
|
Get the actual size of the content, as some attributes have variable sizes
|
def _update_progress(self, percentage, **kwargs):
"""
Update the progress with a percentage, including updating the progressbar as well as calling the progress
callback.
:param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
:param kwargs: Other parameters that will be passed to the progress_callback handler.
:return: None
"""
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
self._progressbar.update(percentage * 10000)
if self._progress_callback is not None:
self._progress_callback(percentage, **kwargs)
|
Update the progress with a percentage, including updating the progressbar as well as calling the progress
callback.
:param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
:param kwargs: Other parameters that will be passed to the progress_callback handler.
:return: None
|
def create_channel(
    self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
    """Append a new channel.

    Parameters
    ----------
    name : string
        Unique name for this channel.
    values : array (optional)
        Array. If None, an empty array equaling the data shape is
        created. Default is None.
    shape : tuple of int
        Shape to use. Must broadcast with the full shape.
        Only used if `values` is None. Default is the full shape of self.
    units : string (optional)
        Channel units. Default is None.
    dtype : numpy.dtype (optional)
        dtype to use for dataset, default is np.float64.
        Only used if `values` is None.
    kwargs : dict
        Additional keyword arguments passed to Channel instantiation.

    Returns
    -------
    Channel
        Created channel.
    """
    if name in self.channel_names:
        # duplicate channel: warn and hand back the existing one
        warnings.warn(name, wt_exceptions.ObjectExistsWarning)
        return self[name]
    if name in self.variable_names:
        raise wt_exceptions.NameNotUniqueError(name)
    dataset_kwargs = {"chunks": True}
    if values is not None:
        dataset_kwargs["data"] = values
        dataset_kwargs["shape"] = values.shape
        dataset_kwargs["dtype"] = values.dtype
    else:
        dataset_kwargs["shape"] = self.shape if shape is None else shape
        dataset_kwargs["dtype"] = np.dtype(np.float64) if dtype is None else dtype
        # float/complex/timedelta/datetime kinds get NaN fill, others 0
        if dataset_kwargs["dtype"].kind in "fcmM":
            dataset_kwargs["fillvalue"] = np.nan
        else:
            dataset_kwargs["fillvalue"] = 0
    if np.prod(dataset_kwargs["shape"]) == 1:
        dataset_kwargs["chunks"] = None
    # create dataset
    dataset_id = self.require_dataset(name=name, **dataset_kwargs).id
    channel = Channel(self, dataset_id, units=units, **kwargs)
    # finish
    self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
    return channel
|
Append a new channel.
Parameters
----------
name : string
Unique name for this channel.
values : array (optional)
Array. If None, an empty array equaling the data shape is
created. Default is None.
shape : tuple of int
Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Channel units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs : dict
Additional keyword arguments passed to Channel instantiation.
Returns
-------
Channel
Created channel.
|
def choose(n, k):
    """
    A fast way to calculate binomial coefficients by Andrew Dalke (contrib).

    Returns C(n, k), or 0 when k lies outside [0, n].
    """
    # BUGFIX: `xrange` is Python 2-only and raises NameError on Python 3.
    if 0 <= k <= n:
        ntok = 1
        ktok = 1
        # Only min(k, n-k) multiplications are needed (symmetry of C(n, k)).
        for t in range(1, min(k, n - k) + 1):
            ntok *= n
            ktok *= t
            n -= 1
        return ntok // ktok
    return 0
|
A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
|
def added_env_paths(env_vars, env=None):
    """Merge extra path entries into a copy of the environment.

    :param dict|None env_vars: Env vars to customize; each value starts with
        its separator character followed by the paths to add.
    :param dict env: Original env vars (defaults to os.environ).
    :return: new dict with paths appended, or None when env_vars is empty.
    """
    if not env_vars:
        return None
    env = dict(os.environ) if not env else env
    result = dict(env)
    for var_name, spec in env_vars.items():
        sep = spec[0]
        wanted = spec[1:]
        # existing entries, with empties dropped
        merged = [entry for entry in env.get(var_name, "").split(sep) if entry]
        changed = False
        for candidate in wanted.split(sep):
            if candidate not in merged:
                merged.append(candidate)
                changed = True
        if changed:
            result[var_name] = sep.join(merged)
    return result
|
:param dict|None env_vars: Env vars to customize
:param dict env: Original env vars
|
def connect_put_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_put_namespaced_service_proxy_with_path  # noqa: E501
    connect PUT requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_put_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whether async (returns a thread) or sync (returns the data), the
    # delegate call is identical once _return_http_data_only is forced on.
    kwargs['_return_http_data_only'] = True
    return self.connect_put_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)  # noqa: E501
|
connect_put_namespaced_service_proxy_with_path # noqa: E501
connect PUT requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_put_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
|
def role_exists(role, **kwargs):
    '''
    Checks if a role exists.
    CLI Example:
    .. code-block:: bash
        salt minion mssql.role_exists db_owner
    '''
    rows = tsql_query(query='sp_helprole "{0}"'.format(role), as_dict=True, **kwargs)
    # sp_helprole yields exactly one row when (and only when) the role exists.
    return len(rows) == 1
|
Checks if a role exists.
CLI Example:
.. code-block:: bash
salt minion mssql.role_exists db_owner
|
def list_authors():
    """List all authors as a simple HTML fragment.

    e.g.: GET /authors

    :return: HTML string with one ``<p>`` per author name
    """
    authors = Author.query.all()
    # Build the markup with a single join instead of quadratic += concatenation.
    parts = ['<p>Authors:</p>']
    parts.extend('<p>%s</p>' % author.name for author in authors)
    return ''.join(parts)
|
List all authors.
e.g.: GET /authors
|
def poisson_equation(image, gradient=1, max_iter=100, convergence=.01, percentile = 90.0):
    '''Estimate the solution to the Poisson Equation
    The Poisson Equation is the solution to gradient(x) = h^2/4 and, in this
    context, we use a boundary condition where x is zero for background
    pixels. Also, we set h^2/4 = 1 to indicate that each pixel is a distance
    of 1 from its neighbors.
    The estimation exits after max_iter iterations or if the given percentile
    of foreground pixels differ by less than the convergence fraction
    from one pass to the next.
    Some ideas taken from Gorelick, "Shape representation and classification
    using the Poisson Equation", IEEE Transactions on Pattern Analysis and
    Machine Intelligence V28, # 12, 2006
    image - binary 2-D image with foreground as True
    gradient - the target gradient between 4-adjacent pixels
    max_iter - maximum # of iterations at a given level
    convergence - target fractional difference between values from previous
    and next pass
    percentile - measure convergence at this percentile
    Returns an array of image.shape with the estimated solution.
    '''
    # Evaluate the poisson equation with zero-padded boundaries
    pe = np.zeros((image.shape[0]+2, image.shape[1]+2))
    if image.shape[0] > 64 and image.shape[1] > 64:
        #
        # Sub-sample to get seed values
        #
        # Solve a half-resolution version first (recursively), then
        # linearly interpolate it up to seed the full-resolution iteration.
        sub_image = image[::2, ::2]
        sub_pe = poisson_equation(sub_image,
                                  gradient=gradient*2,
                                  max_iter=max_iter,
                                  convergence=convergence)
        coordinates = np.mgrid[0:(sub_pe.shape[0]*2),
                               0:(sub_pe.shape[1]*2)].astype(float) / 2
        pe[1:(sub_image.shape[0]*2+1), 1:(sub_image.shape[1]*2+1)] = \
            scind.map_coordinates(sub_pe, coordinates, order=1)
        # Zero out background pixels in the interpolated seed.
        # NOTE(review): this indexes the padded array from row/col 0 rather
        # than the offset of 1 used in the assignment above — looks like a
        # possible off-by-one; confirm against upstream before changing.
        pe[:image.shape[0], :image.shape[1]][~image] = 0
    else:
        pe[1:-1,1:-1] = image
    #
    # evaluate only at i and j within the foreground
    #
    i, j = np.mgrid[0:pe.shape[0], 0:pe.shape[1]]
    # Restrict to interior (non-padding) coordinates, then to foreground pixels.
    mask = (i>0) & (i<pe.shape[0]-1) & (j>0) & (j<pe.shape[1]-1)
    mask[mask] = image[i[mask]-1, j[mask]-1]
    i = i[mask]
    j = j[mask]
    if len(i) == 0:
        # No foreground pixels at all: the zero array is already the answer.
        return pe[1:-1, 1:-1]
    if len(i) == 1:
        # Just in case "percentile" can't work when unable to interpolate
        # between a single value... Isolated pixels have value = 1
        #
        pe[mask] = 1
        return pe[1:-1, 1:-1]
    # Jacobi-style relaxation: each pass replaces every foreground value with
    # the mean of its 4-neighbors plus 1 (the h^2/4 source term).
    for itr in range(max_iter):
        next_pe = (pe[i+1, j] + pe[i-1, j] + pe[i, j+1] + pe[i, j-1]) / 4 + 1
        difference = np.abs((pe[mask] - next_pe) / next_pe)
        pe[mask] = next_pe
        # Stop once the chosen percentile of per-pixel relative change converges.
        if np.percentile(difference, percentile) <= convergence:
            break
    # Strip the zero padding before returning.
    return pe[1:-1, 1:-1]
|
Estimate the solution to the Poisson Equation
The Poisson Equation is the solution to gradient(x) = h^2/4 and, in this
context, we use a boundary condition where x is zero for background
pixels. Also, we set h^2/4 = 1 to indicate that each pixel is a distance
of 1 from its neighbors.
The estimation exits after max_iter iterations or if the given percentile
of foreground pixels differ by less than the convergence fraction
from one pass to the next.
Some ideas taken from Gorelick, "Shape representation and classification
using the Poisson Equation", IEEE Transactions on Pattern Analysis and
Machine Intelligence V28, # 12, 2006
image - binary image with foreground as True
gradient - the target gradient between 4-adjacent pixels
max_iter - maximum # of iterations at a given level
convergence - target fractional difference between values from previous
and next pass
percentile - measure convergence at this percentile
|
def apply_parameters(self, parameters):
    """Recursively apply dictionary entries in 'parameters' to {item}s in recipe
    structure, leaving undefined {item}s as they are. A special case is a
    {$REPLACE:item}, which replaces the string with a copy of the referenced
    parameter item.
    Examples:
    parameters = { 'x':'5' }
    apply_parameters( { '{x}': '{y}' }, parameters )
    => { '5': '{y}' }
    parameters = { 'y':'5' }
    apply_parameters( { '{x}': '{y}' }, parameters )
    => { '{x}': '5' }
    parameters = { 'x':'3', 'y':'5' }
    apply_parameters( { '{x}': '{y}' }, parameters )
    => { '3': '5' }
    parameters = { 'l': [ 1, 2 ] }
    apply_parameters( { 'x': '{$REPLACE:l}' }, parameters )
    => { 'x': [ 1, 2 ] }
    """
    class SafeString(object):
        # Stand-in for an undefined parameter: it formats back to "{name}",
        # so unresolved placeholders survive the substitution pass verbatim.
        def __init__(self, s):
            self.string = s
        def __repr__(self):
            return "{" + self.string + "}"
        def __str__(self):
            return "{" + self.string + "}"
        def __getitem__(self, item):
            # Supports "{name[sub]}"-style references on undefined names.
            return SafeString(self.string + "[" + item + "]")
    class SafeDict(dict):
        """A dictionary that returns undefined keys as {keyname}.
        This can be used to selectively replace variables in datastructures."""
        def __missing__(self, key):
            return SafeString(key)
    # By default the python formatter class is used to resolve {item} references
    formatter = string.Formatter()
    # Special format strings "{$REPLACE:(...)}" use this data structure
    # formatter to return the referenced data structure rather than a formatted
    # string.
    ds_formatter = string.Formatter()
    def ds_format_field(value, spec):
        # Stash the looked-up object on the function itself; the string
        # result of the formatting call is discarded by the caller.
        ds_format_field.last = value
        return ""
    ds_formatter.format_field = ds_format_field
    params = SafeDict(parameters)
    def _recursive_apply(item):
        """Helper function to recursively apply replacements."""
        # NOTE(review): `basestring` is Python 2 only — this module appears
        # to target Python 2; would need `str` on Python 3.
        if isinstance(item, basestring):
            if item.startswith("{$REPLACE") and item.endswith("}"):
                try:
                    # vformat triggers ds_format_field, capturing the object.
                    ds_formatter.vformat("{" + item[10:-1] + "}", (), parameters)
                except KeyError:
                    # Referenced parameter missing: replace with None.
                    return None
                return copy.deepcopy(ds_formatter.format_field.last)
            else:
                return formatter.vformat(item, (), params)
        if isinstance(item, dict):
            return {
                _recursive_apply(key): _recursive_apply(value)
                for key, value in item.items()
            }
        if isinstance(item, tuple):
            # Tuples are processed as lists, then converted back.
            return tuple(_recursive_apply(list(item)))
        if isinstance(item, list):
            return [_recursive_apply(x) for x in item]
        # Non-container, non-string values pass through untouched.
        return item
    self.recipe = _recursive_apply(self.recipe)
|
Recursively apply dictionary entries in 'parameters' to {item}s in recipe
structure, leaving undefined {item}s as they are. A special case is a
{$REPLACE:item}, which replaces the string with a copy of the referenced
parameter item.
Examples:
parameters = { 'x':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '5': '{y}' }
parameters = { 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '{x}': '5' }
parameters = { 'x':'3', 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '3': '5' }
parameters = { 'l': [ 1, 2 ] }
apply_parameters( { 'x': '{$REPLACE:l}' }, parameters )
=> { 'x': [ 1, 2 ] }
|
def _compute_sources_for_target(self, target):
    """Computes and returns the sources (relative to buildroot) for the given target."""
    def collect_from_targets(tgts):
        # Flatten the sources of every target that actually declares any.
        collected = []
        for candidate in tgts:
            if candidate.has_sources():
                collected.extend(candidate.sources_relative_to_buildroot())
        return collected
    sources = list(filter(self._sources_predicate, target.sources_relative_to_buildroot()))
    # TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets.
    java_source_targets = getattr(target, 'java_sources', None)
    if java_source_targets:
        sources.extend(collect_from_targets(java_source_targets))
    return sources
|
Computes and returns the sources (relative to buildroot) for the given target.
|
def from_string(string):
    """
    Reads a PWInput object from a string.
    Args:
        string (str): PWInput string
    Returns:
        PWInput object
    """
    lines = list(clean_lines(string.splitlines()))
    def input_mode(line):
        # Classify a line: which card/namelist of the pwscf input we are in.
        if line[0] == "&":
            # "&control", "&system", ... namelist header
            return ("sections", line[1:].lower())
        elif "ATOMIC_SPECIES" in line:
            return ("pseudo", )
        elif "K_POINTS" in line:
            # Option in braces, e.g. "K_POINTS {automatic}"
            return ("kpoints", line.split("{")[1][:-1])
        elif "CELL_PARAMETERS" in line or "ATOMIC_POSITIONS" in line:
            return ("structure", line.split("{")[1][:-1])
        elif line == "/":
            # End of a namelist
            return None
        else:
            # Not a header: stay in the current mode (closure over `mode`).
            return mode
    sections = {"control": {}, "system": {}, "electrons": {},
                "ions": {}, "cell":{}}
    pseudo = {}
    pseudo_index = 0
    lattice = []
    species = []
    coords = []
    structure = None
    site_properties = {"pseudo":[]}
    mode = None
    for line in lines:
        mode = input_mode(line)
        if mode == None:
            pass
        elif mode[0] == "sections":
            section = mode[1]
            # Matches "key = val" and indexed "key(i) = val" assignments.
            m = re.match(r'(\w+)\(?(\d*?)\)?\s*=\s*(.*)', line)
            if m:
                key = m.group(1).strip()
                key_ = m.group(2).strip()
                val = m.group(3).strip()
                if key_ != "":
                    # Indexed key: store values in a fixed-size per-type list.
                    if sections[section].get(key, None) == None:
                        val_ = [0.0]*20 # MAX NTYP DEFINITION
                        val_[int(key_)-1] = PWInput.proc_val(key, val)
                        sections[section][key] = val_
                        site_properties[key] = []
                    else:
                        sections[section][key][int(key_)-1] = PWInput.proc_val(key, val)
                else:
                    sections[section][key] = PWInput.proc_val(key, val)
        elif mode[0] == "pseudo":
            # "Symbol  mass  pseudopotential-file"
            m = re.match(r'(\w+)\s+(\d*.\d*)\s+(.*)', line)
            if m:
                pseudo[m.group(1).strip()] = {}
                pseudo[m.group(1).strip()]["index"] = pseudo_index
                pseudo[m.group(1).strip()]["pseudopot"] = m.group(3).strip()
                pseudo_index += 1
        elif mode[0] == "kpoints":
            # "nk1 nk2 nk3 sk1 sk2 sk3" grid specification
            m = re.match(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)', line)
            if m:
                kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
                kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))
            else:
                # NOTE(review): kpoints_grid/kpoints_shift stay unbound when
                # only the mode line is present — confirm inputs always carry
                # a grid line before relying on the return below.
                kpoints_mode = mode[1]
        elif mode[0] == "structure":
            # Lattice vector rows vs. atomic position rows.
            m_l = re.match(r'(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
            m_p = re.match(r'(\w+)\s+(-?\d+\.\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
            if m_l:
                lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]
            elif m_p:
                site_properties["pseudo"].append(pseudo[m_p.group(1)]["pseudopot"])
                species += [pseudo[m_p.group(1)]["pseudopot"].split(".")[0]]
                coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]
                # Copy any indexed namelist values (e.g. magnetization) to the site.
                for k, v in site_properties.items():
                    if k != "pseudo":
                        site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)]["index"]])
            if mode[1] == "angstrom":
                coords_are_cartesian = True
            elif mode[1] == "crystal":
                coords_are_cartesian = False
    structure = Structure(Lattice(lattice), species, coords,
                          coords_are_cartesian=coords_are_cartesian,
                          site_properties=site_properties)
    return PWInput(structure=structure, control=sections["control"],
                   system=sections["system"], electrons=sections["electrons"],
                   ions=sections["ions"], cell=sections["cell"], kpoints_mode=kpoints_mode,
                   kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift)
|
Reads a PWInput object from a string.
Args:
string (str): PWInput string
Returns:
PWInput object
|
def _from_dict(cls, _dict):
    """Initialize a WorkspaceSystemSettings object from a json dictionary."""
    args = {}
    # Map each optional key to its converter; None means copy the raw value.
    field_converters = (
        ('tooling', WorkspaceSystemSettingsTooling._from_dict),
        ('disambiguation', WorkspaceSystemSettingsDisambiguation._from_dict),
        ('human_agent_assist', None),
    )
    for key, converter in field_converters:
        if key in _dict:
            raw = _dict.get(key)
            args[key] = converter(raw) if converter else raw
    return cls(**args)
|
Initialize a WorkspaceSystemSettings object from a json dictionary.
|
def update_settings(self, settings, force=False, timeout=-1):
    """
    Updates interconnect settings on the logical interconnect. Changes to interconnect settings are asynchronously
    applied to all managed interconnects.
    (This method is not available from API version 600 onwards)
    Args:
        settings: Interconnect settings
        force: If set to true, the operation completes despite any problems with network connectivity or errors
            on the resource itself. The default is false.
        timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
            in OneView; it just stops waiting for its completion.
    Returns:
        dict: Logical Interconnect
    """
    data = settings.copy()
    # Backfill the ethernet sub-document with its defaults when present.
    if 'ethernetSettings' in data:
        ethernet_defaults = self._get_default_values(self.SETTINGS_ETHERNET_DEFAULT_VALUES)
        data['ethernetSettings'] = merge_resources(data['ethernetSettings'],
                                                   ethernet_defaults)
    data = self._helper.update_resource_fields(
        data, self._get_default_values(self.SETTINGS_DEFAULT_VALUES))
    return self._helper.update(data,
                               uri="{}/settings".format(self.data["uri"]),
                               force=force,
                               timeout=timeout)
|
Updates interconnect settings on the logical interconnect. Changes to interconnect settings are asynchronously
applied to all managed interconnects.
(This method is not available from API version 600 onwards)
Args:
settings: Interconnect settings
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect
|
def get_html_string(self, **kwargs):
    """Return string representation of HTML formatted version of table in current state.
    Arguments:
    start - index of first data row to include in output
    end - index of last data row to include in output PLUS ONE (list slice style)
    fields - names of fields (columns) to include
    header - print a header showing field names (True or False)
    border - print a border around the table (True or False)
    hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
    vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
    int_format - controls formatting of integer data
    float_format - controls formatting of floating point data
    padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
    left_padding_width - number of spaces on left hand side of column data
    right_padding_width - number of spaces on right hand side of column data
    sortby - name of field to sort rows by
    sort_key - sorting key function, applied to data points before sorting
    attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
    xhtml - print <br/> tags if True, <br> tags if false"""
    options = self._get_options(kwargs)
    # Pick the renderer based on the "format" option, then delegate.
    if options["format"]:
        builder = self._get_formatted_html_string
    else:
        builder = self._get_simple_html_string
    return builder(options)
|
Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
xhtml - print <br/> tags if True, <br> tags if false
|
def partial(self, *args):
    """
    Partially apply a function by creating a version that has had some of
    its arguments pre-filled, without changing its dynamic `this` context.
    """
    prefilled = args
    def bound(*call_args):
        # Prepend the pre-filled arguments to those supplied at call time.
        return self.obj(*(prefilled + call_args))
    return self._wrap(bound)
|
Partially apply a function by creating a version that has had some of
its arguments pre-filled, without changing its dynamic `this` context.
|
def _CreateDatabase(self):
    """ Create all database tables.

    Opens the SQLite database at self._dbPath and creates the schema:
    Config/IgnoredDir/SupportedFormat (configuration) and
    TVLibrary/FileName/SeasonDir (look-up tables).
    """
    goodlogging.Log.Info("DB", "Initialising new database", verbosity=self.logVerbosity)
    with sqlite3.connect(self._dbPath) as db:
        # Configuration tables
        db.execute("CREATE TABLE Config ("
                    "Name TEXT UNIQUE NOT NULL, "
                    "Value TEXT)")
        db.execute("CREATE TABLE IgnoredDir ("
                    "DirName TEXT UNIQUE NOT NULL)")
        db.execute("CREATE TABLE SupportedFormat ("
                    "FileFormat TEXT UNIQUE NOT NULL)")
        # Look-up tables
        db.execute("CREATE TABLE TVLibrary ("
                    "ShowID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, "
                    "ShowName TEXT UNIQUE NOT NULL, "
                    "ShowDir TEXT UNIQUE)")
        # NOTE(review): the foreign keys below reference "ShowName(ShowID)"
        # but the table holding ShowID is TVLibrary — SQLite only enforces FKs
        # when enabled, so this may be silently ignored; confirm intent.
        db.execute("CREATE TABLE FileName ("
                    "FileName TEXT UNIQUE NOT NULL, "
                    "ShowID INTEGER, "
                    "FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID))")
        db.execute("CREATE TABLE SeasonDir ("
                    "ShowID INTEGER, "
                    "Season INTEGER NOT NULL, "
                    "SeasonDir TEXT NOT NULL, "
                    "FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID),"
                    "CONSTRAINT SeasonDirPK PRIMARY KEY (ShowID,Season))")
        db.commit()
    goodlogging.Log.Info("DB", "Database initialisation complete", verbosity=self.logVerbosity)
|
Create all database tables.
|
def __getCashToBuyStock(self):
    ''' calculate the amount of money to buy stock '''
    account = self.__strategy.getAccountCopy()
    # Budget one slice (1/buying_ratio) of the total account value per buy.
    budget = account.getTotalValue() / self.buying_ratio
    # Only spend when free cash can cover the whole slice; otherwise buy nothing.
    return budget if account.getCash() >= budget else 0
|
calculate the amount of money to buy stock
|
def write_main(self):
    '''
    Writes out a huge string representing the main section of the python
    compiled toil script.
    Currently looks at and writes 5 sections:
    1. JSON Variables (includes importing and preparing files as tuples)
    2. TSV Variables (includes importing and preparing files as tuples)
    3. CSV Variables (includes importing and preparing files as tuples)
    4. Wrapping each WDL "task" function as a toil job
    5. List out children and encapsulated jobs by priority, then start job0.
    This should create variable declarations necessary for function calls.
    Map file paths appropriately and store them in the toil fileStore so
    that they are persistent from job to job. Create job wrappers for toil.
    And finally write out, and run the jobs in order of priority using the
    addChild and encapsulate commands provided by toil.
    :return: giant string containing the main def for the toil script.
    '''
    # Assemble the pieces in order and join once at the end.
    parts = [self.write_main_header()]
    # workflow declarations
    parts.append(' # WF Declarations\n')
    parts.append(self.write_main_wfdeclarations())
    # toil job wrappers with input vars
    parts.append(self.write_main_jobwrappers())
    # export all outputs to a cloud bucket, when one is configured
    if self.destBucket:
        parts.append(self.write_main_destbucket())
    return ''.join(parts)
|
Writes out a huge string representing the main section of the python
compiled toil script.
Currently looks at and writes 5 sections:
1. JSON Variables (includes importing and preparing files as tuples)
2. TSV Variables (includes importing and preparing files as tuples)
3. CSV Variables (includes importing and preparing files as tuples)
4. Wrapping each WDL "task" function as a toil job
5. List out children and encapsulated jobs by priority, then start job0.
This should create variable declarations necessary for function calls.
Map file paths appropriately and store them in the toil fileStore so
that they are persistent from job to job. Create job wrappers for toil.
And finally write out, and run the jobs in order of priority using the
addChild and encapsulate commands provided by toil.
:return: giant string containing the main def for the toil script.
|
def inferRowCompat(self, distribution):
    """
    Equivalent to the category inference of zeta1.TopLevel.
    Computes the max_prod (maximum component of a component-wise multiply)
    between the rows of the histogram and the incoming distribution.
    May be slow if the result of clean_outcpd() is not valid.
    :param distribution: Array of length equal to the number of columns.
    :returns: array of length equal to the number of rows.
    """
    # Lazily (re)build the cleaned CPD cache before using it.
    hack = self.hack_
    if hack is None:
        self.clean_outcpd()
        hack = self.hack_
    return hack.vecMaxProd(distribution)
|
Equivalent to the category inference of zeta1.TopLevel.
Computes the max_prod (maximum component of a component-wise multiply)
between the rows of the histogram and the incoming distribution.
May be slow if the result of clean_outcpd() is not valid.
:param distribution: Array of length equal to the number of columns.
:returns: array of length equal to the number of rows.
|
def compile_vocab(docs, limit=1e6, verbose=0, tokenizer=Tokenizer(stem=None, lower=None, strip=None)):
    """Get the set of words used anywhere in a sequence of documents and assign an integer id
    This vectorizer is much faster than the scikit-learn version (and only requires low/constant RAM ?).
    >>> gen = ('label: ' + chr(ord('A') + i % 3)*3 for i in range(11))
    >>> d = compile_vocab(gen, verbose=0)
    >>> d
    <gensim.corpora.dictionary.Dictionary ...>
    >>> print(d)
    Dictionary(4 unique tokens: [u'AAA', u'BBB', u'CCC', u'label'])
    >>> sorted(d.token2id.values())
    [0, 1, 2, 3]
    >>> sorted(d.token2id.keys())
    [u'AAA', u'BBB', u'CCC', u'label']
    """
    tokenizer = make_tokenizer(tokenizer)
    d = Dictionary()
    try:
        # Django-style queryset: bound the work and stream rows lazily.
        limit = min(limit, docs.count())
        docs = docs.iterator()
    except (AttributeError, TypeError):
        # Plain iterable — nothing to do.
        pass
    for i, doc in enumerate(docs):
        # if isinstance(doc, (tuple, list)) and len(doc) == 2 and isinstance(doc[1], int):
        #     doc, score = docs
        try:
            # in case docs is a values() queryset (dicts of records in a DB table)
            doc = doc.values()
        except AttributeError:  # doc already is a values_list
            if not isinstance(doc, str):
                # Sequence of fields: flatten to one space-separated string.
                doc = ' '.join([str(v) for v in doc])
            else:
                doc = str(doc)
        if i >= limit:
            break
        d.add_documents([list(tokenizer(doc))])
        # Periodic progress logging every 100 documents.
        if verbose and not i % 100:
            log.info('{}: {}'.format(i, repr(d)[:120]))
    return d
|
Get the set of words used anywhere in a sequence of documents and assign an integer id
This vectorizer is much faster than the scikit-learn version (and only requires low/constant RAM ?).
>>> gen = ('label: ' + chr(ord('A') + i % 3)*3 for i in range(11))
>>> d = compile_vocab(gen, verbose=0)
>>> d
<gensim.corpora.dictionary.Dictionary ...>
>>> print(d)
Dictionary(4 unique tokens: [u'AAA', u'BBB', u'CCC', u'label'])
>>> sorted(d.token2id.values())
[0, 1, 2, 3]
>>> sorted(d.token2id.keys())
[u'AAA', u'BBB', u'CCC', u'label']
|
def search_messages(session, thread_id, query, limit=20,
                    offset=0, message_context_details=None,
                    window_above=None, window_below=None):
    """
    Search for messages
    """
    params = {
        'thread_id': thread_id,
        'query': query,
        'limit': limit,
        'offset': offset,
    }
    # Only forward optional parameters that were actually provided (truthy).
    optional = {
        'message_context_details': message_context_details,
        'window_above': window_above,
        'window_below': window_below,
    }
    params.update({key: value for key, value in optional.items() if value})
    # GET /api/messages/0.1/messages/search
    response = make_get_request(session, 'messages/search', params_data=params)
    json_data = response.json()
    if response.status_code != 200:
        raise MessagesNotFoundException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
    return json_data['result']
|
Search for messages
|
def prepare_dispatches():
    """Automatically creates dispatches for messages without them.
    :return: list of Dispatch
    :rtype: list
    """
    dispatches = []
    # Cache (message class, subscribers) per message type name to avoid
    # repeated registry lookups for messages of the same type.
    cache = {}
    for message_model in Message.get_without_dispatches():
        cached = cache.get(message_model.cls)
        if cached is None:
            message_cls = get_registered_message_type(message_model.cls)
            cached = (message_cls, message_cls.get_subscribers())
            cache[message_model.cls] = cached
        message_cls = cached[0]
        dispatches.extend(message_cls.prepare_dispatches(message_model))
    return dispatches
|
Automatically creates dispatches for messages without them.
:return: list of Dispatch
:rtype: list
|
def greaterThan(self, value):
    """
    Sets the operator type to Query.Op.GreaterThan and sets the
    value to the inputted value.
    :param value <variant>
    :return <Query>
    :sa __gt__
    :usage |>>> from orb import Query as Q
           |>>> query = Q('test').greaterThan(1)
           |>>> print query
           |test greater_than 1
    """
    # Work on a copy so the original query object stays untouched.
    clone = self.copy()
    clone.setOp(Query.Op.GreaterThan)
    clone.setValue(value)
    return clone
|
Sets the operator type to Query.Op.GreaterThan and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa __gt__
:usage |>>> from orb import Query as Q
|>>> query = Q('test').greaterThan(1)
|>>> print query
|test greater_than 1
|
def write_error(self, status_code, **kwargs):
    '''
    Render the error page for a failed request.

    :param status_code: HTTP status code of the failure
    :param kwargs: passed by Tornado; may contain ``exc_info`` as
        (type, value, traceback) when an exception caused the error
    :return: None (renders "error.html" with an ``errormessage`` variable)
    '''
    if "exc_info" in kwargs:
        # Include the exception message when a traceback is available.
        error = kwargs["exc_info"][1]
        errormessage = "%s: %s" % (status_code, error)
    else:
        errormessage = "%s" % (status_code,)
    # Single render call instead of one duplicated per branch.
    self.render("error.html", errormessage=errormessage)
|
:param status_code:
:param kwargs:
:return:
|
def macontrol(self, data: ['SASdata', str] = None,
              ewmachart: str = None,
              machart: str = None,
              procopts: str = None,
              stmtpassthrough: str = None,
              **kwargs: dict) -> 'SASresults':
    """
    Python method to call the MACONTROL procedure
    Documentation link:
    https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_macontrol_toc.htm&locale=en
    :param data: SASdata object or string. This parameter is required.
    :param ewmachart: The ewmachart variable can only be a string type.
    :param machart: The machart variable can only be a string type.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
    # NOTE(review): no body beyond the docstring here — the implementation is
    # presumably injected/generated elsewhere (decorator or metaclass); confirm.
|
Python method to call the MACONTROL procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_macontrol_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:param ewmachart: The ewmachart variable can only be a string type.
:param machart: The machart variable can only be a string type.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
|
def get_edit_token(self):
    """
    Can be called in order to retrieve the edit token from an instance of WDLogin
    :return: returns the edit token
    """
    # Refresh when no token is cached yet or the renew period has elapsed.
    token_expired = (time.time() - self.instantiation_time) > self.token_renew_period
    if not self.edit_token or token_expired:
        self.generate_edit_credentials()
        self.instantiation_time = time.time()
    return self.edit_token
|
Can be called in order to retrieve the edit token from an instance of WDLogin
:return: returns the edit token
|
async def disconnect(self):
    """
    Shuts down and disconnects the websocket.
    """
    # Flag shutdown first so any reconnect logic does not try to revive us.
    self._is_shutdown = True
    self.ready.clear()
    self.update_state(NodeState.DISCONNECTING)
    # Detach all players before tearing down the socket itself.
    await self.player_manager.disconnect()
    if self._ws is not None and self._ws.open:
        await self._ws.close()
    # Cancel the listener only if the loop is still alive to accept it.
    if self._listener_task is not None and not self.loop.is_closed():
        self._listener_task.cancel()
    self._state_handlers = []
    # Deregister this node from the module-level node registry.
    _nodes.remove(self)
    log.debug("Shutdown Lavalink WS.")
|
Shuts down and disconnects the websocket.
|
def lvscan(self):
    """
    Probes the volume group for logical volumes and returns a list of
    LogicalVolume instances::
        from lvm2py import *
        lvm = LVM()
        vg = lvm.get_vg("myvg")
        lvs = vg.lvscan()
    *Raises:*
    * HandleError
    """
    self.open()
    lv_list = []
    try:
        lv_handles = lvm_vg_list_lvs(self.handle)
        if not bool(lv_handles):
            # No LVs in this VG; the finally clause still closes the handle
            # (the previous version leaked it on this early return).
            return lv_list
        # Walk the device-mapper linked list of LV handles.
        lvh = dm_list_first(lv_handles)
        while lvh:
            c = cast(lvh, POINTER(lvm_lv_list))
            lv_list.append(LogicalVolume(self, lvh=c.contents.lv))
            if dm_list_end(lv_handles, lvh):
                # end of linked list
                break
            lvh = dm_list_next(lv_handles, lvh)
    finally:
        # Always release the VG handle, whatever path we exit through.
        self.close()
    return lv_list
|
Probes the volume group for logical volumes and returns a list of
LogicalVolume instances::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg")
lvs = vg.lvscan()
*Raises:*
* HandleError
|
def save(self, filename=None, *, gzipped=None, byteorder=None):
    """Write the file at the specified location.
    The `gzipped` keyword only argument indicates if the file should
    be gzipped. The `byteorder` keyword only argument lets you
    specify whether the file should be big-endian or little-endian.
    If the method is called without any argument, it will default to
    the instance attributes and use the file's `filename`,
    `gzipped` and `byteorder` attributes. Calling the method without
    a `filename` will raise a `ValueError` if the `filename` of the
    file is `None`.
    """
    # Fall back to the instance attributes for every unspecified argument.
    target = self.filename if filename is None else filename
    if target is None:
        raise ValueError('No filename specified')
    use_gzip = self.gzipped if gzipped is None else gzipped
    opener = gzip.open if use_gzip else open
    with opener(target, 'wb') as buff:
        self.write(buff, byteorder or self.byteorder)
|
Write the file at the specified location.
The `gzipped` keyword only argument indicates if the file should
be gzipped. The `byteorder` keyword only argument lets you
specify whether the file should be big-endian or little-endian.
If the method is called without any argument, it will default to
the instance attributes and use the file's `filename`,
`gzipped` and `byteorder` attributes. Calling the method without
a `filename` will raise a `ValueError` if the `filename` of the
file is `None`.
|
def server_exists(s_name, ip=None, s_state=None, **connection_args):
    '''
    Determine whether a server exists, optionally also matching its IP
    address and state.

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.server_exists 'serverName'
    '''
    server = _server_get(s_name, **connection_args)
    if server is None:
        return False
    checks = (
        ip is None or ip == server.get_ipaddress(),
        s_state is None or s_state.upper() == server.get_state(),
    )
    return all(checks)
|
Checks if a server exists
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_exists 'serverName'
|
def _map_block_index_to_location(ir_blocks):
    """Return a dict mapping each IR block's index to its corresponding location.

    MarkLocation blocks occur after the blocks related to that location, so
    indexes are buffered until a MarkLocation is seen, then all buffered
    indexes are associated with that MarkLocation's location.
    """
    index_to_location = {}
    pending_indexes = []  # block indexes still awaiting their MarkLocation
    for index, block in enumerate(ir_blocks):
        if isinstance(block, blocks.GlobalOperationsStart):
            if pending_indexes:
                unassociated_blocks = [ir_blocks[ix] for ix in pending_indexes]
                raise AssertionError(
                    u'Unexpectedly encountered global operations before mapping blocks '
                    u'{} to their respective locations.'.format(unassociated_blocks))
            break
        pending_indexes.append(index)
        if isinstance(block, blocks.MarkLocation):
            for ix in pending_indexes:
                index_to_location[ix] = block.location
            pending_indexes = []
    return index_to_location
|
Associate each IR block with its corresponding location, by index.
|
def save_anim(self, fig, animate, init, bitrate=10000, fps=30):
    """Not functional -- TODO"""
    from matplotlib import animation
    func_anim = animation.FuncAnimation(fig, animate, init_func=init, frames=360, interval=20)
    writer_cls = animation.writers['ffmpeg']
    movie_writer = writer_cls(bitrate=bitrate, fps=fps)
    # Write the movie next to the run's other outputs #
    self.avi_path = self.base_dir + self.short_name + '.avi'
    anim_result = func_anim.save(self.avi_path, writer=movie_writer, codec='x264')
    return anim_result
|
Not functional -- TODO
|
def count(self):
    """Approximate number of results, according to the API (lazily cached)."""
    cached = self._total_count
    if cached is None:
        # First access: fetch from the API and memoize.
        cached = self._get_total_count()
        self._total_count = cached
    return cached
|
Approximate number of results, according to the API
|
def fetch(self, start=False, full_data=True):
    """ Get the current job data and possibly flag it as started.

    :param start: when True, atomically mark the job as "started" while
        fetching it (skipped for jobs in cancel/abort/maxretries status).
    :param full_data: True fetches the whole document; a dict is used
        directly as a MongoDB projection; any other value fetches a minimal
        default projection.
    :return: self, with ``self.data`` refreshed (None if the job was not
        found or was cancelled).
    """
    if self.id is None:
        # Job was never persisted; nothing to fetch.
        return self

    if full_data is True:
        # No projection: fetch the complete document.
        fields = None
    elif isinstance(full_data, dict):
        # Caller-supplied projection.
        fields = full_data
    else:
        # Minimal default projection.
        fields = {
            "_id": 0,
            "path": 1,
            "params": 1,
            "status": 1,
            "retry_count": 1,
        }

    if start:
        self.datestarted = datetime.datetime.utcnow()
        # Atomic fetch-and-mark: the status filter ensures we never
        # "start" a job that was cancelled/aborted/out of retries.
        self.set_data(self.collection.find_and_modify(
            {
                "_id": self.id,
                "status": {"$nin": ["cancel", "abort", "maxretries"]}
            },
            {"$set": {
                "status": "started",
                "datestarted": self.datestarted,
                "worker": self.worker.id
            },
            "$unset": {
                "dateexpires": 1  # we don't want started jobs to expire unexpectedly
            }},
            projection=fields)
        )
        context.metric("jobs.status.started")
    else:
        # Read-only fetch; no status change.
        self.set_data(self.collection.find_one({
            "_id": self.id
        }, projection=fields))

    if self.data is None:
        # Either the document is gone or the status filter excluded it.
        context.log.info(
            "Job %s not found in MongoDB or status was cancelled!" %
            self.id)

    self.stored = True
    return self
|
Get the current job data and possibly flag it as started.
|
def personal_sign(self, message, account, password=None):
    """Sign *message* with *account* via the go-ethereum ``personal_sign`` RPC.

    https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign

    :param message: Message to sign
    :type message: str
    :param account: Account address
    :type account: str
    :param password: Password of the account (optional)
    :type password: str
    :return: signature
    :rtype: str
    """
    params = [message, account, password]
    signature = yield from self.rpc_call('personal_sign', params)
    return signature
|
https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign
:param message: Message for sign
:type message: str
:param account: Account address
:type account: str
:param password: Password of account (optional)
:type password: str
:return: signature
:rtype: str
|
def _cut_to_pieces(self, bunch_stack):
"""
:type bunch_stack: list of list of int
"""
stack_len = len(bunch_stack[0])
for i in xrange(0, stack_len, self.fragment_length):
yield np.array(map(lambda stack: stack[i: i + self.fragment_length], bunch_stack))
|
:type bunch_stack: list of list of int
|
async def setup_hostname() -> str:
    """
    Intended to be run when the server starts. Sets the machine hostname.

    The machine hostname is set from the systemd-generated machine-id, which
    changes at every boot.

    Once the hostname is set, we restart avahi.

    This is a separate task from establishing and changing the opentrons
    machine name, which is UTF-8 and stored in /etc/machine-info as the
    PRETTY_HOSTNAME and used in the avahi service name.

    :returns: the hostname
    :raises RuntimeError: if either subprocess exits nonzero
    """
    async def _run_or_raise(cmd, error_desc, failure_msg):
        # Run a subprocess, logging its output and raising on nonzero exit.
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
        stdout, stderr = await proc.communicate()
        ret = proc.returncode
        if ret != 0:
            LOG.error(
                f'Error {error_desc}: {ret} '
                f'stdout: {stdout} stderr: {stderr}')
            raise RuntimeError(failure_msg)

    # Close the machine-id handle promptly (the original `open(...).read()`
    # left it to be collected by the GC).
    with open('/etc/machine-id') as machine_id_file:
        machine_id = machine_id_file.read().strip()
    hostname = machine_id[:6]
    with open('/etc/hostname', 'w') as ehn:
        ehn.write(f'{hostname}\n')

    # First, we run hostnamed which will set the transient hostname
    # and loaded static hostname from the value we just wrote to
    # /etc/hostname
    LOG.debug("Setting hostname")
    await _run_or_raise(
        ('hostname', '-F', '/etc/hostname'),
        'starting hostname', "Couldn't run hostname")

    # Then, with the hostname set, we can restart avahi
    LOG.debug("Restarting avahi")
    await _run_or_raise(
        ('systemctl', 'restart', 'avahi-daemon'),
        'restarting avahi-daemon', "Error restarting avahi")

    LOG.debug("Updated hostname and restarted avahi OK")
    return hostname
|
Intended to be run when the server starts. Sets the machine hostname.
The machine hostname is set from the systemd-generated machine-id, which
changes at every boot.
Once the hostname is set, we restart avahi.
This is a separate task from establishing and changing the opentrons
machine name, which is UTF-8 and stored in /etc/machine-info as the
PRETTY_HOSTNAME and used in the avahi service name.
:returns: the hostname
|
def sign(self, value):
    """Sign *value*, embedding the current timestamp before the signature.

    The result is ``value + sep + base64(timestamp) + sep + signature``.
    """
    payload = want_bytes(value)
    sep = want_bytes(self.sep)
    stamp = base64_encode(int_to_bytes(self.get_timestamp()))
    signed = payload + sep + stamp
    return signed + sep + self.get_signature(signed)
|
Signs the given string and also attaches a time information.
|
def modification_time(self):
    """dfdatetime.DateTimeValues: modification time or None if not available."""
    entry = self._cpio_archive_file_entry
    timestamp = getattr(entry, 'modification_time', None)
    if timestamp is not None:
        return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    return None
|
dfdatetime.DateTimeValues: modification time or None if not available.
|
def register_type(self, typename):
    """
    Register a type name so it may be used to send and receive packages.

    :param typename: Name of the packet type. A method with the same name and
        an "on_" prefix should be added to handle incoming packets.
    :raises ValueError: If the name's hash collides with an already
        registered type.
    """
    typekey = typehash(typename)
    existing = self._type_register.get(typekey)
    if existing is not None:
        raise ValueError("Type name collision. Type %s has the same hash." % repr(existing))
    self._type_register[typekey] = typename
|
Registers a type name so that it may be used to send and receive packages.
:param typename: Name of the packet type. A method with the same name and a
"on_" prefix should be added to handle incomming packets.
:raises ValueError: If there is a hash code collision.
|
def issue_instant_ok(self):
    """ Check that the response was issued at a reasonable time.

    Accepts anything within one day of now, widened by ``self.timeslack``
    on both sides.
    """
    slack = self.timeslack
    upper = time_util.shift_time(
        time_util.time_in_a_while(days=1), slack).timetuple()
    lower = time_util.shift_time(
        time_util.time_a_while_ago(days=1), -slack).timetuple()
    issued_at = str_to_time(self.response.issue_instant)
    return lower < issued_at < upper
|
Check that the response was issued at a reasonable time
|
def is_script(self, container):
    """Returns `True` if this styled text is super/subscript."""
    try:
        position = self._style(container).get_value('position', container)
    except StyleException:
        # No 'position' style could be resolved; treat as normal text.
        return False
    return position != TextPosition.NORMAL
|
Returns `True` if this styled text is super/subscript.
|
def _parse_proxy(proxy):
    """Split a proxy URL or authority into (scheme, user, password, host/port).

    The argument may be a bare authority (``host:port``, optionally preceded
    by ``user:password@``) or a full URL.  A URL must contain an authority
    component, i.e. two slashes after the scheme (RFC 3986):

    >>> _parse_proxy('file:/ftp.example.com/')
    Traceback (most recent call last):
    ValueError: proxy URL with no authority: 'file:/ftp.example.com/'

    Any of the first three tuple items may be None:

    >>> _parse_proxy('proxy.example.com')
    (None, None, None, 'proxy.example.com')
    >>> _parse_proxy('proxy.example.com:3128')
    (None, None, None, 'proxy.example.com:3128')
    >>> _parse_proxy('joe:password@proxy.example.com')
    (None, 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
    ('http', 'joe', 'password', 'proxy.example.com:3128')

    Everything after the authority is ignored, and a trailing '/' is not
    required:

    >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
    ('ftp', 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('http://joe:password@proxy.example.com')
    ('http', 'joe', 'password', 'proxy.example.com')
    """
    scheme, rest = splittype(proxy)
    if rest.startswith("//"):
        # Full URL: per RFC 3986 ss 3.2/3.3 the path after the authority is
        # empty or starts with '/', so the authority runs up to the next '/'
        # (or to the end of the string).
        authority_end = rest.find("/", 2)
        if authority_end == -1:
            authority_end = None
        authority = rest[2:authority_end]
    elif rest.startswith("/"):
        raise ValueError("proxy URL with no authority: %r" % proxy)
    else:
        # Bare authority: no scheme at all.
        scheme = None
        authority = proxy
    userinfo, hostport = splituser(authority)
    if userinfo is None:
        user = password = None
    else:
        user, password = splitpasswd(userinfo)
    return scheme, user, password, hostport
|
Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
|
def wait_for_current_tasks(self):
    """Block until every task currently in the task list has completed.

    Waits on each task's AppFuture. Tasks added after cleanup has started
    (such as data stageout?) are not necessarily waited for.
    """
    logger.info("Waiting for all remaining tasks to complete")
    for task_id in self.tasks:
        app_future = self.tasks[task_id]['app_fu']
        if app_future.done():
            continue
        logger.debug("Waiting for task {} to complete".format(task_id))
        # .exception() blocks until completion without raising, unlike
        # .result(), so failed tasks don't abort the wait.
        app_future.exception()
    logger.info("All remaining tasks completed")
|
Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?)
|
def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs):
    """
    Deduplicate a list of tensors under the symmetry operations of a structure.

    Args:
        tensors (list of tensors): Tensor objects to test for
            symmetrically-equivalent duplicates
        structure (Structure): structure from which to get symmetry
        tol (float): tolerance for tensor equivalence
        kwargs: keyword arguments for the SpacegroupAnalyzer

    Returns:
        Mapping of unique tensors to the lists of symmetry operations
        that reconstruct the remaining (duplicate) tensors from them.
    """
    sga = SpacegroupAnalyzer(structure, **kwargs)
    symmops = sga.get_symmetry_operations(cartesian=True)
    # Seed with the first tensor; it is unique by definition.
    unique_mapping = TensorMapping([tensors[0]], [[]], tol=tol)
    for candidate in tensors[1:]:
        # Find the first (known tensor, symmop) pair that reproduces the
        # candidate, if any.
        match = next(
            ((known, op)
             for known, op in itertools.product(unique_mapping, symmops)
             if np.allclose(known.transform(op), candidate, atol=tol)),
            None)
        if match is None:
            unique_mapping[candidate] = []
        else:
            known, op = match
            unique_mapping[known].append(op)
    return unique_mapping
|
Function that converts a list of tensors corresponding to a structure
and returns a dictionary consisting of unique tensor keys with symmop
values corresponding to transformations that will result in derivative
tensors from the original list
Args:
tensors (list of tensors): list of Tensor objects to test for
symmetrically-equivalent duplicates
structure (Structure): structure from which to get symmetry
tol (float): tolerance for tensor equivalence
kwargs: keyword arguments for the SpacegroupAnalyzer
returns:
dictionary consisting of unique tensors with symmetry operations
corresponding to those which will reconstruct the remaining
tensors as values
|
def point_in_segment(ac, b):
    '''
    point_in_segment((a,c), b) yields True if point b lies strictly inside the
    segment from a to c, and False otherwise. Unlike point_on_segment, a point
    equal to either endpoint is considered 'on' but not 'in' the segment.
    '''
    (a, c) = ac
    arrays = [np.asarray(u) for u in (a, b, c)]
    if any(u.ndim > 1 for u in arrays):
        # Flatten any extra trailing dimensions into columns.
        (a, b, c) = [np.reshape(u, (len(u), -1)) for u in arrays]
    else:
        (a, b, c) = arrays
    def _dist(u, v):
        return np.sqrt(np.sum((v - u) ** 2, axis=0))
    d_ab = _dist(a, b)
    d_bc = _dist(b, c)
    d_ac = _dist(a, c)
    # Collinear-and-between iff |ab| + |bc| == |ac|; exclude the endpoints
    # (where |ac| equals |ab| or |bc|).  Same predicate as the original,
    # rewritten via De Morgan.
    on_line = np.isclose(d_ab + d_bc, d_ac)
    at_endpoint = np.isclose(d_ac, d_ab) | np.isclose(d_ac, d_bc)
    return on_line & ~at_endpoint
|
point_in_segment((a,c), b) yields True if point b is in segment (a,c) and False otherwise. Note
that this differs from point_on_segment in that if b is equal to a or c it is
considered 'on' but not 'in' the segment.
|
def count_collisions(Collisions):
    """
    Counts the number of distinct collision events and their start indices.

    A new event is registered at each False -> True transition. Note that the
    scan starts as if a collision were already in progress, so an event that
    is already active at index 0 is not counted.

    Parameters
    ----------
    Collisions : array_like
        Array of booleans, containing true if during a collision event,
        false otherwise.

    Returns
    -------
    CollisionCount : int
        Number of unique collisions
    CollisionIndicies : list
        Indices at which each counted collision event begins
    """
    collision_indices = []
    previous = True
    for index, in_collision in enumerate(Collisions):
        # Explicit equality comparisons kept to mirror the original exactly.
        if in_collision == True and previous == False:
            collision_indices.append(index)
        previous = in_collision
    return len(collision_indices), collision_indices
|
Counts the number of unique collisions and gets the collision index.
Parameters
----------
Collisions : array_like
Array of booleans, containing true if during a collision event, false otherwise.
Returns
-------
CollisionCount : int
Number of unique collisions
CollisionIndicies : list
        Indices at which each collision event begins
|
def exists(name=None, region=None, key=None, keyid=None, profile=None,
           vpc_id=None, vpc_name=None, group_id=None):
    '''
    Check to see if a security group exists.

    CLI example::

        salt myminion boto_secgroup.exists mysecgroup
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
                       group_id=group_id, region=region, key=key, keyid=keyid,
                       profile=profile)
    # Truthiness of the lookup result is the existence test.
    return bool(group)
|
Check to see if a security group exists.
CLI example::
salt myminion boto_secgroup.exists mysecgroup
|
def print_results(self):
    """Print results of the package command.

    Writes the ``updates``, ``package`` and ``bundle`` sections of
    ``self.package_data`` to stdout, then (unless ``ignore_validation``
    was requested) prints validation errors and sets a nonzero exit code
    for each one found.
    """
    def _print_row(action, output):
        # One aligned, colorized "action  output" line.
        print(
            '{!s:<20}{}{} {!s:<50}'.format(
                action, c.Style.BRIGHT, c.Fore.CYAN, output
            )
        )

    # Updates
    if self.package_data.get('updates'):
        print('\n{}{}Updates:'.format(c.Style.BRIGHT, c.Fore.BLUE))
        for p in self.package_data['updates']:
            _print_row(p.get('action'), p.get('output'))

    # Packaging
    print('\n{}{}Package:'.format(c.Style.BRIGHT, c.Fore.BLUE))
    for p in self.package_data['package']:
        if isinstance(p.get('output'), list):
            # List output prints in comma-joined chunks of 5; only the
            # first chunk carries the action label.
            n = 5
            list_data = p.get('output')
            _print_row(p.get('action'), ', '.join(list_data[:n]))
            del list_data[:n]
            for data in [
                list_data[i : i + n] for i in range(0, len(list_data), n)  # noqa: E203
            ]:
                _print_row('', ', '.join(data))
        else:
            _print_row(p.get('action'), p.get('output'))

    # Bundle
    if self.package_data.get('bundle'):
        print('\n{}{}Bundle:'.format(c.Style.BRIGHT, c.Fore.BLUE))
        for p in self.package_data['bundle']:
            _print_row(p.get('action'), p.get('output'))

    # ignore exit code
    if not self.args.ignore_validation:
        print('\n')  # separate errors from normal output
        # Print all errors. Guard against a missing 'errors' key: the
        # previous `.get('errors')` with no default raised TypeError when
        # iterating None.
        for error in self.package_data.get('errors') or []:
            print('{}{}'.format(c.Fore.RED, error))
            self.exit_code = 1
|
Print results of the package command.
|
def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None,
                              reply_mac=None):
    """
    Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
    3756. This is done by listening incoming NS messages *sent from the
    unspecified address* and sending a NA reply for the target address,
    leading the peer to believe that another node is also performing DAD
    for that address.

    By default, the fake NA sent to create the DoS uses:
     - as target address the target address found in received NS.
     - as IPv6 source address: the target address found in received NS.
     - as IPv6 destination address: the link-local solicited-node multicast
       address derived from the target address in received NS.
     - the mac address of the interface as source (or reply_mac, see below).
     - the multicast mac address derived from the solicited node multicast
       address used as IPv6 destination address.
     - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
       with the mac address used as source of the NA.

    Following arguments can be used to change the behavior:

    iface: a specific interface (e.g. "eth0") of the system on which the
         DoS should be launched. If None is provided conf.iface is used.

    mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
         Only NS messages received from this source will trigger replies.
         This allows limiting the effects of the DoS to a single target by
         filtering on its mac address. The default value is None: the DoS
         is not limited to a specific mac address.

    tgt_filter: Same as previous but for a specific target IPv6 address for
         received NS. If the target address in the NS message (not the IPv6
         destination address) matches that address, then a fake reply will
         be sent, i.e. the emitter will be a target of the DoS.

    reply_mac: allow specifying a specific source mac address for the reply,
         i.e. to prevent the use of the mac address of the interface. This
         address will also be used in the Target Link-Layer Address option.
    """
    def na_reply_callback(req, reply_mac, iface):
        """
        Callback that replies to a NS with a NA.
        """
        # Let's build a reply and send it
        mac = req[Ether].src
        dst = req[IPv6].dst
        tgt = req[ICMPv6ND_NS].tgt
        # Source the NA from the very address the victim is DAD-probing, so
        # it concludes another node already claims that address.
        rep = Ether(src=reply_mac) / IPv6(src=tgt, dst=dst)
        # NA flags: S=0 (not a solicited unicast answer), R=0 (not a
        # router), O=1 (override any cached neighbor entry) -- see RFC 4861.
        rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1)  # noqa: E741
        rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
        sendp(rep, iface=iface, verbose=0)

        print("Reply NA for target address %s (received from %s)" % (tgt, mac))

    # Shared sniffing/filtering loop; our callback crafts the fake NA.
    _NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter,
                        tgt_filter, reply_mac)
|
Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
3756. This is done by listening incoming NS messages *sent from the
unspecified address* and sending a NA reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address found in received NS.
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
|
def cover(self, match_set):
    """Build a new classifier rule suitable for adding to *match_set*.

    The returned rule's condition matches the match set's situation, and its
    action is chosen at random, preferring actions not already suggested by
    the rules in the match set.

    Usage:
        match_set = model.match(situation)
        if model.algorithm.covering_is_required(match_set):
            new_rule = model.algorithm.cover(match_set)
            assert new_rule.condition(situation)
            model.add(new_rule)
            match_set = model.match(situation)

    Arguments:
        match_set: A MatchSet instance drawn from a model that uses this
            algorithm.
    Return:
        A new ClassifierRule instance, appropriate for addition to
        match_set and to the classifier set from which match_set was
        drawn.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.model.algorithm is self

    # Condition generalized from the situation via random wildcards.
    condition = bitstrings.BitCondition.cover(
        match_set.situation,
        self.wildcard_probability
    )

    # Prefer actions not already suggested for this situation; fall back
    # to the full action set when every action is already covered.
    unused_actions = (
        frozenset(match_set.model.possible_actions) -
        frozenset(match_set)
    )
    pool = unused_actions or match_set.model.possible_actions
    chosen_action = random.choice(list(pool))

    return XCSClassifierRule(
        condition,
        chosen_action,
        self,
        match_set.time_stamp
    )
|
Return a new classifier rule that can be added to the match set,
with a condition that matches the situation of the match set and an
action selected to avoid duplication of the actions already
contained therein. The match_set argument is a MatchSet instance
representing the match set to which the returned rule may be added.
Usage:
match_set = model.match(situation)
if model.algorithm.covering_is_required(match_set):
new_rule = model.algorithm.cover(match_set)
assert new_rule.condition(situation)
model.add(new_rule)
match_set = model.match(situation)
Arguments:
match_set: A MatchSet instance.
Return:
A new ClassifierRule instance, appropriate for the addition to
match_set and to the classifier set from which match_set was
drawn.
|
def matching_tokens(self, text, start=0):
    """Yield every token definition whose regexp matches *text* at *start*.

    Args:
        text (str): the text to test
        start (int): position in the string where matching is attempted
            (passed as ``pos`` to the compiled pattern's ``match``)

    Yields:
        (token_class, re.Match): each token class whose regexp matches the
        text at that position, together with its match object.
    """
    for candidate, pattern in self._tokens:
        hit = pattern.match(text, pos=start)
        if hit is not None:
            yield candidate, hit
|
Retrieve all token definitions matching the beginning of a text.
Args:
text (str): the text to test
start (int): the position where matches should be searched in the
string (see re.match(rx, txt, pos))
Yields:
(token_class, re.Match): all token class whose regexp matches the
text, and the related re.Match object.
|
def install(args: List[str]) -> None:
    """`pip install` as a function.

    Accepts a list of pip arguments.

    .. code-block:: py

        >>> install(['numpy', '--target', 'site-packages'])
        Collecting numpy
          Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB)
            100% || 16.9MB 53kB/s
        Installing collected packages: numpy
        Successfully installed numpy-1.13.3
    """
    with clean_pip_env():
        # When invoked as a pyz, there is no guarantee pip is importable in
        # the child interpreter, so expose our own site-packages to it.
        child_env = os.environ.copy()
        first_sitedir = _first_sitedir_index()
        _extend_python_path(child_env, sys.path[first_sitedir:])

        pip_cmd = [sys.executable, "-m", "pip", "--disable-pip-version-check", "install"]
        proc = subprocess.Popen(
            pip_cmd + args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=child_env,
        )
        # Stream pip's combined output line by line as it arrives.
        for line in proc.stdout:
            if line:
                click.echo(line.decode().rstrip())
        if proc.wait() > 0:
            sys.exit(PIP_INSTALL_ERROR)
|
`pip install` as a function.
Accepts a list of pip arguments.
.. code-block:: py
>>> install(['numpy', '--target', 'site-packages'])
Collecting numpy
Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB)
100% || 16.9MB 53kB/s
Installing collected packages: numpy
Successfully installed numpy-1.13.3
|
def build_index_from_design(df, design, remove_prefix=None, types=None, axis=1, auto_convert_numeric=True, unmatched_columns='index'):
    """
    Build a MultiIndex from a design table.

    Supply a design table whose column headings become the levels of the new
    multiindex and whose index (the 'Label' column) contains the labels to
    search for in the data.

    :param df: DataFrame whose columns (axis=1) or index (axis=0) get the new MultiIndex
    :param design: design DataFrame, indexed by (or containing) a 'Label' column
    :param remove_prefix: str or list of str substrings stripped from labels before matching
    :param types: dict mapping design column name -> dtype to cast that column to
    :param axis: 0 to replace ``df.index``, 1 (default) to replace ``df.columns``
    :param auto_convert_numeric: auto-convert numeric-only design columns
    :param unmatched_columns: for axis=1, what to do with unmatched labels:
        'index' moves them into the row index, 'drop' removes them, and a
        falsy value keeps them with an all-None index entry
    :return: df with the new MultiIndex applied
    """
    df = df.copy()
    if 'Label' not in design.index.names:
        design = design.set_index('Label')

    if remove_prefix is None:
        remove_prefix = []
    if type(remove_prefix) is str:
        remove_prefix = [remove_prefix]

    unmatched_for_index = []
    names = design.columns.values
    idx_levels = len(names)
    indexes = []

    # Convert numeric only columns; except the index
    if auto_convert_numeric:
        design = design.apply(pd.to_numeric, errors="ignore")
        # The match labels are always strings, so the index must also be
        design.index = design.index.astype(str)

    # Apply explicit type settings
    if types:
        for n, t in types.items():
            if n in design.columns.values:
                design[n] = design[n].astype(t)

    # Build the index
    for original_label in df.columns.values:
        label = copy(original_label)
        for prefix in remove_prefix:
            label = label.replace(prefix, '')
        # Remove surrounding whitespace, then convert to numeric if possible
        label = numeric(label.strip())
        try:
            matched_row = design.loc[str(label)]
        except KeyError:
            # No match for this label. NOTE: the previous code appended a
            # stale `idx` from the prior iteration here even when the column
            # was routed to `unmatched_for_index`, which corrupted (and
            # mis-sized) the resulting MultiIndex.
            if unmatched_columns:
                unmatched_for_index.append(original_label)
            else:
                # Keep the column, filling its index entry with Nones.
                indexes.append(tuple([None] * idx_levels))
        else:
            # Matched a design row; use its values as the index entry.
            indexes.append(tuple(matched_row.values))

    if axis == 0:
        df.index = pd.MultiIndex.from_tuples(indexes, names=names)
    else:
        # If using unmatched for index, append; if dropping, remove them
        if unmatched_columns == 'index':
            df = df.set_index(unmatched_for_index, append=True)
        elif unmatched_columns == 'drop':
            df = df.drop(unmatched_for_index, axis=1)

        df.columns = pd.MultiIndex.from_tuples(indexes, names=names)

    df = df.sort_index(axis=1)
    return df
|
Build a MultiIndex from a design table.
Supply with a table with column headings for the new multiindex
and a index containing the labels to search for in the data.
:param df:
:param design:
:param remove:
:param types:
:param axis:
:param auto_convert_numeric:
:return:
|
def ics2task():
    """Command line tool to convert from iCalendar to Taskwarrior"""
    from argparse import ArgumentParser, FileType
    from sys import stdin

    parser = ArgumentParser(description='Converter from iCalendar to Taskwarrior syntax.')
    parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
                        help='Input iCalendar file (default: stdin)')
    parser.add_argument('outdir', nargs='?', help='Output Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
    options = parser.parse_args()

    # Parse the whole input as one iCalendar component, then convert each
    # VTODO entry into a Taskwarrior task.
    calendar = readOne(options.infile.read())
    task = IcsTask(options.outdir)
    for todo in calendar.vtodo_list:
        task.to_task(todo)
|
Command line tool to convert from iCalendar to Taskwarrior
|
def _addToSegmentUpdates(self, c, i, segUpdate):
"""
Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter ``segUpdateValidDuration``.
:param c: TODO: document
:param i: TODO: document
:param segUpdate: TODO: document
"""
# Sometimes we might be passed an empty update
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
return
key = (c, i) # key = (column index, cell index in column)
# TODO: scan list of updates for that cell and consolidate?
# But watch out for dates!
if self.segmentUpdates.has_key(key):
self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]
|
Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter ``segUpdateValidDuration``.
:param c: TODO: document
:param i: TODO: document
:param segUpdate: TODO: document
|
def obj_box_zoom(
        im, classes=None, coords=None, zoom_range=(0.9,
                                                   1.1), row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
        cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
    """Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
    Objects outside the cropped image will be removed.

    Parameters
    -----------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    classes : list of int or None
        Class IDs.
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...].
    zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.
    is_rescale : boolean
        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
    is_center : boolean
        Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
    thresh_wh2 : float
        Threshold, remove the box if its ratio of width to height or vice versa higher than the threshold.

    Returns
    -------
    numpy.array
        A processed image
    list of int
        A list of classes
    list of list of 4 numbers
        A list of new bounding boxes.
    """
    if classes is None:
        classes = []
    if coords is None:
        coords = []
    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
    if is_random:
        if zoom_range[0] == 1 and zoom_range[1] == 1:
            zx, zy = 1, 1
            tl.logging.info(" random_zoom : not zoom in/out")
        else:
            # Draw independent zoom factors for the two axes.
            zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    else:
        zx, zy = zoom_range
    # tl.logging.info(zx, zy)
    # Build a 2D affine zoom, centered about the middle of the image.
    zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
    h, w = im.shape[row_index], im.shape[col_index]
    transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
    im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)

    # modified from obj_box_crop
    def _get_coord(coord):
        """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
        before getting the new coordinates.
        Boxes outsides the cropped image will be removed.
        """
        if is_center:
            coord = obj_box_coord_centroid_to_upleft(coord)

        # ======= pixel unit format and upleft, w, h ==========
        # Map the box through the inverse of the zoom: coordinates scale by
        # 1/z about the image center. NOTE(review): zx applies to rows (y) and
        # zy to columns (x) here — mirrors the zoom matrix above; confirm.
        x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2  # only change this
        y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2  # only change this
        w = coord[2] / zy  # only change this
        h = coord[3] / zx  # only change this

        # Clip boxes that partially leave the image; drop boxes fully outside.
        if x < 0:
            if x + w <= 0:
                return None
            w = w + x
            x = 0
        elif x > im_new.shape[1]:  # object outside the cropped image
            return None

        if y < 0:
            if y + h <= 0:
                return None
            h = h + y
            y = 0
        elif y > im_new.shape[0]:  # object outside the cropped image
            return None

        if (x is not None) and (x + w > im_new.shape[1]):  # box outside the cropped image
            w = im_new.shape[1] - x

        if (y is not None) and (y + h > im_new.shape[0]):  # box outside the cropped image
            h = im_new.shape[0] - y

        if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2):  # object shape strange: too narrow
            # tl.logging.info('xx', w, h)
            return None

        if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
                                                        thresh_wh):  # object shape strange: too narrow
            # tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
            return None

        coord = [x, y, w, h]

        # convert back if input format is center.
        if is_center:
            coord = obj_box_coord_upleft_to_centroid(coord)

        return coord

    coords_new = list()
    classes_new = list()
    for i, _ in enumerate(coords):
        coord = coords[i]

        if len(coord) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

        if is_rescale:
            # for scaled coord, upscaled before process and scale back in the end.
            coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
            coord = _get_coord(coord)
            if coord is not None:
                coord = obj_box_coord_rescale(coord, im_new.shape)
                coords_new.append(coord)
                classes_new.append(classes[i])
        else:
            coord = _get_coord(coord)
            if coord is not None:
                coords_new.append(coord)
                classes_new.append(classes[i])

    return im_new, classes_new, coords_new
|
Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
thresh_wh : float
Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
thresh_wh2 : float
Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
|
def concentric_circles_path(size):
    """
    Yields a set of paths that are concentric circles, moving outwards, about the center of the image.

    :param size: The (width, height) of the image
    :return: Yields individual circles, where each circle is a generator that yields pixel coordinates.
    """
    width, height = size
    center = (width // 2, height // 2)
    # A radius of sqrt(2) * the larger dimension is guaranteed to cover every
    # pixel from the center point.
    outer_radius = int(sqrt(2) * max(height, width))
    yield from fill_concentric_circles(radius=outer_radius, center=center, size=size)
|
Yields a set of paths that are concentric circles, moving outwards, about the center of the image.
:param size: The (width, height) of the image
:return: Yields individual circles, where each circle is a generator that yields pixel coordinates.
|
def getOverlayInputMethod(self, ulOverlayHandle):
    """Returns the current input settings for the specified overlay."""
    # Out-parameter filled in by the native call.
    input_method = VROverlayInputMethod()
    fn = self.function_table.getOverlayInputMethod
    error_code = fn(ulOverlayHandle, byref(input_method))
    return error_code, input_method
|
Returns the current input settings for the specified overlay.
|
def str(self, var, default=NOTSET, multiline=False):
    """
    Read *var* as a string; with ``multiline=True``, literal ``\\n`` escapes
    are expanded to real newlines.

    :rtype: str
    """
    raw = self.get_value(var, default=default)
    return raw.replace('\\n', '\n') if multiline else raw
|
:rtype: str
|
def decompose_by_component(model, observed_time_series, parameter_samples):
  """Decompose an observed time series into contributions from each component.

  This method decomposes a time series according to the posterior representation
  of a structural time series model. In particular, it:

  - Computes the posterior marginal mean and covariances over the additive
    model's latent space.
  - Decomposes the latent posterior into the marginal blocks for each
    model component.
  - Maps the per-component latent posteriors back through each component's
    observation model, to generate the time series modeled by that component.

  Args:
    model: An instance of `tfp.sts.Sum` representing a structural time series
      model.
    observed_time_series: `float` `Tensor` of shape
      `batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
      is also supported when `num_timesteps > 1`), specifying an observed time
      series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
      includes a mask `Tensor` to specify timesteps with missing observations.
    parameter_samples: Python `list` of `Tensors` representing posterior
      samples of model parameters, with shapes `[concat([
      [num_posterior_draws], param.prior.batch_shape,
      param.prior.event_shape]) for param in model.parameters]`. This may
      optionally also be a map (Python `dict`) of parameter names to
      `Tensor` values.

  Returns:
    component_dists: A `collections.OrderedDict` instance mapping
      component StructuralTimeSeries instances (elements of `model.components`)
      to `tfd.Distribution` instances representing the posterior marginal
      distributions on the process modeled by each component. Each distribution
      has batch shape matching that of `posterior_means`/`posterior_covs`, and
      event shape of `[num_timesteps]`.

  #### Examples

  Suppose we've built a model and fit it to data:

  ```python
    day_of_week = tfp.sts.Seasonal(
        num_seasons=7,
        observed_time_series=observed_time_series,
        name='day_of_week')
    local_linear_trend = tfp.sts.LocalLinearTrend(
        observed_time_series=observed_time_series,
        name='local_linear_trend')
    model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
                        observed_time_series=observed_time_series)

    num_steps_forecast = 50
    samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
  ```

  To extract the contributions of individual components, pass the time series
  and sampled parameters into `decompose_by_component`:

  ```python
    component_dists = decompose_by_component(
      model,
      observed_time_series=observed_time_series,
      parameter_samples=samples)

    # Component mean and stddev have shape `[len(observed_time_series)]`.
    day_of_week_effect_mean = component_dists[day_of_week].mean()
    day_of_week_effect_stddev = component_dists[day_of_week].stddev()
  ```

  Using the component distributions, we can visualize the uncertainty for
  each component:

  ```
  from matplotlib import pylab as plt
  num_components = len(component_dists)
  xs = np.arange(len(observed_time_series))
  fig = plt.figure(figsize=(12, 3 * num_components))
  for i, (component, component_dist) in enumerate(component_dists.items()):

    # If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
    component_mean = component_dist.mean().numpy()
    component_stddev = component_dist.stddev().numpy()

    ax = fig.add_subplot(num_components, 1, 1 + i)
    ax.plot(xs, component_mean, lw=2)
    ax.fill_between(xs,
                    component_mean - 2 * component_stddev,
                    component_mean + 2 * component_stddev,
                    alpha=0.5)
    ax.set_title(component.name)
  ```
  """
  with tf.compat.v1.name_scope('decompose_by_component',
                               values=[observed_time_series]):
    # Normalize the input to a plain series plus an optional missingness mask.
    [
        observed_time_series,
        is_missing
    ] = sts_util.canonicalize_observed_time_series_with_mask(
        observed_time_series)

    # Run smoothing over the training timesteps to extract the
    # posterior on latents.
    num_timesteps = dist_util.prefer_static_value(
        tf.shape(input=observed_time_series))[-2]
    ssm = model.make_state_space_model(num_timesteps=num_timesteps,
                                       param_vals=parameter_samples)
    posterior_means, posterior_covs = ssm.posterior_marginals(
        observed_time_series, mask=is_missing)

    return _decompose_from_posterior_marginals(
        model, posterior_means, posterior_covs, parameter_samples)
|
Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior representation
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
|
def img2img_transformer2d_n31():
  """Set of hyperparameters."""
  hparams = img2img_transformer2d_base()
  # Overrides applied on top of the 2D base configuration.
  overrides = dict(
      batch_size=1,
      num_encoder_layers=6,
      num_decoder_layers=12,
      num_heads=8,
      query_shape=(16, 32),
      memory_flange=(16, 32),
  )
  for attr_name, attr_value in overrides.items():
    setattr(hparams, attr_name, attr_value)
  return hparams
|
Set of hyperparameters.
|
def parse_media_range(range):
    """Carves up a media range and returns a tuple of the
    (type, subtype, params) where 'params' is a dictionary
    of all the parameters for the media range.
    For example, the media range 'application/*;q=0.5' would
    get parsed into:
    ('application', '*', {'q', '0.5'})
    In addition this function also guarantees that there
    is a value for 'q' in the params dictionary, filling it
    in with a proper default if necessary.

    A missing, empty, non-numeric, or out-of-range 'q' is replaced
    with the default '1'.
    """
    (type, subtype, params) = parse_mime_type(range)
    # Convert q once; a malformed value (e.g. 'q=abc') previously raised
    # ValueError — treat it like any other invalid q and fall back to '1'.
    try:
        q = float(params.get('q', 1))
    except ValueError:
        q = None
    if q is None or not params.get('q') or q > 1 or q < 0:
        params['q'] = '1'
    return (type, subtype, params)
|
Carves up a media range and returns a tuple of the
(type, subtype, params) where 'params' is a dictionary
of all the parameters for the media range.
For example, the media range 'application/*;q=0.5' would
get parsed into:
('application', '*', {'q', '0.5'})
In addition this function also guarantees that there
is a value for 'q' in the params dictionary, filling it
in with a proper default if necessary.
|
def add_node(self, info):
    """ Handles adding a Node to the graph.
    """
    if not info.initialized:
        return
    graph = self._request_graph(info.ui.control)
    if graph is None:
        return
    # Pick an ID not already used by any node in the graph.
    existing_ids = [existing.ID for existing in graph.nodes]
    new_node = Node(ID=make_unique_name("node", existing_ids))
    graph.nodes.append(new_node)
    # Let the user edit the new node; roll back if the dialog is cancelled.
    ui_result = new_node.edit_traits(parent=info.ui.control, kind="livemodal")
    if not ui_result.result:
        graph.nodes.remove(new_node)
|
Handles adding a Node to the graph.
|
def check(self, completed, failed=None):
    """check whether our dependencies have been met."""
    # No dependencies at all: trivially satisfied.
    if not len(self):
        return True
    against = completed if self.success else set()
    if self.failure and failed is not None:
        against = against.union(failed)
    # 'all' requires every dependency satisfied; otherwise any one suffices.
    return self.issubset(against) if self.all else not self.isdisjoint(against)
|
check whether our dependencies have been met.
|
def init():
    """
    Initializes the GLFW library.

    Wrapper for:
        int glfwInit(void);

    Returns the integer result of glfwInit (nonzero on success).
    """
    # Save and restore the working directory around the native call —
    # presumably because glfwInit can change the process cwd on some
    # platforms (known macOS behavior); TODO confirm against upstream GLFW.
    cwd = _getcwd()
    res = _glfw.glfwInit()
    os.chdir(cwd)
    return res
|
Initializes the GLFW library.
Wrapper for:
int glfwInit(void);
|
def _get_struct_dropshadowfilter(self):
    """Get the values for the DROPSHADOWFILTER record."""
    # NOTE: each call below consumes bytes from self._src, so the read
    # order must exactly match the SWF record layout — do not reorder.
    obj = _make_object("DropShadowFilter")
    obj.DropShadowColor = self._get_struct_rgba()
    obj.BlurX = unpack_fixed16(self._src)
    obj.BlurY = unpack_fixed16(self._src)
    obj.Angle = unpack_fixed16(self._src)
    obj.Distance = unpack_fixed16(self._src)
    obj.Strength = unpack_fixed8(self._src)
    # Trailing byte is bit-packed: three 1-bit flags plus a 5-bit pass count.
    bc = BitConsumer(self._src)
    obj.InnerShadow = bc.u_get(1)
    obj.Knockout = bc.u_get(1)
    obj.CompositeSource = bc.u_get(1)
    obj.Passes = bc.u_get(5)
    return obj
|
Get the values for the DROPSHADOWFILTER record.
|
def setattr(self, name, val):
    """
    Change the attribute value of the UI element. Not all attributes can be casted to text. If changing the
    immutable attributes or attributes which do not exist, the InvalidOperationException exception is raised.

    Args:
        name: attribute name
        val: new attribute value to cast

    Raises:
        InvalidOperationException: when it fails to set the attribute on UI element
    """
    matched_nodes = self._do_query(multiple=False)
    try:
        return self.poco.agent.hierarchy.setAttr(matched_nodes, name, val)
    except UnableToSetAttributeException as e:
        # Surface the agent-side failure as the public exception type.
        raise InvalidOperationException('"{}" of "{}"'.format(str(e), self))
|
Change the attribute value of the UI element. Not all attributes can be casted to text. If changing the
immutable attributes or attributes which do not exist, the InvalidOperationException exception is raised.
Args:
name: attribute name
val: new attribute value to cast
Raises:
InvalidOperationException: when it fails to set the attribute on UI element
|
def _set_lldp(self, v, load=False):
    """
    Setter method for lldp, mapped from YANG variable /protocol/lldp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lldp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lldp() directly.

    :param v: value to assign to the lldp container
    :param load: presumably True when populating from stored config rather
        than user input — TODO confirm (unused in this generated body)
    :raises ValueError: if ``v`` cannot be coerced to the lldp container type
    """
    # Unwrap pyangbind's union-typed values before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value with the full YANG metadata; this validates type
        # and structure as a side effect.
        t = YANGDynClass(v,base=lldp.lldp, is_container='container', presence=True, yang_name="lldp", rest_name="lldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link Layer Discovery Protocol(LLDP)', u'callpoint': u'lldp_global_conf', u'sort-priority': u'49', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'conf-lldp'}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """lldp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=lldp.lldp, is_container='container', presence=True, yang_name="lldp", rest_name="lldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link Layer Discovery Protocol(LLDP)', u'callpoint': u'lldp_global_conf', u'sort-priority': u'49', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'conf-lldp'}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='container', is_config=True)""",
        })
    self.__lldp = t
    # Notify the parent object, if it tracks modifications.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for lldp, mapped from YANG variable /protocol/lldp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lldp() directly.
|
def list_overlay_names(self):
    """Return list of overlay names.

    An overlay name is the file name of an entry in the overlays
    directory with its extension stripped.
    """
    # Comprehension replaces the manual append loop; the discarded
    # extension no longer needs a throwaway variable.
    return [os.path.splitext(fname)[0]
            for fname in _ls(self._overlays_abspath)]
|
Return list of overlay names.
|
def read_client_secrets():
    '''for private or protected registries, a client secrets file is required
       to be located at .sregistry. If no secrets are found, we use default
       of Singularity Hub, and return a dummy secrets.
    '''
    client_secrets = _default_client_secrets()

    # Look for a secrets file (settings/environment decide its location).
    secrets = get_secrets_file()

    if secrets is None:
        # No file yet: persist the defaults so one exists next time.
        from sregistry.defaults import SREGISTRY_CLIENT_SECRETS
        write_json(client_secrets, SREGISTRY_CLIENT_SECRETS)
    else:
        client_secrets = read_json(secrets)

    return client_secrets
|
for private or protected registries, a client secrets file is required
to be located at .sregistry. If no secrets are found, we use default
of Singularity Hub, and return a dummy secrets.
|
def infos(self, type=None, failed=False):
    """Get all infos created by the participants nodes.

    Return a list of infos produced by nodes associated with the
    participant. If specified, ``type`` filters by class. By default, failed
    infos are excluded, to include only failed infos use ``failed=True``,
    for all infos use ``failed=all``. Note that failed filters the infos,
    not the nodes - infos from all nodes (whether failed or not) can be
    returned.
    """
    # Flatten the per-node info lists into a single list.
    return [
        info
        for node in self.nodes(failed="all")
        for info in node.infos(type=type, failed=failed)
    ]
|
Get all infos created by the participants nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned.
|
def build_message(self, checker):
    """Builds the checker's error message to report"""
    # Append the suggested fix only when solutions are enabled.
    if self.with_solutions:
        solution = ' (%s)' % checker.solution
    else:
        solution = ''
    return '{} {}{}'.format(checker.code, checker.msg, solution)
|
Builds the checker's error message to report
|
def filter_composite_from_subgroups(s):
    """
    Given a sorted list of subgroups, return a string appropriate to provide as
    a composite track's `filterComposite` argument

    >>> import trackhub
    >>> trackhub.helpers.filter_composite_from_subgroups(['cell', 'ab', 'lab', 'knockdown'])
    'dimA dimB'

    Parameters
    ----------
    s : list
        A list representing the ordered subgroups, ideally the same list
        provided to `dimensions_from_subgroups`. The values are not actually
        used, just the number of items.

    Returns ``None`` when fewer than three subgroups are supplied (the first
    two subgroups are reserved for the X and Y dimensions).
    """
    # BUGFIX: the previous alphabet literal was missing 'X' and 'Y'
    # ('...UVWZ'), so dims beyond the 22nd subgroup were mislabeled.
    dims = [
        'dim{0}'.format(letter)
        for letter, _ in zip('ABCDEFGHIJKLMNOPQRSTUVWXYZ', s[2:])
    ]
    if dims:
        return ' '.join(dims)
|
Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `filterComposite` argument
>>> import trackhub
>>> trackhub.helpers.filter_composite_from_subgroups(['cell', 'ab', 'lab', 'knockdown'])
'dimA dimB'
Parameters
----------
s : list
A list representing the ordered subgroups, ideally the same list
provided to `dimensions_from_subgroups`. The values are not actually
used, just the number of items.
|
def status_counter(self):
    """
    Returns a `Counter` object that counts the number of task with
    given status (use the string representation of the status as key).
    """
    # Counter consumes the generator directly — no manual accumulation.
    return collections.Counter(str(task.status) for task in self)
|
Returns a `Counter` object that counts the number of task with
given status (use the string representation of the status as key).
|
def get_reference_line_numeration_marker_patterns(prefix=u''):
    """Return a list of compiled regex patterns used to search for the marker.

    Marker of a reference line in a full-text document.

    :param prefix: (string) the possible prefix to a reference line
    :return: (list) of compiled regex patterns.
    """
    title = u""
    # NOTE: Python 2 style check — accepts both byte and unicode strings.
    if type(prefix) in (str, unicode):
        title = prefix
    # Every pattern captures the whole marker in a named group 'mark';
    # numeric markers additionally capture the number as 'marknum'.
    g_name = u'(?P<mark>'
    g_close = u')'
    space = r'\s*'
    patterns = [
        # [1]
        space + title + g_name + r'\[\s*(?P<marknum>\d+)\s*\]' + g_close,
        # [letters and/or numbers], e.g. [Ch3], [A:2]
        space + title + g_name + r'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close,  # noqa
        # {1}
        space + title + g_name + r'\{\s*(?P<marknum>\d+)\s*\}' + g_close,
        # <1>
        space + title + g_name + r'\<\s*(?P<marknum>\d+)\s*\>' + g_close,
        # (1)
        space + title + g_name + r'\(\s*(?P<marknum>\d+)\s*\)' + g_close,
        # 1. (but not a decimal such as 1.2)
        space + title + g_name + r'(?P<marknum>\d+)\s*\.(?!\d)' + g_close,
        # 1 followed by whitespace
        space + title + g_name + r'(?P<marknum>\d+)\s+' + g_close,
        # 1]
        space + title + g_name + r'(?P<marknum>\d+)\s*\]' + g_close,
        # 1}
        space + title + g_name + r'(?P<marknum>\d+)\s*\}' + g_close,
        # 1)
        space + title + g_name + r'(?P<marknum>\d+)\s*\)' + g_close,
        # 1>
        space + title + g_name + r'(?P<marknum>\d+)\s*\>' + g_close,
        # [1.1]
        space + title + g_name + r'\[\s*\d+\.\d+\s*\]' + g_close,
        # [ ]
        space + title + g_name + r'\[\s*\]' + g_close,
        # *
        space + title + g_name + r'\*' + g_close,
    ]
    return [re.compile(p, re.I | re.UNICODE) for p in patterns]
|
Return a list of compiled regex patterns used to search for the marker.
Marker of a reference line in a full-text document.
:param prefix: (string) the possible prefix to a reference line
:return: (list) of compiled regex patterns.
|
def gather_dilated_memory_blocks(x,
                                 num_memory_blocks,
                                 gap_size,
                                 query_block_size,
                                 memory_block_size,
                                 gather_indices,
                                 direction="left"):
  """Gathers blocks with gaps in between.

  Args:
    x: Tensor of shape [length, batch, heads, depth]
    num_memory_blocks: how many memory blocks to look in "direction". Each will
      be separated by gap_size.
    gap_size: an integer indicating the gap size
    query_block_size: an integer indicating size of query block
    memory_block_size: an integer indicating the size of a memory block.
    gather_indices: The indices to gather from.
    direction: left or right

  Returns:
    Tensor of shape [batch, heads, blocks, block_length, depth]
  """
  gathered_blocks = []
  # gathering memory blocks
  for block_id in range(num_memory_blocks):
    # For "left", slice bounds are computed backwards from the end of the
    # sequence (negative indices), skipping the query block plus the
    # accumulated gaps and earlier memory blocks.
    block_end_index = -(query_block_size + gap_size *
                        (block_id + 1) + memory_block_size * block_id)
    block_start_index = (
        (memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1)))
    if direction != "left":
      # Mirror the bounds to gather on the right side instead.
      [block_end_index,
       block_start_index] = [-block_start_index, -block_end_index]
    if block_end_index == 0:
      # A literal 0 end index would yield an empty slice; slice to the end.
      x_block = x[block_start_index:]
    else:
      x_block = x[block_start_index:block_end_index]

    def gather_dilated_1d_blocks(x, gather_indices):
      x_new = tf.gather(x, gather_indices)
      # [batch, heads, blocks, block_length, dim]
      return tf.transpose(x_new, [2, 3, 0, 1, 4])

    gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices))
  # Concatenate the per-block gathers along the block_length axis.
  return tf.concat(gathered_blocks, 3)
|
Gathers blocks with gaps in between.
Args:
x: Tensor of shape [length, batch, heads, depth]
num_memory_blocks: how many memory blocks to look in "direction". Each will
be separated by gap_size.
gap_size: an integer indicating the gap size
query_block_size: an integer indicating size of query block
memory_block_size: an integer indicating the size of a memory block.
gather_indices: The indices to gather from.
direction: left or right
Returns:
Tensor of shape [batch, heads, blocks, block_length, depth]
|
def Nu_cylinder_Zukauskas(Re, Pr, Prw=None):
    r'''Calculates Nusselt number for crossflow across a single tube at a
    specified Re. Method from [1]_, also shown without modification in [2]_.

    .. math::
        Nu_{D}=CRe^{m}Pr^{n}\left(\frac{Pr}{Pr_s}\right)^{1/4}

    Parameters
    ----------
    Re : float
        Reynolds number with respect to cylinder diameter, [-]
    Pr : float
        Prandtl number at free stream temperature [-]
    Prw : float, optional
        Prandtl number at wall temperature, [-]

    Returns
    -------
    Nu : float
        Nusselt number with respect to cylinder diameter, [-]

    Notes
    -----
    If Prandtl number at wall are not provided, the Prandtl number correction
    is not used and left to an outside function.

    n is 0.37 if Pr <= 10; otherwise n is 0.36.

    C and m are from the following table. If Re is outside of the ranges shown,
    the nearest range is used blindly.

    +---------+-------+-----+
    | Re      | C     | m   |
    +=========+=======+=====+
    | 1-40    | 0.75  | 0.4 |
    +---------+-------+-----+
    | 40-1E3  | 0.51  | 0.5 |
    +---------+-------+-----+
    | 1E3-2E5 | 0.26  | 0.6 |
    +---------+-------+-----+
    | 2E5-1E6 | 0.076 | 0.7 |
    +---------+-------+-----+

    Examples
    --------
    Example 7.3 in [2]_, matches.

    >>> Nu_cylinder_Zukauskas(7992, 0.707, 0.69)
    50.523612661934386

    References
    ----------
    .. [1] Zukauskas, A. Heat transfer from tubes in crossflow. In T.F. Irvine,
       Jr. and J. P. Hartnett, editors, Advances in Heat Transfer, volume 8,
       pages 93-160. Academic Press, Inc., New York, 1972.
    .. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    # Correlation coefficients (C, m) selected by Reynolds-number range;
    # the lowest band uses an inclusive upper bound, the rest exclusive.
    if Re <= 40:
        coeff, m_exp = 0.75, 0.4
    else:
        for upper_bound, c_i, m_i in ((1E3, 0.51, 0.5),
                                      (2E5, 0.26, 0.6),
                                      (None, 0.076, 0.7)):
            if upper_bound is None or Re < upper_bound:
                coeff, m_exp = c_i, m_i
                break
    n_exp = 0.37 if Pr <= 10 else 0.36
    result = coeff*Re**m_exp*Pr**n_exp
    # Wall-temperature Prandtl correction, only when Prw is supplied.
    if Prw:
        result *= (Pr/Prw)**0.25
    return result
|
r'''Calculates Nusselt number for crossflow across a single tube at a
specified Re. Method from [1]_, also shown without modification in [2]_.
.. math::
Nu_{D}=CRe^{m}Pr^{n}\left(\frac{Pr}{Pr_s}\right)^{1/4}
Parameters
----------
Re : float
Reynolds number with respect to cylinder diameter, [-]
Pr : float
Prandtl number at free stream temperature [-]
Prw : float, optional
Prandtl number at wall temperature, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
If Prandtl number at wall are not provided, the Prandtl number correction
is not used and left to an outside function.
n is 0.37 if Pr <= 10; otherwise n is 0.36.
C and m are from the following table. If Re is outside of the ranges shown,
the nearest range is used blindly.
+---------+-------+-----+
| Re | C | m |
+=========+=======+=====+
| 1-40 | 0.75 | 0.4 |
+---------+-------+-----+
| 40-1E3 | 0.51 | 0.5 |
+---------+-------+-----+
| 1E3-2E5 | 0.26 | 0.6 |
+---------+-------+-----+
| 2E5-1E6 | 0.076 | 0.7 |
+---------+-------+-----+
Examples
--------
Example 7.3 in [2]_, matches.
>>> Nu_cylinder_Zukauskas(7992, 0.707, 0.69)
50.523612661934386
References
----------
.. [1] Zukauskas, A. Heat transfer from tubes in crossflow. In T.F. Irvine,
Jr. and J. P. Hartnett, editors, Advances in Heat Transfer, volume 8,
pages 93-160. Academic Press, Inc., New York, 1972.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
|
def sources(self):
    """
    Returns a dictionary of source methods found on this object,
    keyed on method name. Source methods are identified by
    (self, context) arguments on this object. For example:

    .. code-block:: python

        def f(self, context):
            ...

    is a source method, but

    .. code-block:: python

        def f(self, ctx):
            ...

    is not.
    """
    # Lazily compute and cache the result on first access.
    if not hasattr(self, "_sources"):
        self._sources = find_sources(self)
    return self._sources
|
Returns a dictionary of source methods found on this object,
keyed on method name. Source methods are identified by
(self, context) arguments on this object. For example:
.. code-block:: python
def f(self, context):
...
is a source method, but
.. code-block:: python
def f(self, ctx):
...
is not.
|
def variations(iterable, optional=lambda x: False):
    """ Returns all possible variations of a sequence with optional items.

    Each item for which ``optional(item)`` is true may either be kept or
    dropped; every combination of those choices (with all mandatory items
    always kept) is produced. Results are unique tuples, longest first.

    :param iterable: the sequence of items.
    :param optional: predicate marking items that may be omitted.
    :return: list of tuples, sorted longest-first.
    """
    # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?"))
    # defines a sequence where constraint A and B are optional:
    # [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C",)]
    items = tuple(iterable)
    # Create a boolean mask where True means optional:
    # ("A?", "B?", "C") => [True, True, False]
    mask = [optional(x) for x in items]
    # Enumerate every keep/drop choice for the optional positions and map
    # each choice back onto the full sequence (mandatory items never drop).
    result = set()
    for choice in product([False, True], repeat=sum(mask)):
        choice = list(choice)
        drop = [is_optional and choice.pop(0) for is_optional in mask]
        result.add(tuple(items[i] for i in range(len(drop)) if not drop[i]))
    # Longest-first. The original used Python-2-only xrange and
    # sorted(cmp=...); key=len with reverse=True is the equivalent
    # comparator (it only ever compared lengths) and works on Python 2 and 3.
    return sorted(result, key=len, reverse=True)
|
Returns all possible variations of a sequence with optional items.
|
def update_Broyden_J(self):
    """Apply a rank-1 Broyden update to the Jacobian J.

    Uses the change in parameter values and residuals since the last
    evaluation to update ``self.J`` via ``_rank_1_J_update``, then
    recomputes the cached ``self.JTJ`` product.
    """
    CLOG.debug('Broyden update.')
    step = self.param_vals - self._last_vals
    residual_change = self.calc_residuals() - self._last_residuals
    # Normalize the step to get a unit direction and the matching
    # per-unit-step change in residuals.
    step_norm = np.sqrt(np.dot(step, step))
    self._rank_1_J_update(step / step_norm, residual_change / step_norm)
    self.JTJ = np.dot(self.J, self.J.T)
|
Execute a Broyden update of J
|
def start(self):
    """
    Confirm that we may access the target cluster.

    Queries the RAPI version (which must be 2) and the optional feature
    list, storing them on the instance as ``self.version`` and
    ``self.features``.

    :raises GanetiApiError: if the remote RAPI version is not 2.
    """
    version = self.request("get", "/version")
    if version != 2:
        raise GanetiApiError("Can't work with Ganeti RAPI version %d" %
                             version)
    # Lazy %-style logging args: formatting is skipped when INFO is off.
    logging.info("Accessing Ganeti RAPI, version %d", version)
    self.version = version

    try:
        features = self.request("get", "/2/features")
    # "except E as e" (valid since Python 2.6) replaces the Python-2-only
    # "except E, e" form, which is a SyntaxError under Python 3.
    except NotOkayError as noe:
        if noe.code == 404:
            # Okay, let's calm down, this is totally reasonable. Certain
            # older Ganeti RAPIs don't have a list of features.
            features = []
        else:
            # No, wait, panic was the correct thing to do.
            raise

    logging.info("RAPI features: %r", features)
    self.features = features
|
Confirm that we may access the target cluster.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.