| code | docstring |
|---|---|
def main(argv=None):
"""Entry point for the `simpl` command."""
#
# `simpl server`
#
logging.basicConfig(level=logging.INFO)
server_func = functools.partial(server.main, argv=argv)
server_parser = server.attach_parser(default_subparser())
server_parser.set_defaults(_func=server_func)
# the following code shouldn't need to change when
# we add a new subcommand.
args = default_parser().parse_args(argv)
args._func()
|
Entry point for the `simpl` command.
|
def _patch_prebuild(cls):
"""Patch a setuptools command to depend on `prebuild`"""
orig_run = cls.run
def new_run(self):
self.run_command("prebuild")
orig_run(self)
cls.run = new_run
|
Patch a setuptools command to depend on `prebuild`
|
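A minimal usage sketch (an assumption, not from the source): patching setuptools' build_py so that running it triggers a custom "prebuild" command first; it presumes `_patch_prebuild` above is in scope and that a "prebuild" cmdclass is registered in setup().
from setuptools.command.build_py import build_py

# Hypothetical illustration: after patching, `python setup.py build_py` runs "prebuild" first.
_patch_prebuild(build_py)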
def _dcm_to_q(self, dcm):
"""
Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
:returns: quaternion array
"""
assert(dcm.shape == (3, 3))
q = np.zeros(4)
tr = np.trace(dcm)
if tr > 0:
s = np.sqrt(tr + 1.0)
q[0] = s * 0.5
s = 0.5 / s
q[1] = (dcm[2][1] - dcm[1][2]) * s
q[2] = (dcm[0][2] - dcm[2][0]) * s
q[3] = (dcm[1][0] - dcm[0][1]) * s
else:
dcm_i = np.argmax(np.diag(dcm))
dcm_j = (dcm_i + 1) % 3
dcm_k = (dcm_i + 2) % 3
s = np.sqrt((dcm[dcm_i][dcm_i] - dcm[dcm_j][dcm_j] -
dcm[dcm_k][dcm_k]) + 1.0)
q[dcm_i + 1] = s * 0.5
s = 0.5 / s
q[dcm_j + 1] = (dcm[dcm_i][dcm_j] + dcm[dcm_j][dcm_i]) * s
q[dcm_k + 1] = (dcm[dcm_k][dcm_i] + dcm[dcm_i][dcm_k]) * s
q[0] = (dcm[dcm_k][dcm_j] - dcm[dcm_j][dcm_k]) * s
return q
|
Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
:returns: quaternion array
|
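A standalone sanity check of the trace > 0 branch (illustrative only, using NumPy): the identity DCM should map to the identity quaternion [1, 0, 0, 0].
import numpy as np

dcm = np.eye(3)                      # identity rotation
q = np.zeros(4)
s = np.sqrt(np.trace(dcm) + 1.0)     # tr > 0 branch
q[0] = 0.5 * s
s = 0.5 / s
q[1] = (dcm[2][1] - dcm[1][2]) * s
q[2] = (dcm[0][2] - dcm[2][0]) * s
q[3] = (dcm[1][0] - dcm[0][1]) * s
print(q)                             # -> [1. 0. 0. 0.]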
def error(message, *args, **kwargs):
"""
write a message to stderr
"""
if 'end' in kwargs:
end = kwargs['end']
else:
end = '\n'
if len(args) == 0:
sys.stderr.write(message)
else:
sys.stderr.write(message % args)
sys.stderr.write(end)
sys.stderr.flush()
|
write a message to stderr
|
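Illustrative calls (assuming the `error` helper above, and its `sys` import, are in scope):
error("could not open %s", "config.yaml")   # printf-style formatting via *args
error("working...", end='')                 # keyword 'end' suppresses the trailing newline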
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the encoding of the LongInteger from the input stream.
Args:
istream (stream): A buffer containing the encoded bytes of a
LongInteger. Usually a BytearrayStream object. Required.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidPrimitiveLength: if the long integer encoding read in has
an invalid encoded length.
"""
super(LongInteger, self).read(istream, kmip_version=kmip_version)
if self.length != LongInteger.LENGTH:
raise exceptions.InvalidPrimitiveLength(
"invalid long integer length read; "
"expected: {0}, observed: {1}".format(
LongInteger.LENGTH, self.length))
self.value = unpack('!q', istream.read(self.length))[0]
self.validate()
|
Read the encoding of the LongInteger from the input stream.
Args:
istream (stream): A buffer containing the encoded bytes of a
LongInteger. Usually a BytearrayStream object. Required.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidPrimitiveLength: if the long integer encoding read in has
an invalid encoded length.
|
def do_repl(self):
"""REPL for rTorrent XMLRPC commands."""
from prompt_toolkit import prompt
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.contrib.completers import WordCompleter
self.options.quiet = False
proxy = self.open()
ps1 = proxy.session.name() + u'> '
words = ['help', 'stats', 'exit']
words += [x + '=' for x in proxy.system.listMethods()]
history_file = os.path.join(config.config_dir, '.rtxmlrpc_history')
while True:
try:
try:
cmd = prompt(ps1, completer=WordCompleter(words),
auto_suggest=AutoSuggestFromHistory(),
history=FileHistory(history_file))
except KeyboardInterrupt:
cmd = ''
if not cmd:
print("Enter '?' or 'help' for usage information, 'Ctrl-D' to exit.")
if cmd in {'?', 'help'}:
self.repl_usage()
continue
elif cmd in {'', 'stats'}:
print(repr(proxy).split(None, 1)[1])
continue
elif cmd in {'exit'}:
raise EOFError()
try:
method, raw_args = cmd.split('=', 1)
except ValueError:
print("ERROR: '=' not found")
continue
raw_args = raw_args.split(',')
args = self.cooked(raw_args)
self.execute(proxy, method, args)
except EOFError:
print('Bye from {!r}'.format(proxy))
break
|
REPL for rTorrent XMLRPC commands.
|
def add_virtual_columns_proper_motion_gal2eq(self, long_in="ra", lat_in="dec", pm_long="pm_l", pm_lat="pm_b", pm_long_out="pm_ra", pm_lat_out="pm_dec",
name_prefix="__proper_motion_gal2eq",
right_ascension_galactic_pole=192.85,
declination_galactic_pole=27.12,
propagate_uncertainties=False,
radians=False):
"""Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`
"""
kwargs = dict(**locals())
kwargs.pop('self')
kwargs['inverse'] = True
self.add_virtual_columns_proper_motion_eq2gal(**kwargs)
|
Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`
|
def run_cli(
executable,
mets_url=None,
resolver=None,
workspace=None,
page_id=None,
log_level=None,
input_file_grp=None,
output_file_grp=None,
parameter=None,
working_dir=None,
):
"""
Create a workspace for mets_url and run MP CLI through it
"""
workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
args = [executable, '--working-dir', workspace.directory]
args += ['--mets', mets_url]
if log_level:
args += ['--log-level', log_level]
if page_id:
args += ['--page-id', page_id]
if input_file_grp:
args += ['--input-file-grp', input_file_grp]
if output_file_grp:
args += ['--output-file-grp', output_file_grp]
if parameter:
args += ['--parameter', parameter]
log.debug("Running subprocess '%s'", ' '.join(args))
return subprocess.call(args)
|
Create a workspace for mets_url and run MP CLI through it
|
def transformByDistance(wV, subModel, alphabetSize=4):
"""
transform wV by given substitution matrix
"""
nc = [0.0]*alphabetSize
for i in xrange(0, alphabetSize):
j = wV[i]
k = subModel[i]
for l in xrange(0, alphabetSize):
nc[l] += j * k[l]
return nc
|
transform wV by given substitution matrix
|
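The nested loop above is a vector-matrix product, nc[l] = sum_i wV[i] * subModel[i][l]. A small NumPy check with a made-up uniform substitution matrix (values are illustrative only):
import numpy as np

wV = [0.1, 0.2, 0.3, 0.4]                 # nucleotide weight vector
subModel = np.full((4, 4), 0.25)          # toy substitution matrix, rows sum to 1
nc = np.asarray(wV) @ subModel
print(nc)                                 # -> [0.25 0.25 0.25 0.25]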
def array_to_image(
arr, mask=None, img_format="png", color_map=None, **creation_options
):
"""
Translate numpy ndarray to image buffer using GDAL.
Usage
-----
tile, mask = rio_tiler.utils.tile_read(......)
with open('test.jpg', 'wb') as f:
f.write(array_to_image(tile, mask, img_format="jpeg"))
Attributes
----------
arr : numpy ndarray
Image array to encode.
mask: numpy ndarray, optional
Mask array
img_format: str, optional
Image format to return (default: 'png').
List of supported format by GDAL: https://www.gdal.org/formats_list.html
color_map: numpy.ndarray or dict, optional
color_map can be either a (256, 3) array or RGB triplet
(e.g. [[255, 255, 255],...]) mapping each 1D pixel value rescaled
from 0 to 255
OR
it can be a dictionary of discrete values
(e.g. { 1.3: [255, 255, 255], 2.5: [255, 0, 0]}) mapping any pixel value to a triplet
creation_options: dict, optional
Image driver creation options to pass to GDAL
Returns
-------
bytes
"""
img_format = img_format.lower()
if len(arr.shape) < 3:
arr = np.expand_dims(arr, axis=0)
if color_map is not None and isinstance(color_map, dict):
arr = _apply_discrete_colormap(arr, color_map)
elif color_map is not None:
arr = np.transpose(color_map[arr][0], [2, 0, 1]).astype(np.uint8)
# WEBP doesn't support 1band dataset so we must hack to create a RGB dataset
if img_format == "webp" and arr.shape[0] == 1:
arr = np.repeat(arr, 3, axis=0)
if mask is not None and img_format != "jpeg":
nbands = arr.shape[0] + 1
else:
nbands = arr.shape[0]
output_profile = dict(
driver=img_format,
dtype=arr.dtype,
count=nbands,
height=arr.shape[1],
width=arr.shape[2],
)
output_profile.update(creation_options)
with MemoryFile() as memfile:
with memfile.open(**output_profile) as dst:
dst.write(arr, indexes=list(range(1, arr.shape[0] + 1)))
# Use Mask as an alpha band
if mask is not None and img_format != "jpeg":
dst.write(mask.astype(arr.dtype), indexes=nbands)
return memfile.read()
|
Translate numpy ndarray to image buffer using GDAL.
Usage
-----
tile, mask = rio_tiler.utils.tile_read(......)
with open('test.jpg', 'wb') as f:
f.write(array_to_image(tile, mask, img_format="jpeg"))
Attributes
----------
arr : numpy ndarray
Image array to encode.
mask: numpy ndarray, optional
Mask array
img_format: str, optional
Image format to return (default: 'png').
List of supported format by GDAL: https://www.gdal.org/formats_list.html
color_map: numpy.ndarray or dict, optional
color_map can be either a (256, 3) array or RGB triplet
(e.g. [[255, 255, 255],...]) mapping each 1D pixel value rescaled
from 0 to 255
OR
it can be a dictionary of discrete values
(e.g. { 1.3: [255, 255, 255], 2.5: [255, 0, 0]}) mapping any pixel value to a triplet
creation_options: dict, optional
Image driver creation options to pass to GDAL
Returns
-------
bytes
|
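A hedged usage sketch (assuming `array_to_image` above is importable and rasterio is installed; the array sizes and file name are arbitrary):
import numpy as np

tile = np.random.randint(0, 255, size=(3, 256, 256), dtype=np.uint8)  # 3-band image
mask = np.full((256, 256), 255, dtype=np.uint8)                       # fully opaque alpha
png_bytes = array_to_image(tile, mask, img_format="png")
with open("tile.png", "wb") as f:
    f.write(png_bytes)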
def _Import(self, t):
""" Handle "import xyz.foo".
"""
self._fill("import ")
for i, (name,asname) in enumerate(t.names):
if i != 0:
self._write(", ")
self._write(name)
if asname is not None:
self._write(" as "+asname)
|
Handle "import xyz.foo".
|
def stop_apppool(name):
'''
Stop an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to stop.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.stop_apppool name='MyTestPool'
'''
ps_cmd = ['Stop-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
|
Stop an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to stop.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.stop_apppool name='MyTestPool'
|
def register(cls, associations, backend, style_aliases={}):
"""
Register the supplied dictionary of associations between
elements and plotting classes to the specified backend.
"""
if backend not in cls.registry:
cls.registry[backend] = {}
cls.registry[backend].update(associations)
groups = Options._option_groups
if backend not in cls._options:
cls._options[backend] = OptionTree([], groups=groups)
if backend not in cls._custom_options:
cls._custom_options[backend] = {}
for view_class, plot in cls.registry[backend].items():
expanded_opts = [opt for key in plot.style_opts
for opt in style_aliases.get(key, [])]
style_opts = sorted(set(opt for opt in (expanded_opts + plot.style_opts)
if opt not in plot._disabled_opts))
plot_opts = [k for k in plot.params().keys() if k not in ['name']]
with param.logging_level('CRITICAL'):
plot.style_opts = style_opts
plot_opts = Keywords(plot_opts, target=view_class.__name__)
style_opts = Keywords(style_opts, target=view_class.__name__)
opt_groups = {'plot': Options(allowed_keywords=plot_opts),
'output': Options(allowed_keywords=Options._output_allowed_kws),
'style': Options(allowed_keywords=style_opts),
'norm': Options(framewise=False, axiswise=False,
allowed_keywords=['framewise',
'axiswise'])}
name = view_class.__name__
cls._options[backend][name] = opt_groups
|
Register the supplied dictionary of associations between
elements and plotting classes to the specified backend.
|
def spev(t_int, C, deg, x, cov_C=None, M_spline=False, I_spline=False, n=0):
"""Evaluate a B-, M- or I-spline with the specified internal knots, order and coefficients.
`deg` boundary knots are appended at both sides of the domain.
The zeroth order basis functions are modified to ensure continuity at the
right-hand boundary.
Note that the I-splines include the :math:`i=0` case in order to have a "DC
offset". This way your functions do not have to start at zero. If you want
to not include this, simply set the first coefficient in `C` to zero.
Parameters
----------
t_int : array of float, (`M`,)
The internal knot locations. Must be monotonic (this is NOT checked).
C : array of float, (`M + deg - 1`,)
The coefficients applied to the basis functions.
deg : nonnegative int
The polynomial degree to use.
x : array of float, (`N`,)
The locations to evaluate the spline at.
cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
The covariance matrix of the coefficients. If a 1d array is passed, this
is treated as the variance. If None, then the uncertainty is not
computed.
M_spline : bool, optional
If True, compute the M-spline instead of the B-spline. M-splines are
normalized to integrate to unity, as opposed to B-splines which sum to
unity at all points. Default is False (compute B-spline).
I_spline : bool, optional
If True, compute the I-spline instead of the B-spline. Note that this
will override `M_spline`. I-splines are the integrals of the M-splines,
and hence ensure curves are monotonic if all coefficients are of the
same sign. Note that the I-splines returned will be of polynomial degree
`deg` (i.e., the integral of what is returned from calling the function
with `deg=deg-1` and `M_spline=True`). Default is False (compute B-spline
or M-spline).
n : int, optional
The derivative order to compute. Default is 0. If `n>d`, all zeros are
returned (i.e., the discontinuities are not included).
Returns
-------
`y` or (`y`, `cov_y`): The values (and possibly uncertainties) of the spline
at the specified locations.
"""
C = scipy.asarray(C, dtype=float)
t_int = scipy.asarray(t_int, dtype=float)
if (t_int != scipy.sort(t_int)).any():
raise ValueError("Knots must be in increasing order!")
# if len(scipy.unique(t_int)) != len(t_int):
# raise ValueError("Knots must be unique!")
if n > deg:
return scipy.zeros_like(x, dtype=float)
if I_spline:
# I_{i,k} = int_L^x M_{i,k}(u)du, so just take the derivative of the
# underlying M-spline. Discarding the first coefficient dumps the "DC
# offset" term.
if cov_C is not None:
cov_C = scipy.asarray(cov_C)
if cov_C.ndim == 1:
cov_C = cov_C[1:]
elif cov_C.ndim == 2:
cov_C = cov_C[1:, 1:]
if n > 0:
return spev(
t_int, C[1:], deg - 1, x,
cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
)
M_spline = True
if n > 0:
if M_spline:
t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
C = (deg + 1.0) * (
C[1:] / (t[deg + 2:len(t_int) + 2 * deg] - t[1:len(t_int) + deg - 1]) -
C[:-1] / (t[deg + 1:len(t_int) + 2 * deg - 1] - t[:len(t_int) + deg - 2])
)
else:
C = C[1:] - C[:-1]
return spev(
t_int, C, deg - 1, x,
cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
)
if len(C) != len(t_int) + deg - 1:
raise ValueError("Length of C must be equal to M + deg - 1!")
# Append the external knots directly at the boundary:
t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
# Compute the different orders:
B = scipy.zeros((deg + 1, len(t) - 1, len(x)))
# NOTE: The first dimension is indexed by deg, and is zero-indexed.
# Zeroth order: constant function
d = 0
for i in xrange(deg, deg + len(t_int) - 2 + 1):
# The second condition contains a hack to make the basis functions
# continuous at the right-hand edge.
mask = (t[i] <= x) & (
(x < t[i + 1]) | ((i == deg + len(t_int) - 2) & (x == t[-1]))
)
B[d, i, mask] = 1.0 / (t[i + 1] - t[i]) if M_spline else 1.0
# Loop over other orders:
for d in xrange(1, deg + 1):
for i in xrange(deg - d, deg + len(t_int) - 2 + 1):
if t[i + d] != t[i]:
v = (x - t[i]) * B[d - 1, i, :]
if not M_spline:
v /= t[i + d] - t[i]
B[d, i, :] += v
if t[i + d + 1] != t[i + 1]:
v = (t[i + d + 1] - x) * B[d - 1, i + 1, :]
if not M_spline:
v /= t[i + d + 1] - t[i + 1]
B[d, i, :] += v
if M_spline and ((t[i + d] != t[i]) or (t[i + d + 1] != t[i + 1])):
B[d, i, :] *= (d + 1) / (d * (t[i + d + 1] - t[i]))
B = B[deg, 0:len(C), :].T
# Now compute the I-splines, if needed:
if I_spline:
I = scipy.zeros_like(B)
for i in xrange(0, len(C)):
for m in xrange(i, len(C)):
I[:, i] += (t[m + deg + 1] - t[m]) * B[:, m] / (deg + 1.0)
B = I
y = B.dot(C)
if cov_C is not None:
cov_C = scipy.asarray(cov_C)
# If there are no covariances, promote cov_C to a diagonal matrix
if cov_C.ndim == 1:
cov_C = scipy.diag(cov_C)
cov_y = B.dot(cov_C).dot(B.T)
return (y, cov_y)
else:
return y
|
Evaluate a B-, M- or I-spline with the specified internal knots, order and coefficients.
`deg` boundary knots are appended at both sides of the domain.
The zeroth order basis functions are modified to ensure continuity at the
right-hand boundary.
Note that the I-splines include the :math:`i=0` case in order to have a "DC
offset". This way your functions do not have to start at zero. If you want
to not include this, simply set the first coefficient in `C` to zero.
Parameters
----------
t_int : array of float, (`M`,)
The internal knot locations. Must be monotonic (this is NOT checked).
C : array of float, (`M + deg - 1`,)
The coefficients applied to the basis functions.
deg : nonnegative int
The polynomial degree to use.
x : array of float, (`N`,)
The locations to evaluate the spline at.
cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
The covariance matrix of the coefficients. If a 1d array is passed, this
is treated as the variance. If None, then the uncertainty is not
computed.
M_spline : bool, optional
If True, compute the M-spline instead of the B-spline. M-splines are
normalized to integrate to unity, as opposed to B-splines which sum to
unity at all points. Default is False (compute B-spline).
I_spline : bool, optional
If True, compute the I-spline instead of the B-spline. Note that this
will override `M_spline`. I-splines are the integrals of the M-splines,
and hence ensure curves are monotonic if all coefficients are of the
same sign. Note that the I-splines returned will be of polynomial degree
`deg` (i.e., the integral of what is returned from calling the function
with `deg=deg-1` and `M_spline=True`). Default is False (compute B-spline
or M-spline).
n : int, optional
The derivative order to compute. Default is 0. If `n>d`, all zeros are
returned (i.e., the discontinuities are not included).
Returns
-------
`y` or (`y`, `cov_y`): The values (and possibly uncertainties) of the spline
at the specified locations.
|
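A usage sketch, assuming `spev` above is in scope and runs in its original Python 2 / old-SciPy environment (`xrange`, `scipy.asarray`). With all coefficients equal to one, the clamped B-spline basis forms a partition of unity, so the evaluation should return ones:
import numpy as np

t_int = np.array([0.0, 1.0, 2.0, 3.0])      # M = 4 internal knots
deg = 2
C = np.ones(len(t_int) + deg - 1)           # M + deg - 1 = 5 coefficients
x = np.linspace(0.0, 3.0, 7)
y = spev(t_int, C, deg, x)
print(y)                                    # expected: all ones (partition of unity)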
def _find_lib_path():
"""Find mxnet library."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
amalgamation_lib_path = os.path.join(curr_path, '../../lib/libmxnet_predict.so')
if os.path.exists(amalgamation_lib_path) and os.path.isfile(amalgamation_lib_path):
lib_path = [amalgamation_lib_path]
return lib_path
else:
logging.info('Cannot find libmxnet_predict.so. Will search for MXNet library using libinfo.py then.')
try:
from mxnet.libinfo import find_lib_path
lib_path = find_lib_path()
return lib_path
except ImportError:
libinfo_path = os.path.join(curr_path, '../../python/mxnet/libinfo.py')
if os.path.exists(libinfo_path) and os.path.isfile(libinfo_path):
libinfo = {'__file__': libinfo_path}
exec(compile(open(libinfo_path, "rb").read(), libinfo_path, 'exec'), libinfo, libinfo)
lib_path = libinfo['find_lib_path']()
return lib_path
else:
raise RuntimeError('Cannot find libinfo.py at %s.' % libinfo_path)
|
Find mxnet library.
|
def _fromiter(it, dtype, count, progress, log):
"""Utility function to load an array from an iterator."""
if progress > 0:
it = _iter_withprogress(it, progress, log)
if count is not None:
a = np.fromiter(it, dtype=dtype, count=count)
else:
a = np.fromiter(it, dtype=dtype)
return a
|
Utility function to load an array from an iterator.
|
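For reference, the underlying NumPy call this utility wraps; passing `count` lets NumPy preallocate the output array:
import numpy as np

squares = (i * i for i in range(5))
a = np.fromiter(squares, dtype=np.int64, count=5)   # count enables preallocation
print(a)                                            # -> [ 0  1  4  9 16]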
def enrich_sentences_with_NLP(self, all_sentences):
"""
Enrich a list of fonduer Sentence objects with NLP features. We merge
and process the text of all Sentences for higher efficiency.
:param all_sentences: List of fonduer Sentence objects for one document
:return:
"""
if not self.has_NLP_support():
raise NotImplementedError(
f"Language {self.lang} not available in spacy beyond tokenization"
)
if len(all_sentences) == 0:
return # Nothing to parse
if self.model.has_pipe("sentencizer"):
self.model.remove_pipe("sentencizer")
self.logger.debug(
f"Removed sentencizer ('sentencizer') from model. "
f"Now in pipeline: {self.model.pipe_names}"
)
if self.model.has_pipe("sentence_boundary_detector"):
self.model.remove_pipe(name="sentence_boundary_detector")
self.model.add_pipe(
set_custom_boundary, before="parser", name="sentence_boundary_detector"
)
sentence_batches = self._split_sentences_by_char_limit(
all_sentences, self.model.max_length
)
# TODO: We could do this in parallel. Test speedup in the future
for sentence_batch in sentence_batches:
custom_tokenizer = TokenPreservingTokenizer(self.model.vocab)
# we circumvent redundant tokenization by using a custom
# tokenizer that directly uses the already separated words
# of each sentence as tokens
doc = custom_tokenizer(sentence_batch)
doc.user_data = sentence_batch
for name, proc in self.model.pipeline: # iterate over components in order
doc = proc(doc)
try:
assert doc.is_parsed
except Exception:
self.logger.exception(f"{doc} was not parsed")
for sent, current_sentence_obj in zip(doc.sents, sentence_batch):
parts = defaultdict(list)
for i, token in enumerate(sent):
parts["lemmas"].append(token.lemma_)
parts["pos_tags"].append(token.tag_)
parts["ner_tags"].append(
token.ent_type_ if token.ent_type_ else "O"
)
head_idx = (
0 if token.head is token else token.head.i - sent[0].i + 1
)
parts["dep_parents"].append(head_idx)
parts["dep_labels"].append(token.dep_)
current_sentence_obj.pos_tags = parts["pos_tags"]
current_sentence_obj.lemmas = parts["lemmas"]
current_sentence_obj.ner_tags = parts["ner_tags"]
current_sentence_obj.dep_parents = parts["dep_parents"]
current_sentence_obj.dep_labels = parts["dep_labels"]
yield current_sentence_obj
|
Enrich a list of fonduer Sentence objects with NLP features. We merge
and process the text of all Sentences for higher efficiency.
:param all_sentences: List of fonduer Sentence objects for one document
:return:
|
def emit(
self,
record):
"""emit
Emit handler that queues the message for the helper thread to send to Splunk every ``self.sleep_interval`` seconds
:param record: LogRecord to send to Splunk
https://docs.python.org/3/library/logging.html
"""
self.debug_log('emit - start')
try:
record = self.format_record(
record)
except Exception as e:
self.write_log(
'exception in Splunk logging handler: %s' % str(e))
self.write_log(
traceback.format_exc())
return
if not self.is_shutting_down(shutdown_event=self.shutdown_event) \
and self.sleep_interval > 0.1:
try:
self.debug_log(
'writing to queue={}'.format(
self.queue))
# Put log message into queue; worker thread will pick up
self.queue.put(
record)
except Exception as e:
self.write_log(
'log queue full; log data will be dropped.')
else:
# Publish immediately because there is no worker
self.publish_to_splunk(
payload=record)
self.debug_log('emit - done')
|
emit
Emit handler that queues the message for the helper thread to send to Splunk every ``self.sleep_interval`` seconds
:param record: LogRecord to send to Splunk
https://docs.python.org/3/library/logging.html
|
def preprocessFastqs(fastqFNs, seqFNPrefix, offsetFN, abtFN, areUniform, logger):
'''
This function does the grunt work behind string extraction for fastq files
@param fastqFNs - a list of .fq filenames for parsing
@param seqFNPrefix - this is always of the form '<DIR>/seqs.npy'
@param offsetFN - this is always of the form '<DIR>/offsets.npy'
@param abtFN - this is always of the form '<DIR>/about.npy'
@param areUniform - True if all sequences are of uniform length
@param logger - logger object for output
'''
#create a seqArray
seqArray = []
tempFileId = 0
seqsPerFile = 1000000
maxSeqLen = -1
numSeqs = 0
subSortFNs = []
for fnID, fn in enumerate(fastqFNs):
#open the file and read every 4th line, starting from the second (the sequence line)
logger.info('Loading \''+fn+'\'...')
if fn.endswith('.gz'):
fp = gzip.open(fn, 'r')
else:
fp = open(fn, 'r')
i = -1
#go through each line
for line in fp:
if i % 4 == 0:
seqArray.append((line.strip('\n')+'$', fnID, i/4))
if len(seqArray) == seqsPerFile:
if not areUniform or maxSeqLen == -1:
maxSeqLen = 0
for seq, fID, seqID in seqArray:
if len(seq) > maxSeqLen:
maxSeqLen = len(seq)
tempFN = seqFNPrefix+'.sortTemp.'+str(tempFileId)+'.npy'
subSortFNs.append(tempFN)
tempArray = np.lib.format.open_memmap(tempFN, 'w+', 'a'+str(maxSeqLen)+',<u1,<u8', (len(seqArray),))
tempArray[:] = sorted(seqArray)
numSeqs += len(seqArray)
del tempArray
tempFileId += 1
seqArray = []
i += 1
fp.close()
if len(seqArray) > 0:
if not areUniform or maxSeqLen == -1:
maxSeqLen = 0
for seq, fID, seqID in seqArray:
if len(seq) > maxSeqLen:
maxSeqLen = len(seq)
tempFN = seqFNPrefix+'.sortTemp.'+str(tempFileId)+'.npy'
subSortFNs.append(tempFN)
tempArray = np.lib.format.open_memmap(tempFN, 'w+', 'a'+str(maxSeqLen)+',<u1,<u8', (len(seqArray),))
tempArray[:] = sorted(seqArray)
numSeqs += len(seqArray)
del tempArray
tempFileId += 1
seqArray = []
logger.info('Pre-sorting '+str(numSeqs)+' sequences...')
iters = []
for fn in subSortFNs:
iters.append(customiter(np.load(fn, 'r')))
#save it
tempFN = seqFNPrefix+'.temp.npy'
fp = open(tempFN, 'w+')
aboutFile = np.lib.format.open_memmap(abtFN, 'w+', '<u1,<u8', (numSeqs,))
ind = 0
for tup in heapq.merge(*iters):
(seq, fID, seqID) = tup
aboutFile[ind] = (fID, seqID)
fp.write(seq)
ind += 1
fp.close()
#clean up disk space
for fn in subSortFNs:
os.remove(fn)
#convert the sequences into uint8s and then save it
del seqArray
seqArray = np.memmap(tempFN)
if areUniform:
uniformLength = maxSeqLen
else:
uniformLength = 0
logger.info('Saving sorted sequences for BWT construction...')
MSBWTGen.writeSeqsToFiles(seqArray, seqFNPrefix, offsetFN, uniformLength)
#wipe this
del seqArray
os.remove(tempFN)
|
This function does the grunt work behind string extraction for fastq files
@param fastqFNs - a list of .fq filenames for parsing
@param seqFNPrefix - this is always of the form '<DIR>/seqs.npy'
@param offsetFN - this is always of the form '<DIR>/offsets.npy'
@param abtFN - this is always of the form '<DIR>/about.npy'
@param areUniform - True if all sequences are of uniform length
@param logger - logger object for output
|
def parse_url(url):
"""Parse a Elk connection string """
scheme, dest = url.split('://')
host = None
ssl_context = None
if scheme == 'elk':
host, port = dest.split(':') if ':' in dest else (dest, 2101)
elif scheme == 'elks':
host, port = dest.split(':') if ':' in dest else (dest, 2601)
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ssl_context.verify_mode = ssl.CERT_NONE
elif scheme == 'serial':
host, port = dest.split(':') if ':' in dest else (dest, 115200)
else:
raise ValueError("Invalid scheme '%s'" % scheme)
return (scheme, host, int(port), ssl_context)
|
Parse an Elk connection string.
|
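Illustrative calls (assuming `parse_url` above and the `ssl` module are in scope; host names are hypothetical):
print(parse_url('elk://192.168.1.2'))              # -> ('elk', '192.168.1.2', 2101, None)
print(parse_url('elks://panel.example.com')[:3])   # -> ('elks', 'panel.example.com', 2601)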
def site_url(self, url):
"""URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
"""
# Regular expression to URL validate
regex = re.compile(
r'^(?:http|https)://' # Scheme only HTTP/HTTPS
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \
[A-Z0-9-]{2,}(?<!-)\.?)|' # Domain
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6
r'(?::\d+)?' # Port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate URL
if re.match('^(?:http|https)://', url):
if re.search(regex, url):
self.__site_url = url
else:
raise PybooruError("Invalid URL: {0}".format(url))
else:
raise PybooruError(
"Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
|
URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
|
def get_version(extension, workflow_file):
'''Determines the version of a .py, .wdl, or .cwl file.'''
if extension == 'py' and two_seven_compatible(workflow_file):
return '2.7'
elif extension == 'cwl':
return yaml.load(open(workflow_file))['cwlVersion']
else: # Must be a wdl file.
# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142
try:
return [l.lstrip('version') for l in open(workflow_file).read().splitlines() if 'version' in l.split(' ')][0]
except IndexError:
return 'draft-2'
|
Determines the version of a .py, .wdl, or .cwl file.
|
def main():
"""main."""
config = context.config
config.set_main_option("sqlalchemy.url", config.get_main_option('url'))
run_migrations_online(config)
|
main.
|
def fetch_ticker(self) -> Ticker:
"""Fetch the market ticker."""
return self._fetch('ticker', self.market.code)(self._ticker)()
|
Fetch the market ticker.
|
def expand_to_one_hot(data,expand = True,use_alternative=False):
header_dict = {'ALCABUS':0,'PRIRCAT':1,'TMSRVC':2,'SEX1':3,'RACE':4,'RELTYP':5,'age_1st_arrest':6,'DRUGAB':7,'Class':8,'RLAGE':9,'NFRCTNS':10}
new_data = []
for entry in data:
temp = {}
if expand == True:
if entry[header_dict["SEX1"]] == "FEMALE":
temp['female'] = 1
else:
temp['female'] = 0
if entry[header_dict["ALCABUS"]] == 'INMATE IS AN ALCOHOL ABUSER':
temp['prior_alcohol_abuse'] = 1
else:
temp['prior_alcohol_abuse'] = 0
if entry[header_dict['DRUGAB']] == 'INMATE IS A DRUG ABUSER':
temp['prior_drug_abuse'] = 1
else:
temp['prior_drug_abuse'] = 0
if entry[header_dict['NFRCTNS']] == 'INMATE HAS RECORD':
temp['infraction_in_prison'] = 1
else:
temp['infraction_in_prison'] = 0
race_cats = ['WHITE','BLACK','AMERICAN INDIAN/ALEUTIAN','ASIAN/PACIFIC ISLANDER','OTHER','UNKNOWN']
for cat in race_cats:
if entry[header_dict['RACE']] == cat:
temp['race_'+cat] = 1
else:
temp['race_'+cat] = 0
release_age_cats = ['14 TO 17 YEARS OLD','18 TO 24 YEARS OLD', '25 TO 29 YEARS OLD', \
'30 TO 34 YEARS OLD','35 TO 39 YEARS OLD','40 TO 44 YEARS OLD','45 YEARS OLD AND OLDER']
for cat in release_age_cats:
if entry[header_dict['RLAGE']] == cat:
temp['release_age_'+cat] = 1
else:
temp['release_age_'+cat] = 0
time_served_cats = ['None','1 TO 6 MONTHS','13 TO 18 MONTHS','19 TO 24 MONTHS','25 TO 30 MONTHS', \
'31 TO 36 MONTHS','37 TO 60 MONTHS','61 MONTHS AND HIGHER','7 TO 12 MONTHS']
for cat in time_served_cats:
if entry[header_dict['TMSRVC']] == cat:
temp['time_served_'+cat] = 1
else:
temp['time_served_'+cat] = 0
prior_arrest_cats = ['None','1 PRIOR ARREST','11 TO 15 PRIOR ARRESTS','16 TO HI PRIOR ARRESTS','2 PRIOR ARRESTS', \
'3 PRIOR ARRESTS','4 PRIOR ARRESTS','5 PRIOR ARRESTS','6 PRIOR ARRESTS','7 TO 10 PRIOR ARRESTS']
for cat in prior_arrest_cats:
if entry[header_dict['PRIRCAT']] == cat:
temp['prior_arrest_'+cat] = 1
else:
temp['prior_arrest_'+cat] = 0
conditional_release =['PAROLE BOARD DECISION-SERVED NO MINIMUM','MANDATORY PAROLE RELEASE', 'PROBATION RELEASE-SHOCK PROBATION', \
'OTHER CONDITIONAL RELEASE']
unconditional_release = ['EXPIRATION OF SENTENCE','COMMUTATION-PARDON','RELEASE TO CUSTODY, DETAINER, OR WARRANT', \
'OTHER UNCONDITIONAL RELEASE']
other_release = ['NATURAL CAUSES','SUICIDE','HOMICIDE BY ANOTHER INMATE','OTHER HOMICIDE','EXECUTION','OTHER TYPE OF DEATH', \
'TRANSFER','RELEASE ON APPEAL OR BOND','OTHER TYPE OF RELEASE','ESCAPE','ACCIDENTAL INJURY TO SELF','UNKNOWN']
if entry[header_dict['RELTYP']] in conditional_release:
temp['released_conditional'] = 1
temp['released_unconditional'] = 0
temp['released_other'] = 0
elif entry[header_dict['RELTYP']] in unconditional_release:
temp['released_conditional'] = 0
temp['released_unconditional'] = 1
temp['released_other'] = 0
else:
temp['released_conditional'] = 0
temp['released_unconditional'] = 0
temp['released_other'] = 1
first_arrest_cats = ['UNDER 17','BETWEEN 18 AND 24','BETWEEN 25 AND 29','BETWEEN 30 AND 39','OVER 40']
for cat in first_arrest_cats:
if entry[header_dict['age_1st_arrest']] == cat:
temp['age_first_arrest_'+cat] = 1
else:
temp['age_first_arrest_'+cat] = 0
else:
temp['SEX1'] = entry['SEX1']
temp['RELTYP'] = entry['RELTYP']
temp['PRIRCAT'] = entry['PRIRCAT']
temp['ALCABUS'] = entry['ALCABUS']
temp['DRUGAB'] = entry['DRUGAB']
temp['RLAGE'] = entry['RLAGE']
temp['TMSRVC'] = entry['TMSRVC']
temp['NFRCTNS'] = entry['NFRCTNS']
temp['RACE'] = entry['RACE']
try:
bdate = datetime.date(int(entry['YEAROB2']),int(entry['MNTHOB2']), int(entry['DAYOB2']))
first_arrest = datetime.date(int(entry['A001YR']),int(entry['A001MO']),int(entry['A001DA']))
first_arrest_age = first_arrest - bdate
temp['age_1st_arrest'] = first_arrest_age.days
except:
temp['age_1st_arrest'] = 0
new_data.append(temp)
# convert from dictionary to list of lists
fin = [[int(entry[key]) for key in entry.keys()] for entry in new_data]
"""
with open("brandon_testing/test_"+str(time.clock())+".csv","w") as f:
writer = csv.writer(f,delimiter=",")
for row in fin:
writer.writerow(row)
"""
return fin
|
with open("brandon_testing/test_"+str(time.clock())+".csv","w") as f:
writer = csv.writer(f,delimiter=",")
for row in fin:
writer.writerow(row)
|
def create_game(
self,
map_name,
bot_difficulty=sc_pb.VeryEasy,
bot_race=sc_common.Random,
bot_first=False):
"""Create a game, one remote agent vs the specified bot.
Args:
map_name: The map to use.
bot_difficulty: The difficulty of the bot to play against.
bot_race: The race for the bot.
bot_first: Whether the bot should be player 1 (else is player 2).
"""
self._controller.ping()
# Form the create game message.
map_inst = maps.get(map_name)
map_data = map_inst.data(self._run_config)
if map_name not in self._saved_maps:
self._controller.save_map(map_inst.path, map_data)
self._saved_maps.add(map_name)
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),
disable_fog=False)
# Set up for one bot, one agent.
if not bot_first:
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(
type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)
if bot_first:
create.player_setup.add(type=sc_pb.Participant)
# Create the game.
self._controller.create_game(create)
|
Create a game, one remote agent vs the specified bot.
Args:
map_name: The map to use.
bot_difficulty: The difficulty of the bot to play against.
bot_race: The race for the bot.
bot_first: Whether the bot should be player 1 (else is player 2).
|
def dict_copy(func):
"copy dict args, to avoid modifying caller's copy"
def proxy(*args, **kwargs):
new_args = []
new_kwargs = {}
for var in kwargs:
if isinstance(kwargs[var], dict):
new_kwargs[var] = dict(kwargs[var])
else:
new_kwargs[var] = kwargs[var]
for arg in args:
if isinstance(arg, dict):
new_args.append(dict(arg))
else:
new_args.append(arg)
return func(*new_args, **new_kwargs)
return proxy
|
copy dict args, to avoid modifying caller's copy
|
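A small illustration (assuming the `dict_copy` decorator above is in scope): the wrapped function mutates only a copy, so the caller's dict is untouched.
@dict_copy
def consume(d):
    d.pop('a', None)
    return d

original = {'a': 1, 'b': 2}
consume(original)
print(original)          # -> {'a': 1, 'b': 2}, unchanged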
def get_json_result(results, n=10):
"""Return the top `n` results as a JSON list.
>>> results = [{'probability': 0.65,
... 'whatever': 'bar'},
... {'probability': 0.21,
... 'whatever': 'bar'},
... {'probability': 0.05,
... 'whatever': 'bar'},]
>>> get_json_result(results, n=10)
'[{"probability": 0.65, "whatever": "bar"}, {"probability": 0.21, "whatever": "bar"}, {"probability": 0.05, "whatever": "bar"}]'
"""
s = []
last = -1
for res in results[:min(len(results), n)]:
if res['probability'] < last*0.5 and res['probability'] < 0.05:
break
if res['probability'] < 0.01:
break
s.append(res)
last = res['probability']
return json.dumps(s)
|
Return the top `n` results as a JSON list.
>>> results = [{'probability': 0.65,
... 'whatever': 'bar'},
... {'probability': 0.21,
... 'whatever': 'bar'},
... {'probability': 0.05,
... 'whatever': 'bar'},]
>>> get_json_result(results, n=10)
'[{"probability": 0.65, "whatever": "bar"}, {"probability": 0.21, "whatever": "bar"}, {"probability": 0.05, "whatever": "bar"}]'
|
async def readline(self) -> bytes:
"""
Reads one line
>>> # Keeps waiting for a linefeed incase there is none in the buffer
>>> await test.readline()
:returns: bytes forming a line
"""
while True:
line = self._serial_instance.readline()
if not line:
await asyncio.sleep(self._asyncio_sleep_time)
else:
return line
|
Reads one line
>>> # Keeps waiting for a linefeed incase there is none in the buffer
>>> await test.readline()
:returns: bytes forming a line
|
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")): # namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
|
Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
|
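A usage sketch with a plain dict of columns, assuming the helper above and its module-level dependencies (`izip_longest`, `_text_type`) are importable:
rows, headers = _normalize_tabular_data({"name": ["a", "b"], "size": [1, 2]}, headers="keys")
print(headers)   # -> ['name', 'size']
print(rows)      # -> [['a', 1], ['b', 2]]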
def set_frequency(self, host, sem=None, interval=None):
"""Set frequency for host with sem and interval."""
# single sem or global sem
sem = sem or self.sem
interval = self.interval if interval is None else interval
frequency = Frequency(sem, interval, host)
frequencies = {host: frequency}
self.update_frequency(frequencies)
return frequency
|
Set frequency for host with sem and interval.
|
def unblock_pin(ctx, puk, new_pin):
"""
Unblock the PIN.
Reset the PIN using the PUK code.
"""
controller = ctx.obj['controller']
if not puk:
puk = click.prompt(
'Enter PUK', default='', show_default=False,
hide_input=True, err=True)
if not new_pin:
new_pin = click.prompt(
'Enter a new PIN', default='',
show_default=False, hide_input=True, err=True)
controller.unblock_pin(puk, new_pin)
|
Unblock the PIN.
Reset the PIN using the PUK code.
|
def __get_html(self, body=None):
"""
Returns the html content with given body tag content.
:param body: Body tag content.
:type body: unicode
:return: Html.
:rtype: unicode
"""
output = []
output.append("<html>")
output.append("<head>")
for javascript in (self.__jquery_javascript,
self.__crittercism_javascript,
self.__reporter_javascript):
output.append("<script type=\"text/javascript\">")
output.append(javascript)
output.append("</script>")
output.append("<style type=\"text/css\">")
output.append(self.__style)
output.append("</style>")
output.append("</head>")
if body is not None:
output.append(body)
else:
output.append("<body>")
output.append("<div id=\"report\">")
output.append("</div>")
output.append("</body>")
output.append("</html>")
return "\n".join(output)
|
Returns the html content with given body tag content.
:param body: Body tag content.
:type body: unicode
:return: Html.
:rtype: unicode
|
def commitAndCloseEditor(self):
"""Commit and close editor"""
editor = self.sender()
# Avoid a segfault with PyQt5. Variable value won't be changed
# but at least Spyder won't crash. It seems generated by a bug in sip.
try:
self.commitData.emit(editor)
except AttributeError:
pass
self.closeEditor.emit(editor, QAbstractItemDelegate.NoHint)
|
Commit and close editor
|
def _dictlist_to_lists(dl, *keys):
''' convert a list of dictionaries to per-key lists of values
>>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
...       {'a': 'wow', 'b': 300}]
>>> _dictlist_to_lists(dl, 'a', 'b')
[['test', 'zaz', 'wow'], [3, 444, 300]]
'''
lists = []
for k in keys:
lists.append([])
for item in dl:
for i, key in enumerate(keys):
x = item[key]
if isinstance(x, unicode):
x = str(x)
lists[i].append(x)
return lists
|
convert a list of dictionaries to per-key lists of values
>>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
...       {'a': 'wow', 'b': 300}]
>>> _dictlist_to_lists(dl, 'a', 'b')
[['test', 'zaz', 'wow'], [3, 444, 300]]
|
def union_overlapping(intervals):
"""Union any overlapping intervals in the given set."""
disjoint_intervals = []
for interval in intervals:
if disjoint_intervals and disjoint_intervals[-1].overlaps(interval):
disjoint_intervals[-1] = disjoint_intervals[-1].union(interval)
else:
disjoint_intervals.append(interval)
return disjoint_intervals
|
Union any overlapping intervals in the given set.
|
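An illustration with a minimal, hypothetical Interval type providing `overlaps` and `union` (not from the source); note the single pass above assumes the input is already sorted by start, and `union_overlapping` is assumed to be in scope:
class Interval:
    def __init__(self, start, end):
        self.start, self.end = start, end
    def overlaps(self, other):
        return other.start <= self.end
    def union(self, other):
        return Interval(self.start, max(self.end, other.end))
    def __repr__(self):
        return "[{}, {}]".format(self.start, self.end)

print(union_overlapping([Interval(0, 2), Interval(1, 4), Interval(6, 7)]))
# -> [[0, 4], [6, 7]]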
def tqdm(self, desc, total, leave, initial=0):
"""
Extension point. Override to provide custom options to tqdm_notebook initializer.
:param desc: Description string
:param total: Total number of updates
:param leave: Leave progress bar when done
:return: new progress bar
:param initial: Initial counter state
"""
return tqdm_notebook(desc=desc, total=total, leave=leave, initial=initial)
|
Extension point. Override to provide custom options to tqdm_notebook initializer.
:param desc: Description string
:param total: Total number of updates
:param leave: Leave progress bar when done
:return: new progress bar
:param initial: Initial counter state
|
def _variable_inputs(self, op):
""" Return which inputs of this operation are variable (i.e. depend on the model inputs).
"""
if op.name not in self._vinputs:
self._vinputs[op.name] = np.array([t.op in self.between_ops or t in self.model_inputs for t in op.inputs])
return self._vinputs[op.name]
|
Return which inputs of this operation are variable (i.e. depend on the model inputs).
|
def move(self, x, y):
"""
Move the drawing cursor to the specified position.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
"""
self._x = int(round(x * 2, 0))
self._y = int(round(y * 2, 0))
|
Move the drawing cursor to the specified position.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
|
def prepare_inventory(self):
"""
Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
"""
if self.inventory is None:
self.inventory = os.path.join(self.private_data_dir, "inventory")
|
Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
|
def _get_game_number(cls, gid_path):
"""
Game Number
:param gid_path: game logs directory path
:return: game number(int)
"""
game_number = str(gid_path[len(gid_path)-2:len(gid_path)-1])
if game_number.isdigit():
return int(game_number)
else:
for char in reversed(gid_path):
if char.isdigit():
return int(char)
raise MlbAmException('Illegal Game Number:(gid:{gid_path})'.format(gid_path=gid_path))
|
Game Number
:param gid_path: game logs directory path
:return: game number(int)
|
def get_context(request, context=None):
"""Returns common context data for network topology views."""
if context is None:
context = {}
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
context['launch_instance_allowed'] = policy.check(
(("compute", "os_compute_api:servers:create"),), request)
context['instance_quota_exceeded'] = _quota_exceeded(request, 'instances')
context['create_network_allowed'] = policy.check(
(("network", "create_network"),), request)
context['network_quota_exceeded'] = _quota_exceeded(request, 'network')
context['create_router_allowed'] = (
network_config.get('enable_router', True) and
policy.check((("network", "create_router"),), request))
context['router_quota_exceeded'] = _quota_exceeded(request, 'router')
context['console_type'] = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
context['show_ng_launch'] = (
base.is_service_enabled(request, 'compute') and
getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True))
context['show_legacy_launch'] = (
base.is_service_enabled(request, 'compute') and
getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False))
return context
|
Returns common context data for network topology views.
|
def shutdown(self):
"""close socket, immediately."""
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
|
close socket, immediately.
|
def get_writer_factory_for(self, name, *, format=None):
"""
Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type
"""
return self.get_factory_for(WRITER, name, format=format)
|
Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type
|
def pvpc_procesa_datos_dia(_, response, verbose=True):
"""Procesa la información JSON descargada y forma el dataframe de los datos de un día."""
try:
d_data = response['PVPC']
df = _process_json_pvpc_hourly_data(pd.DataFrame(d_data))
return df, 0
except Exception as e:
if verbose:
print('ERROR leyendo información de web: {}'.format(e))
return None, -2
|
Process the downloaded JSON data and build the dataframe for one day's data.
|
def dumpLines(self):
"""
For debugging, dump all lines and their content
"""
for i, line in enumerate(self.lines):
logger.debug("Line %d:", i)
logger.debug(line.dumpFragments())
|
For debugging, dump all lines and their content
|
def check_exists(self):
'''
Check if the resource exists, update self.exists, and return it.
Returns:
bool: self.exists (also updates the attribute)
'''
response = self.repo.api.http_request('HEAD', self.uri)
self.status_code = response.status_code
# resource exists
if self.status_code == 200:
self.exists = True
# resource no longer here
elif self.status_code == 410:
self.exists = False
# resource not found
elif self.status_code == 404:
self.exists = False
return self.exists
|
Check if the resource exists, update self.exists, and return it.
Returns:
bool: self.exists (also updates the attribute)
|
def acknowledge_time(self):
"""
Processor time when the alarm was acknowledged.
:type: :class:`~datetime.datetime`
"""
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField('acknowledgeTime')):
return parse_isostring(self._proto.acknowledgeInfo.acknowledgeTime)
return None
|
Processor time when the alarm was acknowledged.
:type: :class:`~datetime.datetime`
|
def long_click(self, duration=2.0):
"""
Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation.
"""
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
pos_in_percentage = self.get_position(self._focus or 'anchor')
self.poco.pre_action('long_click', self, pos_in_percentage)
ret = self.poco.long_click(pos_in_percentage, duration)
self.poco.post_action('long_click', self, pos_in_percentage)
return ret
|
Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation.
|
def quantize_weights(full_precision_model,
nbits,
quantization_mode="linear",
sample_data=None,
**kwargs):
"""
Utility function to convert a full precision (float) MLModel to a
nbit quantized MLModel (float16).
:param full_precision_model: MLModel
Model which will be converted to half precision. Currently conversion
for only neural network models is supported. If a pipeline model is
passed in then all embedded neural network models embedded within
will be converted.
:param nbits: Int
Number of bits per quantized weight. Only 8-bit and lower
quantization is supported
:param quantization_mode: str
One of:
"linear":
Simple linear quantization with scale and bias
"linear_lut":
Simple linear quantization represented as a lookup table
"kmeans_lut":
LUT based quantization, where LUT is generated by K-Means clustering
"custom_lut":
LUT quantization where LUT and quantized weight params are
calculated using a custom function. If this mode is selected then
a custom function must be passed in kwargs with key lut_function.
The function must have input params (nbits, wp) where nbits is the
number of quantization bits and wp is the list of weights for a
given layer. The function should return two parameters (lut, qw)
where lut is an array of length (2^nbits) containing LUT values and
qw is the list of quantized weight parameters. See
_get_linear_lookup_table_and_weight for a sample implementation.
:param sample_data: str | [dict]
Data used to characterize performance of the quantized model in
comparison to the full precision model. Either a list of sample input
dictionaries or an absolute path to a directory containing images.
Path to a directory containing images is only valid for models with
one image input. For all other models a list of sample inputs must be
provided.
:param **kwargs:
See below
:Keyword Arguments:
* *lut_function* (``callable function``) --
A callable function provided when quantization mode is set to
_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE. See quantization_mode for
more details
Returns
-------
model: MLModel
The quantized MLModel instance if running on macOS 10.14 or later,
otherwise the quantized model specification is returned
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import quantization_utils
>>> model = coremltools.models.MLModel('my_model.mlmodel')
>>> quantized_model = quantization_utils.quantize_weights(model, 8, "linear")
"""
qmode_mapping = {
"linear": _QUANTIZATION_MODE_LINEAR_QUANTIZATION,
"kmeans": _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS,
"linear_lut": _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR,
"custom_lut": _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE,
"dequantization": _QUANTIZATION_MODE_DEQUANTIZE
}
try:
qmode = qmode_mapping[quantization_mode]
except KeyError:
raise Exception("Invalid quantization mode. Quantization mode must be "
"one of {}".format(qmode_mapping))
print("Quantizing using {} quantization".format(quantization_mode))
spec = full_precision_model.get_spec()
qspec = quantize_spec_weights(spec, nbits, qmode, **kwargs)
if macos_version() < (10, 14):
print("WARNING! Unable to return a quantized MLModel instance since OS != macOS 10.14 or later")
print("Returning quantized model specification instead")
return qspec
quantized_model = _get_model(qspec)
if not sample_data:
return quantized_model
compare_models(full_precision_model, quantized_model, sample_data)
return quantized_model
|
Utility function to convert a full precision (float) MLModel to a
nbit quantized MLModel (float16).
:param full_precision_model: MLModel
Model which will be converted to half precision. Currently conversion
for only neural network models is supported. If a pipeline model is
passed in then all embedded neural network models embedded within
will be converted.
:param nbits: Int
Number of bits per quantized weight. Only 8-bit and lower
quantization is supported
:param quantization_mode: str
One of:
"linear":
Simple linear quantization with scale and bias
"linear_lut":
Simple linear quantization represented as a lookup table
"kmeans_lut":
LUT based quantization, where LUT is generated by K-Means clustering
"custom_lut":
LUT quantization where LUT and quantized weight params are
calculated using a custom function. If this mode is selected then
a custom function must be passed in kwargs with key lut_function.
The function must have input params (nbits, wp) where nbits is the
number of quantization bits and wp is the list of weights for a
given layer. The function should return two parameters (lut, qw)
where lut is an array of length (2^nbits) containing LUT values and
qw is the list of quantized weight parameters. See
_get_linear_lookup_table_and_weight for a sample implementation.
:param sample_data: str | [dict]
Data used to characterize performance of the quantized model in
comparison to the full precision model. Either a list of sample input
dictionaries or an absolute path to a directory containing images.
Path to a directory containing images is only valid for models with
one image input. For all other models a list of sample inputs must be
provided.
:param **kwargs:
See below
:Keyword Arguments:
* *lut_function* (``callable function``) --
A callable function provided when quantization mode is set to
_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE. See quantization_mode for
more details
Returns
-------
model: MLModel
The quantized MLModel instance if running on macOS 10.14 or later,
otherwise the quantized model specification is returned
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import quantization_utils
>>> model = coremltools.models.MLModel('my_model.mlmodel')
>>> quantized_model = quantization_utils.quantize_weights(model, 8, "linear")
|
def exclude_matches(self, matches):
"""Filter any matches that match an exclude pattern.
:param matches: a list of possible completions
"""
for match in matches:
for exclude_pattern in self.exclude_patterns:
if re.match(exclude_pattern, match) is not None:
break
else:
yield match
|
Filter any matches that match an exclude pattern.
:param matches: a list of possible completions
|
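A standalone sketch of the same for/else filtering pattern as a free function (names are illustrative, not from the source):
import re

def filter_matches(matches, exclude_patterns):
    for match in matches:
        for pattern in exclude_patterns:
            if re.match(pattern, match) is not None:
                break                      # excluded: matched a pattern
        else:
            yield match                    # the inner loop never broke

print(list(filter_matches(["foo", "foobar", "baz"], [r"foo.+"])))   # -> ['foo', 'baz']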
def max(self):
"""Maximum, ignorning nans."""
if "max" not in self.attrs.keys():
def f(dataset, s):
return np.nanmax(dataset[s])
self.attrs["max"] = np.nanmax(list(self.chunkwise(f).values()))
return self.attrs["max"]
|
Maximum, ignoring NaNs.
|
def sample(self, size=1):
""" Sample rigid transform random variables.
Parameters
----------
size : int
number of samples to take
Returns
-------
:obj:`list` of :obj:`RigidTransform`
sampled rigid transformations
"""
samples = []
for i in range(size):
# sample random pose
xi = self._r_xi_rv.rvs(size=1)
S_xi = skew(xi)
R_sample = scipy.linalg.expm(S_xi)
t_sample = self._t_rv.rvs(size=1)
samples.append(RigidTransform(rotation=R_sample,
translation=t_sample,
from_frame=self._from_frame,
to_frame=self._to_frame))
# not a list if only 1 sample
if size == 1 and len(samples) > 0:
return samples[0]
return samples
|
Sample rigid transform random variables.
Parameters
----------
size : int
number of samples to take
Returns
-------
:obj:`list` of :obj:`RigidTransform`
sampled rigid transformations
|
def subalignment(alnfle, subtype, alntype="fasta"):
"""
Subset synonymous or fourfold degenerate sites from an alignment
input should be a codon alignment
"""
aln = AlignIO.read(alnfle, alntype)
alnlen = aln.get_alignment_length()
nseq = len(aln)
subaln = None
subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype)
if subtype == "synonymous":
for j in range( 0, alnlen, 3 ):
aa = None
for i in range(nseq):
codon = str(aln[i, j: j + 3].seq)
if codon not in CODON_TRANSLATION:
break
if aa and CODON_TRANSLATION[codon] != aa:
break
else:
aa = CODON_TRANSLATION[codon]
else:
if subaln is None:
subaln = aln[:, j: j + 3]
else:
subaln += aln[:, j: j + 3]
if subtype == "fourfold":
for j in range( 0, alnlen, 3 ):
for i in range(nseq):
codon = str(aln[i, j: j + 3].seq)
if codon not in FOURFOLD:
break
else:
if subaln is None:
subaln = aln[:, j: j + 3]
else:
subaln += aln[:, j: j + 3]
if subaln:
AlignIO.write(subaln, subalnfile, alntype)
return subalnfile
else:
print("No sites {0} selected.".format(subtype), file=sys.stderr)
return None
|
Subset synonymous or fourfold degenerate sites from an alignment
input should be a codon alignment
|
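A hypothetical call, assuming codon_aln.fasta is a codon alignment readable by Biopython's AlignIO; the output file name is derived from the input as shown in the function:

# writes e.g. codon_aln_synonymous.fasta next to the input and returns its path
outfile = subalignment("codon_aln.fasta", "synonymous", alntype="fasta")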
def _get_unit_factor(cls, unit):
"""
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
"""
try:
if isinstance(unit, str):
unit = cls.UNIT_FACTOR_NAMES[unit]
return cls.UNIT_FACTORS[unit]
except KeyError:
raise UnsupportedUnitError()
|
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
|
def certificate_issuer_id(self, certificate_issuer_id):
"""
Sets the certificate_issuer_id of this CreateCertificateIssuerConfig.
The ID of the certificate issuer.
:param certificate_issuer_id: The certificate_issuer_id of this CreateCertificateIssuerConfig.
:type: str
"""
if certificate_issuer_id is None:
raise ValueError("Invalid value for `certificate_issuer_id`, must not be `None`")
if certificate_issuer_id is not None and len(certificate_issuer_id) > 32:
raise ValueError("Invalid value for `certificate_issuer_id`, length must be less than or equal to `32`")
self._certificate_issuer_id = certificate_issuer_id
|
Sets the certificate_issuer_id of this CreateCertificateIssuerConfig.
The ID of the certificate issuer.
:param certificate_issuer_id: The certificate_issuer_id of this CreateCertificateIssuerConfig.
:type: str
|
def count_replica(self, partition):
"""Return count of replicas of given partition."""
return sum(1 for b in partition.replicas if b in self.brokers)
|
Return count of replicas of given partition.
|
def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material):
"""Return the volume fraction of flocs initially present, accounting for both suspended particles and coagulant precipitates.
:param ConcAluminum: Concentration of aluminum in solution
:type ConcAluminum: float
:param ConcClay: Concentration of particle in suspension
:type ConcClay: float
:param coag: Type of coagulant in solution
:type coag: float
:param material: Type of particles in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:return: Volume fraction of particles initially present
:rtype: float
"""
return ((conc_precipitate(ConcAluminum, coag).magnitude/coag.PrecipDensity)
+ (ConcClay / material.Density))
|
Return the volume fraction of flocs initially present, accounting for both suspended particles and coagulant precipitates.
:param ConcAluminum: Concentration of aluminum in solution
:type ConcAluminum: float
:param ConcClay: Concentration of particle in suspension
:type ConcClay: float
:param coag: Type of coagulant in solution
:type coag: float
:param material: Type of particles in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:return: Volume fraction of particles initially present
:rtype: float
|
def _wrap_result(self, func):
""" Wrap result in Parser instance """
def wrapper(*args):
result = func(*args)
if hasattr(result, '__iter__') and not isinstance(result, etree._Element):
return [self._wrap_element(element) for element in result]
else:
return self._wrap_element(result)
return wrapper
|
Wrap result in Parser instance
|
def has_length(self, value, q, strict=False):
"""if value has a length of q"""
value = stringify(value)
if value is not None:
if len(value) == q:
return
self.shout('Value %r not matching length %r', strict, value, q)
|
if value has a length of q
|
def _parse_keys(row, line_num):
""" Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
"""
link = 'tools.ietf.org/html/rfc4180#section-2'
none_keys = [key for key in row.keys() if key is None]
if none_keys:
fail('You have more fields defined on row number {} '
'than field headers in your CSV data. Please fix '
'your request body.'.format(line_num), link)
elif not row.get('type'):
fail('Row number {} does not have a type value defined. '
'Please fix your request body.'.format(line_num), link)
|
Perform some sanity checks on the keys.
No key in the row should be named None (that would be an
overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
|
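A self-contained sketch of the same two checks, raising instead of calling the module's fail() helper; the sample rows are made up:

link = 'tools.ietf.org/html/rfc4180#section-2'

def check_row(row, line_num):
    # mirror _parse_keys: reject overrun columns and rows without a type
    if any(key is None for key in row):
        raise ValueError("row {}: more fields than headers ({})".format(line_num, link))
    if not row.get('type'):
        raise ValueError("row {}: missing `type` value ({})".format(line_num, link))

check_row({"type": "articles", "title": "hello"}, 1)   # passes silently
# check_row({"title": "no type"}, 2)                   # would raise ValueError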
def add_post_process(self, name, post_process, description=""):
"""add a post-process
Parameters
----------
name : str
            name of the post-process
        post_process : callback (a function, a class with a __call__ method,
            or a streamz.Stream).
            this callback has to accept the simulation state as parameter
            and return the modified simulation state.
            if a streamz.Stream is provided, it will be plugged in after the
            previous stream (and ultimately the initial_stream). All these
            streams accept and return the simulation state.
description : str, optional, Default is "".
give extra information about the post-processing
"""
self._pprocesses.append(PostProcess(name=name,
function=post_process,
description=description))
self._pprocesses[-1].function(self)
|
add a post-process
Parameters
----------
name : str
    name of the post-process
post_process : callback (a function, a class with a __call__ method,
    or a streamz.Stream).
    this callback has to accept the simulation state as parameter
    and return the modified simulation state.
    if a streamz.Stream is provided, it will be plugged in after the
    previous stream (and ultimately the initial_stream). All these
    streams accept and return the simulation state.
description : str, optional, Default is "".
give extra information about the post-processing
|
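A hedged usage sketch; simul stands for a simulation object exposing add_post_process, and the callback below is a made-up example that only tags the state before returning it:

def tag_state(state):
    # a post-process receives the simulation state and must return it
    state.tagged = True
    return state

simul.add_post_process("tag", tag_state,
                       description="mark the state as post-processed")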
def group_get(auth=None, **kwargs):
'''
Get a single group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_get name=group1
salt '*' keystoneng.group_get name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e
salt '*' keystoneng.group_get name=0e4febc2a5ab4f2c8f374b054162506d
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.get_group(**kwargs)
|
Get a single group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_get name=group1
salt '*' keystoneng.group_get name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e
salt '*' keystoneng.group_get name=0e4febc2a5ab4f2c8f374b054162506d
|
def __add_flag (rule_or_module, variable_name, condition, values):
""" Adds a new flag setting with the specified values.
Does no checking.
"""
assert isinstance(rule_or_module, basestring)
assert isinstance(variable_name, basestring)
assert is_iterable_typed(condition, property_set.PropertySet)
assert is_iterable(values) and all(
isinstance(v, (basestring, type(None))) for v in values)
f = Flag(variable_name, values, condition, rule_or_module)
# Grab the name of the module
m = __re_first_segment.match (rule_or_module)
assert m
module = m.group(1)
__module_flags.setdefault(module, []).append(f)
__flags.setdefault(rule_or_module, []).append(f)
|
Adds a new flag setting with the specified values.
Does no checking.
|
def do_down(self, arg):
"""d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
"""
if self.curindex + 1 == len(self.stack):
self.error('Newest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
|
d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
|
def FromBinary(cls, record_data, record_count=1):
"""Create an UpdateRecord subclass from binary record data.
This should be called with a binary record blob (NOT including the
record type header) and it will decode it into a ReflashTileRecord.
Args:
record_data (bytearray): The raw record data that we wish to parse
into an UpdateRecord subclass NOT including its 8 byte record header.
record_count (int): The number of records included in record_data.
Raises:
ArgumentError: If the record_data is malformed and cannot be parsed.
Returns:
ReflashTileRecord: The decoded reflash tile record.
"""
if len(record_data) < ReflashTileRecord.RecordHeaderLength:
raise ArgumentError("Record was too short to contain a full reflash record header",
length=len(record_data), header_length=ReflashTileRecord.RecordHeaderLength)
offset, data_length, raw_target, hardware_type = struct.unpack_from("<LL8sB3x", record_data)
bindata = record_data[ReflashTileRecord.RecordHeaderLength:]
if len(bindata) != data_length:
raise ArgumentError("Embedded firmware length did not agree with actual length of embeded data",
length=len(bindata), embedded_length=data_length)
target = _parse_target(raw_target)
if target['controller']:
raise ArgumentError("Invalid targetting information, you "
"cannot reflash a controller with a ReflashTileRecord", target=target)
return ReflashTileRecord(target['slot'], bindata, offset, hardware_type)
|
Create an UpdateRecord subclass from binary record data.
This should be called with a binary record blob (NOT including the
record type header) and it will decode it into a ReflashTileRecord.
Args:
record_data (bytearray): The raw record data that we wish to parse
into an UpdateRecord subclass NOT including its 8 byte record header.
record_count (int): The number of records included in record_data.
Raises:
ArgumentError: If the record_data is malformed and cannot be parsed.
Returns:
ReflashTileRecord: The decoded reflash tile record.
|
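A sketch of packing a matching binary blob for testing, assuming the "<LL8sB3x" header layout unpacked above; the 8-byte target encoding is a placeholder because _parse_target is not shown here:

import struct

firmware = b"\xde\xad\xbe\xef" * 4            # dummy embedded firmware image
offset = 0x1000                               # flash offset to write to
raw_target = b"\x00" * 8                      # placeholder slot/target encoding
hardware_type = 1

record_data = struct.pack("<LL8sB3x", offset, len(firmware),
                          raw_target, hardware_type) + firmware
# record_data (20-byte header + payload) is the shape of blob FromBinary expects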
def check_bearer_validity(self, token: dict, connect_mtd) -> dict:
"""Check API Bearer token validity.
Isogeo ID delivers authentication bearers which are valid during
a certain time. So this method checks the validity of the token
with a 30 mn anticipation limit, and renews it if necessary.
See: http://tools.ietf.org/html/rfc6750#section-2
        FYI: 24 h = 86400 seconds, 30 mn = 1800 s, 5 mn = 300 s
        :param dict token: auth bearer to check.
            Structure: the bearer value plus its "expires_at" datetime
        :param isogeo_pysdk.connect connect_mtd: method inherited
            from Isogeo PySDK to get a new bearer
"""
warnings.warn(
"Method is now executed as a decorator within the main SDK class. Will be removed in future versions.",
DeprecationWarning,
)
if datetime.now() < token.get("expires_at"):
token = connect_mtd
logging.debug("Token was about to expire, so has been renewed.")
else:
logging.debug("Token is still valid.")
pass
# end of method
return token
|
Check API Bearer token validity.
Isogeo ID delivers authentication bearers which are valid during
a certain time. So this method checks the validity of the token
with a 30 mn anticipation limit, and renews it if necessary.
See: http://tools.ietf.org/html/rfc6750#section-2
FYI: 24 h = 86400 seconds, 30 mn = 1800 s, 5 mn = 300 s
:param dict token: auth bearer to check.
    Structure: the bearer value plus its "expires_at" datetime
:param isogeo_pysdk.connect connect_mtd: method inherited
    from Isogeo PySDK to get a new bearer
|
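A standalone sketch of the 30-minute anticipation rule the docstring describes, independent of the deprecated method above; the names are illustrative:

from datetime import datetime, timedelta

def needs_renewal(expires_at, anticipation=timedelta(minutes=30)):
    # renew when the token expires within the anticipation window
    return datetime.now() >= expires_at - anticipation

print(needs_renewal(datetime.now() + timedelta(hours=2)))     # False
print(needs_renewal(datetime.now() + timedelta(minutes=10)))  # True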
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
|
Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
|
def is_usable(host, port, timeout=3):
"""测试代理是否可用
params
----------
host ip地址
port 端口号
timeout 默认值为3,通过设置这个参数可以过滤掉一些速度慢的代理
example
----------
is_usable('222.180.24.13', '808', timeout=3)
"""
try:
proxies = {
'http': 'http://%s:%s' % (host, port),
'https': 'https://%s:%s' % (host, port)
}
requests.get('http://www.baidu.com/', proxies=proxies, timeout=timeout)
    except requests.exceptions.RequestException:
        # any request failure (timeout, proxy error, HTTP error) means the proxy is unusable
print('failed: ', host, port)
return False
else:
print('success: ', host, port)
return True
|
Test whether a proxy is usable.
params
----------
host     IP address of the proxy
port     port number of the proxy
timeout  defaults to 3; lower it to filter out slow proxies
example
----------
is_usable('222.180.24.13', '808', timeout=3)
|
def dollars_to_cents(s, allow_negative=False):
"""
Given a string or integer representing dollars, return an integer of
equivalent cents, in an input-resilient way.
This works by stripping any non-numeric characters before attempting to
cast the value.
Examples::
>>> dollars_to_cents('$1')
100
>>> dollars_to_cents('1')
100
>>> dollars_to_cents(1)
100
>>> dollars_to_cents('1e2')
10000
>>> dollars_to_cents('-1$', allow_negative=True)
-100
>>> dollars_to_cents('1 dollar')
100
"""
# TODO: Implement cents_to_dollars
if not s:
return
if isinstance(s, string_types):
s = ''.join(RE_NUMBER.findall(s))
dollars = int(round(float(s) * 100))
if not allow_negative and dollars < 0:
raise ValueError('Negative values not permitted.')
return dollars
|
Given a string or integer representing dollars, return an integer of
equivalent cents, in an input-resilient way.
This works by stripping any non-numeric characters before attempting to
cast the value.
Examples::
>>> dollars_to_cents('$1')
100
>>> dollars_to_cents('1')
100
>>> dollars_to_cents(1)
100
>>> dollars_to_cents('1e2')
10000
>>> dollars_to_cents('-1$', allow_negative=True)
-100
>>> dollars_to_cents('1 dollar')
100
|
def get_credits_by_section_and_regid(section, regid):
"""
Returns a uw_sws.models.Registration object
for the section and regid passed in.
"""
deprecation("Use get_credits_by_reg_url")
# note trailing comma in URL, it's required for the optional dup_code param
url = "{}{},{},{},{},{},{},.json".format(
reg_credits_url_prefix,
section.term.year,
section.term.quarter,
re.sub(' ', '%20', section.curriculum_abbr),
section.course_number,
section.section_id,
regid
)
reg_data = get_resource(url)
try:
return Decimal(reg_data['Credits'].strip())
except InvalidOperation:
pass
|
Returns a uw_sws.models.Registration object
for the section and regid passed in.
|
def access_func(self, id_, lineno, scope=None, default_type=None):
"""
Since ZX BASIC allows access to undeclared functions, we must allow
and *implicitly* declare them if they are not declared already.
This function just checks if the id_ exists and returns its entry if so.
Otherwise, creates an implicit declared variable entry and returns it.
"""
assert default_type is None or isinstance(default_type, symbols.TYPEREF)
result = self.get_entry(id_, scope)
if result is None:
if default_type is None:
if global_.DEFAULT_IMPLICIT_TYPE == TYPE.auto:
default_type = symbols.TYPEREF(self.basic_types[TYPE.auto], lineno, implicit=True)
else:
default_type = symbols.TYPEREF(self.basic_types[global_.DEFAULT_TYPE], lineno, implicit=True)
return self.declare_func(id_, lineno, default_type)
if not self.check_class(id_, CLASS.function, lineno, scope):
return None
return result
|
Since ZX BASIC allows access to undeclared functions, we must allow
and *implicitly* declare them if they are not declared already.
This function just checks if the id_ exists and returns its entry if so.
Otherwise, creates an implicit declared variable entry and returns it.
|
def append(self, cert):
"""
Appends a cert to the path. This should be a cert issued by the last
cert in the path.
:param cert:
An asn1crypto.x509.Certificate object
:return:
The current ValidationPath object, for chaining
"""
if not isinstance(cert, x509.Certificate):
if not isinstance(cert, byte_cls):
raise TypeError(pretty_message(
'''
cert must be a byte string or an
asn1crypto.x509.Certificate object, not %s
''',
type_name(cert)
))
if pem.detect(cert):
_, _, cert = pem.unarmor(cert)
cert = x509.Certificate.load(cert)
if cert.issuer_serial in self._cert_hashes:
raise DuplicateCertificateError()
self._cert_hashes.add(cert.issuer_serial)
self._certs.append(cert)
return self
|
Appends a cert to the path. This should be a cert issued by the last
cert in the path.
:param cert:
An asn1crypto.x509.Certificate object
:return:
The current ValidationPath object, for chaining
|
def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
try:
num = int(user_input)
except Exception:
return
if 0 < num < len(self.items) + 1:
self.current_option = num - 1
self.select()
return user_input
|
Gets the next single character and decides what to do with it
|
def truncate(text, length=50, ellipsis='...'):
"""
Returns a truncated version of the inputted text.
:param text | <str>
length | <int>
ellipsis | <str>
:return <str>
"""
text = nativestring(text)
return text[:length] + (text[length:] and ellipsis)
|
Returns a truncated version of the inputted text.
:param text | <str>
length | <int>
ellipsis | <str>
:return <str>
|
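For example, assuming nativestring is a plain str cast:

truncate("the quick brown fox", length=9)   # -> 'the quick...'
truncate("short", length=50)                # -> 'short' (nothing was cut, no ellipsis)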
def minimum_pitch(self):
""" Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
"""
pitch = self.pitch
minimal_pitch = []
for p in pitch:
minimal_pitch.append(min(p))
return min(minimal_pitch)
|
Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
|
def date_time(self, tzinfo=None, end_datetime=None):
"""
Get a datetime object for a date between January 1, 1970 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2005-08-16 20:39:21')
:return datetime
"""
# NOTE: On windows, the lowest value you can get from windows is 86400
# on the first day. Known python issue:
# https://bugs.python.org/issue30684
return datetime(1970, 1, 1, tzinfo=tzinfo) + \
timedelta(seconds=self.unix_time(end_datetime=end_datetime))
|
Get a datetime object for a date between January 1, 1970 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2005-08-16 20:39:21')
:return datetime
|
def advance(self, length):
"""Advance the cursor in data buffer 'length' bytes."""
new_position = self._position + length
if new_position < 0 or new_position > len(self._data):
raise Exception('Invalid advance amount (%s) for cursor. '
'Position=%s' % (length, new_position))
self._position = new_position
|
Advance the cursor in data buffer 'length' bytes.
|
def send_command_return_multilines(self, obj, command, *arguments):
""" Send command and wait for multiple lines output. """
index_command = obj._build_index_command(command, *arguments)
return self.chassis_list[obj.chassis].sendQuery(index_command, True)
|
Send a command and wait for multi-line output.
|
def run_symmetrized_readout(self, program: Program, trials: int) -> np.ndarray:
"""
Run a quil program in such a way that the readout error is made collectively symmetric
This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
the same as the probability of ``not(b)`` being mistaken for ``not(c)``
A more general symmetrization would guarantee that the probability of ``b`` being
mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
would require choosing random subsets of bits to flip.
In a noisy device, the probability of accurately reading the 0 state might be higher
than that of the 1 state. This makes correcting for readout more difficult. This
function runs the program normally ``(trials//2)`` times. The other half of the time,
it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
measured classical bit back.
See :py:func:`run` for this function's parameter descriptions.
"""
flipped_program = _get_flipped_protoquil_program(program)
if trials % 2 != 0:
raise ValueError("Using symmetrized measurement functionality requires that you "
"take an even number of trials.")
half_trials = trials // 2
flipped_program = flipped_program.wrap_in_numshots_loop(shots=half_trials)
flipped_executable = self.compile(flipped_program)
executable = self.compile(program.wrap_in_numshots_loop(half_trials))
samples = self.run(executable)
flipped_samples = self.run(flipped_executable)
double_flipped_samples = np.logical_not(flipped_samples).astype(int)
results = np.concatenate((samples, double_flipped_samples), axis=0)
np.random.shuffle(results)
return results
|
Run a quil program in such a way that the readout error is made collectively symmetric
This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
the same as the probability of ``not(b)`` being mistaken for ``not(c)``
A more general symmetrization would guarantee that the probability of ``b`` being
mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
would require choosing random subsets of bits to flip.
In a noisy device, the probability of accurately reading the 0 state might be higher
than that of the 1 state. This makes correcting for readout more difficult. This
function runs the program normally ``(trials//2)`` times. The other half of the time,
it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
measured classical bit back.
See :py:func:`run` for this function's parameter descriptions.
|
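A numpy-only illustration of the symmetrization idea (not the pyquil API itself): a readout that misreads 1 as 0 is balanced by flipping half the shots before measurement and flipping the results back:

import numpy as np

rng = np.random.default_rng(0)
p_read_1_as_0 = 0.2                       # biased readout: a 1 is misread as 0 with probability 0.2

def noisy_readout(bits):
    flips = (bits == 1) & (rng.random(bits.shape) < p_read_1_as_0)
    return np.where(flips, 0, bits)

true_bits = np.ones(10000, dtype=int)
plain = noisy_readout(true_bits)
flipped_back = 1 - noisy_readout(1 - true_bits[:5000])
symmetrized = np.concatenate([noisy_readout(true_bits[:5000]), flipped_back])
print(plain.mean(), symmetrized.mean())   # about 0.8 vs 0.9: the error is now symmetric between 0 and 1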
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
|
Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
|
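For example, assuming the function above is importable:

ranges, singles = extract_ranges([1, 2, 3, 7, 9, 10])
print(ranges)    # [[1, 3], [9, 10]]
print(singles)   # [7]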
def arrow_(self, xloc, yloc, text, orientation="v", arrowstyle='->'):
"""
Returns an arrow for a chart. Params: the text, xloc and yloc are
coordinates to position the arrow. Orientation is the way to display
the arrow: possible values are ``[<, ^, >, v]``. Arrow style is the
graphic style of the arrow:
possible values: ``[-, ->, -[, -|>, <->, <|-|>]``
"""
try:
arrow = hv.Arrow(
xloc,
yloc,
text,
orientation,
arrowstyle=arrowstyle)
return arrow
except Exception as e:
self.err(e, self.arrow_, "Can not draw arrow chart")
|
Returns an arrow for a chart. Params: the text, xloc and yloc are
coordinates to position the arrow. Orientation is the way to display
the arrow: possible values are ``[<, ^, >, v]``. Arrow style is the
graphic style of the arrow:
possible values: ``[-, ->, -[, -|>, <->, <|-|>]``
|
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
|
Delete given loc(-s) from block in-place.
|
def create_audio_mp3_profile(apps, schema_editor):
""" Create audio_mp3 profile """
Profile = apps.get_model('edxval', 'Profile')
Profile.objects.get_or_create(profile_name=AUDIO_MP3_PROFILE)
|
Create audio_mp3 profile
|
def _make_association(self, clk=None, rst=None) -> None:
"""
Associate this object with specified clk/rst
"""
if clk is not None:
assert self._associatedClk is None
self._associatedClk = clk
if rst is not None:
assert self._associatedRst is None
self._associatedRst = rst
|
Associate this object with specified clk/rst
|
def _write(self, data):
"""
Writes string data out to Scratch
"""
total_sent = 0
length = len(data)
while total_sent < length:
try:
sent = self.socket.send(data[total_sent:])
            except socket.error as err:
                self.connected = False
                raise ScratchError("[Errno %d] %s" % (err.errno, err.strerror))
if sent == 0:
self.connected = False
raise ScratchConnectionError("Connection broken")
total_sent += sent
|
Writes string data out to Scratch
|
def update(cls, cluster_id_label, cluster_info):
"""
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.put(cls.element_path(cluster_id_label), data=cluster_info)
|
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
|
def get_path_matching(name):
"""Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath.
"""
# first try looking in the user folder
p = os.path.join(os.path.expanduser("~"), name)
# then try expanding upwards from cwd
if not os.path.isdir(p):
p = None
drive, folders = os.path.splitdrive(os.getcwd())
folders = folders.split(os.sep)
folders.insert(0, os.sep)
if name in folders:
p = os.path.join(drive, *folders[: folders.index(name) + 1])
# TODO: something more robust to catch the rest of the cases?
return p
|
Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath.
|
def _pypi_head_package(dependency):
"""Hit pypi with a http HEAD to check if pkg_name exists."""
if dependency.specs:
_, version = dependency.specs[0]
url = BASE_PYPI_URL_WITH_VERSION.format(name=dependency.project_name, version=version)
else:
url = BASE_PYPI_URL.format(name=dependency.project_name)
logger.debug("Doing HEAD requests against %s", url)
req = request.Request(url, method='HEAD')
try:
response = request.urlopen(req)
except HTTPError as http_error:
if http_error.code == HTTP_STATUS_NOT_FOUND:
return False
else:
raise
if response.status == HTTP_STATUS_OK:
logger.debug("%r exists in PyPI.", dependency)
return True
else:
        # Maybe we are getting something like a redirect. In this case we only
        # warn the user and still try to install the dependency.
        # In the worst case, fades will fail to install it.
logger.warning("Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists",
response.status, response.reason, dependency)
return True
|
Hit pypi with a http HEAD to check if pkg_name exists.
|
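A self-contained sketch of the same HEAD probe using only the standard library; the JSON API endpoint shown is an assumption and may differ from the BASE_PYPI_URL constants used above:

from urllib import request
from urllib.error import HTTPError

def package_exists(name):
    req = request.Request("https://pypi.org/pypi/{}/json".format(name), method="HEAD")
    try:
        return request.urlopen(req).status == 200
    except HTTPError as err:
        if err.code == 404:
            return False
        raise

print(package_exists("requests"))   # True (requires network access)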
def download(self, path, file):
"""Download remote file to disk."""
resp = self._sendRequest("GET", path)
if resp.status_code == 200:
with open(file, "wb") as f:
f.write(resp.content)
else:
raise YaDiskException(resp.status_code, resp.content)
|
Download remote file to disk.
|
def _build_index(self):
"""Itera todos los datasets, distribucioens y fields indexandolos."""
datasets_index = {}
distributions_index = {}
fields_index = {}
        # iterate over all datasets
for dataset_index, dataset in enumerate(self.datasets):
if "identifier" in dataset:
datasets_index[dataset["identifier"]] = {
"dataset_index": dataset_index
}
            # iterate over the dataset's distributions
for distribution_index, distribution in enumerate(
dataset.get("distribution", [])):
if "identifier" in distribution:
distributions_index[distribution["identifier"]] = {
"distribution_index": distribution_index,
"dataset_identifier": dataset["identifier"]
}
                # iterate over the distribution's fields
for field_index, field in enumerate(
distribution.get("field", [])):
if "id" in field:
fields_index[field["id"]] = {
"field_index":
field_index,
"dataset_identifier":
dataset["identifier"],
"distribution_identifier":
distribution["identifier"]
}
setattr(self, "_distributions_index", distributions_index)
setattr(self, "_datasets_index", datasets_index)
setattr(self, "_fields_index", fields_index)
|
Iterates over all datasets, distributions and fields, indexing them.
|
def get_ajax(self, request, *args, **kwargs):
""" Called when accessed via AJAX on the request method specified by the Datatable. """
response_data = self.get_json_response_object(self._datatable)
response = HttpResponse(self.serialize_to_json(response_data),
content_type="application/json")
return response
|
Called when accessed via AJAX on the request method specified by the Datatable.
|
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(networks, self.network_class)
return class_(max_size=self.quorum)
|
Create a new network by reading the configuration file.
|
def __r1_hungarian(self, word, vowels, digraphs):
"""
Return the region R1 that is used by the Hungarian stemmer.
If the word begins with a vowel, R1 is defined as the region
after the first consonant or digraph (= two letters stand for
one phoneme) in the word. If the word begins with a consonant,
it is defined as the region after the first vowel in the word.
If the word does not contain both a vowel and consonant, R1
is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
"""
r1 = ""
if word[0] in vowels:
for digraph in digraphs:
if digraph in word[1:]:
r1 = word[word.index(digraph[-1])+1:]
return r1
for i in range(1, len(word)):
if word[i] not in vowels:
r1 = word[i+1:]
break
else:
for i in range(1, len(word)):
if word[i] in vowels:
r1 = word[i+1:]
break
return r1
|
Return the region R1 that is used by the Hungarian stemmer.
If the word begins with a vowel, R1 is defined as the region
after the first consonant or digraph (= two letters stand for
one phoneme) in the word. If the word begins with a consonant,
it is defined as the region after the first vowel in the word.
If the word does not contain both a vowel and consonant, R1
is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
|
def delete(self, photo, **kwds):
"""
Endpoint: /photo/<id>/delete.json
Deletes a photo.
Returns True if successful.
Raises a TroveboxError if not.
"""
return self._client.post("/photo/%s/delete.json" %
self._extract_id(photo),
**kwds)["result"]
|
Endpoint: /photo/<id>/delete.json
Deletes a photo.
Returns True if successful.
Raises a TroveboxError if not.
|
def extern_project_multi(self, context_handle, val, field_str_ptr, field_str_len):
"""Given a Key for `obj`, and a field name, project the field as a list of Keys."""
c = self._ffi.from_handle(context_handle)
obj = c.from_value(val[0])
field_name = self.to_py_str(field_str_ptr, field_str_len)
return c.vals_buf(tuple(c.to_value(p) for p in getattr(obj, field_name)))
|
Given a Key for `obj`, and a field name, project the field as a list of Keys.
|
def to_excel(self,
workbook=None,
worksheet=None,
xl_app=None,
clear=True,
rename=True,
resize_columns=True):
"""
Writes worksheet to an Excel Worksheet COM object.
Requires :py:module:`pywin32` to be installed.
:param workbook: xltable.Workbook this sheet belongs to.
:param worksheet: Excel COM Worksheet instance to write to.
:param xl_app: Excel COM Excel Application to write to.
:param bool clear: If a worksheet is provided, clear worksheet before writing.
:param bool rename: If a worksheet is provided, rename self to match the worksheet.
:param bool resize_columns: Resize sheet columns after writing.
"""
from win32com.client import Dispatch, constants, gencache
if xl_app is None:
if worksheet is not None:
xl_app = worksheet.Parent.Application
elif workbook is not None and hasattr(workbook.workbook_obj, "Application"):
xl_app = workbook.workbook_obj.Application
else:
xl_app = Dispatch("Excel.Application")
xl = xl_app = gencache.EnsureDispatch(xl_app)
# Create a workbook if there isn't one already
if not workbook:
from .workbook import Workbook
workbook = Workbook(worksheets=[self])
if worksheet is None:
# If there's no worksheet then call Workbook.to_excel which will create one
return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns)
if rename:
self.__name = worksheet.Name
# set manual calculation and turn off screen updating while we update the cells
calculation = xl.Calculation
screen_updating = xl.ScreenUpdating
xl.Calculation = constants.xlCalculationManual
xl.ScreenUpdating = False
try:
# clear the worksheet and reset the styles
if clear:
worksheet.Cells.ClearContents()
worksheet.Cells.Font.Bold = False
worksheet.Cells.Font.Size = 11
worksheet.Cells.Font.Color = 0x000000
worksheet.Cells.Interior.ColorIndex = 0
worksheet.Cells.NumberFormat = "General"
# get any array formula tables
array_formula_tables = []
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
array_formula_tables.append((row, col, row + table.height, col + table.width))
def _is_in_array_formula_table(row, col):
"""returns True if this formula cell is part of an array formula table"""
for top, left, bottom, right in array_formula_tables:
if bottom >= row >= top and left <= col <= right:
return True
return False
origin = worksheet.Range("A1")
xl_cell = origin
for r, row in enumerate(self.iterrows(workbook)):
row = _to_pywintypes(row)
# set the value and formulae to the excel range (it's much quicker to
# write a row at a time and update the formula than it is it do it
# cell by cell)
if clear:
xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row)))
xl_row.Value = row
else:
for c, value in enumerate(row):
if value is not None:
xl_cell.Offset(1, 1 + c).Value = value
for c, value in enumerate(row):
if isinstance(value, str):
if value.startswith("="):
formula_value = self.__formula_values.get((r, c), 0)
xl_cell.Offset(1, 1 + c).Value = formula_value
xl_cell.Offset(1, 1 + c).Formula = value
elif value.startswith("{=") \
and not _is_in_array_formula_table(r, c):
formula_value = self.__formula_values.get((r, c), 0)
xl_cell.Offset(1, 1 + c).Value = formula_value
xl_cell.Offset(1, 1 + c).FormulaArray = value
# move to the next row
xl_cell = xl_cell.Offset(2, 1)
# set any array formulas
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
data = table.get_data(workbook, row, col)
height, width = data.shape
upper_left = origin.Offset(row+1, col+1)
lower_right = origin.Offset(row + height, col + width)
xl_range = worksheet.Range(upper_left, lower_right)
xl_range.FormulaArray = table.formula.get_formula(workbook, row, col)
# set any formatting
for (row, col), style in self._get_all_styles().items():
r = origin.Offset(1 + row, 1 + col)
if style.bold:
r.Font.Bold = True
if style.excel_number_format is not None:
r.NumberFormat = style.excel_number_format
if style.size is not None:
r.Font.Size = style.size
if style.text_color is not None:
r.Font.Color = _to_bgr(style.text_color)
if style.bg_color is not None:
r.Interior.Color = _to_bgr(style.bg_color)
if style.text_wrap or style.border:
raise Exception("text wrap and border not implemented")
# add any charts
for chart, (row, col) in self.__charts:
top_left = origin.Offset(1 + row, 1 + col)
xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart
xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype)
if chart.title:
xl_chart.ChartTitle = chart.title
for series in chart.iter_series(self, row, col):
xl_series = xl_chart.SeriesCollection().NewSeries()
xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("="))
if "categories" in series:
xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("="))
if "name" in series:
xl_series.Name = series["name"]
finally:
xl.ScreenUpdating = screen_updating
xl.Calculation = calculation
if resize_columns:
try:
worksheet.Cells.EntireColumn.AutoFit()
except:
pass
|
Writes worksheet to an Excel Worksheet COM object.
Requires :py:module:`pywin32` to be installed.
:param workbook: xltable.Workbook this sheet belongs to.
:param worksheet: Excel COM Worksheet instance to write to.
:param xl_app: Excel COM Excel Application to write to.
:param bool clear: If a worksheet is provided, clear worksheet before writing.
:param bool rename: If a worksheet is provided, rename self to match the worksheet.
:param bool resize_columns: Resize sheet columns after writing.
|
def content(self):
"""Function returns the body of the as2 payload as a bytes object"""
if not self.payload:
return ''
if self.payload.is_multipart():
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
content = self.payload.get_payload()
if isinstance(content, str_cls):
content = content.encode('utf-8')
return content
|
Function returns the body of the as2 payload as a bytes object
|
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
|
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
|
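A self-contained demonstration of the same brace-scan on sample output, using the standard json module in place of salt.utils.json:

import json

out = 'motd banner line\n{\n  "status": "ok",\n  "count": 3\n}\ntrailing noise'

jsonret, in_json = [], False
for ln_ in out.split('\n'):
    if '{' in ln_:
        in_json = True
    if in_json:
        jsonret.append(ln_)
    if '}' in ln_:
        in_json = False
print(json.loads('\n'.join(jsonret)))   # {'status': 'ok', 'count': 3}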
def filtered_context(context):
"""Filters a context
This will return a new context with only the resources that
are actually available for use. Uses tags and command line
options to make determination."""
ctx = Context(context.opt)
for resource in context.resources():
if resource.child:
continue
if resource.filtered():
ctx.add(resource)
return ctx
|
Filters a context
This will return a new context with only the resources that
are actually available for use. Uses tags and command line
options to make determination.
|