| code | docstring |
|---|---|
def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
"""
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
"""
info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
info.compress_type = zipfile.ZIP_DEFLATED
info.comment = "".encode()
info.create_system = 0
zfile.writestr(info, content)
|
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
|
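A minimal usage sketch (the archive and file names here are invented); pinning date_time in the ZipInfo keeps the produced archives byte-for-byte reproducible:
import zipfile
with zipfile.ZipFile("bundle.zip", "w") as zf:
    write_file_to_zip_with_neutral_metadata(zf, "docs/readme.txt", "hello world")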
def next(cls):
"""Return next available record identifier."""
try:
with db.session.begin_nested():
obj = cls()
db.session.add(obj)
except IntegrityError: # pragma: no cover
with db.session.begin_nested():
# Someone has likely modified the table without using the
# models API. Let's fix the problem.
cls._set_sequence(cls.max())
obj = cls()
db.session.add(obj)
return obj.recid
|
Return next available record identifier.
|
def upload_html(destination, html, name=None):
"""
Uploads the HTML to a file on the server
"""
[project, path, n] = parse_destination(destination)
try:
dxfile = dxpy.upload_string(html, media_type="text/html", project=project, folder=path, hidden=True, name=name or None)
return dxfile.get_id()
except dxpy.DXAPIError as ex:
parser.error("Could not upload HTML report to DNAnexus server! ({ex})".format(ex=ex))
|
Uploads the HTML to a file on the server
|
def call_for_each_tower(
towers, func, devices=None, use_vs=None):
"""
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU ids.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
use_vs (list[bool]): list of use_vs flags to be passed to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
"""
ret = []
if devices is not None:
assert len(devices) == len(towers)
if use_vs is not None:
assert len(use_vs) == len(towers)
tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]
for idx, t in enumerate(towers):
device = devices[idx] if devices is not None else '/gpu:{}'.format(t)
usevs = use_vs[idx] if use_vs is not None else False
reuse = not usevs and idx > 0
with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(
tower_names[idx],
vs_name=tower_names[idx] if usevs else '',
index=idx, total=len(towers)):
if len(str(device)) < 10: # a device function doesn't have good string description
logger.info("Building graph for training tower {} on device {} ...".format(idx, device))
else:
logger.info("Building graph for training tower {} ...".format(idx))
# When use_vs is True, use LOCAL_VARIABLES,
# so these duplicated variables won't be saved by default.
with override_to_local_variable(enable=usevs):
ret.append(func())
return ret
|
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU ids.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
use_vs (list[bool]): list of use_vs flags to be passed to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
|
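A hedged usage sketch (TensorFlow 1.x graph mode; build_tower is a hypothetical graph-building callable, not part of the source):
def build_tower():
    # construct the per-tower forward/backward graph and return e.g. the cost tensor
    ...
# costs = call_for_each_tower(towers=[0, 1], func=build_tower)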
def split_and_strip_without(string, exclude, separator_regexp=None):
"""Split a string into items, and trim any excess spaces
Any items in exclude are not in the returned list
>>> split_and_strip_without('fred, was, here ', ['was'])
['fred', 'here']
"""
result = split_and_strip(string, separator_regexp)
if not exclude:
return result
return [x for x in result if x not in exclude]
|
Split a string into items, and trim any excess spaces
Any items in exclude are not in the returned list
>>> split_and_strip_without('fred, was, here ', ['was'])
['fred', 'here']
|
def dfs_postorder(self, reverse=False):
"""Generator that returns each element of the tree in Postorder order.
Keyword arguments:
reverse -- if true, the search is done from right to left."""
stack = deque()
stack.append(self)
visited = set()
while stack:
node = stack.pop()
if node in visited:
yield node
else:
visited.add(node)
stack.append(node)
if hasattr(node, "childs"):
if reverse:
stack.extend(node.childs)
else:
stack.extend(node.childs[::-1])
|
Generator that returns each element of the tree in postorder.
Keyword arguments:
reverse -- if true, the search is done from right to left.
|
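If the generator above is mixed into a simple node class, the expected visiting order looks like this (class and names are illustrative only):
class Node:
    def __init__(self, name, childs=()):
        self.name = name
        self.childs = list(childs)
# With dfs_postorder available as a method of Node:
# root = Node('a', [Node('b'), Node('c', [Node('d')])])
# [n.name for n in root.dfs_postorder()]  ->  ['b', 'd', 'c', 'a']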
def match(self, pattern):
"""Perform regex match at index."""
m = pattern.match(self._string, self._index)
if m:
self._index = m.end()
return m
|
Perform regex match at index.
|
def tredparse(args):
"""
%prog tredparse
Compare performances of various variant callers on simulated STR datasets.
Adds coverage comparisons as panels C and D.
"""
p = OptionParser(tredparse.__doc__)
p.add_option('--maxinsert', default=300, type="int",
help="Maximum number of repeats")
add_simulate_options(p)
opts, args, iopts = p.set_image_options(args, figsize="10x10")
if len(args) != 0:
sys.exit(not p.print_help())
depth = opts.depth
max_insert = opts.maxinsert
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2,
figsize=(iopts.w, iopts.h))
plt.tight_layout(pad=3)
# ax1: lobSTR vs TREDPARSE with haploid model
lobstr_results = parse_results("lobstr_results_homo-20x-150bp-500bp.txt")
tredparse_results = parse_results("tredparse_results_homo-20x-150bp-500bp.txt")
title = SIMULATED_HAPLOID + r" (Depth=$%s\times$)" % depth
plot_compare(ax1, title, tredparse_results, lobstr_results,
max_insert=max_insert)
# ax2: lobSTR vs TREDPARSE with diploid model (depth=20x)
lobstr_results = parse_results("lobstr_results_het-20x-150bp-500bp.txt", exclude=20)
tredparse_results = parse_results("tredparse_results_het-20x-150bp-500bp.txt", exclude=20)
title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % depth
plot_compare(ax2, title, tredparse_results, lobstr_results,
max_insert=max_insert)
# ax3: lobSTR vs TREDPARSE with diploid model (depth=5x)
lobstr_results = parse_results("lobstr_results_het-5x-150bp-500bp.txt", exclude=20)
tredparse_results = parse_results("tredparse_results_het-5x-150bp-500bp.txt", exclude=20)
title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % 5
plot_compare(ax3, title, tredparse_results, lobstr_results,
max_insert=max_insert)
# ax4: lobSTR vs TREDPARSE with diploid model (depth=80x)
lobstr_results = parse_results("lobstr_results_het-80x-150bp-500bp.txt", exclude=20)
tredparse_results = parse_results("tredparse_results_het-80x-150bp-500bp.txt", exclude=20)
title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % 80
plot_compare(ax4, title, tredparse_results, lobstr_results,
max_insert=max_insert)
for ax in (ax1, ax2, ax3, ax4):
ax.set_xlim(0, max_insert)
ax.set_ylim(0, max_insert)
root = fig.add_axes([0, 0, 1, 1])
pad = .03
panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"),
(pad / 2, 1 / 2. , "C"), (1 / 2., 1 / 2. , "D")))
normalize_axes(root)
image_name = "tredparse." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
%prog tredparse
Compare performances of various variant callers on simulated STR datasets.
Adds coverage comparisons as panels C and D.
|
def _common_query_parameters(self, doc_type, includes, owner,
promulgated_only, series, sort):
'''
Extract common query parameters shared by search and list into a list of tuples.
@param includes What metadata to return in results (e.g. charm-config).
@param doc_type Filter to this type: bundle or charm.
@param promulgated_only Whether to filter to only promulgated charms.
@param sort Sorting the result based on the sort string provided
which can be name, author, series and - in front for descending.
@param owner Optional owner. If provided, search results will only
include entities that owner can view.
@param series The series to filter; can be a list of series or a
single series.
'''
queries = []
if includes is not None:
queries.extend([('include', include) for include in includes])
if doc_type is not None:
queries.append(('type', doc_type))
if promulgated_only:
queries.append(('promulgated', 1))
if owner is not None:
queries.append(('owner', owner))
if series is not None:
if type(series) is list:
series = ','.join(series)
queries.append(('series', series))
if sort is not None:
queries.append(('sort', sort))
return queries
|
Extract common query parameters shared by search and list into a list of tuples.
@param includes What metadata to return in results (e.g. charm-config).
@param doc_type Filter to this type: bundle or charm.
@param promulgated_only Whether to filter to only promulgated charms.
@param sort Sorting the result based on the sort string provided
which can be name, author, series and - in front for descending.
@param owner Optional owner. If provided, search results will only
include entities that owner can view.
@param series The series to filter; can be a list of series or a
single series.
|
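An illustrative call (argument values invented), showing the list of (key, value) pairs the helper builds, ready to be URL-encoded:
# _common_query_parameters(doc_type='charm', includes=['charm-config'],
#                          owner=None, promulgated_only=True,
#                          series=['xenial', 'trusty'], sort='name')
# -> [('include', 'charm-config'), ('type', 'charm'), ('promulgated', 1),
#     ('series', 'xenial,trusty'), ('sort', 'name')]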
def divrank_scipy(G, alpha=0.25, d=0.85, personalization=None,
max_iter=100, tol=1.0e-6, nstart=None, weight='weight',
dangling=None):
'''
Returns the DivRank (Diverse Rank) of the nodes in the graph.
This code is based on networkx.pagerank_scipy
'''
import scipy.sparse
N = len(G)
if N == 0:
return {}
nodelist = G.nodes()
M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
dtype=float)
S = scipy.array(M.sum(axis=1)).flatten()
S[S != 0] = 1.0 / S[S != 0]
Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
M = Q * M
# self-link (DivRank)
M = scipy.sparse.lil_matrix(M)
M.setdiag(0.0)
M = alpha * M
M.setdiag(1.0 - alpha)
#print M.sum(axis=1)
# initial vector
x = scipy.repeat(1.0 / N, N)
# Personalization vector
if personalization is None:
p = scipy.repeat(1.0 / N, N)
else:
missing = set(nodelist) - set(personalization)
if missing:
raise NetworkXError('Personalization vector dictionary '
'must have a value for every node. '
'Missing nodes %s' % missing)
p = scipy.array([personalization[n] for n in nodelist],
dtype=float)
p = p / p.sum()
# Dangling nodes
if dangling is None:
dangling_weights = p
else:
missing = set(nodelist) - set(dangling)
if missing:
raise NetworkXError('Dangling node dictionary '
'must have a value for every node. '
'Missing nodes %s' % missing)
# Convert the dangling dictionary into an array in nodelist order
dangling_weights = scipy.array([dangling[n] for n in nodelist],
dtype=float)
dangling_weights /= dangling_weights.sum()
is_dangling = scipy.where(S == 0)[0]
# power iteration: make up to max_iter iterations
for _ in range(max_iter):
xlast = x
D_t = M * x
x = (
d * (x / D_t * M * x + sum(x[is_dangling]) * dangling_weights)
+ (1.0 - d) * p
)
# check convergence, l1 norm
err = scipy.absolute(x - xlast).sum()
if err < N * tol:
return dict(zip(nodelist, map(float, x)))
raise NetworkXError('divrank_scipy: power iteration failed to converge '
'in %d iterations.' % max_iter)
|
Returns the DivRank (Diverse Rank) of the nodes in the graph.
This code is based on networkx.pagerank_scipy
|
def compare(self, statement_a, statement_b):
"""
Return the calculated similarity of two
statements based on the Jaccard index.
"""
# Make both strings lowercase
document_a = self.nlp(statement_a.text.lower())
document_b = self.nlp(statement_b.text.lower())
statement_a_lemmas = set([
token.lemma_ for token in document_a if not token.is_stop
])
statement_b_lemmas = set([
token.lemma_ for token in document_b if not token.is_stop
])
# Calculate Jaccard similarity
numerator = len(statement_a_lemmas.intersection(statement_b_lemmas))
denominator = float(len(statement_a_lemmas.union(statement_b_lemmas)))
ratio = numerator / denominator
return ratio
|
Return the calculated similarity of two
statements based on the Jaccard index.
|
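A worked Jaccard example on two small lemma sets (values are illustrative and independent of any NLP pipeline):
a = {"be", "cat", "on", "mat"}
b = {"be", "dog", "on", "mat"}
jaccard = len(a & b) / float(len(a | b))   # 3 shared / 5 total = 0.6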
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,),
error_callback=None, custom_log=None, retry=0):
"""Run a command on the remote host.
The command is run on the remote host, if there is a redirected host
then the command will be run on that redirected host. See __init__.
:param cmd: the command to run
:type cmd: str
:param sudo: True if the command should be run with sudo; this parameter
disables the use of environment files.
:type sudo: bool
:param success_status: the list of the possible success status
:type success_status: list
:param error_callback: if provided, the callback to call in case of
a failure. It will be called with two args: the output of the command
and the returned error code.
:return: the tuple (output of the command, returned code)
:rtype: tuple
:param custom_log: an optional string to record in the log instead of the command.
This is useful for example if you want to hide a password.
:type custom_log: str
"""
self._check_started()
cmd_output = io.StringIO()
channel = self._get_channel()
cmd = self._prepare_cmd(cmd, sudo=sudo)
if not custom_log:
custom_log = cmd
LOG.info("%s run '%s'" % (self.description, custom_log))
channel.exec_command(cmd)
while True:
received = None
rl, _, _ = select.select([channel], [], [], 30)
if rl:
received = channel.recv(1024).decode('UTF-8', 'ignore').strip()
if received:
LOG.debug(received)
cmd_output.write(received)
if channel.exit_status_ready() and not received:
break
cmd_output = cmd_output.getvalue()
exit_status = channel.exit_status
try:
return self._evaluate_run_result(
exit_status, cmd_output, ignore_error=ignore_error,
success_status=success_status, error_callback=error_callback,
custom_log=custom_log)
except (paramiko.ssh_exception.SSHException, socket.error) as e:
if not retry:
raise e
else:
return self.run(
cmd, sudo=sudo, ignore_error=ignore_error,
success_status=success_status,
error_callback=error_callback, custom_log=custom_log,
retry=(retry - 1))
|
Run a command on the remote host.
The command is run on the remote host, if there is a redirected host
then the command will be run on that redirected host. See __init__.
:param cmd: the command to run
:type cmd: str
:param sudo: True if the command should be run with sudo; this parameter
disables the use of environment files.
:type sudo: bool
:param success_status: the list of the possible success status
:type success_status: list
:param error_callback: if provided, the callback to call in case of
a failure. It will be called with two args: the output of the command
and the returned error code.
:return: the tuple (output of the command, returned code)
:rtype: tuple
:param custom_log: an optional string to record in the log instead of the command.
This is useful for example if you want to hide a password.
:type custom_log: str
|
def plot(result_pickle_file_path, show, plot_save_file):
"""
[sys_analyser] draw result DataFrame
"""
import pandas as pd
from .plot import plot_result
result_dict = pd.read_pickle(result_pickle_file_path)
plot_result(result_dict, show, plot_save_file)
|
[sys_analyser] draw result DataFrame
|
def generate_config_set(self, config):
'''
Generates a list of magnitude frequency distributions and renders each as
a (configuration, weight) tuple
:param dict/list config:
Configuration parameters of the magnitude frequency distribution
'''
if isinstance(config, dict):
# Configuration list contains only one element
self.config = [(config, 1.0)]
elif isinstance(config, list):
# Multiple configurations with corresponding weights
total_weight = 0.
self.config = []
for params in config:
weight = params['Model_Weight']
total_weight += params['Model_Weight']
self.config.append((params, weight))
if fabs(total_weight - 1.0) > 1E-7:
raise ValueError('MFD config weights do not sum to 1.0 for '
'fault %s' % self.id)
else:
raise ValueError('MFD config must be input as dictionary or list!')
|
Generates a list of magnitude frequency distributions and renders each as
a (configuration, weight) tuple
:param dict/list config:
Configuration parameters of the magnitude frequency distribution
|
def parse_on_condition(self, node):
"""
Parses <OnCondition>
@param node: Node containing the <OnCondition> element
@type node: xml.etree.Element
"""
try:
test = node.lattrib['test']
except:
self.raise_error('<OnCondition> must specify a test.')
event_handler = OnCondition(test)
self.current_regime.add_event_handler(event_handler)
self.current_event_handler = event_handler
self.process_nested_tags(node)
self.current_event_handler = None
|
Parses <OnCondition>
@param node: Node containing the <OnCondition> element
@type node: xml.etree.Element
|
def timebinlc_worker(task):
'''
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form::
task[0] = lcfile
task[1] = binsizesec
task[2] = {'outdir','lcformat','lcformatdir',
'timecols','magcols','errcols','minbinelems'}
Returns
-------
str
The output pickle file with the binned LC if successful. None otherwise.
'''
lcfile, binsizesec, kwargs = task
try:
binnedlc = timebinlc(lcfile, binsizesec, **kwargs)
LOGINFO('%s binned using %s sec -> %s OK' %
(lcfile, binsizesec, binnedlc))
return binnedlc
except Exception as e:
LOGEXCEPTION('failed to bin %s using binsizesec = %s' % (lcfile,
binsizesec))
return None
|
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form::
task[0] = lcfile
task[1] = binsizesec
task[2] = {'outdir','lcformat','lcformatdir',
'timecols','magcols','errcols','minbinelems'}
Returns
-------
str
The output pickle file with the binned LC if successful. None otherwise.
|
def T11(word, rules):
'''If a VVV sequence contains a /u,y/-final diphthong, insert a syllable
boundary between the diphthong and the third vowel.'''
WORD = word
offset = 0
for vvv in precedence_sequences(WORD):
i = vvv.start(1) + (1 if vvv.group(1)[-1] in 'uyUY' else 2) + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
rules += ' T11' if word != WORD else ''
return WORD, rules
|
If a VVV sequence contains a /u,y/-final diphthong, insert a syllable
boundary between the diphthong and the third vowel.
|
def asterisk_to_min_max(field, time_filter, search_engine_endpoint, actual_params=None):
"""
Translate [* TO *] into something like [MIN-INDEXED-DATE TO MAX-INDEXED-DATE]
:param field: map the stats to this field.
:param time_filter: this is the value to be translated, e.g. "[* TO 2000]"
:param search_engine_endpoint: solr core
:param actual_params: (not implemented) to merge with other params.
:return: translated time filter
"""
if actual_params:
raise NotImplementedError("actual_params")
start, end = parse_solr_time_range_as_pair(time_filter)
if start == '*' or end == '*':
params_stats = {
"q": "*:*",
"rows": 0,
"stats.field": field,
"stats": "true",
"wt": "json"
}
res_stats = requests.get(search_engine_endpoint, params=params_stats)
if res_stats.ok:
stats_date_field = res_stats.json()["stats"]["stats_fields"][field]
date_min = stats_date_field["min"]
date_max = stats_date_field["max"]
if start != '*':
date_min = start
if end != '*':
date_max = end
time_filter = "[{0} TO {1}]".format(date_min, date_max)
return time_filter
|
Translate [* TO *] into something like [MIN-INDEXED-DATE TO MAX-INDEXED-DATE]
:param field: map the stats to this field.
:param time_filter: this is the value to be translated, e.g. "[* TO 2000]"
:param search_engine_endpoint: solr core
:param actual_params: (not implemented) to merge with other params.
:return: translated time filter
|
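An illustrative translation (the endpoint URL and indexed dates are made up); only the '*' ends are replaced by the Solr stats min/max:
# Suppose the stats component reports min=1990-01-01T00:00:00Z for 'layer_date':
# asterisk_to_min_max('layer_date', '[* TO 2000]', 'http://localhost:8983/solr/core/select')
# -> '[1990-01-01T00:00:00Z TO 2000]'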
def populate_branch(self, editor, root_item, tree_cache=None):
"""
Generates an outline of the editor's content and stores the result
in a cache.
"""
if tree_cache is None:
tree_cache = {}
# Removing cached items for which line is > total line nb
for _l in list(tree_cache.keys()):
if _l >= editor.get_line_count():
# Checking if key is still in tree cache in case one of its
# ancestors was deleted in the meantime (deleting all children):
if _l in tree_cache:
remove_from_tree_cache(tree_cache, line=_l)
ancestors = [(root_item, 0)]
cell_ancestors = [(root_item, 0)]
previous_item = None
previous_level = None
prev_cell_level = None
prev_cell_item = None
oe_data = editor.get_outlineexplorer_data()
for block_nb in range(editor.get_line_count()):
line_nb = block_nb+1
data = oe_data.get(block_nb)
level = None if data is None else data.fold_level
citem, clevel, _d = tree_cache.get(line_nb, (None, None, ""))
# Skip iteration if line is not the first line of a foldable block
if level is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Searching for class/function statements
not_class_nor_function = data.is_not_class_nor_function()
if not not_class_nor_function:
class_name = data.get_class_name()
if class_name is None:
func_name = data.get_function_name()
if func_name is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Skip iteration for if/else/try/for/etc foldable blocks.
if not_class_nor_function and not data.is_comment():
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
cname = to_text_string(citem.text(0))
cparent = citem.parent
# Blocks for Cell Groups.
if (data is not None and data.def_type == data.CELL and
self.group_cells):
preceding = (root_item if previous_item is None
else previous_item)
cell_level = data.cell_level
if prev_cell_level is not None:
if cell_level == prev_cell_level:
pass
elif cell_level > prev_cell_level:
cell_ancestors.append((prev_cell_item,
prev_cell_level))
else:
while (len(cell_ancestors) > 1 and
cell_level <= prev_cell_level):
cell_ancestors.pop(-1)
_item, prev_cell_level = cell_ancestors[-1]
parent, _level = cell_ancestors[-1]
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = CellItem(data.def_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
ancestors = [(item, 0)]
prev_cell_level = cell_level
prev_cell_item = item
previous_item = item
continue
# Blocks for Code Groups.
if previous_level is not None:
if level == previous_level:
pass
elif level > previous_level:
ancestors.append((previous_item, previous_level))
else:
while len(ancestors) > 1 and level <= previous_level:
ancestors.pop(-1)
_item, previous_level = ancestors[-1]
parent, _level = ancestors[-1]
preceding = root_item if previous_item is None else previous_item
if not_class_nor_function and data.is_comment():
if not self.show_comments:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
if data.def_type == data.CELL:
item = CellItem(data.def_name, line_nb, parent, preceding)
else:
item = CommentItem(data.text, line_nb, parent, preceding)
elif class_name is not None:
if citem is not None:
if (class_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = ClassItem(class_name, line_nb, parent, preceding)
else:
if citem is not None:
if (func_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = FunctionItem(func_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
previous_level = level
previous_item = item
return tree_cache
|
Generates an outline of the editor's content and stores the result
in a cache.
|
def from_filename(cls, filename):
"""
Class constructor using the path to the corresponding mp3 file. The
metadata will be read from this file to create the song object, so it
must at least contain valid ID3 tags for artist and title.
"""
if not filename:
logger.error('No filename specified')
return None
if not os.path.exists(filename):
logger.error("Err: File '%s' does not exist", filename)
return None
if os.path.isdir(filename):
logger.error("Err: File '%s' is a directory", filename)
return None
try:
audiofile = eyed3.load(filename)
except Exception as error:
print(type(error), error)
return None
# Sometimes eyed3 may return a null object and not raise any exceptions
if audiofile is None:
return None
tags = audiofile.tag
album = tags.album
title = tags.title
lyrics = ''.join([l.text for l in tags.lyrics])
artist = tags.album_artist
if not artist:
artist = tags.artist
song = cls(artist, title, album, lyrics)
song.filename = filename
return song
|
Class constructor using the path to the corresponding mp3 file. The
metadata will be read from this file to create the song object, so it
must at least contain valid ID3 tags for artist and title.
|
def run(self, N=100):
"""
Parameter
---------
N: int
number of particles
Returns
-------
wgts: Weights object
The importance weights (with attributes lw, W, and ESS)
X: ThetaParticles object
The N particles (with attributes theta, logpost)
norm_cst: float
Estimate of the normalising constant of the target
"""
th = self.proposal.rvs(size=N)
self.X = ThetaParticles(theta=th, lpost=None)
self.X.lpost = self.model.logpost(th)
lw = self.X.lpost - self.proposal.logpdf(th)
self.wgts = rs.Weights(lw=lw)
self.norm_cst = rs.log_mean_exp(lw)
|
Parameter
---------
N: int
number of particles
Returns
-------
wgts: Weights object
The importance weights (with attributes lw, W, and ESS)
X: ThetaParticles object
The N particles (with attributes theta, logpost)
norm_cst: float
Estimate of the normalising constant of the target
|
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, '__html__'):
value = value.__html__()
return escape(text_type(value))
|
Enforce HTML escaping. This will probably double escape variables.
|
async def rset(self, timeout: DefaultNumType = _default) -> SMTPResponse:
"""
Send an SMTP RSET command, which resets the server's envelope
(the envelope contains the sender, recipient, and mail data).
:raises SMTPResponseException: on unexpected server response code
"""
await self._ehlo_or_helo_if_needed()
async with self._command_lock:
response = await self.execute_command(b"RSET", timeout=timeout)
if response.code != SMTPStatus.completed:
raise SMTPResponseException(response.code, response.message)
return response
|
Send an SMTP RSET command, which resets the server's envelope
(the envelope contains the sender, recipient, and mail data).
:raises SMTPResponseException: on unexpected server response code
|
def format_output(self, rendered_widgets):
"""
This output will yield all widgets grouped in an unordered list
"""
ret = [u'<ul class="formfield">']
for i, field in enumerate(self.fields):
label = self.format_label(field, i)
help_text = self.format_help_text(field, i)
ret.append(u'<li>%s %s %s</li>' % (
label, rendered_widgets[i], field.help_text and help_text))
ret.append(u'</ul>')
return ''.join(ret)
|
This output will yield all widgets grouped in an unordered list
|
def get_events(self, start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = True, restrict_to_calendars = []):
'''A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are:
description, end, htmlLink, location, organizer, start, summary
Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
'''
es = []
calendar_ids = restrict_to_calendars or self.calendar_ids
for calendar_id in calendar_ids:
now = datetime.now(tz = self.timezone)
events = []
page_token = None
while True:
events = self.service.events().list(pageToken=page_token, maxResults = 250, calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute()
for event in events['items']:
dt = None
nb = DeepNonStrictNestedBunch(event)
assert(not(nb._event))
nb._event = event # keep the original event as returned in case we want to reuse it e.g. insert it into another calendar
if (not ignore_cancelled) or (nb.status != 'cancelled'):
# Ignore cancelled events
if nb.recurrence:
if get_recurring_events_as_instances:
# Retrieve all occurrences of the recurring event within the timeframe
es += self.get_recurring_events(calendar_id, nb.id, start_time, end_time)
else:
es.append(nb)
elif nb.start.dateTime:
dt = dateutil.parser.parse(nb.start.dateTime)
elif nb.start.date:
dt = dateutil.parser.parse(nb.start.date)
dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone)
if dt:
nb.datetime_o = dt
nb.calendar_id = calendar_id
es.append(nb)
page_token = events.get('nextPageToken')
if not page_token:
break
es.sort(key=lambda x: x.datetime_o)
return es
|
A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are:
description, end, htmlLink, location, organizer, start, summary
Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
|
def _auth_req_callback_func(self, context, internal_request):
"""
This function is called by a frontend module when an authorization request has been
processed.
:type context: satosa.context.Context
:type internal_request: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context: The request context
:param internal_request: request processed by the frontend
:return: response
"""
state = context.state
state[STATE_KEY] = {"requester": internal_request.requester}
# TODO consent module should manage any state it needs by itself
try:
state_dict = context.state[consent.STATE_KEY]
except KeyError:
state_dict = context.state[consent.STATE_KEY] = {}
finally:
state_dict.update({
"filter": internal_request.attributes or [],
"requester_name": internal_request.requester_name,
})
satosa_logging(logger, logging.INFO,
"Requesting provider: {}".format(internal_request.requester), state)
if self.request_micro_services:
return self.request_micro_services[0].process(context, internal_request)
return self._auth_req_finish(context, internal_request)
|
This function is called by a frontend module when an authorization request has been
processed.
:type context: satosa.context.Context
:type internal_request: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context: The request context
:param internal_request: request processed by the frontend
:return: response
|
def rename(self, name, **kwargs):
"""
:param name: New name for the object
:type name: string
Renames the remote object.
The name is changed on the copy of the object in the project
associated with the handler.
"""
return self._rename(self._dxid, {"project": self._proj,
"name": name}, **kwargs)
|
:param name: New name for the object
:type name: string
Renames the remote object.
The name is changed on the copy of the object in the project
associated with the handler.
|
def getIncludeAndRuntime():
"""
A function from distutils' build_ext.py that was updated and changed
to ACTUALLY WORK
"""
include_dirs, library_dirs = [], []
py_include = distutils.sysconfig.get_python_inc()
plat_py_include = distutils.sysconfig.get_python_inc(plat_specific=1)
include_dirs.append(py_include)
if plat_py_include != py_include:
include_dirs.append(plat_py_include)
if os.name == 'nt':
library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
MSVC_VERSION = int(distutils.msvccompiler.get_build_version())
if MSVC_VERSION == 14:
library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VS14',
'win32release'))
elif MSVC_VERSION == 9:
suffix = '' if PLATFORM == 'win32' else PLATFORM[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
library_dirs.append(new_lib)
elif MSVC_VERSION == 8:
library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VS8.0',
'win32release'))
elif MSVC_VERSION == 7:
library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VS7.1'))
else:
library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VC6'))
if os.name == 'os2':
library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
is_cygwin = sys.platform[:6] == 'cygwin'
is_atheos = sys.platform[:6] == 'atheos'
is_shared = distutils.sysconfig.get_config_var('Py_ENABLE_SHARED')
is_linux = sys.platform.startswith('linux')
is_gnu = sys.platform.startswith('gnu')
is_sunos = sys.platform.startswith('sunos')
if is_cygwin or is_atheos:
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
library_dirs.append(os.path.join(sys.prefix, "lib", BASENAME,
"config"))
else:
library_dirs.append(os.getcwd())
if (is_linux or is_gnu or is_sunos) and is_shared:
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
library_dirs.append(distutils.sysconfig.get_config_var('LIBDIR'))
else:
library_dirs.append(os.getcwd())
user_include = os.path.join(site.USER_BASE, "include")
user_lib = os.path.join(site.USER_BASE, "lib")
if os.path.isdir(user_include):
include_dirs.append(user_include)
if os.path.isdir(user_lib):
library_dirs.append(user_lib)
ret_object = (include_dirs, library_dirs)
_filter_non_existing_dirs(ret_object)
return ret_object
|
A function from distutils' build_ext.py that was updated and changed
to ACTUALLY WORK
|
def versions_request(self):
"""List Available REST API Versions"""
ret = self.handle_api_exceptions('GET', '', api_ver='')
return [str_dict(x) for x in ret.json()]
|
List Available REST API Versions
|
def compare_digest(a, b):
"""Compare 2 hash digest."""
py_version = sys.version_info[0]
if py_version >= 3:
return _compare_digest_py3(a, b)
return _compare_digest_py2(a, b)
|
Compare 2 hash digest.
|
def extract_docs():
"""
Parses the nano.rpc.Client for methods that have a __doc_meta__ attribute
and saves generated docs
"""
methods = []
def _key(entry):
return
sorted_entries = sorted(Client.__dict__.items(), key=lambda x: x[0])
tree = {}
meta_key = '__doc_meta__'
for attr_name, attr_value in sorted_entries:
if not hasattr(attr_value, meta_key):
continue
func = attr_value
meta = getattr(func, meta_key)
arg_spec = inspect.getargspec(func)
if arg_spec[0] and arg_spec[0][0] in ('cls', 'self'):
del arg_spec[0][0]
func_name = func.__name__
func_spec = func_name + inspect.formatargspec(*arg_spec)
doc = textwrap.dedent((func.__doc__ or ''))
doc = indent(doc, n=3)
func_desc_lines = []
for i, line in enumerate(func.__doc__.splitlines()):
if i == 0:
continue
func_desc_lines.append(line.strip())
if not line:
break
func_desc = ' '.join(func_desc_lines)
doc = textwrap.dedent(
"""\
{func_name}
{func_name_line}
{func_desc}
:py:func:`nano.rpc.Client.{func_spec} <nano.rpc.Client.{func_name}>`
.. .. py:function:: nano.rpc.Client.{func_spec}
.. {doc}
"""
).format(
func_spec=func_spec,
func_name_line='-' * len(func_name),
func_name=func_name,
func_desc=func_desc,
doc=doc,
)
categories = meta['categories']
for category in categories:
tree.setdefault(category, []).append(doc)
directory = 'rpc/methods'
for file in os.listdir(directory):
if file.endswith('.rst'):
os.unlink(os.path.join(directory, file))
for category, func_docs in sorted(tree.items(), key=lambda x: x[0]):
category = category or 'other'
file_path = os.path.join(directory, category) + '.rst'
with open(file_path, 'w') as docfile:
docfile.write('.. _%s-ref:\n' % category + '\n')
title = '{category}'.format(category=category.capitalize())
docfile.write('%s\n' % title)
docfile.write('%s\n' % (len(title) * '='))
docfile.write('\n')
for func_doc in func_docs:
docfile.write(func_doc + '\n')
|
Parses the nano.rpc.Client for methods that have a __doc_meta__ attribute
and saves generated docs
|
def parse_eggs_list(path):
"""Parse eggs list from the script at the given path
"""
with open(path, 'r') as script:
data = script.readlines()
start = 0
end = 0
for counter, line in enumerate(data):
if not start:
if 'sys.path[0:0]' in line:
start = counter + 1
if counter >= start and not end:
if ']' in line:
end = counter
script_eggs = tidy_eggs_list(data[start:end])
return script_eggs
|
Parse eggs list from the script at the given path
|
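The parser targets scripts whose header lists eggs via a sys.path[0:0] assignment, for example (paths are illustrative):
# sys.path[0:0] = [
#     '/srv/app/eggs/foo-1.0-py3.8.egg',
#     '/srv/app/eggs/bar-2.1-py3.8.egg',
#     ]
# parse_eggs_list(script_path) returns the tidied list of those egg paths.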
def run_powerflow_onthefly(components, components_data, grid, export_pypsa_dir=None, debug=False):
"""
Run powerflow to test grid stability
Two cases are defined to be tested here:
i) load case
ii) feed-in case
Parameters
----------
components: dict of pandas.DataFrame
components_data: dict of pandas.DataFrame
export_pypsa_dir: str
Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
Export is omitted if argument is empty.
"""
scenario = cfg_ding0.get("powerflow", "test_grid_stability_scenario")
start_hour = cfg_ding0.get("powerflow", "start_hour")
end_hour = cfg_ding0.get("powerflow", "end_hour")
# choose temp_id
temp_id_set = 1
timesteps = 2
start_time = datetime(1970, 1, 1, 00, 00, 0)
resolution = 'H'
# inspect grid data for integrity
if debug:
data_integrity(components, components_data)
# define investigated time range
timerange = DatetimeIndex(freq=resolution,
periods=timesteps,
start=start_time)
# TODO: Instead of hard coding PF config, values from class PFConfigDing0 can be used here.
# create PyPSA powerflow problem
network, snapshots = create_powerflow_problem(timerange, components)
# import pq-sets
for key in ['Load', 'Generator']:
for attr in ['p_set', 'q_set']:
# catch MV grid districts without generators
if not components_data[key].empty:
series = transform_timeseries4pypsa(components_data[key][
attr].to_frame(),
timerange,
column=attr)
import_series_from_dataframe(network,
series,
key,
attr)
series = transform_timeseries4pypsa(components_data['Bus']
['v_mag_pu_set'].to_frame(),
timerange,
column='v_mag_pu_set')
import_series_from_dataframe(network,
series,
'Bus',
'v_mag_pu_set')
# add coordinates to network nodes and make ready for map plotting
# network = add_coordinates(network)
# start powerflow calculations
network.pf(snapshots)
# # make a line loading plot
# # TODO: make this optional
# plot_line_loading(network, timestep=0,
# filename='Line_loading_load_case.png')
# plot_line_loading(network, timestep=1,
# filename='Line_loading_feed-in_case.png')
# process results
bus_data, line_data = process_pf_results(network)
# assign results data to graph
assign_bus_results(grid, bus_data)
assign_line_results(grid, line_data)
# export network if directory is specified
if export_pypsa_dir:
export_to_dir(network, export_dir=export_pypsa_dir)
|
Run powerflow to test grid stability
Two cases are defined to be tested here:
i) load case
ii) feed-in case
Parameters
----------
components: dict of pandas.DataFrame
components_data: dict of pandas.DataFrame
export_pypsa_dir: str
Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
Export is omitted if argument is empty.
|
def is_spontaneous(gene, custom_id=None):
"""Input a COBRApy Gene object and check if the ID matches a spontaneous ID regex.
Args:
gene (Gene): COBRApy Gene
custom_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
bool: If gene ID matches spontaneous ID
"""
spont = re.compile("[Ss](_|)0001")
if spont.match(gene.id):
return True
elif gene.id == custom_id:
return True
else:
return False
|
Input a COBRApy Gene object and check if the ID matches a spontaneous ID regex.
Args:
gene (Gene): COBRApy Gene
custom_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
bool: If gene ID matches spontaneous ID
|
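A hedged doctest-style sketch (cobra's Gene constructor is assumed importable; the IDs are invented):
from cobra import Gene
is_spontaneous(Gene("s0001"))                     # True  (matches the regex)
is_spontaneous(Gene("b0002"))                     # False
is_spontaneous(Gene("SPONT"), custom_id="SPONT")  # True  (custom ID match)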
def run(self):
"""The actual event loop.
Calls the ``owner``'s :py:meth:`~Component.start_event` method,
then calls its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the ``owner``'s
:py:meth:`~Component.stop_event` method is called before the
thread terminates.
"""
try:
self.owner.start_event()
while True:
while not self.incoming:
time.sleep(0.01)
while self.incoming:
command = self.incoming.popleft()
if command is None:
raise StopIteration()
command()
except StopIteration:
pass
self.owner.stop_event()
|
The actual event loop.
Calls the ``owner``'s :py:meth:`~Component.start_event` method,
then calls its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the ``owner``'s
:py:meth:`~Component.stop_event` method is called before the
thread terminates.
|
def home_mode_set_state(self, state, **kwargs):
"""Set the state of Home Mode"""
# It appears that surveillance station needs lowercase text
# true/false for the on switch
if state not in (HOME_MODE_ON, HOME_MODE_OFF):
raise ValueError('Invalid home mode state')
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'Switch',
'version': api['version'],
'on': state,
'_sid': self._sid,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
if response['success']:
return True
return False
|
Set the state of Home Mode
|
def keys(self):
"""Return ids of all indexed documents."""
result = []
if self.fresh_index is not None:
result += self.fresh_index.keys()
if self.opt_index is not None:
result += self.opt_index.keys()
return result
|
Return ids of all indexed documents.
|
def mean_return_by_quantile(factor_data,
by_date=False,
by_group=False,
demeaned=True,
group_adjust=False):
"""
Computes mean returns for factor quantiles across
provided forward returns columns.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
by_date : bool
If True, compute quantile bucket returns separately for each date.
by_group : bool
If True, compute quantile bucket returns separately for each group.
demeaned : bool
Compute demeaned mean returns (long short portfolio)
group_adjust : bool
Returns demeaning will occur on the group level.
Returns
-------
mean_ret : pd.DataFrame
Mean period wise returns by specified factor quantile.
std_error_ret : pd.DataFrame
Standard error of returns by specified quantile.
"""
if group_adjust:
grouper = [factor_data.index.get_level_values('date')] + ['group']
factor_data = utils.demean_forward_returns(factor_data, grouper)
elif demeaned:
factor_data = utils.demean_forward_returns(factor_data)
else:
factor_data = factor_data.copy()
grouper = ['factor_quantile', factor_data.index.get_level_values('date')]
if by_group:
grouper.append('group')
group_stats = factor_data.groupby(grouper)[
utils.get_forward_returns_columns(factor_data.columns)] \
.agg(['mean', 'std', 'count'])
mean_ret = group_stats.T.xs('mean', level=1).T
if not by_date:
grouper = [mean_ret.index.get_level_values('factor_quantile')]
if by_group:
grouper.append(mean_ret.index.get_level_values('group'))
group_stats = mean_ret.groupby(grouper)\
.agg(['mean', 'std', 'count'])
mean_ret = group_stats.T.xs('mean', level=1).T
std_error_ret = group_stats.T.xs('std', level=1).T \
/ np.sqrt(group_stats.T.xs('count', level=1).T)
return mean_ret, std_error_ret
|
Computes mean returns for factor quantiles across
provided forward returns columns.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
by_date : bool
If True, compute quantile bucket returns separately for each date.
by_group : bool
If True, compute quantile bucket returns separately for each group.
demeaned : bool
Compute demeaned mean returns (long short portfolio)
group_adjust : bool
Returns demeaning will occur on the group level.
Returns
-------
mean_ret : pd.DataFrame
Mean period wise returns by specified factor quantile.
std_error_ret : pd.DataFrame
Standard error of returns by specified quantile.
|
async def unset_lock(self, resource, lock_identifier):
"""
Unlock this instance
:param resource: redis key to set
:param lock_identifier: unique id of the lock
:raises: LockError if the lock resource was acquired with a different lock_identifier
"""
try:
with await self.connect() as redis:
await redis.eval(
self.unset_lock_script,
keys=[resource],
args=[lock_identifier]
)
except aioredis.errors.ReplyError as exc: # script fault
self.log.debug('Can not unset lock "%s" on %s',
resource, repr(self))
raise LockError('Can not unset lock') from exc
except (aioredis.errors.RedisError, OSError) as exc:
self.log.error('Can not unset lock "%s" on %s: %s',
resource, repr(self), repr(exc))
raise LockError('Can not unset lock') from exc
except asyncio.CancelledError:
self.log.debug('Lock "%s" unset is cancelled on %s',
resource, repr(self))
raise
except Exception as exc:
self.log.exception('Can not unset lock "%s" on %s',
resource, repr(self))
raise
else:
self.log.debug('Lock "%s" is unset on %s', resource, repr(self))
|
Unlock this instance
:param resource: redis key to set
:param lock_identifier: unique id of the lock
:raises: LockError if the lock resource was acquired with a different lock_identifier
|
def registIssue(self, CorpNum, taxinvoice, writeSpecification=False, forceIssue=False, dealInvoiceMgtKey=None,
memo=None, emailSubject=None, UserID=None):
""" 즉시 발행
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
writeSpecification : 거래명세서 동시작성 여부
forceIssue : 지연발행 강제여부
dealInvoiceMgtKey : 거래명세서 문서관리번호
memo : 메모
emailSubject : 메일제목, 미기재시 기본제목으로 전송
UsreID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException
"""
if writeSpecification:
taxinvoice.writeSpecification = True
if forceIssue:
taxinvoice.forceIssue = True
if dealInvoiceMgtKey != None and dealInvoiceMgtKey != '':
taxinvoice.dealInvoiceMgtKey = dealInvoiceMgtKey
if memo != None and memo != '':
taxinvoice.memo = memo
if emailSubject != None and emailSubject != '':
taxinvoice.emailSubject = emailSubject
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "ISSUE")
|
Issue a tax invoice immediately.
args
CorpNum : Popbill member's business registration number
taxinvoice : tax invoice object
writeSpecification : whether to write a transaction statement at the same time
forceIssue : whether to force issuance of an overdue invoice
dealInvoiceMgtKey : transaction statement document management key
memo : memo
emailSubject : email subject; if omitted, the default subject is used
UserID : Popbill member ID
return
issuance result information
raise
PopbillException
|
def get_border_phase(self, idn=0, idr=0):
"""Return one of nine border fields
Parameters
----------
idn: int
Index for refractive index.
One of -1 (left), 0 (center), 1 (right)
idr: int
Index for radius.
One of -1 (left), 0 (center), 1 (right)
"""
assert idn in [-1, 0, 1]
assert idr in [-1, 0, 1]
n = self.sphere_index + self.dn * idn
r = self.radius + self.dr * idr
# convert to array indices
idn += 1
idr += 1
# find out whether we need to compute a new border field
if self._n_border[idn, idr] == n and self._r_border[idn, idr] == r:
if self.verbose > 3:
print("Using cached border phase (n{}, r{})".format(idn, idr))
# return previously computed field
pha = self._border_pha[(idn, idr)]
else:
if self.verbose > 3:
print("Computing border phase (n{}, r{})".format(idn, idr))
kwargs = self.model_kwargs.copy()
kwargs["radius"] = r
kwargs["sphere_index"] = n
kwargs["center"] = [self.posx_offset, self.posy_offset]
tb = time.time()
pha = self.sphere_method(**kwargs).pha
if self.verbose > 2:
print("Border phase computation time:",
self.sphere_method.__module__, time.time() - tb)
self._border_pha[(idn, idr)] = pha
self._n_border[idn, idr] = n
self._r_border[idn, idr] = r
return pha
|
Return one of nine border fields
Parameters
----------
idn: int
Index for refractive index.
One of -1 (left), 0 (center), 1 (right)
idr: int
Index for radius.
One of -1 (left), 0 (center), 1 (right)
|
def helical_turbulent_fd_Mori_Nakayama(Re, Di, Dc):
r'''Calculates Darcy friction factor for a fluid flowing inside a curved
pipe such as a helical coil under turbulent conditions, using the method of
Mori and Nakayama [1]_, also shown in [2]_ and [3]_.
.. math::
f_{curv} = 0.3\left(\frac{D_i}{D_c}\right)^{0.5}
\left[Re\left(\frac{D_i}{D_c}\right)^2\right]^{-0.2}\left[1
+ 0.112\left[Re\left(\frac{D_i}{D_c}\right)^2\right]^{-0.2}\right]
Parameters
----------
Re : float
Reynolds number with `D=Di`, [-]
Di : float
Inner diameter of the coil, [m]
Dc : float
Diameter of the helix/coil measured from the center of the tube on one
side to the center of the tube on the other side, [m]
Returns
-------
fd : float
Darcy friction factor for a curved pipe [-]
Notes
-----
Valid from the transition to turbulent flow up to
:math:`Re=6.5\times 10^{5}\sqrt{D_i/D_c}`. Does not use a straight pipe
correlation, and so will not converge on the
straight pipe result at very low curvature.
Examples
--------
>>> helical_turbulent_fd_Mori_Nakayama(1E4, 0.01, .2)
0.037311802071379796
References
----------
.. [1] Mori, Yasuo, and Wataru Nakayama. "Study of Forced Convective Heat
Transfer in Curved Pipes (2nd Report, Turbulent Region)." International
Journal of Heat and Mass Transfer 10, no. 1 (January 1, 1967): 37-59.
doi:10.1016/0017-9310(67)90182-2.
.. [2] El-Genk, Mohamed S., and Timothy M. Schriener. "A Review and
Correlations for Convection Heat Transfer and Pressure Losses in
Toroidal and Helically Coiled Tubes." Heat Transfer Engineering 0, no. 0
(June 7, 2016): 1-28. doi:10.1080/01457632.2016.1194693.
.. [3] Ali, Shaukat. "Pressure Drop Correlations for Flow through Regular
Helical Coil Tubes." Fluid Dynamics Research 28, no. 4 (April 2001):
295-310. doi:10.1016/S0169-5983(00)00034-4.
'''
term = (Re*(Di/Dc)**2)**-0.2
return 0.3*(Dc/Di)**-0.5*term*(1. + 0.112*term)
|
r'''Calculates Darcy friction factor for a fluid flowing inside a curved
pipe such as a helical coil under turbulent conditions, using the method of
Mori and Nakayama [1]_, also shown in [2]_ and [3]_.
.. math::
f_{curv} = 0.3\left(\frac{D_i}{D_c}\right)^{0.5}
\left[Re\left(\frac{D_i}{D_c}\right)^2\right]^{-0.2}\left[1
+ 0.112\left[Re\left(\frac{D_i}{D_c}\right)^2\right]^{-0.2}\right]
Parameters
----------
Re : float
Reynolds number with `D=Di`, [-]
Di : float
Inner diameter of the coil, [m]
Dc : float
Diameter of the helix/coil measured from the center of the tube on one
side to the center of the tube on the other side, [m]
Returns
-------
fd : float
Darcy friction factor for a curved pipe [-]
Notes
-----
Valid from the transition to turbulent flow up to
:math:`Re=6.5\times 10^{5}\sqrt{D_i/D_c}`. Does not use a straight pipe
correlation, and so will not converge on the
straight pipe result at very low curvature.
Examples
--------
>>> helical_turbulent_fd_Mori_Nakayama(1E4, 0.01, .2)
0.037311802071379796
References
----------
.. [1] Mori, Yasuo, and Wataru Nakayama. "Study of Forced Convective Heat
Transfer in Curved Pipes (2nd Report, Turbulent Region)." International
Journal of Heat and Mass Transfer 10, no. 1 (January 1, 1967): 37-59.
doi:10.1016/0017-9310(67)90182-2.
.. [2] El-Genk, Mohamed S., and Timothy M. Schriener. "A Review and
Correlations for Convection Heat Transfer and Pressure Losses in
Toroidal and Helically Coiled Tubes." Heat Transfer Engineering 0, no. 0
(June 7, 2016): 1-28. doi:10.1080/01457632.2016.1194693.
.. [3] Ali, Shaukat. "Pressure Drop Correlations for Flow through Regular
Helical Coil Tubes." Fluid Dynamics Research 28, no. 4 (April 2001):
295-310. doi:10.1016/S0169-5983(00)00034-4.
|
def verbose_message(self):
"""return more complete message"""
if self.threshold is None:
return 'No threshold'
return '%.1f is %s than %.1f' % (self.value,
self.adjective,
self.threshold)
|
return more complete message
|
def validate_path(ctx, param, value):
"""Detect a workflow path if it is not passed."""
client = ctx.obj
if value is None:
from renku.models.provenance import ProcessRun
activity = client.process_commit()
if not isinstance(activity, ProcessRun):
raise click.BadParameter('No tool was found.')
return activity.path
return value
|
Detect a workflow path if it is not passed.
|
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
# a identifier nesting problem we have in general. It's just more
# likely to happen in loops which is why we work around it. The
# real solution would be "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame
|
Dump the function def of a macro or call block.
|
def srcnode(self):
"""If this node is in a build path, return the node
corresponding to its source file. Otherwise, return
ourself.
"""
srcdir_list = self.dir.srcdir_list()
if srcdir_list:
srcnode = srcdir_list[0].Entry(self.name)
srcnode.must_be_same(self.__class__)
return srcnode
return self
|
If this node is in a build path, return the node
corresponding to its source file. Otherwise, return
ourself.
|
def get(self, metric_id=None, **kwargs):
"""Get metrics
:param int metric_id: Metric ID
:return: Metrics data (:class:`dict`)
Additional named arguments may be passed and are directly transmitted
to API. It is useful to use the API search features.
.. seealso:: https://docs.cachethq.io/reference#get-metrics
.. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
"""
path = 'metrics'
if metric_id is not None:
path += '/%s' % metric_id
return self.paginate_get(path, data=kwargs)
|
Get metrics
:param int metric_id: Metric ID
:return: Metrics data (:class:`dict`)
Additional named arguments may be passed and are directly transmitted
to API. It is useful to use the API search features.
.. seealso:: https://docs.cachethq.io/reference#get-metrics
.. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
|
async def _deploy(self, charm_url, application, series, config,
constraints, endpoint_bindings, resources, storage,
channel=None, num_units=None, placement=None,
devices=None):
"""Logic shared between `Model.deploy` and `BundleHandler.deploy`.
"""
log.info('Deploying %s', charm_url)
# stringify all config values for API, and convert to YAML
config = {k: str(v) for k, v in config.items()}
config = yaml.dump({application: config},
default_flow_style=False)
app_facade = client.ApplicationFacade.from_connection(
self.connection())
app = client.ApplicationDeploy(
charm_url=charm_url,
application=application,
series=series,
channel=channel,
config_yaml=config,
constraints=parse_constraints(constraints),
endpoint_bindings=endpoint_bindings,
num_units=num_units,
resources=resources,
storage=storage,
placement=placement,
devices=devices,
)
result = await app_facade.Deploy([app])
errors = [r.error.message for r in result.results if r.error]
if errors:
raise JujuError('\n'.join(errors))
return await self._wait_for_new('application', application)
|
Logic shared between `Model.deploy` and `BundleHandler.deploy`.
|
def get_code(self, timestamp=None):
"""
:param timestamp: time to use for code generation
:type timestamp: int
:return: two factor code
:rtype: str
"""
return generate_twofactor_code_for_time(b64decode(self.shared_secret),
self.get_time() if timestamp is None else timestamp)
|
:param timestamp: time to use for code generation
:type timestamp: int
:return: two factor code
:rtype: str
|
def retrieve(self, session, lookup_keys, *args, **kwargs):
"""
Retrieves a model using the lookup keys provided.
Only one model should be returned by the lookup_keys
or else the manager will fail.
:param Session session: The SQLAlchemy session to use
:param dict lookup_keys: A dictionary mapping the fields
and their expected values
:return: The dictionary of keys and values for the retrieved
model. The only values returned will be those specified by
fields attribute on the class
:rtype: dict
:raises: NotFoundException
"""
model = self._get_model(lookup_keys, session)
return self.serialize_model(model)
|
Retrieves a model using the lookup keys provided.
Only one model should be returned by the lookup_keys
or else the manager will fail.
:param Session session: The SQLAlchemy session to use
:param dict lookup_keys: A dictionary mapping the fields
and their expected values
:return: The dictionary of keys and values for the retrieved
model. The only values returned will be those specified by
fields attribute on the class
:rtype: dict
:raises: NotFoundException
|
def share_file(comm, path):
"""
    Copies the file from rank 0 to all other ranks.
    Puts it in the same place on all machines.
"""
localrank, _ = get_local_rank_size(comm)
if comm.Get_rank() == 0:
with open(path, 'rb') as fh:
data = fh.read()
comm.bcast(data)
else:
data = comm.bcast(None)
if localrank == 0:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fh:
fh.write(data)
comm.Barrier()
|
Copies the file from rank 0 to all other ranks.
Puts it in the same place on all machines.
|
def iter_islast(iterable):
"""Generate (item, islast) pairs for an iterable.
Generates pairs where the first element is an item from the iterable
source and the second element is a boolean flag indicating if it is
the last item in the sequence.
"""
it = iter(iterable)
prev = next(it)
for item in it:
yield prev, False
prev = item
yield prev, True
|
Generate (item, islast) pairs for an iterable.
Generates pairs where the first element is an item from the iterable
source and the second element is a boolean flag indicating if it is
the last item in the sequence.
|
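A minimal usage sketch of the (item, islast) pattern above; the generator is restated inline so the snippet runs on its own, and the sample values are illustrative only.
def iter_islast_demo(iterable):
    # same logic as iter_islast() above
    it = iter(iterable)
    prev = next(it)
    for item in it:
        yield prev, False
        prev = item
    yield prev, True

for word, is_last in iter_islast_demo(["a", "b", "c"]):
    print(word, is_last)   # a False / b False / c True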
def path_size(path, total=False, ext='', level=None, verbosity=0):
"""Walk the file tree and query the file.stat object(s) to compute their total (or individual) size in bytes
Returns:
dict: {relative_path: file_size_in_bytes, ...}
Examples:
>>> all(d >= 0 for d in path_size(__file__).values())
True
>>> sum(path_size(os.path.dirname(__file__)).values()) == path_size(os.path.dirname(__file__), total=True)
True
>>> path_size(__file__, total=True) > 10000
True
>>> len(path_size('.')) >= 2
True
"""
dict_of_path_sizes = dict((d['path'], d['size']) for d in util.find_files(path, ext=ext, level=level, verbosity=0))
if total:
return reduce(lambda tot, size: tot + size, dict_of_path_sizes.values(), 0)
return dict_of_path_sizes
|
Walk the file tree and query the file.stat object(s) to compute their total (or individual) size in bytes
Returns:
dict: {relative_path: file_size_in_bytes, ...}
Examples:
>>> all(d >= 0 for d in path_size(__file__).values())
True
>>> sum(path_size(os.path.dirname(__file__)).values()) == path_size(os.path.dirname(__file__), total=True)
True
>>> path_size(__file__, total=True) > 10000
True
>>> len(path_size('.')) >= 2
True
|
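A rough standalone equivalent of the walk-and-sum behaviour above, assuming only the standard library; util.find_files is not reproduced, and the ext/level filters are omitted.
import os

def path_size_sketch(path, total=False):
    # map each file path under `path` to its size in bytes
    if os.path.isfile(path):
        sizes = {path: os.path.getsize(path)}
    else:
        sizes = {}
        for dirpath, _dirnames, filenames in os.walk(path):
            for name in filenames:
                full = os.path.join(dirpath, name)
                sizes[full] = os.path.getsize(full)
    return sum(sizes.values()) if total else sizes

print(path_size_sketch(".", total=True))  # total bytes under the current directory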
def _writeXputMaps(self, session, directory, mapCards,
name=None, replaceParamFile=None):
"""
GSSHAPY Project Write Map Files to File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Write map file
self._invokeWrite(fileIO=RasterMapFile,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile)
else:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
fileExtension = filename.split('.')[1]
if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS:
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Write map file
self._invokeWrite(fileIO=RasterMapFile,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile)
log.error('Could not write map files. MAP_TYPE {0} '
'not supported.'.format(self.mapType))
|
GSSHAPY Project Write Map Files to File Method
|
def header(self, sheet, name):
"""
Write sheet header.
Args:
sheet: (xlwt.Worksheet.Worksheet) instance of xlwt sheet.
name: (unicode) name of sheet.
"""
header = sheet.row(0)
for i, column in enumerate(self.headers[name]):
            header.write(i, column)
|
Write sheet header.
Args:
sheet: (xlwt.Worksheet.Worksheet) instance of xlwt sheet.
name: (unicode) name of sheet.
|
def select(self, *features):
"""
        selects the features given as strings, e.g.
passing 'hello' and 'world' will result in imports of
'hello' and 'world'. Then, if possible 'hello.feature'
and 'world.feature' are imported and select is called
in each feature module.
"""
for feature_name in features:
feature_module = importlib.import_module(feature_name)
# if available, import feature.py and select the feature
try:
feature_spec_module = importlib.import_module(
feature_name + '.feature'
)
if not hasattr(feature_spec_module, 'select'):
raise CompositionError(
'Function %s.feature.select not found!\n '
'Feature modules need to specify a function'
' select(composer).' % (
feature_name
)
)
args, varargs, keywords, defaults = inspect.getargspec(
feature_spec_module.select
)
if varargs or keywords or defaults or len(args) != 1:
raise CompositionError(
'invalid signature: %s.feature.select must '
'have the signature select(composer)' % (
feature_name
)
)
# call the feature`s select function
feature_spec_module.select(self)
except ImportError:
# Unfortunately, python makes it really hard
# to distinguish missing modules from modules
# that contain errors.
# Hacks like parsing the exception message will
# not work reliably due to import hooks and such.
# Conclusion: features must contain a feature.py for now
# re-raise
raise
|
selects the features given as strings, e.g.
passing 'hello' and 'world' will result in imports of
'hello' and 'world'. Then, if possible 'hello.feature'
and 'world.feature' are imported and select is called
in each feature module.
|
def commit(name,
repository,
tag='latest',
message=None,
author=None):
'''
.. versionchanged:: 2018.3.0
The repository and tag must now be passed separately using the
``repository`` and ``tag`` arguments, rather than together in the (now
deprecated) ``image`` argument.
Commits a container, thereby promoting it to an image. Equivalent to
running the ``docker commit`` Docker CLI command.
name
Container name or ID to commit
repository
Repository name for the image being committed
.. versionadded:: 2018.3.0
tag : latest
Tag name for the image
.. versionadded:: 2018.3.0
image
.. deprecated:: 2018.3.0
Use both ``repository`` and ``tag`` instead
message
Commit message (Optional)
author
Author name (Optional)
**RETURN DATA**
A dictionary containing the following keys:
- ``Id`` - ID of the newly-created image
- ``Image`` - Name of the newly-created image
- ``Time_Elapsed`` - Time in seconds taken to perform the commit
CLI Example:
.. code-block:: bash
salt myminion docker.commit mycontainer myuser/myimage mytag
'''
if not isinstance(repository, six.string_types):
repository = six.text_type(repository)
if not isinstance(tag, six.string_types):
tag = six.text_type(tag)
time_started = time.time()
response = _client_wrapper(
'commit',
name,
repository=repository,
tag=tag,
message=message,
author=author)
ret = {'Time_Elapsed': time.time() - time_started}
_clear_context()
image_id = None
for id_ in ('Id', 'id', 'ID'):
if id_ in response:
image_id = response[id_]
break
if image_id is None:
raise CommandExecutionError('No image ID was returned in API response')
ret['Id'] = image_id
return ret
|
.. versionchanged:: 2018.3.0
The repository and tag must now be passed separately using the
``repository`` and ``tag`` arguments, rather than together in the (now
deprecated) ``image`` argument.
Commits a container, thereby promoting it to an image. Equivalent to
running the ``docker commit`` Docker CLI command.
name
Container name or ID to commit
repository
Repository name for the image being committed
.. versionadded:: 2018.3.0
tag : latest
Tag name for the image
.. versionadded:: 2018.3.0
image
.. deprecated:: 2018.3.0
Use both ``repository`` and ``tag`` instead
message
Commit message (Optional)
author
Author name (Optional)
**RETURN DATA**
A dictionary containing the following keys:
- ``Id`` - ID of the newly-created image
- ``Image`` - Name of the newly-created image
- ``Time_Elapsed`` - Time in seconds taken to perform the commit
CLI Example:
.. code-block:: bash
salt myminion docker.commit mycontainer myuser/myimage mytag
|
def unindent_selection(self, cursor):
"""
Un-indents selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
nb_lines = len(cursor.selection().toPlainText().splitlines())
if nb_lines == 0:
nb_lines = 1
block = doc.findBlock(cursor.selectionStart())
assert isinstance(block, QtGui.QTextBlock)
i = 0
_logger().debug('unindent selection: %d lines', nb_lines)
while i < nb_lines:
txt = block.text()[self.min_column:]
_logger().debug('line to unindent: %s', txt)
_logger().debug('self.editor.use_spaces_instead_of_tabs: %r',
self.editor.use_spaces_instead_of_tabs)
if self.editor.use_spaces_instead_of_tabs:
indentation = len(txt) - len(txt.lstrip())
else:
indentation = len(txt) - len(txt.replace('\t', ''))
_logger().debug('unindent line %d: %d spaces (min indent=%d)', i, indentation, self.min_column)
if indentation > 0:
c = QtGui.QTextCursor(block)
c.movePosition(c.StartOfLine, cursor.MoveAnchor)
c.movePosition(c.Right, cursor.MoveAnchor, indentation + self.min_column)
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(c, max_spaces)
for _ in range(spaces):
c.deletePreviousChar()
block = block.next()
i += 1
return cursor
|
Un-indents selected text
:param cursor: QTextCursor
|
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
result = [TableInfo(SfProtectName(x['name']), 't') for x in self.table_list_cache['sobjects']]
return result
|
Returns a list of table names in the current database.
|
def object_to_json(obj, indent=2):
"""
transform object to json
"""
instance_json = json.dumps(obj, indent=indent, ensure_ascii=False, cls=DjangoJSONEncoder)
return instance_json
|
transform object to json
|
def _DecodeUnrecognizedFields(message, pair_type):
"""Process unrecognized fields in message."""
new_values = []
codec = _ProtoJsonApiTools.Get()
for unknown_field in message.all_unrecognized_fields():
# TODO(craigcitro): Consider validating the variant if
# the assignment below doesn't take care of it. It may
# also be necessary to check it in the case that the
# type has multiple encodings.
value, _ = message.get_unrecognized_field_info(unknown_field)
value_type = pair_type.field_by_name('value')
if isinstance(value_type, messages.MessageField):
decoded_value = DictToMessage(value, pair_type.value.message_type)
else:
decoded_value = codec.decode_field(
pair_type.value, value)
try:
new_pair_key = str(unknown_field)
except UnicodeEncodeError:
new_pair_key = protojson.ProtoJson().decode_field(
pair_type.key, unknown_field)
new_pair = pair_type(key=new_pair_key, value=decoded_value)
new_values.append(new_pair)
return new_values
|
Process unrecognized fields in message.
|
def get_salic_url(item, prefix, df_values=None):
"""
Mount a salic url for the given item.
"""
url_keys = {
'pronac': 'idPronac',
'uf': 'uf',
'product': 'produto',
'county': 'idmunicipio',
'item_id': 'idPlanilhaItem',
'stage': 'etapa',
}
if df_values:
values = [item[v] for v in df_values]
url_values = dict(
zip(url_keys.keys(), values)
)
else:
url_values = {
"pronac": item["idPronac"],
"uf": item["UfItem"],
"product": item["idProduto"],
"county": item["cdCidade"],
"item_id": item["idPlanilhaItens"],
"stage": item["cdEtapa"],
}
item_data = [(value, url_values[key]) for key, value in url_keys.items()]
url = prefix
for k, v in item_data:
url += f'/{str(k)}/{str(v)}'
return url
|
Mount a salic url for the given item.
|
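A standalone sketch of the key/value-to-URL-path joining performed above; the prefix and the item values below are made-up placeholders, not real SALIC data.
url_keys = {
    'pronac': 'idPronac',
    'uf': 'uf',
    'product': 'produto',
    'county': 'idmunicipio',
    'item_id': 'idPlanilhaItem',
    'stage': 'etapa',
}
item = {'idPronac': 123, 'UfItem': 'SP', 'idProduto': 1,
        'cdCidade': 355030, 'idPlanilhaItens': 42, 'cdEtapa': 2}   # hypothetical row
url_values = {
    'pronac': item['idPronac'], 'uf': item['UfItem'], 'product': item['idProduto'],
    'county': item['cdCidade'], 'item_id': item['idPlanilhaItens'], 'stage': item['cdEtapa'],
}
url = 'https://example.invalid/prestacao-contas'   # hypothetical prefix
for key, segment in url_keys.items():
    url += f'/{segment}/{url_values[key]}'
print(url)  # .../idPronac/123/uf/SP/produto/1/idmunicipio/355030/idPlanilhaItem/42/etapa/2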
def diff(self, container):
"""
Inspect changes on a container's filesystem.
Args:
container (str): The container to diff
Returns:
(str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
|
Inspect changes on a container's filesystem.
Args:
container (str): The container to diff
Returns:
(str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
def RemoveMethod(self, function):
"""
Removes the specified function's MethodWrapper from the
added_methods list, so we don't re-bind it when making a clone.
"""
        self.added_methods = [dm for dm in self.added_methods if dm.method is not function]
|
Removes the specified function's MethodWrapper from the
added_methods list, so we don't re-bind it when making a clone.
|
def capture_heroku_database(self):
""" Capture Heroku database backup. """
self.print_message("Capturing database backup for app '%s'" % self.args.source_app)
args = [
"heroku",
"pg:backups:capture",
"--app=%s" % self.args.source_app,
]
if self.args.use_pgbackups:
args = [
"heroku",
"pgbackups:capture",
"--app=%s" % self.args.source_app,
"--expire",
]
subprocess.check_call(args)
|
Capture Heroku database backup.
|
def trim_decimals(s, precision=-3):
"""
Convert from scientific notation using precision
"""
encoded = s.encode('ascii', 'ignore')
str_val = ""
if six.PY3:
str_val = str(encoded, encoding='ascii', errors='ignore')[:precision]
else:
        # If precision is 0, this must be handled separately
if precision == 0:
str_val = str(encoded)
else:
str_val = str(encoded)[:precision]
if len(str_val) > 0:
return float(str_val)
else:
return 0
|
Convert from scientific notation using precision
|
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
|
Create validator for given mapping.
|
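A small demonstration, assuming the voluptuous package is installed, of the behaviour the compiled mapping validator above implements: defaults are injected for missing Optional keys, and a missing Required key raises MultipleInvalid.
from voluptuous import MultipleInvalid, Optional, Required, Schema

schema = Schema({
    Required('name'): str,
    Optional('port', default=8080): int,
})

print(schema({'name': 'web'}))   # {'name': 'web', 'port': 8080}
try:
    schema({'port': 80})
except MultipleInvalid as exc:
    print(exc)                   # e.g. "required key not provided @ data['name']"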
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError
if len(hextet_str) > 4:
raise ValueError
hextet_int = int(hextet_str, 16)
if hextet_int > 0xFFFF:
raise ValueError
return hextet_int
|
Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
|
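A standalone sketch of the strict hextet validation above; _HEX_DIGITS is assumed to be a frozenset of hexadecimal characters and is recreated here from string.hexdigits.
import string

_HEX_DIGITS = frozenset(string.hexdigits)

def parse_hextet(hextet_str):
    # reject anything int() would tolerate but a hextet should not (signs, spaces, '0x', ...)
    if not _HEX_DIGITS.issuperset(hextet_str):
        raise ValueError('non-hex character in %r' % hextet_str)
    if len(hextet_str) > 4:
        raise ValueError('hextet longer than 4 digits: %r' % hextet_str)
    return int(hextet_str, 16)   # at most 4 hex digits, so never above 0xFFFF

print(parse_hextet('ff02'))   # 65282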
def code_almost_equal(a, b):
"""Return True if code is similar.
Ignore whitespace when comparing specific line.
"""
split_a = split_and_strip_non_empty_lines(a)
split_b = split_and_strip_non_empty_lines(b)
if len(split_a) != len(split_b):
return False
for (index, _) in enumerate(split_a):
if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
return False
return True
|
Return True if code is similar.
Ignore whitespace when comparing specific line.
|
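A self-contained sketch of the whitespace-insensitive comparison above; the split_and_strip_non_empty_lines() helper is approximated inline.
def _non_empty_lines(text):
    return [line.strip() for line in text.splitlines() if line.strip()]

def code_almost_equal_sketch(a, b):
    lines_a, lines_b = _non_empty_lines(a), _non_empty_lines(b)
    if len(lines_a) != len(lines_b):
        return False
    # compare each line with all internal whitespace removed
    return all(''.join(x.split()) == ''.join(y.split())
               for x, y in zip(lines_a, lines_b))

print(code_almost_equal_sketch("x = 1\n\ny=2", "x=1\ny = 2"))   # True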
def add_fields(self, fields=None, **kwargs):
"""
Add the fields into the list of fields.
"""
        if fields is not None:
for field in fields:
self.fields.append(field)
|
Add the fields into the list of fields.
|
def get_keypair_name():
"""Returns current keypair name."""
username = get_username()
assert '-' not in username, "username must not contain -, change $USER"
validate_aws_name(username)
assert len(username) < 30 # to avoid exceeding AWS 127 char limit
return get_prefix() + '-' + username
|
Returns current keypair name.
|
def encode_network(root):
"""Yield ref-containing obj table entries from object network"""
def fix_values(obj):
if isinstance(obj, Container):
obj.update((k, get_ref(v)) for (k, v) in obj.items()
if k != 'class_name')
fixed_obj = obj
elif isinstance(obj, Dictionary):
fixed_obj = obj.__class__(dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, dict):
fixed_obj = dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.items()
)
elif isinstance(obj, list):
fixed_obj = [get_ref(field) for field in obj]
elif isinstance(obj, Form):
fixed_obj = obj.__class__(**dict(
(field, get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, ContainsRefs):
fixed_obj = obj.__class__([get_ref(field)
for field in obj.value])
else:
return obj
fixed_obj._made_from = obj
return fixed_obj
objects = []
def get_ref(obj, objects=objects):
obj = PythonicAdapter(Pass)._encode(obj, None)
if isinstance(obj, (FixedObject, Container)):
if getattr(obj, '_index', None):
index = obj._index
else:
objects.append(None)
obj._index = index = len(objects)
objects[index - 1] = fix_values(obj)
return Ref(index)
else:
return obj # Inline value
get_ref(root)
for obj in objects:
if getattr(obj, '_index', None):
del obj._index
return objects
|
Yield ref-containing obj table entries from object network
|
def Append(self, **kw):
"""Append values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
if key == 'CPPDEFINES' and SCons.Util.is_String(self._dict[key]):
self._dict[key] = [self._dict[key]]
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
if key == 'CPPDEFINES' and SCons.Util.is_String(val):
self._dict[key] = [val]
else:
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = orig + val
except (KeyError, TypeError):
try:
# Check if the original is a list.
add_to_orig = orig.append
except AttributeError:
# The original isn't a list, but the new
# value is (by process of elimination),
# so insert the original in the new value
# (if there's one to insert) and replace
# the variable with it.
if orig:
val.insert(0, orig)
self._dict[key] = val
else:
# The original is a list, so append the new
# value to it (if there's a value to append).
if val:
add_to_orig(val)
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
if key == 'CPPDEFINES':
tmp = []
for (k, v) in orig.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
orig = tmp
orig += val
self._dict[key] = orig
else:
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
|
Append values to existing construction variables
in an Environment.
|
def patch(self, patched_value):
"""Set a new value for the attribute of the object."""
try:
if self.getter:
setattr(self.getter_class, self.attr_name, patched_value)
else:
setattr(self.orig_object, self.attr_name, patched_value)
except TypeError:
# Workaround for patching builtin objects:
proxy_name = 'fudge_proxy_%s_%s_%s' % (
self.orig_object.__module__,
self.orig_object.__name__,
patched_value.__class__.__name__
)
self.proxy_object = type(proxy_name, (self.orig_object,),
{self.attr_name: patched_value})
mod = sys.modules[self.orig_object.__module__]
setattr(mod, self.orig_object.__name__, self.proxy_object)
|
Set a new value for the attribute of the object.
|
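A standalone sketch of the builtin-patching fallback above: when setattr() on the original class fails, a subclass carrying the patched attribute is built with type() and can be swapped into the owning module. The class and names here are illustrative only.
class Original(object):
    def greet(self):
        return "hello"

patched_value = lambda self: "patched"
proxy_name = 'fudge_proxy_demo_Original_function'
ProxyObject = type(proxy_name, (Original,), {'greet': patched_value})

print(ProxyObject().greet())   # patched
print(ProxyObject.__bases__)   # (<class '__main__.Original'>,)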
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux,
time, med):
'''
Computes the scatter in the validation set.
'''
# Update the lambda matrix
self.lam[b] = 10 ** log_lam
# Validation set scatter
scatter = [None for i in range(len(masks))]
for i in range(len(masks)):
model = self.cv_compute(b, *pre_v[i])
try:
gpm, _ = gp.predict(flux - model - med, time[masks[i]])
except ValueError:
# Sometimes the model can have NaNs if
# `lambda` is a crazy value
return 1.e30
fdet = (flux - model)[masks[i]] - gpm
scatter[i] = 1.e6 * (1.4826 * np.nanmedian(np.abs(fdet / med -
np.nanmedian(fdet / med))) /
np.sqrt(len(masks[i])))
return np.max(scatter)
|
Computes the scatter in the validation set.
|
def remove_connection(self, id_interface, back_or_front):
"""
Remove a connection between two interfaces
:param id_interface: One side of relation
:param back_or_front: This side of relation is back(0) or front(1)
:return: None
:raise InterfaceInvalidBackFrontError: Front or Back of interfaces not match to remove connection
:raise InvalidParameterError: Interface id or back or front indicator is none or invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
msg_err = u'Parameter %s is invalid. Value: %s.'
if not is_valid_0_1(back_or_front):
raise InvalidParameterError(
msg_err %
('back_or_front', back_or_front))
if not is_valid_int_param(id_interface):
raise InvalidParameterError(
msg_err %
('id_interface', id_interface))
url = 'interface/%s/%s/' % (str(id_interface), str(back_or_front))
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml)
|
Remove a connection between two interfaces
:param id_interface: One side of relation
:param back_or_front: This side of relation is back(0) or front(1)
:return: None
:raise InterfaceInvalidBackFrontError: Front or Back of interfaces not match to remove connection
:raise InvalidParameterError: Interface id or back or front indicator is none or invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def txn_data2schema_key(self, txn: dict) -> SchemaKey:
"""
Return schema key from ledger transaction data.
:param txn: get-schema transaction (by sequence number)
:return: schema key identified
"""
rv = None
if self == Protocol.V_13:
rv = SchemaKey(txn['identifier'], txn['data']['name'], txn['data']['version'])
else:
txn_txn = txn.get('txn', None) or txn # may have already run this txn through txn2data() below
rv = SchemaKey(
txn_txn['metadata']['from'],
txn_txn['data']['data']['name'],
txn_txn['data']['data']['version'])
return rv
|
Return schema key from ledger transaction data.
:param txn: get-schema transaction (by sequence number)
:return: schema key identified
|
def _assign_name(self, obj, name, shaders):
""" Assign *name* to *obj* in *shaders*.
"""
if self._is_global(obj):
assert name not in self._global_ns
self._global_ns[name] = obj
else:
for shader in shaders:
ns = self._shader_ns[shader]
assert name not in ns
ns[name] = obj
self._object_names[obj] = name
|
Assign *name* to *obj* in *shaders*.
|
def authorize(self, ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, src_group=None):
"""
Add a new rule to this security group.
You need to pass in either src_group_name
OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
:class:`boto.ec2.securitygroup.GroupOrCIDR`
:param src_group: The Security Group you are granting access to.
:rtype: bool
:return: True if successful.
"""
if src_group:
cidr_ip = None
src_group_name = src_group.name
src_group_owner_id = src_group.owner_id
else:
src_group_name = None
src_group_owner_id = None
status = self.connection.authorize_security_group(self.name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
cidr_ip)
if status:
self.add_rule(ip_protocol, from_port, to_port, src_group_name,
src_group_owner_id, cidr_ip)
return status
|
Add a new rule to this security group.
You need to pass in either src_group_name
OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
:class:`boto.ec2.securitygroup.GroupOrCIDR`
:param src_group: The Security Group you are granting access to.
:rtype: bool
:return: True if successful.
|
def push(self, *args, **kwargs):
"""
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
        This exchange outputs: ``v1/github-push-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
        * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'push',
'name': 'push',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-push-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
|
def textContent(self, text: str) -> None: # type: ignore
"""Set textContent both on this node and related browser node."""
self._set_text_content(text)
if self.connected:
self._set_text_content_web(text)
|
Set textContent both on this node and related browser node.
|
def get_instances(feature_name):
"""Return all all instances that compute `feature_name`"""
feats = []
for ft in AncillaryFeature.features:
if ft.feature_name == feature_name:
feats.append(ft)
return feats
|
Return all instances that compute `feature_name`
|
def call_fset(self, obj, value) -> None:
"""Store the given custom value and call the setter function."""
vars(obj)[self.name] = self.fset(obj, value)
|
Store the given custom value and call the setter function.
|
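A minimal standalone sketch of the pattern above: a data descriptor whose setter's return value is what actually gets stored in the instance __dict__. The names are illustrative, not the original class.
class StoredProperty:
    def __init__(self, fset):
        self.fset = fset
        self.name = fset.__name__

    def __set_name__(self, owner, name):
        self.name = name

    def __set__(self, obj, value):
        # store whatever the setter returns, mirroring call_fset() above
        vars(obj)[self.name] = self.fset(obj, value)

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return vars(obj)[self.name]

class Point:
    @StoredProperty
    def x(self, value):
        return float(value)   # normalise before storing

p = Point()
p.x = "3"
print(p.x)   # 3.0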
def handle_stream(self, stream, address):
'''
Handle incoming streams and add messages to the incoming queue
'''
log.trace('Req client %s connected', address)
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except StreamClosedError:
log.trace('req client disconnected %s', address)
self.clients.remove((stream, address))
except Exception as e:
log.trace('other master-side exception: %s', e)
self.clients.remove((stream, address))
stream.close()
|
Handle incoming streams and add messages to the incoming queue
|
def open(self):
"""
Load topology elements
"""
if self._status == "opened":
return
self.reset()
self._loading = True
self._status = "opened"
path = self._topology_file()
if not os.path.exists(path):
self._loading = False
return
try:
shutil.copy(path, path + ".backup")
except OSError:
pass
try:
project_data = load_topology(path)
#load meta of project
keys_to_load = [
"auto_start",
"auto_close",
"auto_open",
"scene_height",
"scene_width",
"zoom",
"show_layers",
"snap_to_grid",
"show_grid",
"show_interface_labels"
]
for key in keys_to_load:
val = project_data.get(key, None)
if val is not None:
setattr(self, key, val)
topology = project_data["topology"]
for compute in topology.get("computes", []):
yield from self.controller.add_compute(**compute)
for node in topology.get("nodes", []):
compute = self.controller.get_compute(node.pop("compute_id"))
name = node.pop("name")
node_id = node.pop("node_id", str(uuid.uuid4()))
yield from self.add_node(compute, name, node_id, dump=False, **node)
for link_data in topology.get("links", []):
if 'link_id' not in link_data.keys():
# skip the link
continue
link = yield from self.add_link(link_id=link_data["link_id"])
if "filters" in link_data:
yield from link.update_filters(link_data["filters"])
for node_link in link_data["nodes"]:
node = self.get_node(node_link["node_id"])
port = node.get_port(node_link["adapter_number"], node_link["port_number"])
if port is None:
log.warning("Port {}/{} for {} not found".format(node_link["adapter_number"], node_link["port_number"], node.name))
continue
if port.link is not None:
log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id))
continue
yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False)
if len(link.nodes) != 2:
# a link should have 2 attached nodes, this can happen with corrupted projects
yield from self.delete_link(link.id, force_delete=True)
for drawing_data in topology.get("drawings", []):
yield from self.add_drawing(dump=False, **drawing_data)
self.dump()
# We catch all error to be able to rollback the .gns3 to the previous state
except Exception as e:
for compute in list(self._project_created_on_compute):
try:
yield from compute.post("/projects/{}/close".format(self._id))
# We don't care if a compute is down at this step
except (ComputeError, aiohttp.web.HTTPNotFound, aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError):
pass
try:
if os.path.exists(path + ".backup"):
shutil.copy(path + ".backup", path)
except (PermissionError, OSError):
pass
self._status = "closed"
self._loading = False
if isinstance(e, ComputeError):
raise aiohttp.web.HTTPConflict(text=str(e))
else:
raise e
try:
os.remove(path + ".backup")
except OSError:
pass
self._loading = False
# Should we start the nodes when project is open
if self._auto_start:
# Start all in the background without waiting for completion
# we ignore errors because we want to let the user open
# their project and fix it
asyncio.async(self.start_all())
|
Load topology elements
|
def get_previous_character(self):
"""
Returns the character before the cursor.
:return: Previous cursor character.
:rtype: QString
"""
cursor = self.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor)
return cursor.selectedText()
|
Returns the character before the cursor.
:return: Previous cursor character.
:rtype: QString
|
def build(self):
"""
Iterates through the views pointed to by self.detail_views, runs
build_object with `self`, and calls _build_extra()
and _build_related().
"""
for detail_view in self.detail_views:
view = self._get_view(detail_view)
view().build_object(self)
self._build_extra()
self._build_related()
|
Iterates through the views pointed to by self.detail_views, runs
build_object with `self`, and calls _build_extra()
and _build_related().
|
def _nonzero_counter_hook(module, inputs, output):
"""
Module hook used to count the number of nonzero floating point values from
    all the tensors used by the given network during inference. This hook will be
    called every time after :func:`forward` has computed its output.
See :func:`torch.nn.Module.register_forward_hook`
"""
if not hasattr(module, "__counter_nonzero__"):
raise ValueError("register_counter_nonzero was not called for this network")
if module.training:
return
size = module.__counter_nonzero__.get("input", 0)
size += sum([torch.nonzero(i).size(0) for i in inputs])
module.__counter_nonzero__["input"] = size
size = module.__counter_nonzero__.get("output", 0)
size += torch.nonzero(output).size(0)
module.__counter_nonzero__["output"] = size
for name, param in module._parameters.items():
if param is None:
continue
size = module.__counter_nonzero__.get(name, 0)
size += torch.nonzero(param.data).size(0)
module.__counter_nonzero__[name] = size
for name, buffer in module._buffers.items():
if buffer is None:
continue
size = module.__counter_nonzero__.get(name, 0)
size += torch.nonzero(buffer).size(0)
module.__counter_nonzero__[name] = size
|
Module hook used to count the number of nonzero floating point values from
all the tensors used by the given network during inference. This hook will be
called every time after :func:`forward` has computed its output.
See :func:`torch.nn.Module.register_forward_hook`
|
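A hedged sketch, assuming PyTorch is installed, of counting nonzero values with a forward hook in the spirit of the hook above; it uses a plain attribute instead of __counter_nonzero__ and skips parameters and buffers.
import torch
import torch.nn as nn

def count_nonzero_hook(module, inputs, output):
    if module.training:
        return
    stats = getattr(module, "nonzero_stats", {})
    stats["input"] = stats.get("input", 0) + sum(int(torch.count_nonzero(i)) for i in inputs)
    stats["output"] = stats.get("output", 0) + int(torch.count_nonzero(output))
    module.nonzero_stats = stats

layer = nn.Linear(4, 2)
layer.eval()                                   # only count outside of training, as above
layer.register_forward_hook(count_nonzero_hook)
layer(torch.randn(3, 4))
print(layer.nonzero_stats)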
def histogram_info(self) -> dict:
""" Return extra information about histogram """
return {
'support_atoms': self.support_atoms,
'atom_delta': self.atom_delta,
'vmin': self.vmin,
'vmax': self.vmax,
'num_atoms': self.atoms
}
|
Return extra information about histogram
|
def list_member_topics(self, member_id):
    ''' a method to retrieve a list of topics a member follows
:param member_id: integer with meetup member id
:return: dictionary with list of topic details inside [json] key
topic_details = self.objects.topic.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.list_member_topics' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'topics'
}
# send requests
response_details = self._get_request(url, params=params)
# construct method output dictionary
member_topics = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_topics[key] = value
# parse response
if response_details['json']:
if 'topics' in response_details['json'].keys():
for topic in response_details['json']['topics']:
member_topics['json'].append(self.objects.topic.ingest(**topic))
return member_topics
|
a method to retrieve a list of topics a member follows
:param member_id: integer with meetup member id
:return: dictionary with list of topic details inside [json] key
topic_details = self.objects.topic.schema
|
def animate(self, duration = None, easing = None, on_complete = None,
on_update = None, round = False, **kwargs):
"""Request parent Scene to Interpolate attributes using the internal tweener.
Specify sprite's attributes that need changing.
`duration` defaults to 0.4 seconds and `easing` to cubic in-out
(for others see pytweener.Easing class).
Example::
# tween some_sprite to coordinates (50,100) using default duration and easing
self.animate(x = 50, y = 100)
"""
scene = self.get_scene()
if scene:
return scene.animate(self, duration, easing, on_complete,
on_update, round, **kwargs)
else:
for key, val in kwargs.items():
setattr(self, key, val)
return None
|
Request parent Scene to Interpolate attributes using the internal tweener.
Specify sprite's attributes that need changing.
`duration` defaults to 0.4 seconds and `easing` to cubic in-out
(for others see pytweener.Easing class).
Example::
# tween some_sprite to coordinates (50,100) using default duration and easing
self.animate(x = 50, y = 100)
|
def diff_files(left, right, diff_options=None, formatter=None):
"""Takes two filenames or streams, and diffs the XML in those files"""
return _diff(etree.parse, left, right,
diff_options=diff_options, formatter=formatter)
|
Takes two filenames or streams, and diffs the XML in those files
|
def unicode_decode(data, encoding_list):
"""
Decode string data with one or more encodings, trying sequentially
:param data: bytes: encoded string data
:param encoding_list: list[string] or string: encoding names
:return: string: decoded string
"""
assert encoding_list, 'encodings must not be empty.'
xs = distinct(encoding_list if isinstance(encoding_list, list) else [encoding_list])
first_exp = None
for i, encoding in enumerate(xs):
try:
return data.decode(encoding)
except UnicodeDecodeError as e:
if i == 0:
first_exp = e
raise first_exp
|
Decode string data with one or more encodings, trying sequentially
:param data: bytes: encoded string data
:param encoding_list: list[string] or string: encoding names
:return: string: decoded string
|
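A standalone sketch of the try-encodings-in-order behaviour above; the distinct() helper is replaced with dict.fromkeys() de-duplication.
def unicode_decode_sketch(data, encoding_list):
    encodings = list(dict.fromkeys(
        encoding_list if isinstance(encoding_list, list) else [encoding_list]))
    first_exc = None
    for encoding in encodings:
        try:
            return data.decode(encoding)
        except UnicodeDecodeError as exc:
            first_exc = first_exc or exc
    raise first_exc   # all encodings failed: re-raise the first error

print(unicode_decode_sketch('héllo'.encode('latin-1'), ['utf-8', 'latin-1']))   # héllo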
def build_keyjar(key_conf, kid_template="", keyjar=None, owner=''):
"""
Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to
an existing KeyJar based on a key specification.
An example of such a specification::
keys = [
{"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
{"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"}
]
Keys in this specification are:
type
The type of key. Presently only 'rsa' and 'ec' supported.
key
A name of a file where a key can be found. Only works with PEM encoded
RSA keys
use
What the key should be used for
crv
The elliptic curve that should be used. Only applies to elliptic curve
keys :-)
kid
        Key ID; can only be used when one usage type is specified. If more
        than one usage type is specified, 'kid' will just be ignored.
:param key_conf: The key configuration
:param kid_template: A template by which to build the key IDs. If no
kid_template is given then the built-in function add_kid() will be used.
    :param keyjar: If a KeyJar instance, the new keys are added to this key jar.
:param owner: The default owner of the keys in the key jar.
:return: A KeyJar instance
"""
if keyjar is None:
keyjar = KeyJar()
tot_kb = build_key_bundle(key_conf, kid_template)
keyjar.add_kb(owner, tot_kb)
return keyjar
|
Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to
an existing KeyJar based on a key specification.
An example of such a specification::
keys = [
{"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
{"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"}
]
Keys in this specification are:
type
The type of key. Presently only 'rsa' and 'ec' supported.
key
A name of a file where a key can be found. Only works with PEM encoded
RSA keys
use
What the key should be used for
crv
The elliptic curve that should be used. Only applies to elliptic curve
keys :-)
kid
Key ID; can only be used when one usage type is specified. If more
than one usage type is specified, 'kid' will just be ignored.
:param key_conf: The key configuration
:param kid_template: A template by which to build the key IDs. If no
kid_template is given then the built-in function add_kid() will be used.
:param keyjar: If a KeyJar instance, the new keys are added to this key jar.
:param owner: The default owner of the keys in the key jar.
:return: A KeyJar instance
|
def from_plugin_classname(plugin_classname, exclude_lines_regex=None, **kwargs):
"""Initializes a plugin class, given a classname and kwargs.
:type plugin_classname: str
:param plugin_classname: subclass of BasePlugin.
:type exclude_lines_regex: str|None
:param exclude_lines_regex: optional regex for ignored lines.
"""
klass = globals()[plugin_classname]
# Make sure the instance is a BasePlugin type, before creating it.
if not issubclass(klass, BasePlugin):
raise TypeError
try:
instance = klass(exclude_lines_regex=exclude_lines_regex, **kwargs)
except TypeError:
log.warning(
'Unable to initialize plugin!',
)
raise
return instance
|
Initializes a plugin class, given a classname and kwargs.
:type plugin_classname: str
:param plugin_classname: subclass of BasePlugin.
:type exclude_lines_regex: str|None
:param exclude_lines_regex: optional regex for ignored lines.
|
def find_objects(config=None, config_path=None, regex=None, saltenv='base'):
'''
Return all the line objects that match the expression in the ``regex``
argument.
.. warning::
This function is mostly valuable when invoked from other Salt
components (i.e., execution modules, states, templates etc.). For CLI
usage, please consider using
:py:func:`ciscoconfparse.find_lines <salt.ciscoconfparse_mod.find_lines>`
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
regex
The regular expression to match the lines against.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
Usage example:
.. code-block:: python
objects = __salt__['ciscoconfparse.find_objects'](config_path='salt://path/to/config.txt',
regex='Gigabit')
for obj in objects:
print(obj.text)
'''
ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
lines = ccp.find_objects(regex)
return lines
|
Return all the line objects that match the expression in the ``regex``
argument.
.. warning::
This function is mostly valuable when invoked from other Salt
components (i.e., execution modules, states, templates etc.). For CLI
usage, please consider using
:py:func:`ciscoconfparse.find_lines <salt.ciscoconfparse_mod.find_lines>`
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
regex
The regular expression to match the lines against.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
Usage example:
.. code-block:: python
objects = __salt__['ciscoconfparse.find_objects'](config_path='salt://path/to/config.txt',
regex='Gigabit')
for obj in objects:
print(obj.text)
|
def register(self, cmd: Type[Command]) -> None:
"""Register a new IMAP command.
Args:
cmd: The new command type.
"""
self.commands[cmd.command] = cmd
|
Register a new IMAP command.
Args:
cmd: The new command type.
|
def get(url, params={}):
"""Invoke an HTTP GET request on a url
Args:
url (string): URL endpoint to request
params (dict): Dictionary of url parameters
Returns:
dict: JSON response as a dictionary
"""
request_url = url
if len(params):
request_url = "{}?{}".format(url, urlencode(params))
try:
req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})
response = json.loads(urlopen(req).read().decode("utf-8"))
return response
except HTTPError as err:
raise MtgException(err.read())
|
Invoke an HTTP GET request on a url
Args:
url (string): URL endpoint to request
params (dict): Dictionary of url parameters
Returns:
dict: JSON response as a dictionary
|
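A sketch of the URL-construction step above using the Python 3 urllib imports; the endpoint is a placeholder and no request is actually sent.
from urllib.parse import urlencode

url = "https://api.example.invalid/cards"      # hypothetical endpoint
params = {"name": "Black Lotus", "page": 2}
request_url = "{}?{}".format(url, urlencode(params)) if params else url
print(request_url)   # https://api.example.invalid/cards?name=Black+Lotus&page=2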
def remove_all(self, item):
# type: (Any) -> None
"""
        Remove all occurrences of the parameter.
:param item: Value to delete from the WeakList.
"""
item = self.ref(item)
while list.__contains__(self, item):
list.remove(self, item)
|
Remove all occurrences of the parameter.
:param item: Value to delete from the WeakList.
|
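A small demonstration of the weakref-based bookkeeping the WeakList above relies on: entries are stored as weakref.ref objects, and removal compares a freshly built reference against the stored ones.
import weakref

class Item:
    pass

a, b = Item(), Item()
refs = [weakref.ref(a), weakref.ref(b), weakref.ref(a)]

target = weakref.ref(a)
while target in refs:      # ref objects compare equal while their referents are alive and equal
    refs.remove(target)

print(len(refs))           # 1 -- only the reference to b remains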
def gtlike_spectrum_to_vectors(spectrum):
""" Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file."""
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
npar = max(parameters.size(), 10)
o = {'param_names': np.zeros(npar, dtype='S32'),
'param_values': np.empty(npar, dtype=float) * np.nan,
'param_errors': np.empty(npar, dtype=float) * np.nan,
}
for i, p in enumerate(parameters):
o['param_names'][i] = p.getName()
o['param_values'][i] = p.getTrueValue()
perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
o['param_errors'][i] = perr
return o
|
Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file.
|