text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def commit_update():
""" Switch the target boot partition. """
unused = _find_unused_partition()
new = _switch_partition()
if new != unused:
msg = f"Bad switch: switched to {new} when {unused} was unused"
LOG.error(msg)
raise RuntimeError(msg)
else:
LOG.info(f'commit_update: committed to booting {new}') | [
"def",
"commit_update",
"(",
")",
":",
"unused",
"=",
"_find_unused_partition",
"(",
")",
"new",
"=",
"_switch_partition",
"(",
")",
"if",
"new",
"!=",
"unused",
":",
"msg",
"=",
"f\"Bad switch: switched to {new} when {unused} was unused\"",
"LOG",
".",
"error",
"(",
"msg",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"else",
":",
"LOG",
".",
"info",
"(",
"f'commit_update: committed to booting {new}'",
")"
] | 34.7 | 16.4 |
def _recursive_repr(item):
"""Hack around python `repr` to deterministically represent dictionaries.
This is able to represent more things than json.dumps, since it does not require things to be JSON serializable
(e.g. datetimes).
"""
if isinstance(item, basestring):
result = str(item)
elif isinstance(item, list):
result = '[{}]'.format(', '.join([_recursive_repr(x) for x in item]))
elif isinstance(item, dict):
kv_pairs = ['{}: {}'.format(_recursive_repr(k), _recursive_repr(item[k])) for k in sorted(item)]
result = '{' + ', '.join(kv_pairs) + '}'
else:
result = repr(item)
return result | [
"def",
"_recursive_repr",
"(",
"item",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"basestring",
")",
":",
"result",
"=",
"str",
"(",
"item",
")",
"elif",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"result",
"=",
"'[{}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"[",
"_recursive_repr",
"(",
"x",
")",
"for",
"x",
"in",
"item",
"]",
")",
")",
"elif",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"kv_pairs",
"=",
"[",
"'{}: {}'",
".",
"format",
"(",
"_recursive_repr",
"(",
"k",
")",
",",
"_recursive_repr",
"(",
"item",
"[",
"k",
"]",
")",
")",
"for",
"k",
"in",
"sorted",
"(",
"item",
")",
"]",
"result",
"=",
"'{'",
"+",
"', '",
".",
"join",
"(",
"kv_pairs",
")",
"+",
"'}'",
"else",
":",
"result",
"=",
"repr",
"(",
"item",
")",
"return",
"result"
] | 40.875 | 22.375 |
def format(self, version=0x10, wipe=None):
"""Format a FeliCa Lite Tag for NDEF.
"""
return super(FelicaLite, self).format(version, wipe) | [
"def",
"format",
"(",
"self",
",",
"version",
"=",
"0x10",
",",
"wipe",
"=",
"None",
")",
":",
"return",
"super",
"(",
"FelicaLite",
",",
"self",
")",
".",
"format",
"(",
"version",
",",
"wipe",
")"
] | 31.6 | 12.4 |
def fromtab(args):
"""
%prog fromtab tabfile fastafile
Convert 2-column sequence file to FASTA format. One usage for this is to
generatea `adapters.fasta` for TRIMMOMATIC.
"""
p = OptionParser(fromtab.__doc__)
p.set_sep(sep=None)
p.add_option("--noheader", default=False, action="store_true",
help="Ignore first line")
p.add_option("--replace",
help="Replace spaces in name to char [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, fastafile = args
sep = opts.sep
replace = opts.replace
fp = must_open(tabfile)
fw = must_open(fastafile, "w")
nseq = 0
if opts.noheader:
next(fp)
for row in fp:
row = row.strip()
if not row or row[0] == '#':
continue
name, seq = row.rsplit(sep, 1)
if replace:
name = name.replace(" ", replace)
print(">{0}\n{1}".format(name, seq), file=fw)
nseq += 1
fw.close()
logging.debug("A total of {0} sequences written to `{1}`.".\
format(nseq, fastafile)) | [
"def",
"fromtab",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fromtab",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
"sep",
"=",
"None",
")",
"p",
".",
"add_option",
"(",
"\"--noheader\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Ignore first line\"",
")",
"p",
".",
"add_option",
"(",
"\"--replace\"",
",",
"help",
"=",
"\"Replace spaces in name to char [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"tabfile",
",",
"fastafile",
"=",
"args",
"sep",
"=",
"opts",
".",
"sep",
"replace",
"=",
"opts",
".",
"replace",
"fp",
"=",
"must_open",
"(",
"tabfile",
")",
"fw",
"=",
"must_open",
"(",
"fastafile",
",",
"\"w\"",
")",
"nseq",
"=",
"0",
"if",
"opts",
".",
"noheader",
":",
"next",
"(",
"fp",
")",
"for",
"row",
"in",
"fp",
":",
"row",
"=",
"row",
".",
"strip",
"(",
")",
"if",
"not",
"row",
"or",
"row",
"[",
"0",
"]",
"==",
"'#'",
":",
"continue",
"name",
",",
"seq",
"=",
"row",
".",
"rsplit",
"(",
"sep",
",",
"1",
")",
"if",
"replace",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"\" \"",
",",
"replace",
")",
"print",
"(",
"\">{0}\\n{1}\"",
".",
"format",
"(",
"name",
",",
"seq",
")",
",",
"file",
"=",
"fw",
")",
"nseq",
"+=",
"1",
"fw",
".",
"close",
"(",
")",
"logging",
".",
"debug",
"(",
"\"A total of {0} sequences written to `{1}`.\"",
".",
"format",
"(",
"nseq",
",",
"fastafile",
")",
")"
] | 28.25 | 17.65 |
def replace_namespaced_replica_set_scale(self, name, namespace, body, **kwargs):
"""
replace scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
return data | [
"def",
"replace_namespaced_replica_set_scale",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"replace_namespaced_replica_set_scale_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"replace_namespaced_replica_set_scale_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 68.64 | 40.88 |
def smart_reroot(treefile, outgroupfile, outfile, format=0):
"""
simple function to reroot Newick format tree using ete2
Tree reading format options see here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
"""
tree = Tree(treefile, format=format)
leaves = [t.name for t in tree.get_leaves()][::-1]
outgroup = []
for o in must_open(outgroupfile):
o = o.strip()
for leaf in leaves:
if leaf[:len(o)] == o:
outgroup.append(leaf)
if outgroup:
break
if not outgroup:
print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr)
return treefile
try:
tree.set_outgroup(tree.get_common_ancestor(*outgroup))
except ValueError:
assert type(outgroup) == list
outgroup = outgroup[0]
tree.set_outgroup(outgroup)
tree.write(outfile=outfile, format=format)
logging.debug("Rerooted tree printed to {0}".format(outfile))
return outfile | [
"def",
"smart_reroot",
"(",
"treefile",
",",
"outgroupfile",
",",
"outfile",
",",
"format",
"=",
"0",
")",
":",
"tree",
"=",
"Tree",
"(",
"treefile",
",",
"format",
"=",
"format",
")",
"leaves",
"=",
"[",
"t",
".",
"name",
"for",
"t",
"in",
"tree",
".",
"get_leaves",
"(",
")",
"]",
"[",
":",
":",
"-",
"1",
"]",
"outgroup",
"=",
"[",
"]",
"for",
"o",
"in",
"must_open",
"(",
"outgroupfile",
")",
":",
"o",
"=",
"o",
".",
"strip",
"(",
")",
"for",
"leaf",
"in",
"leaves",
":",
"if",
"leaf",
"[",
":",
"len",
"(",
"o",
")",
"]",
"==",
"o",
":",
"outgroup",
".",
"append",
"(",
"leaf",
")",
"if",
"outgroup",
":",
"break",
"if",
"not",
"outgroup",
":",
"print",
"(",
"\"Outgroup not found. Tree {0} cannot be rerooted.\"",
".",
"format",
"(",
"treefile",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"treefile",
"try",
":",
"tree",
".",
"set_outgroup",
"(",
"tree",
".",
"get_common_ancestor",
"(",
"*",
"outgroup",
")",
")",
"except",
"ValueError",
":",
"assert",
"type",
"(",
"outgroup",
")",
"==",
"list",
"outgroup",
"=",
"outgroup",
"[",
"0",
"]",
"tree",
".",
"set_outgroup",
"(",
"outgroup",
")",
"tree",
".",
"write",
"(",
"outfile",
"=",
"outfile",
",",
"format",
"=",
"format",
")",
"logging",
".",
"debug",
"(",
"\"Rerooted tree printed to {0}\"",
".",
"format",
"(",
"outfile",
")",
")",
"return",
"outfile"
] | 32.125 | 19 |
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids | [
"def",
"tokenize",
"(",
"self",
",",
"path",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"# Add words to the dictionary",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"tokens",
"=",
"0",
"for",
"line",
"in",
"f",
":",
"words",
"=",
"line",
".",
"split",
"(",
")",
"+",
"[",
"'<eos>'",
"]",
"tokens",
"+=",
"len",
"(",
"words",
")",
"for",
"word",
"in",
"words",
":",
"self",
".",
"dictionary",
".",
"add_word",
"(",
"word",
")",
"# Tokenize file content",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"ids",
"=",
"torch",
".",
"LongTensor",
"(",
"tokens",
")",
"token",
"=",
"0",
"for",
"line",
"in",
"f",
":",
"words",
"=",
"line",
".",
"split",
"(",
")",
"+",
"[",
"'<eos>'",
"]",
"for",
"word",
"in",
"words",
":",
"ids",
"[",
"token",
"]",
"=",
"self",
".",
"dictionary",
".",
"word2idx",
"[",
"word",
"]",
"token",
"+=",
"1",
"return",
"ids"
] | 31.73913 | 12.521739 |
def on_log(request, page_name):
"""Show the list of recent changes."""
page = Page.query.filter_by(name=page_name).first()
if page is None:
return page_missing(request, page_name, False)
return Response(generate_template("action_log.html", page=page)) | [
"def",
"on_log",
"(",
"request",
",",
"page_name",
")",
":",
"page",
"=",
"Page",
".",
"query",
".",
"filter_by",
"(",
"name",
"=",
"page_name",
")",
".",
"first",
"(",
")",
"if",
"page",
"is",
"None",
":",
"return",
"page_missing",
"(",
"request",
",",
"page_name",
",",
"False",
")",
"return",
"Response",
"(",
"generate_template",
"(",
"\"action_log.html\"",
",",
"page",
"=",
"page",
")",
")"
] | 45 | 14.333333 |
def _predict_one(self, document, encoding=None, return_blocks=False):
"""
Predict class (content=1 or not-content=0) of each block in an HTML
document.
Args:
documents (str): HTML document
Returns:
``np.ndarray``: array of binary predictions for content (1) or
not-content (0).
"""
# blockify
blocks = self.blockifier.blockify(document, encoding=encoding)
# get features
try:
features = self.features.transform(blocks)
except ValueError: # Can't make features, predict no content
preds = np.zeros((len(blocks)))
# make predictions
else:
if self.prob_threshold is None:
preds = self.model.predict(features)
else:
self._positive_idx = (
self._positive_idx or list(self.model.classes_).index(1))
preds = self.model.predict_proba(features) > self.prob_threshold
preds = preds[:, self._positive_idx].astype(int)
if return_blocks:
return preds, blocks
else:
return preds | [
"def",
"_predict_one",
"(",
"self",
",",
"document",
",",
"encoding",
"=",
"None",
",",
"return_blocks",
"=",
"False",
")",
":",
"# blockify",
"blocks",
"=",
"self",
".",
"blockifier",
".",
"blockify",
"(",
"document",
",",
"encoding",
"=",
"encoding",
")",
"# get features",
"try",
":",
"features",
"=",
"self",
".",
"features",
".",
"transform",
"(",
"blocks",
")",
"except",
"ValueError",
":",
"# Can't make features, predict no content",
"preds",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"blocks",
")",
")",
")",
"# make predictions",
"else",
":",
"if",
"self",
".",
"prob_threshold",
"is",
"None",
":",
"preds",
"=",
"self",
".",
"model",
".",
"predict",
"(",
"features",
")",
"else",
":",
"self",
".",
"_positive_idx",
"=",
"(",
"self",
".",
"_positive_idx",
"or",
"list",
"(",
"self",
".",
"model",
".",
"classes_",
")",
".",
"index",
"(",
"1",
")",
")",
"preds",
"=",
"self",
".",
"model",
".",
"predict_proba",
"(",
"features",
")",
">",
"self",
".",
"prob_threshold",
"preds",
"=",
"preds",
"[",
":",
",",
"self",
".",
"_positive_idx",
"]",
".",
"astype",
"(",
"int",
")",
"if",
"return_blocks",
":",
"return",
"preds",
",",
"blocks",
"else",
":",
"return",
"preds"
] | 34.757576 | 21.121212 |
def sim_jaro_winkler(
src,
tar,
qval=1,
mode='winkler',
long_strings=False,
boost_threshold=0.7,
scaling_factor=0.1,
):
"""Return the Jaro or Jaro-Winkler similarity of two strings.
This is a wrapper for :py:meth:`JaroWinkler.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
qval : int
The length of each q-gram (defaults to 1: character-wise matching)
mode : str
Indicates which variant of this distance metric to compute:
- ``winkler`` -- computes the Jaro-Winkler distance (default) which
increases the score for matches near the start of the word
- ``jaro`` -- computes the Jaro distance
long_strings : bool
Set to True to "Increase the probability of a match when the number of
matched characters is large. This option allows for a little more
tolerance when the strings are large. It is not an appropriate test
when comparing fixedlength fields such as phone and social security
numbers." (Used in 'winkler' mode only.)
boost_threshold : float
A value between 0 and 1, below which the Winkler boost is not applied
(defaults to 0.7). (Used in 'winkler' mode only.)
scaling_factor : float
A value between 0 and 0.25, indicating by how much to boost scores for
matching prefixes (defaults to 0.1). (Used in 'winkler' mode only.)
Returns
-------
float
Jaro or Jaro-Winkler similarity
Examples
--------
>>> round(sim_jaro_winkler('cat', 'hat'), 12)
0.777777777778
>>> round(sim_jaro_winkler('Niall', 'Neil'), 12)
0.805
>>> round(sim_jaro_winkler('aluminum', 'Catalan'), 12)
0.60119047619
>>> round(sim_jaro_winkler('ATCG', 'TAGC'), 12)
0.833333333333
>>> round(sim_jaro_winkler('cat', 'hat', mode='jaro'), 12)
0.777777777778
>>> round(sim_jaro_winkler('Niall', 'Neil', mode='jaro'), 12)
0.783333333333
>>> round(sim_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12)
0.60119047619
>>> round(sim_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12)
0.833333333333
"""
return JaroWinkler().sim(
src, tar, qval, mode, long_strings, boost_threshold, scaling_factor
) | [
"def",
"sim_jaro_winkler",
"(",
"src",
",",
"tar",
",",
"qval",
"=",
"1",
",",
"mode",
"=",
"'winkler'",
",",
"long_strings",
"=",
"False",
",",
"boost_threshold",
"=",
"0.7",
",",
"scaling_factor",
"=",
"0.1",
",",
")",
":",
"return",
"JaroWinkler",
"(",
")",
".",
"sim",
"(",
"src",
",",
"tar",
",",
"qval",
",",
"mode",
",",
"long_strings",
",",
"boost_threshold",
",",
"scaling_factor",
")"
] | 32.6 | 25 |
def get_query_includes(tokenized_terms, search_fields):
"""
Builds a query for included terms in a text search.
"""
query = None
for term in tokenized_terms:
or_query = None
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query | [
"def",
"get_query_includes",
"(",
"tokenized_terms",
",",
"search_fields",
")",
":",
"query",
"=",
"None",
"for",
"term",
"in",
"tokenized_terms",
":",
"or_query",
"=",
"None",
"for",
"field_name",
"in",
"search_fields",
":",
"q",
"=",
"Q",
"(",
"*",
"*",
"{",
"\"%s__icontains\"",
"%",
"field_name",
":",
"term",
"}",
")",
"if",
"or_query",
"is",
"None",
":",
"or_query",
"=",
"q",
"else",
":",
"or_query",
"=",
"or_query",
"|",
"q",
"if",
"query",
"is",
"None",
":",
"query",
"=",
"or_query",
"else",
":",
"query",
"=",
"query",
"&",
"or_query",
"return",
"query"
] | 29.222222 | 12.333333 |
def log1p(
data: Union[AnnData, np.ndarray, spmatrix],
copy: bool = False,
chunked: bool = False,
chunk_size: Optional[int] = None,
) -> Optional[AnnData]:
"""Logarithmize the data matrix.
Computes :math:`X = \\log(X + 1)`, where :math:`log` denotes the natural logarithm.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
copy
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
chunked
Process the data matrix in chunks, which will save memory.
Applies only to :class:`~anndata.AnnData`.
chunk_size
``n_obs`` of the chunks to process the data in.
Returns
-------
Returns or updates ``data``, depending on ``copy``.
"""
if copy:
if not isinstance(data, AnnData):
data = data.astype(np.floating)
else:
data = data.copy()
elif not isinstance(data, AnnData) and np.issubdtype(data.dtype, np.integer):
raise TypeError("Cannot perform inplace log1p on integer array")
def _log1p(X):
if issparse(X):
np.log1p(X.data, out=X.data)
else:
np.log1p(X, out=X)
return X
if isinstance(data, AnnData):
if not np.issubdtype(data.X.dtype, np.floating):
data.X = data.X.astype(np.float32)
if chunked:
for chunk, start, end in data.chunked_X(chunk_size):
data.X[start:end] = _log1p(chunk)
else:
_log1p(data.X)
else:
_log1p(data)
return data if copy else None | [
"def",
"log1p",
"(",
"data",
":",
"Union",
"[",
"AnnData",
",",
"np",
".",
"ndarray",
",",
"spmatrix",
"]",
",",
"copy",
":",
"bool",
"=",
"False",
",",
"chunked",
":",
"bool",
"=",
"False",
",",
"chunk_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
")",
"->",
"Optional",
"[",
"AnnData",
"]",
":",
"if",
"copy",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"AnnData",
")",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"floating",
")",
"else",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"data",
",",
"AnnData",
")",
"and",
"np",
".",
"issubdtype",
"(",
"data",
".",
"dtype",
",",
"np",
".",
"integer",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot perform inplace log1p on integer array\"",
")",
"def",
"_log1p",
"(",
"X",
")",
":",
"if",
"issparse",
"(",
"X",
")",
":",
"np",
".",
"log1p",
"(",
"X",
".",
"data",
",",
"out",
"=",
"X",
".",
"data",
")",
"else",
":",
"np",
".",
"log1p",
"(",
"X",
",",
"out",
"=",
"X",
")",
"return",
"X",
"if",
"isinstance",
"(",
"data",
",",
"AnnData",
")",
":",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"data",
".",
"X",
".",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"data",
".",
"X",
"=",
"data",
".",
"X",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"if",
"chunked",
":",
"for",
"chunk",
",",
"start",
",",
"end",
"in",
"data",
".",
"chunked_X",
"(",
"chunk_size",
")",
":",
"data",
".",
"X",
"[",
"start",
":",
"end",
"]",
"=",
"_log1p",
"(",
"chunk",
")",
"else",
":",
"_log1p",
"(",
"data",
".",
"X",
")",
"else",
":",
"_log1p",
"(",
"data",
")",
"return",
"data",
"if",
"copy",
"else",
"None"
] | 29.672727 | 21.690909 |
def curriculum2schedule(curriculum, first_day, compress=False, time_table=None):
"""
将课程表转换为上课时间表, 如果 compress=False 结果是未排序的, 否则为压缩并排序后的上课时间表
:param curriculum: 课表
:param first_day: 第一周周一, 如 datetime.datetime(2016, 8, 29)
:param compress: 压缩连续的课时为一个
:param time_table: 每天上课的时间表, 形如 ``((start timedelta, end timedelta), ...)`` 的 11 × 2 的矩阵
:return: [(datetime.datetime, str) ...]
"""
schedule = []
time_table = time_table or (
(timedelta(hours=8), timedelta(hours=8, minutes=50)),
(timedelta(hours=9), timedelta(hours=9, minutes=50)),
(timedelta(hours=10, minutes=10), timedelta(hours=11)),
(timedelta(hours=11, minutes=10), timedelta(hours=12)),
(timedelta(hours=14), timedelta(hours=14, minutes=50)),
(timedelta(hours=15), timedelta(hours=15, minutes=50)),
(timedelta(hours=16), timedelta(hours=16, minutes=50)),
(timedelta(hours=17), timedelta(hours=17, minutes=50)),
(timedelta(hours=19), timedelta(hours=19, minutes=50)),
(timedelta(hours=19, minutes=50), timedelta(hours=20, minutes=40)),
(timedelta(hours=20, minutes=40), timedelta(hours=21, minutes=30))
)
for i, d in enumerate(curriculum):
for j, cs in enumerate(d):
for c in cs or []:
course = '{name}[{place}]'.format(name=c['课程名称'], place=c['课程地点'])
for week in c['上课周数']:
day = first_day + timedelta(weeks=week - 1, days=i)
start, end = time_table[j]
item = (week, day + start, day + end, course)
schedule.append(item)
schedule.sort()
if compress:
new_schedule = [schedule[0]]
for i in range(1, len(schedule)):
sch = schedule[i]
# 同一天的连续课程
if new_schedule[-1][1].date() == sch[1].date() and new_schedule[-1][3] == sch[3]:
# 更新结束时间
old_item = new_schedule.pop()
# week, start, end, course
new_item = (old_item[0], old_item[1], sch[2], old_item[3])
else:
new_item = sch
new_schedule.append(new_item)
return new_schedule
return schedule | [
"def",
"curriculum2schedule",
"(",
"curriculum",
",",
"first_day",
",",
"compress",
"=",
"False",
",",
"time_table",
"=",
"None",
")",
":",
"schedule",
"=",
"[",
"]",
"time_table",
"=",
"time_table",
"or",
"(",
"(",
"timedelta",
"(",
"hours",
"=",
"8",
")",
",",
"timedelta",
"(",
"hours",
"=",
"8",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"9",
")",
",",
"timedelta",
"(",
"hours",
"=",
"9",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"10",
",",
"minutes",
"=",
"10",
")",
",",
"timedelta",
"(",
"hours",
"=",
"11",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"11",
",",
"minutes",
"=",
"10",
")",
",",
"timedelta",
"(",
"hours",
"=",
"12",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"14",
")",
",",
"timedelta",
"(",
"hours",
"=",
"14",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"15",
")",
",",
"timedelta",
"(",
"hours",
"=",
"15",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"16",
")",
",",
"timedelta",
"(",
"hours",
"=",
"16",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"17",
")",
",",
"timedelta",
"(",
"hours",
"=",
"17",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"19",
")",
",",
"timedelta",
"(",
"hours",
"=",
"19",
",",
"minutes",
"=",
"50",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"19",
",",
"minutes",
"=",
"50",
")",
",",
"timedelta",
"(",
"hours",
"=",
"20",
",",
"minutes",
"=",
"40",
")",
")",
",",
"(",
"timedelta",
"(",
"hours",
"=",
"20",
",",
"minutes",
"=",
"40",
")",
",",
"timedelta",
"(",
"hours",
"=",
"21",
",",
"minutes",
"=",
"30",
")",
")",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"curriculum",
")",
":",
"for",
"j",
",",
"cs",
"in",
"enumerate",
"(",
"d",
")",
":",
"for",
"c",
"in",
"cs",
"or",
"[",
"]",
":",
"course",
"=",
"'{name}[{place}]'",
".",
"format",
"(",
"name",
"=",
"c",
"[",
"'课程名称'], place",
"=",
"c",
"'课程地点",
"'",
"]",
")",
"",
"",
"",
"for",
"week",
"in",
"c",
"[",
"'上课周数']:",
"",
"",
"day",
"=",
"first_day",
"+",
"timedelta",
"(",
"weeks",
"=",
"week",
"-",
"1",
",",
"days",
"=",
"i",
")",
"start",
",",
"end",
"=",
"time_table",
"[",
"j",
"]",
"item",
"=",
"(",
"week",
",",
"day",
"+",
"start",
",",
"day",
"+",
"end",
",",
"course",
")",
"schedule",
".",
"append",
"(",
"item",
")",
"schedule",
".",
"sort",
"(",
")",
"if",
"compress",
":",
"new_schedule",
"=",
"[",
"schedule",
"[",
"0",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"schedule",
")",
")",
":",
"sch",
"=",
"schedule",
"[",
"i",
"]",
"# 同一天的连续课程",
"if",
"new_schedule",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
".",
"date",
"(",
")",
"==",
"sch",
"[",
"1",
"]",
".",
"date",
"(",
")",
"and",
"new_schedule",
"[",
"-",
"1",
"]",
"[",
"3",
"]",
"==",
"sch",
"[",
"3",
"]",
":",
"# 更新结束时间",
"old_item",
"=",
"new_schedule",
".",
"pop",
"(",
")",
"# week, start, end, course",
"new_item",
"=",
"(",
"old_item",
"[",
"0",
"]",
",",
"old_item",
"[",
"1",
"]",
",",
"sch",
"[",
"2",
"]",
",",
"old_item",
"[",
"3",
"]",
")",
"else",
":",
"new_item",
"=",
"sch",
"new_schedule",
".",
"append",
"(",
"new_item",
")",
"return",
"new_schedule",
"return",
"schedule"
] | 42.980392 | 19.607843 |
def save_sbml(filename, model, y0=None, volume=1.0, is_valid=True):
"""
Save a model in the SBML format.
Parameters
----------
model : NetworkModel
y0 : dict
Initial condition.
volume : Real or Real3, optional
A size of the simulation volume.
is_valid : bool, optional
Check if the generated model is valid. True as a default.
"""
y0 = y0 or {}
import libsbml
document = export_sbml(model, y0, volume, is_valid)
# with open(filename, 'w') as fout:
# fout.write(libsbml.writeSBMLToString(document))
# writer = libsbml.SBMLWriter()
# writer.writeSBML(document, filename)
libsbml.writeSBML(document, filename) | [
"def",
"save_sbml",
"(",
"filename",
",",
"model",
",",
"y0",
"=",
"None",
",",
"volume",
"=",
"1.0",
",",
"is_valid",
"=",
"True",
")",
":",
"y0",
"=",
"y0",
"or",
"{",
"}",
"import",
"libsbml",
"document",
"=",
"export_sbml",
"(",
"model",
",",
"y0",
",",
"volume",
",",
"is_valid",
")",
"# with open(filename, 'w') as fout:",
"# fout.write(libsbml.writeSBMLToString(document))",
"# writer = libsbml.SBMLWriter()",
"# writer.writeSBML(document, filename)",
"libsbml",
".",
"writeSBML",
"(",
"document",
",",
"filename",
")"
] | 26.230769 | 17.923077 |
def check_for_connection(self):
""" Scan arguments for a `@name` one.
"""
for idx, arg in enumerate(self.args):
if arg.startswith('@'):
if arg[1:] not in config.connections:
self.parser.error("Undefined connection '{}'!".format(arg[1:]))
config.scgi_url = config.connections[arg[1:]]
self.LOG.debug("Switched to connection %s (%s)", arg[1:], config.scgi_url)
del self.args[idx]
break | [
"def",
"check_for_connection",
"(",
"self",
")",
":",
"for",
"idx",
",",
"arg",
"in",
"enumerate",
"(",
"self",
".",
"args",
")",
":",
"if",
"arg",
".",
"startswith",
"(",
"'@'",
")",
":",
"if",
"arg",
"[",
"1",
":",
"]",
"not",
"in",
"config",
".",
"connections",
":",
"self",
".",
"parser",
".",
"error",
"(",
"\"Undefined connection '{}'!\"",
".",
"format",
"(",
"arg",
"[",
"1",
":",
"]",
")",
")",
"config",
".",
"scgi_url",
"=",
"config",
".",
"connections",
"[",
"arg",
"[",
"1",
":",
"]",
"]",
"self",
".",
"LOG",
".",
"debug",
"(",
"\"Switched to connection %s (%s)\"",
",",
"arg",
"[",
"1",
":",
"]",
",",
"config",
".",
"scgi_url",
")",
"del",
"self",
".",
"args",
"[",
"idx",
"]",
"break"
] | 46.272727 | 15.545455 |
def get_code_version_numbers(cp):
"""Will extract the version information from the executables listed in
the executable section of the supplied ConfigParser object.
Returns
--------
dict
A dictionary keyed by the executable name with values giving the
version string for each executable.
"""
code_version_dict = {}
for _, value in cp.items('executables'):
_, exe_name = os.path.split(value)
version_string = None
if value.startswith('gsiftp://') or value.startswith('http://'):
code_version_dict[exe_name] = "Using bundle downloaded from %s" % value
else:
try:
if value.startswith('file://'):
value = value[7:]
version_string = subprocess.check_output([value, '--version'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
version_string = "Executable fails on %s --version" % (value)
except OSError:
version_string = "Executable doesn't seem to exist(!)"
code_version_dict[exe_name] = version_string
return code_version_dict | [
"def",
"get_code_version_numbers",
"(",
"cp",
")",
":",
"code_version_dict",
"=",
"{",
"}",
"for",
"_",
",",
"value",
"in",
"cp",
".",
"items",
"(",
"'executables'",
")",
":",
"_",
",",
"exe_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"value",
")",
"version_string",
"=",
"None",
"if",
"value",
".",
"startswith",
"(",
"'gsiftp://'",
")",
"or",
"value",
".",
"startswith",
"(",
"'http://'",
")",
":",
"code_version_dict",
"[",
"exe_name",
"]",
"=",
"\"Using bundle downloaded from %s\"",
"%",
"value",
"else",
":",
"try",
":",
"if",
"value",
".",
"startswith",
"(",
"'file://'",
")",
":",
"value",
"=",
"value",
"[",
"7",
":",
"]",
"version_string",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"value",
",",
"'--version'",
"]",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"version_string",
"=",
"\"Executable fails on %s --version\"",
"%",
"(",
"value",
")",
"except",
"OSError",
":",
"version_string",
"=",
"\"Executable doesn't seem to exist(!)\"",
"code_version_dict",
"[",
"exe_name",
"]",
"=",
"version_string",
"return",
"code_version_dict"
] | 42.821429 | 19.928571 |
def _get_possible_query_bridging_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None):
'''Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit)'''
bridges = {}
writing_log_file = None not in [log_fh, log_outprefix]
for qry_name, hits_to_qry in nucmer_hits.items():
if len(hits_to_qry) < 2:
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': checking nucmer matches', sep='', file=log_fh)
longest_start_hit = self._get_longest_hit_at_qry_start(hits_to_qry)
longest_end_hit = self._get_longest_hit_at_qry_end(hits_to_qry)
if (
None in (longest_start_hit, longest_end_hit)
or longest_start_hit.ref_name == longest_end_hit.ref_name
or self._hits_have_same_reference(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': no potential pairs of hits to merge contigs', sep='', file=log_fh)
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': potential pair of hits to merge contigs...', sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_start_hit, sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_end_hit, sep='', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
hits_to_qry,
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if has_longer_hit and writing_log_file:
print(log_outprefix, '\t', qry_name, ': rejected - there is a longer hit to elsewhere', sep='', file=log_fh)
orientation_ok = self._orientation_ok_to_bridge_contigs(longest_start_hit, longest_end_hit)
if writing_log_file and not orientation_ok:
print(log_outprefix, '\t', qry_name, ': rejected - orientation/distance from ends not correct to make a merge', sep='', file=log_fh)
if orientation_ok and not has_longer_hit:
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': might be used - no longer hits elsewhere and orientation/distance to ends OK', sep='', file=log_fh)
bridges[qry_name] = (longest_start_hit, longest_end_hit)
return bridges | [
"def",
"_get_possible_query_bridging_contigs",
"(",
"self",
",",
"nucmer_hits",
",",
"log_fh",
"=",
"None",
",",
"log_outprefix",
"=",
"None",
")",
":",
"bridges",
"=",
"{",
"}",
"writing_log_file",
"=",
"None",
"not",
"in",
"[",
"log_fh",
",",
"log_outprefix",
"]",
"for",
"qry_name",
",",
"hits_to_qry",
"in",
"nucmer_hits",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"hits_to_qry",
")",
"<",
"2",
":",
"continue",
"if",
"writing_log_file",
":",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': checking nucmer matches'",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"longest_start_hit",
"=",
"self",
".",
"_get_longest_hit_at_qry_start",
"(",
"hits_to_qry",
")",
"longest_end_hit",
"=",
"self",
".",
"_get_longest_hit_at_qry_end",
"(",
"hits_to_qry",
")",
"if",
"(",
"None",
"in",
"(",
"longest_start_hit",
",",
"longest_end_hit",
")",
"or",
"longest_start_hit",
".",
"ref_name",
"==",
"longest_end_hit",
".",
"ref_name",
"or",
"self",
".",
"_hits_have_same_reference",
"(",
"longest_start_hit",
",",
"longest_end_hit",
")",
")",
":",
"if",
"writing_log_file",
":",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': no potential pairs of hits to merge contigs'",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"continue",
"if",
"writing_log_file",
":",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': potential pair of hits to merge contigs...'",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': '",
",",
"longest_start_hit",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': '",
",",
"longest_end_hit",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"shortest_hit_length",
"=",
"self",
".",
"_min_qry_hit_length",
"(",
"[",
"longest_start_hit",
",",
"longest_end_hit",
"]",
")",
"has_longer_hit",
"=",
"self",
".",
"_has_qry_hit_longer_than",
"(",
"hits_to_qry",
",",
"shortest_hit_length",
",",
"hits_to_exclude",
"=",
"{",
"longest_start_hit",
",",
"longest_end_hit",
"}",
")",
"if",
"has_longer_hit",
"and",
"writing_log_file",
":",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': rejected - there is a longer hit to elsewhere'",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"orientation_ok",
"=",
"self",
".",
"_orientation_ok_to_bridge_contigs",
"(",
"longest_start_hit",
",",
"longest_end_hit",
")",
"if",
"writing_log_file",
"and",
"not",
"orientation_ok",
":",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': rejected - orientation/distance from ends not correct to make a merge'",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"if",
"orientation_ok",
"and",
"not",
"has_longer_hit",
":",
"if",
"writing_log_file",
":",
"print",
"(",
"log_outprefix",
",",
"'\\t'",
",",
"qry_name",
",",
"': might be used - no longer hits elsewhere and orientation/distance to ends OK'",
",",
"sep",
"=",
"''",
",",
"file",
"=",
"log_fh",
")",
"bridges",
"[",
"qry_name",
"]",
"=",
"(",
"longest_start_hit",
",",
"longest_end_hit",
")",
"return",
"bridges"
] | 52.36 | 37.6 |
def jtag_send(self, tms, tdi, num_bits):
"""Sends data via JTAG.
Sends data via JTAG on the rising clock edge, TCK. At on each rising
clock edge, on bit is transferred in from TDI and out to TDO. The
clock uses the TMS to step through the standard JTAG state machine.
Args:
self (JLink): the ``JLink`` instance
tms (int): used to determine the state transitions for the Test
Access Port (TAP) controller from its current state
tdi (int): input data to be transferred in from TDI to TDO
num_bits (int): a number in the range ``[1, 32]`` inclusively
specifying the number of meaningful bits in the ``tms`` and
``tdi`` parameters for the purpose of extracting state and data
information
Returns:
``None``
Raises:
ValueError: if ``num_bits < 1`` or ``num_bits > 32``.
See Also:
`JTAG Technical Overview <https://www.xjtag.com/about-jtag/jtag-a-technical-overview>`_.
"""
if not util.is_natural(num_bits) or num_bits <= 0 or num_bits > 32:
raise ValueError('Number of bits must be >= 1 and <= 32.')
self._dll.JLINKARM_StoreBits(tms, tdi, num_bits)
return None | [
"def",
"jtag_send",
"(",
"self",
",",
"tms",
",",
"tdi",
",",
"num_bits",
")",
":",
"if",
"not",
"util",
".",
"is_natural",
"(",
"num_bits",
")",
"or",
"num_bits",
"<=",
"0",
"or",
"num_bits",
">",
"32",
":",
"raise",
"ValueError",
"(",
"'Number of bits must be >= 1 and <= 32.'",
")",
"self",
".",
"_dll",
".",
"JLINKARM_StoreBits",
"(",
"tms",
",",
"tdi",
",",
"num_bits",
")",
"return",
"None"
] | 41.933333 | 27.133333 |
def parse_source_file(source_file):
"""Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
"""
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None | [
"def",
"parse_source_file",
"(",
"source_file",
")",
":",
"if",
"not",
"source_file",
":",
"return",
"None",
"vcsinfo",
"=",
"source_file",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"vcsinfo",
")",
"==",
"4",
":",
"# These are repositories or cloud file systems (e.g. hg, git, s3)",
"vcstype",
",",
"root",
",",
"vcs_source_file",
",",
"revision",
"=",
"vcsinfo",
"return",
"vcs_source_file",
"if",
"len",
"(",
"vcsinfo",
")",
"==",
"2",
":",
"# These are directories on someone's Windows computer and vcstype is a",
"# file system (e.g. \"c:\", \"d:\", \"f:\")",
"vcstype",
",",
"vcs_source_file",
"=",
"vcsinfo",
"return",
"vcs_source_file",
"if",
"source_file",
".",
"startswith",
"(",
"'/'",
")",
":",
"# These are directories on OSX or Linux",
"return",
"source_file",
"# We have no idea what this is, so return None",
"return",
"None"
] | 30 | 22.588235 |
def encode_access_token(identity, secret, algorithm, expires_delta, fresh,
user_claims, csrf, identity_claim_key, user_claims_key,
json_encoder=None):
"""
Creates a new encoded (utf-8) access token.
:param identity: Identifier for who this token is for (ex, username). This
data must be json serializable
:param secret: Secret key to encode the JWT with
:param algorithm: Which algorithm to encode this JWT with
:param expires_delta: How far in the future this token should expire
(set to False to disable expiration)
:type expires_delta: datetime.timedelta or False
:param fresh: If this should be a 'fresh' token or not. If a
datetime.timedelta is given this will indicate how long this
token will remain fresh.
:param user_claims: Custom claims to include in this token. This data must
be json serializable
:param csrf: Whether to include a csrf double submit claim in this token
(boolean)
:param identity_claim_key: Which key should be used to store the identity
:param user_claims_key: Which key should be used to store the user claims
:return: Encoded access token
"""
if isinstance(fresh, datetime.timedelta):
now = datetime.datetime.utcnow()
fresh = timegm((now + fresh).utctimetuple())
token_data = {
identity_claim_key: identity,
'fresh': fresh,
'type': 'access',
}
# Don't add extra data to the token if user_claims is empty.
if user_claims:
token_data[user_claims_key] = user_claims
if csrf:
token_data['csrf'] = _create_csrf_token()
return _encode_jwt(token_data, expires_delta, secret, algorithm,
json_encoder=json_encoder) | [
"def",
"encode_access_token",
"(",
"identity",
",",
"secret",
",",
"algorithm",
",",
"expires_delta",
",",
"fresh",
",",
"user_claims",
",",
"csrf",
",",
"identity_claim_key",
",",
"user_claims_key",
",",
"json_encoder",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"fresh",
",",
"datetime",
".",
"timedelta",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"fresh",
"=",
"timegm",
"(",
"(",
"now",
"+",
"fresh",
")",
".",
"utctimetuple",
"(",
")",
")",
"token_data",
"=",
"{",
"identity_claim_key",
":",
"identity",
",",
"'fresh'",
":",
"fresh",
",",
"'type'",
":",
"'access'",
",",
"}",
"# Don't add extra data to the token if user_claims is empty.",
"if",
"user_claims",
":",
"token_data",
"[",
"user_claims_key",
"]",
"=",
"user_claims",
"if",
"csrf",
":",
"token_data",
"[",
"'csrf'",
"]",
"=",
"_create_csrf_token",
"(",
")",
"return",
"_encode_jwt",
"(",
"token_data",
",",
"expires_delta",
",",
"secret",
",",
"algorithm",
",",
"json_encoder",
"=",
"json_encoder",
")"
] | 42.674419 | 21.046512 |
def trigger_all_callbacks(self, callbacks=None):
"""Trigger callbacks for all keys on all or a subset of subscribers.
:param Iterable callbacks: list of callbacks or none for all subscribed
:rtype: Iterable[tornado.concurrent.Future]
"""
return [ret
for key in self
for ret in self.trigger_callbacks(key, callbacks=None)] | [
"def",
"trigger_all_callbacks",
"(",
"self",
",",
"callbacks",
"=",
"None",
")",
":",
"return",
"[",
"ret",
"for",
"key",
"in",
"self",
"for",
"ret",
"in",
"self",
".",
"trigger_callbacks",
"(",
"key",
",",
"callbacks",
"=",
"None",
")",
"]"
] | 42.888889 | 17.666667 |
def _get_content(context, page, content_type, lang, fallback=True):
"""Helper function used by ``PlaceholderNode``."""
if not page:
return ''
if not lang and 'lang' in context:
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if not page:
return ''
content = Content.objects.get_content(page, lang, content_type, fallback)
return content | [
"def",
"_get_content",
"(",
"context",
",",
"page",
",",
"content_type",
",",
"lang",
",",
"fallback",
"=",
"True",
")",
":",
"if",
"not",
"page",
":",
"return",
"''",
"if",
"not",
"lang",
"and",
"'lang'",
"in",
"context",
":",
"lang",
"=",
"context",
".",
"get",
"(",
"'lang'",
",",
"pages_settings",
".",
"PAGE_DEFAULT_LANGUAGE",
")",
"page",
"=",
"get_page_from_string_or_id",
"(",
"page",
",",
"lang",
")",
"if",
"not",
"page",
":",
"return",
"''",
"content",
"=",
"Content",
".",
"objects",
".",
"get_content",
"(",
"page",
",",
"lang",
",",
"content_type",
",",
"fallback",
")",
"return",
"content"
] | 29.4 | 25.533333 |
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary."""
return cls(**{
'initializers': [tf.compat.v2.initializers.deserialize(init)
for init in config.get('initializers', [])],
'sizes': config.get('sizes', []),
'validate_args': config.get('validate_args', False),
}) | [
"def",
"from_config",
"(",
"cls",
",",
"config",
")",
":",
"return",
"cls",
"(",
"*",
"*",
"{",
"'initializers'",
":",
"[",
"tf",
".",
"compat",
".",
"v2",
".",
"initializers",
".",
"deserialize",
"(",
"init",
")",
"for",
"init",
"in",
"config",
".",
"get",
"(",
"'initializers'",
",",
"[",
"]",
")",
"]",
",",
"'sizes'",
":",
"config",
".",
"get",
"(",
"'sizes'",
",",
"[",
"]",
")",
",",
"'validate_args'",
":",
"config",
".",
"get",
"(",
"'validate_args'",
",",
"False",
")",
",",
"}",
")"
] | 45.125 | 18.125 |
def save_state(self, state):
"""Save a state doc to the LRS
:param state: State document to be saved
:type state: :class:`tincan.documents.state_document.StateDocument`
:return: LRS Response object with saved state as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
request = HTTPRequest(
method="PUT",
resource="activities/state",
content=state.content,
)
if state.content_type is not None:
request.headers["Content-Type"] = state.content_type
else:
request.headers["Content-Type"] = "application/octet-stream"
if state.etag is not None:
request.headers["If-Match"] = state.etag
request.query_params = {
"stateId": state.id,
"activityId": state.activity.id,
"agent": state.agent.to_json(self.version)
}
lrs_response = self._send_request(request)
lrs_response.content = state
return self._send_request(request) | [
"def",
"save_state",
"(",
"self",
",",
"state",
")",
":",
"request",
"=",
"HTTPRequest",
"(",
"method",
"=",
"\"PUT\"",
",",
"resource",
"=",
"\"activities/state\"",
",",
"content",
"=",
"state",
".",
"content",
",",
")",
"if",
"state",
".",
"content_type",
"is",
"not",
"None",
":",
"request",
".",
"headers",
"[",
"\"Content-Type\"",
"]",
"=",
"state",
".",
"content_type",
"else",
":",
"request",
".",
"headers",
"[",
"\"Content-Type\"",
"]",
"=",
"\"application/octet-stream\"",
"if",
"state",
".",
"etag",
"is",
"not",
"None",
":",
"request",
".",
"headers",
"[",
"\"If-Match\"",
"]",
"=",
"state",
".",
"etag",
"request",
".",
"query_params",
"=",
"{",
"\"stateId\"",
":",
"state",
".",
"id",
",",
"\"activityId\"",
":",
"state",
".",
"activity",
".",
"id",
",",
"\"agent\"",
":",
"state",
".",
"agent",
".",
"to_json",
"(",
"self",
".",
"version",
")",
"}",
"lrs_response",
"=",
"self",
".",
"_send_request",
"(",
"request",
")",
"lrs_response",
".",
"content",
"=",
"state",
"return",
"self",
".",
"_send_request",
"(",
"request",
")"
] | 34.466667 | 16.7 |
def get(self, url, **kwargs):
"""Send a GET request to the specified URL.
Method directly wraps around `Session.get` and updates browser
attributes.
<http://docs.python-requests.org/en/master/api/#requests.get>
Args:
url: URL for the new `Request` object.
**kwargs: Optional arguments that `Request` takes.
Returns:
`Response` object of a successful request.
"""
response = self.session.get(url, **kwargs)
self._url = response.url
self._response = response
return response | [
"def",
"get",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_url",
"=",
"response",
".",
"url",
"self",
".",
"_response",
"=",
"response",
"return",
"response"
] | 32.333333 | 19.444444 |
def temperature(self) -> Optional[ErrorValue]:
"""Sample temperature"""
try:
return ErrorValue(self._data['Temperature'], self._data.setdefault('TemperatureError', 0.0))
except KeyError:
return None | [
"def",
"temperature",
"(",
"self",
")",
"->",
"Optional",
"[",
"ErrorValue",
"]",
":",
"try",
":",
"return",
"ErrorValue",
"(",
"self",
".",
"_data",
"[",
"'Temperature'",
"]",
",",
"self",
".",
"_data",
".",
"setdefault",
"(",
"'TemperatureError'",
",",
"0.0",
")",
")",
"except",
"KeyError",
":",
"return",
"None"
] | 40.166667 | 21.833333 |
def to_dict(self):
"""Return all the details of this MLPipeline in a dict.
The dict structure contains all the `__init__` arguments of the
MLPipeline, as well as the current hyperparameter values and the
specification of the tunable_hyperparameters::
{
"primitives": [
"a_primitive",
"another_primitive"
],
"init_params": {
"a_primitive": {
"an_argument": "a_value"
}
},
"hyperparameters": {
"a_primitive#1": {
"an_argument": "a_value",
"another_argument": "another_value",
},
"another_primitive#1": {
"yet_another_argument": "yet_another_value"
}
},
"tunable_hyperparameters": {
"another_primitive#1": {
"yet_another_argument": {
"type": "str",
"default": "a_default_value",
"values": [
"a_default_value",
"yet_another_value"
]
}
}
}
}
"""
return {
'primitives': self.primitives,
'init_params': self.init_params,
'input_names': self.input_names,
'output_names': self.output_names,
'hyperparameters': self.get_hyperparameters(),
'tunable_hyperparameters': self._tunable_hyperparameters
} | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"'primitives'",
":",
"self",
".",
"primitives",
",",
"'init_params'",
":",
"self",
".",
"init_params",
",",
"'input_names'",
":",
"self",
".",
"input_names",
",",
"'output_names'",
":",
"self",
".",
"output_names",
",",
"'hyperparameters'",
":",
"self",
".",
"get_hyperparameters",
"(",
")",
",",
"'tunable_hyperparameters'",
":",
"self",
".",
"_tunable_hyperparameters",
"}"
] | 36.3125 | 14.5625 |
def init_from_file(cls,
catalog,
name=None,
path=None,
clean=False,
merge=True,
pop_schema=True,
ignore_keys=[],
compare_to_existing=True,
try_gzip=False,
filter_on={}):
"""Construct a new `Entry` instance from an input file.
The input file can be given explicitly by `path`, or a path will
be constructed appropriately if possible.
Arguments
---------
catalog : `astrocats.catalog.catalog.Catalog` instance
The parent catalog object of which this entry belongs.
name : str or 'None'
The name of this entry, e.g. `SN1987A` for a `Supernova` entry.
If no `path` is given, a path is constructed by trying to find
a file in one of the 'output' repositories with this `name`.
note: either `name` or `path` must be provided.
path : str or 'None'
The absolutely path of the input file.
note: either `name` or `path` must be provided.
clean : bool
Whether special sanitization processing should be done on the input
data. This is mostly for input files from the 'internal'
repositories.
"""
if not catalog:
from astrocats.catalog.catalog import Catalog
log = logging.getLogger()
catalog = Catalog(None, log)
catalog.log.debug("init_from_file()")
if name is None and path is None:
err = ("Either entry `name` or `path` must be specified to load "
"entry.")
log.error(err)
raise ValueError(err)
# If the path is given, use that to load from
load_path = ''
if path is not None:
load_path = path
name = ''
# If the name is given, try to find a path for it
else:
repo_paths = catalog.PATHS.get_repo_output_folders()
for rep in repo_paths:
filename = cls.get_filename(name)
newpath = os.path.join(rep, filename + '.json')
if os.path.isfile(newpath):
load_path = newpath
break
if load_path is None or not os.path.isfile(load_path):
# FIX: is this warning worthy?
return None
# Create a new `Entry` instance
new_entry = cls(catalog, name)
# Check if .gz file
if try_gzip and not load_path.endswith('.gz'):
try_gzip = False
# Fill it with data from json file
new_entry._load_data_from_json(
load_path,
clean=clean,
merge=merge,
pop_schema=pop_schema,
ignore_keys=ignore_keys,
compare_to_existing=compare_to_existing,
gzip=try_gzip,
filter_on=filter_on)
return new_entry | [
"def",
"init_from_file",
"(",
"cls",
",",
"catalog",
",",
"name",
"=",
"None",
",",
"path",
"=",
"None",
",",
"clean",
"=",
"False",
",",
"merge",
"=",
"True",
",",
"pop_schema",
"=",
"True",
",",
"ignore_keys",
"=",
"[",
"]",
",",
"compare_to_existing",
"=",
"True",
",",
"try_gzip",
"=",
"False",
",",
"filter_on",
"=",
"{",
"}",
")",
":",
"if",
"not",
"catalog",
":",
"from",
"astrocats",
".",
"catalog",
".",
"catalog",
"import",
"Catalog",
"log",
"=",
"logging",
".",
"getLogger",
"(",
")",
"catalog",
"=",
"Catalog",
"(",
"None",
",",
"log",
")",
"catalog",
".",
"log",
".",
"debug",
"(",
"\"init_from_file()\"",
")",
"if",
"name",
"is",
"None",
"and",
"path",
"is",
"None",
":",
"err",
"=",
"(",
"\"Either entry `name` or `path` must be specified to load \"",
"\"entry.\"",
")",
"log",
".",
"error",
"(",
"err",
")",
"raise",
"ValueError",
"(",
"err",
")",
"# If the path is given, use that to load from",
"load_path",
"=",
"''",
"if",
"path",
"is",
"not",
"None",
":",
"load_path",
"=",
"path",
"name",
"=",
"''",
"# If the name is given, try to find a path for it",
"else",
":",
"repo_paths",
"=",
"catalog",
".",
"PATHS",
".",
"get_repo_output_folders",
"(",
")",
"for",
"rep",
"in",
"repo_paths",
":",
"filename",
"=",
"cls",
".",
"get_filename",
"(",
"name",
")",
"newpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"rep",
",",
"filename",
"+",
"'.json'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"newpath",
")",
":",
"load_path",
"=",
"newpath",
"break",
"if",
"load_path",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"load_path",
")",
":",
"# FIX: is this warning worthy?",
"return",
"None",
"# Create a new `Entry` instance",
"new_entry",
"=",
"cls",
"(",
"catalog",
",",
"name",
")",
"# Check if .gz file",
"if",
"try_gzip",
"and",
"not",
"load_path",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"try_gzip",
"=",
"False",
"# Fill it with data from json file",
"new_entry",
".",
"_load_data_from_json",
"(",
"load_path",
",",
"clean",
"=",
"clean",
",",
"merge",
"=",
"merge",
",",
"pop_schema",
"=",
"pop_schema",
",",
"ignore_keys",
"=",
"ignore_keys",
",",
"compare_to_existing",
"=",
"compare_to_existing",
",",
"gzip",
"=",
"try_gzip",
",",
"filter_on",
"=",
"filter_on",
")",
"return",
"new_entry"
] | 35.690476 | 16.5 |
def save_model(self, model, meta_data=None, index_fields=None):
"""
model (instance): Model instance.
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dolar','int')]
:return:
"""
# if model:
# self._model = model
if settings.DEBUG:
t1 = time.time()
clean_value = model.clean_value()
model._data = clean_value
if settings.DEBUG:
t2 = time.time()
if not model.exist:
obj = self.bucket.new(data=clean_value).store()
model.key = obj.key
new_obj = True
else:
new_obj = False
obj = self.bucket.get(model.key)
obj.data = clean_value
obj.store()
if settings.ENABLE_VERSIONS:
version_key = self._write_version(clean_value, model)
else:
version_key = ''
if settings.ENABLE_CACHING:
self.set_to_cache((clean_value, model.key))
meta_data = meta_data or model.save_meta_data
if settings.ENABLE_ACTIVITY_LOGGING and meta_data:
self._write_log(version_key, meta_data, index_fields)
if self.COLLECT_SAVES and self.COLLECT_SAVES_FOR_MODEL == model.__class__.__name__:
self.block_saved_keys.append(obj.key)
if settings.DEBUG:
if new_obj:
sys.PYOKO_STAT_COUNTER['save'] += 1
sys.PYOKO_LOGS['new'].append(obj.key)
else:
sys.PYOKO_LOGS[self._model_class.__name__].append(obj.key)
sys.PYOKO_STAT_COUNTER['update'] += 1
# sys._debug_db_queries.append({
# 'TIMESTAMP': t1,
# 'KEY': obj.key,
# 'BUCKET': self.index_name,
# 'SAVE_IS_NEW': new_obj,
# 'SERIALIZATION_TIME': round(t2 - t1, 5),
# 'TIME': round(time.time() - t2, 5)
# })
return model | [
"def",
"save_model",
"(",
"self",
",",
"model",
",",
"meta_data",
"=",
"None",
",",
"index_fields",
"=",
"None",
")",
":",
"# if model:",
"# self._model = model",
"if",
"settings",
".",
"DEBUG",
":",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"clean_value",
"=",
"model",
".",
"clean_value",
"(",
")",
"model",
".",
"_data",
"=",
"clean_value",
"if",
"settings",
".",
"DEBUG",
":",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"if",
"not",
"model",
".",
"exist",
":",
"obj",
"=",
"self",
".",
"bucket",
".",
"new",
"(",
"data",
"=",
"clean_value",
")",
".",
"store",
"(",
")",
"model",
".",
"key",
"=",
"obj",
".",
"key",
"new_obj",
"=",
"True",
"else",
":",
"new_obj",
"=",
"False",
"obj",
"=",
"self",
".",
"bucket",
".",
"get",
"(",
"model",
".",
"key",
")",
"obj",
".",
"data",
"=",
"clean_value",
"obj",
".",
"store",
"(",
")",
"if",
"settings",
".",
"ENABLE_VERSIONS",
":",
"version_key",
"=",
"self",
".",
"_write_version",
"(",
"clean_value",
",",
"model",
")",
"else",
":",
"version_key",
"=",
"''",
"if",
"settings",
".",
"ENABLE_CACHING",
":",
"self",
".",
"set_to_cache",
"(",
"(",
"clean_value",
",",
"model",
".",
"key",
")",
")",
"meta_data",
"=",
"meta_data",
"or",
"model",
".",
"save_meta_data",
"if",
"settings",
".",
"ENABLE_ACTIVITY_LOGGING",
"and",
"meta_data",
":",
"self",
".",
"_write_log",
"(",
"version_key",
",",
"meta_data",
",",
"index_fields",
")",
"if",
"self",
".",
"COLLECT_SAVES",
"and",
"self",
".",
"COLLECT_SAVES_FOR_MODEL",
"==",
"model",
".",
"__class__",
".",
"__name__",
":",
"self",
".",
"block_saved_keys",
".",
"append",
"(",
"obj",
".",
"key",
")",
"if",
"settings",
".",
"DEBUG",
":",
"if",
"new_obj",
":",
"sys",
".",
"PYOKO_STAT_COUNTER",
"[",
"'save'",
"]",
"+=",
"1",
"sys",
".",
"PYOKO_LOGS",
"[",
"'new'",
"]",
".",
"append",
"(",
"obj",
".",
"key",
")",
"else",
":",
"sys",
".",
"PYOKO_LOGS",
"[",
"self",
".",
"_model_class",
".",
"__name__",
"]",
".",
"append",
"(",
"obj",
".",
"key",
")",
"sys",
".",
"PYOKO_STAT_COUNTER",
"[",
"'update'",
"]",
"+=",
"1",
"# sys._debug_db_queries.append({",
"# 'TIMESTAMP': t1,",
"# 'KEY': obj.key,",
"# 'BUCKET': self.index_name,",
"# 'SAVE_IS_NEW': new_obj,",
"# 'SERIALIZATION_TIME': round(t2 - t1, 5),",
"# 'TIME': round(time.time() - t2, 5)",
"# })",
"return",
"model"
] | 35.881356 | 17.508475 |
def push_script(self, scriptable, script, callback=None):
"""Run the script and add it to the list of threads."""
if script in self.threads:
self.threads[script].finish()
thread = Thread(self.run_script(scriptable, script),
scriptable, callback)
self.new_threads[script] = thread
return thread | [
"def",
"push_script",
"(",
"self",
",",
"scriptable",
",",
"script",
",",
"callback",
"=",
"None",
")",
":",
"if",
"script",
"in",
"self",
".",
"threads",
":",
"self",
".",
"threads",
"[",
"script",
"]",
".",
"finish",
"(",
")",
"thread",
"=",
"Thread",
"(",
"self",
".",
"run_script",
"(",
"scriptable",
",",
"script",
")",
",",
"scriptable",
",",
"callback",
")",
"self",
".",
"new_threads",
"[",
"script",
"]",
"=",
"thread",
"return",
"thread"
] | 47 | 10.375 |
def _aws_get_instance_by_tag(region, name, tag, raw):
"""Get all instances matching a tag."""
client = boto3.session.Session().client('ec2', region)
matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])
instances = []
[[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned
for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]
return instances | [
"def",
"_aws_get_instance_by_tag",
"(",
"region",
",",
"name",
",",
"tag",
",",
"raw",
")",
":",
"client",
"=",
"boto3",
".",
"session",
".",
"Session",
"(",
")",
".",
"client",
"(",
"'ec2'",
",",
"region",
")",
"matching_reservations",
"=",
"client",
".",
"describe_instances",
"(",
"Filters",
"=",
"[",
"{",
"'Name'",
":",
"tag",
",",
"'Values'",
":",
"[",
"name",
"]",
"}",
"]",
")",
".",
"get",
"(",
"'Reservations'",
",",
"[",
"]",
")",
"instances",
"=",
"[",
"]",
"[",
"[",
"instances",
".",
"append",
"(",
"_aws_instance_from_dict",
"(",
"region",
",",
"instance",
",",
"raw",
")",
")",
"# pylint: disable=expression-not-assigned",
"for",
"instance",
"in",
"reservation",
".",
"get",
"(",
"'Instances'",
")",
"]",
"for",
"reservation",
"in",
"matching_reservations",
"if",
"reservation",
"]",
"return",
"instances"
] | 66.625 | 36.75 |
def create(self, path, visibility):
"""
Create a new FunctionVersionInstance
:param unicode path: The path
:param FunctionVersionInstance.Visibility visibility: The visibility
:returns: Newly created FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance
"""
data = values.of({'Path': path, 'Visibility': visibility, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FunctionVersionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
function_sid=self._solution['function_sid'],
) | [
"def",
"create",
"(",
"self",
",",
"path",
",",
"visibility",
")",
":",
"data",
"=",
"values",
".",
"of",
"(",
"{",
"'Path'",
":",
"path",
",",
"'Visibility'",
":",
"visibility",
",",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"create",
"(",
"'POST'",
",",
"self",
".",
"_uri",
",",
"data",
"=",
"data",
",",
")",
"return",
"FunctionVersionInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"function_sid",
"=",
"self",
".",
"_solution",
"[",
"'function_sid'",
"]",
",",
")"
] | 31.291667 | 20.625 |
def build_finished(app, exception):
"""
When the build is finished, we copy the javascript files (if specified)
to the build directory (the static folder)
"""
# Skip for non-html or if javascript is not inlined
if not app.env.config.wavedrom_html_jsinline:
return
if app.config.offline_skin_js_path is not None:
copy_static_entry(path.join(app.builder.srcdir, app.config.offline_skin_js_path), path.join(app.builder.outdir, '_static'), app.builder)
if app.config.offline_wavedrom_js_path is not None:
copy_static_entry(path.join(app.builder.srcdir, app.config.offline_wavedrom_js_path), path.join(app.builder.outdir, '_static'), app.builder) | [
"def",
"build_finished",
"(",
"app",
",",
"exception",
")",
":",
"# Skip for non-html or if javascript is not inlined",
"if",
"not",
"app",
".",
"env",
".",
"config",
".",
"wavedrom_html_jsinline",
":",
"return",
"if",
"app",
".",
"config",
".",
"offline_skin_js_path",
"is",
"not",
"None",
":",
"copy_static_entry",
"(",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"srcdir",
",",
"app",
".",
"config",
".",
"offline_skin_js_path",
")",
",",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"outdir",
",",
"'_static'",
")",
",",
"app",
".",
"builder",
")",
"if",
"app",
".",
"config",
".",
"offline_wavedrom_js_path",
"is",
"not",
"None",
":",
"copy_static_entry",
"(",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"srcdir",
",",
"app",
".",
"config",
".",
"offline_wavedrom_js_path",
")",
",",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"outdir",
",",
"'_static'",
")",
",",
"app",
".",
"builder",
")"
] | 52.769231 | 28.769231 |
def symmetric_difference_update(self, other):
# type: (Iterable[Any]) -> _BaseSet
"""
Update the TerminalSet.
Keep elements from self and other, but discard elements that are in both.
:param other: Iterable object with elements to compare with.
:return: Current instance with updated state.
"""
intersect = self.intersection(other)
self.remove(*intersect)
for elem in set(other).difference(intersect):
self.add(elem)
return self | [
"def",
"symmetric_difference_update",
"(",
"self",
",",
"other",
")",
":",
"# type: (Iterable[Any]) -> _BaseSet",
"intersect",
"=",
"self",
".",
"intersection",
"(",
"other",
")",
"self",
".",
"remove",
"(",
"*",
"intersect",
")",
"for",
"elem",
"in",
"set",
"(",
"other",
")",
".",
"difference",
"(",
"intersect",
")",
":",
"self",
".",
"add",
"(",
"elem",
")",
"return",
"self"
] | 39.692308 | 12.307692 |
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
) | [
"def",
"is_protected_type",
"(",
"obj",
")",
":",
"return",
"isinstance",
"(",
"obj",
",",
"(",
"types",
".",
"NoneType",
",",
"int",
",",
"long",
",",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"date",
",",
"datetime",
".",
"time",
",",
"float",
",",
"Decimal",
")",
")"
] | 29.333333 | 16.666667 |
def p_finally(self, p):
"""finally : FINALLY block"""
p[0] = self.asttypes.Finally(elements=p[2])
p[0].setpos(p) | [
"def",
"p_finally",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"Finally",
"(",
"elements",
"=",
"p",
"[",
"2",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
] | 33.25 | 11.5 |
def clear(self):
"""
Command: 0x03
clear all leds
Data:
[Command]
"""
header = bytearray()
header.append(LightProtocolCommand.Clear)
return self.send(header) | [
"def",
"clear",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
")",
"header",
".",
"append",
"(",
"LightProtocolCommand",
".",
"Clear",
")",
"return",
"self",
".",
"send",
"(",
"header",
")"
] | 12.769231 | 22.307692 |
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
logger.info('Starting daemon')
try:
with open(self.pidfile, 'r') as fd:
pid = int(fd.read().strip())
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
pid = self.daemonize()
if pid:
return pid
self.run() | [
"def",
"start",
"(",
"self",
")",
":",
"# Check for a pidfile to see if the daemon already runs",
"logger",
".",
"info",
"(",
"'Starting daemon'",
")",
"try",
":",
"with",
"open",
"(",
"self",
".",
"pidfile",
",",
"'r'",
")",
"as",
"fd",
":",
"pid",
"=",
"int",
"(",
"fd",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
")",
"except",
"IOError",
":",
"pid",
"=",
"None",
"if",
"pid",
":",
"message",
"=",
"\"pidfile %s already exist. Daemon already running?\\n\"",
"sys",
".",
"stderr",
".",
"write",
"(",
"message",
"%",
"self",
".",
"pidfile",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Start the daemon",
"pid",
"=",
"self",
".",
"daemonize",
"(",
")",
"if",
"pid",
":",
"return",
"pid",
"self",
".",
"run",
"(",
")"
] | 26.681818 | 18.045455 |
def load_module_from_path(i):
"""
Input: {
path - module path
module_code_name - module name
(cfg) - configuration of the module if exists ...
(skip_init) - if 'yes', skip init
(data_uoa) - module UOA (useful when printing error)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
code - python code object
path - full path to the module
cuid - internal UID of the module
}
"""
p=i['path']
n=i['module_code_name']
xcfg=i.get('cfg',None)
# Find module
try:
x=imp.find_module(n, [p])
except ImportError as e: # pragma: no cover
return {'return':1, 'error':'can\'t find module code (path='+p+', name='+n+', err='+format(e)+')'}
ff=x[0]
full_path=x[1]
# Check if code has been already loaded
if full_path in work['cached_module_by_path'] and work['cached_module_by_path_last_modification'][full_path]==os.path.getmtime(full_path):
ff.close()
# Code already loaded
return work['cached_module_by_path'][full_path]
# Check if has dependency on specific CK kernel version
if xcfg!=None:
kd=xcfg.get('min_kernel_dep','')
if kd!='':
rx=check_version({'version':kd})
if rx['return']>0: return rx
ok=rx['ok']
version_str=rx['current_version']
if ok!='yes':
return {'return':1, 'error':'module "'+i.get('data_uoa','')+'" requires minimal CK kernel version '+kd+' while your version is '+version_str}
# Generate uid for the run-time extension of the loaded module
# otherwise modules with the same extension (key.py for example)
# will be reloaded ...
r=gen_uid({})
if r['return']>0: return r
ruid='rt-'+r['data_uid']
try:
c=imp.load_module(ruid, ff, full_path, x[2])
except ImportError as e: # pragma: no cover
return {'return':1, 'error':'can\'t load module code (path='+p+', name='+n+', err='+format(e)+')'}
x[0].close()
# Initialize module with this CK instance
c.ck=sys.modules[__name__]
if xcfg!=None: c.cfg=xcfg
# Initialize module
if i.get('skip_init','')!='yes':
# Check if init function exists
if getattr(c, 'init')!=None:
r=c.init(i)
if r['return']>0: return r
r={'return':0, 'code':c, 'path':full_path, 'cuid':ruid}
# Cache code together with its time of change
work['cached_module_by_path'][full_path]=r
work['cached_module_by_path_last_modification'][full_path]=os.path.getmtime(full_path)
return r | [
"def",
"load_module_from_path",
"(",
"i",
")",
":",
"p",
"=",
"i",
"[",
"'path'",
"]",
"n",
"=",
"i",
"[",
"'module_code_name'",
"]",
"xcfg",
"=",
"i",
".",
"get",
"(",
"'cfg'",
",",
"None",
")",
"# Find module",
"try",
":",
"x",
"=",
"imp",
".",
"find_module",
"(",
"n",
",",
"[",
"p",
"]",
")",
"except",
"ImportError",
"as",
"e",
":",
"# pragma: no cover",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'can\\'t find module code (path='",
"+",
"p",
"+",
"', name='",
"+",
"n",
"+",
"', err='",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"ff",
"=",
"x",
"[",
"0",
"]",
"full_path",
"=",
"x",
"[",
"1",
"]",
"# Check if code has been already loaded",
"if",
"full_path",
"in",
"work",
"[",
"'cached_module_by_path'",
"]",
"and",
"work",
"[",
"'cached_module_by_path_last_modification'",
"]",
"[",
"full_path",
"]",
"==",
"os",
".",
"path",
".",
"getmtime",
"(",
"full_path",
")",
":",
"ff",
".",
"close",
"(",
")",
"# Code already loaded ",
"return",
"work",
"[",
"'cached_module_by_path'",
"]",
"[",
"full_path",
"]",
"# Check if has dependency on specific CK kernel version",
"if",
"xcfg",
"!=",
"None",
":",
"kd",
"=",
"xcfg",
".",
"get",
"(",
"'min_kernel_dep'",
",",
"''",
")",
"if",
"kd",
"!=",
"''",
":",
"rx",
"=",
"check_version",
"(",
"{",
"'version'",
":",
"kd",
"}",
")",
"if",
"rx",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"rx",
"ok",
"=",
"rx",
"[",
"'ok'",
"]",
"version_str",
"=",
"rx",
"[",
"'current_version'",
"]",
"if",
"ok",
"!=",
"'yes'",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'module \"'",
"+",
"i",
".",
"get",
"(",
"'data_uoa'",
",",
"''",
")",
"+",
"'\" requires minimal CK kernel version '",
"+",
"kd",
"+",
"' while your version is '",
"+",
"version_str",
"}",
"# Generate uid for the run-time extension of the loaded module ",
"# otherwise modules with the same extension (key.py for example) ",
"# will be reloaded ...",
"r",
"=",
"gen_uid",
"(",
"{",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"ruid",
"=",
"'rt-'",
"+",
"r",
"[",
"'data_uid'",
"]",
"try",
":",
"c",
"=",
"imp",
".",
"load_module",
"(",
"ruid",
",",
"ff",
",",
"full_path",
",",
"x",
"[",
"2",
"]",
")",
"except",
"ImportError",
"as",
"e",
":",
"# pragma: no cover",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'can\\'t load module code (path='",
"+",
"p",
"+",
"', name='",
"+",
"n",
"+",
"', err='",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"x",
"[",
"0",
"]",
".",
"close",
"(",
")",
"# Initialize module with this CK instance ",
"c",
".",
"ck",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"if",
"xcfg",
"!=",
"None",
":",
"c",
".",
"cfg",
"=",
"xcfg",
"# Initialize module",
"if",
"i",
".",
"get",
"(",
"'skip_init'",
",",
"''",
")",
"!=",
"'yes'",
":",
"# Check if init function exists",
"if",
"getattr",
"(",
"c",
",",
"'init'",
")",
"!=",
"None",
":",
"r",
"=",
"c",
".",
"init",
"(",
"i",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"r",
"=",
"{",
"'return'",
":",
"0",
",",
"'code'",
":",
"c",
",",
"'path'",
":",
"full_path",
",",
"'cuid'",
":",
"ruid",
"}",
"# Cache code together with its time of change",
"work",
"[",
"'cached_module_by_path'",
"]",
"[",
"full_path",
"]",
"=",
"r",
"work",
"[",
"'cached_module_by_path_last_modification'",
"]",
"[",
"full_path",
"]",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"full_path",
")",
"return",
"r"
] | 31.758621 | 24.195402 |
def set_system_date(newdate):
'''
Set the Windows system date. Use <mm-dd-yy> format for the date.
Args:
newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
'''
fmts = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
'%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
# Get date/time object from newdate
dt_obj = _try_parse_datetime(newdate, fmts)
if dt_obj is None:
return False
# Set time using set_system_date_time()
return set_system_date_time(years=dt_obj.year,
months=dt_obj.month,
days=dt_obj.day) | [
"def",
"set_system_date",
"(",
"newdate",
")",
":",
"fmts",
"=",
"[",
"'%Y-%m-%d'",
",",
"'%m-%d-%Y'",
",",
"'%m-%d-%y'",
",",
"'%m/%d/%Y'",
",",
"'%m/%d/%y'",
",",
"'%Y/%m/%d'",
"]",
"# Get date/time object from newdate",
"dt_obj",
"=",
"_try_parse_datetime",
"(",
"newdate",
",",
"fmts",
")",
"if",
"dt_obj",
"is",
"None",
":",
"return",
"False",
"# Set time using set_system_date_time()",
"return",
"set_system_date_time",
"(",
"years",
"=",
"dt_obj",
".",
"year",
",",
"months",
"=",
"dt_obj",
".",
"month",
",",
"days",
"=",
"dt_obj",
".",
"day",
")"
] | 26.285714 | 21.314286 |
def _suitableVerbExpansion( foundSubcatChain ):
'''
V6tab etteantud jadast osa, mis sobib:
*) kui liikmeid on 3, keskmine on konjuktsioon ning esimene ja viimane
klapivad, tagastab selle kolmiku;
Nt. ei_0 saa_0 lihtsalt välja astuda_? ja_? uttu tõmmata_?
=> astuda ja tõmmata
*) kui liikmeid on rohkem kui 3, teine on konjuktsioon ning esimene ja
kolmas klapivad, ning l6pus pole verbe, tagastab esikolmiku;
*) kui liikmeid on rohkem kui yks, v6tab liikmeks esimese mitte-
konjunktsiooni (kui selline leidub);
Kui need tingimused pole t2idetud, tagastab tyhis6ne;
'''
markings = []
tokens = []
nonConjTokens = []
for (marking, token) in foundSubcatChain:
markings.append( marking )
tokens.append( token )
if marking != '&':
nonConjTokens.append( token )
if (len(markings) == 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&'):
return tokens
elif (len(markings) > 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&' and \
all([m == '&' for m in markings[3:]]) ):
return tokens[:3]
elif (len(nonConjTokens) > 0):
return nonConjTokens[:1]
return [] | [
"def",
"_suitableVerbExpansion",
"(",
"foundSubcatChain",
")",
":",
"markings",
"=",
"[",
"]",
"tokens",
"=",
"[",
"]",
"nonConjTokens",
"=",
"[",
"]",
"for",
"(",
"marking",
",",
"token",
")",
"in",
"foundSubcatChain",
":",
"markings",
".",
"append",
"(",
"marking",
")",
"tokens",
".",
"append",
"(",
"token",
")",
"if",
"marking",
"!=",
"'&'",
":",
"nonConjTokens",
".",
"append",
"(",
"token",
")",
"if",
"(",
"len",
"(",
"markings",
")",
"==",
"3",
"and",
"markings",
"[",
"0",
"]",
"==",
"markings",
"[",
"2",
"]",
"and",
"markings",
"[",
"0",
"]",
"!=",
"'&'",
"and",
"markings",
"[",
"1",
"]",
"==",
"'&'",
")",
":",
"return",
"tokens",
"elif",
"(",
"len",
"(",
"markings",
")",
">",
"3",
"and",
"markings",
"[",
"0",
"]",
"==",
"markings",
"[",
"2",
"]",
"and",
"markings",
"[",
"0",
"]",
"!=",
"'&'",
"and",
"markings",
"[",
"1",
"]",
"==",
"'&'",
"and",
"all",
"(",
"[",
"m",
"==",
"'&'",
"for",
"m",
"in",
"markings",
"[",
"3",
":",
"]",
"]",
")",
")",
":",
"return",
"tokens",
"[",
":",
"3",
"]",
"elif",
"(",
"len",
"(",
"nonConjTokens",
")",
">",
"0",
")",
":",
"return",
"nonConjTokens",
"[",
":",
"1",
"]",
"return",
"[",
"]"
] | 46.034483 | 20.724138 |
def zoom_for_pixelsize(pixel_size, max_z=24, tilesize=256):
"""
Get mercator zoom level corresponding to a pixel resolution.
Freely adapted from
https://github.com/OSGeo/gdal/blob/b0dfc591929ebdbccd8a0557510c5efdb893b852/gdal/swig/python/scripts/gdal2tiles.py#L294
Parameters
----------
pixel_size: float
Pixel size
max_z: int, optional (default: 24)
Max mercator zoom level allowed
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Mercator zoom level corresponding to the pixel resolution
"""
for z in range(max_z):
if pixel_size > _meters_per_pixel(z, 0, tilesize=tilesize):
return max(0, z - 1) # We don't want to scale up
return max_z - 1 | [
"def",
"zoom_for_pixelsize",
"(",
"pixel_size",
",",
"max_z",
"=",
"24",
",",
"tilesize",
"=",
"256",
")",
":",
"for",
"z",
"in",
"range",
"(",
"max_z",
")",
":",
"if",
"pixel_size",
">",
"_meters_per_pixel",
"(",
"z",
",",
"0",
",",
"tilesize",
"=",
"tilesize",
")",
":",
"return",
"max",
"(",
"0",
",",
"z",
"-",
"1",
")",
"# We don't want to scale up",
"return",
"max_z",
"-",
"1"
] | 28.961538 | 23.653846 |
def get_currency_symbol(self):
"""Get the currency Symbol
"""
locale = locales.getLocale('en')
setup = api.get_setup()
currency = setup.getCurrency()
return locale.numbers.currencies[currency].symbol | [
"def",
"get_currency_symbol",
"(",
"self",
")",
":",
"locale",
"=",
"locales",
".",
"getLocale",
"(",
"'en'",
")",
"setup",
"=",
"api",
".",
"get_setup",
"(",
")",
"currency",
"=",
"setup",
".",
"getCurrency",
"(",
")",
"return",
"locale",
".",
"numbers",
".",
"currencies",
"[",
"currency",
"]",
".",
"symbol"
] | 34.428571 | 5.428571 |
def draw_grid(self):
"""Draws the grid and tiles."""
self.screen.fill((0xbb, 0xad, 0xa0), self.origin + (self.game_width, self.game_height))
for y, row in enumerate(self.grid):
for x, cell in enumerate(row):
self.screen.blit(self.tiles[cell], self.get_tile_location(x, y)) | [
"def",
"draw_grid",
"(",
"self",
")",
":",
"self",
".",
"screen",
".",
"fill",
"(",
"(",
"0xbb",
",",
"0xad",
",",
"0xa0",
")",
",",
"self",
".",
"origin",
"+",
"(",
"self",
".",
"game_width",
",",
"self",
".",
"game_height",
")",
")",
"for",
"y",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"grid",
")",
":",
"for",
"x",
",",
"cell",
"in",
"enumerate",
"(",
"row",
")",
":",
"self",
".",
"screen",
".",
"blit",
"(",
"self",
".",
"tiles",
"[",
"cell",
"]",
",",
"self",
".",
"get_tile_location",
"(",
"x",
",",
"y",
")",
")"
] | 53.166667 | 20 |
def setup_installation():
"""Install necessary GUI resources
By default, RAFCON should be installed via `setup.py` (`pip install rafcon`). Thereby, all resources are being
installed. However, if this is not the case, one can set the `RAFCON_CHECK_INSTALLATION` env variable to `True`.
Then, the installation will be performed before starting the GUI.
"""
if os.environ.get("RAFCON_CHECK_INSTALLATION", False) == "True":
rafcon_root = os.path.dirname(rafcon.__file__)
installation.assets_folder = os.path.join(rafcon_root, 'gui', 'assets')
installation.share_folder = os.path.join(os.path.dirname(os.path.dirname(rafcon_root)), 'share')
installation.install_fonts(logger, restart=True)
installation.install_gtk_source_view_styles(logger)
installation.install_libraries(logger, overwrite=False) | [
"def",
"setup_installation",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"RAFCON_CHECK_INSTALLATION\"",
",",
"False",
")",
"==",
"\"True\"",
":",
"rafcon_root",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"rafcon",
".",
"__file__",
")",
"installation",
".",
"assets_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"rafcon_root",
",",
"'gui'",
",",
"'assets'",
")",
"installation",
".",
"share_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"rafcon_root",
")",
")",
",",
"'share'",
")",
"installation",
".",
"install_fonts",
"(",
"logger",
",",
"restart",
"=",
"True",
")",
"installation",
".",
"install_gtk_source_view_styles",
"(",
"logger",
")",
"installation",
".",
"install_libraries",
"(",
"logger",
",",
"overwrite",
"=",
"False",
")"
] | 61.357143 | 31.142857 |
def _path_to_module(path):
"""Translates paths to *.py? files into module paths.
>>> _path_to_module("rapport/bar.py")
'rapport.bar'
>>> _path_to_module("/usr/lib/rapport/bar.py")
'rapport.bar'
"""
# Split of preceeding path elements:
path = "rapport" + path.split("rapport")[1]
# Split of ending and replace os.sep with dots:
path = path.replace(os.sep, ".").rsplit(".", 1)[0]
return path | [
"def",
"_path_to_module",
"(",
"path",
")",
":",
"# Split of preceeding path elements:",
"path",
"=",
"\"rapport\"",
"+",
"path",
".",
"split",
"(",
"\"rapport\"",
")",
"[",
"1",
"]",
"# Split of ending and replace os.sep with dots:",
"path",
"=",
"path",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"\".\"",
")",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"return",
"path"
] | 33.692308 | 12.923077 |
def from_offset(tu, file, offset):
"""Retrieve a SourceLocation from a given character offset.
tu -- TranslationUnit file belongs to
file -- File instance to obtain offset from
offset -- Integer character offset within file
"""
return conf.lib.clang_getLocationForOffset(tu, file, offset) | [
"def",
"from_offset",
"(",
"tu",
",",
"file",
",",
"offset",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getLocationForOffset",
"(",
"tu",
",",
"file",
",",
"offset",
")"
] | 41.25 | 13 |
def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author:
raise ValueError("You must specify an author.")
if not self.subject:
raise ValueError("You must specify a subject.")
if len(self.recipients) == 0:
raise ValueError("You must specify at least one recipient.")
if not self.plain:
raise ValueError("You must provide plain text content.")
# DISCUSS: Take the first author, or raise this error?
# if len(author) > 1 and len(sender) == 0:
# raise ValueError('If there are multiple authors of message, you must specify a sender!')
# if len(sender) > 1:
# raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed:
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich), 'html', self.encoding)
message = self._mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
self._mime = message
self._processed = True
self._dirty = False
return message | [
"def",
"mime",
"(",
"self",
")",
":",
"author",
"=",
"self",
".",
"author",
"sender",
"=",
"self",
".",
"sender",
"if",
"not",
"author",
":",
"raise",
"ValueError",
"(",
"\"You must specify an author.\"",
")",
"if",
"not",
"self",
".",
"subject",
":",
"raise",
"ValueError",
"(",
"\"You must specify a subject.\"",
")",
"if",
"len",
"(",
"self",
".",
"recipients",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"You must specify at least one recipient.\"",
")",
"if",
"not",
"self",
".",
"plain",
":",
"raise",
"ValueError",
"(",
"\"You must provide plain text content.\"",
")",
"# DISCUSS: Take the first author, or raise this error?",
"# if len(author) > 1 and len(sender) == 0:",
"#\t raise ValueError('If there are multiple authors of message, you must specify a sender!')",
"# if len(sender) > 1:",
"#\t raise ValueError('You must not specify more than one sender!')",
"if",
"not",
"self",
".",
"_dirty",
"and",
"self",
".",
"_processed",
":",
"return",
"self",
".",
"_mime",
"self",
".",
"_processed",
"=",
"False",
"plain",
"=",
"MIMEText",
"(",
"self",
".",
"_callable",
"(",
"self",
".",
"plain",
")",
",",
"'plain'",
",",
"self",
".",
"encoding",
")",
"rich",
"=",
"None",
"if",
"self",
".",
"rich",
":",
"rich",
"=",
"MIMEText",
"(",
"self",
".",
"_callable",
"(",
"self",
".",
"rich",
")",
",",
"'html'",
",",
"self",
".",
"encoding",
")",
"message",
"=",
"self",
".",
"_mime_document",
"(",
"plain",
",",
"rich",
")",
"headers",
"=",
"self",
".",
"_build_header_list",
"(",
"author",
",",
"sender",
")",
"self",
".",
"_add_headers_to_message",
"(",
"message",
",",
"headers",
")",
"self",
".",
"_mime",
"=",
"message",
"self",
".",
"_processed",
"=",
"True",
"self",
".",
"_dirty",
"=",
"False",
"return",
"message"
] | 26.636364 | 24.431818 |
def get_text(self):
"""Get the loaded, decompressed, and decoded text of this content."""
self._load_raw_content()
if self._text is None:
assert self._raw_content is not None
ret_cont = self._raw_content
if self.compressed:
ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS+16)
if self.encoded:
ret_cont = ret_cont.decode('utf-8')
self._text = ret_cont
assert self._text is not None
return self._text | [
"def",
"get_text",
"(",
"self",
")",
":",
"self",
".",
"_load_raw_content",
"(",
")",
"if",
"self",
".",
"_text",
"is",
"None",
":",
"assert",
"self",
".",
"_raw_content",
"is",
"not",
"None",
"ret_cont",
"=",
"self",
".",
"_raw_content",
"if",
"self",
".",
"compressed",
":",
"ret_cont",
"=",
"zlib",
".",
"decompress",
"(",
"ret_cont",
",",
"zlib",
".",
"MAX_WBITS",
"+",
"16",
")",
"if",
"self",
".",
"encoded",
":",
"ret_cont",
"=",
"ret_cont",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"_text",
"=",
"ret_cont",
"assert",
"self",
".",
"_text",
"is",
"not",
"None",
"return",
"self",
".",
"_text"
] | 40.153846 | 10.384615 |
def handle_version(self, message_header, message):
"""
This method will handle the Version message and
will send a VerAck message when it receives the
Version message.
:param message_header: The Version message header
:param message: The Version message
"""
log.debug("handle version")
verack = VerAck()
log.debug("send VerAck")
self.send_message(verack)
self.verack = True
# begin!
self.send_getheaders( self.first_block_hash ) | [
"def",
"handle_version",
"(",
"self",
",",
"message_header",
",",
"message",
")",
":",
"log",
".",
"debug",
"(",
"\"handle version\"",
")",
"verack",
"=",
"VerAck",
"(",
")",
"log",
".",
"debug",
"(",
"\"send VerAck\"",
")",
"self",
".",
"send_message",
"(",
"verack",
")",
"self",
".",
"verack",
"=",
"True",
"# begin!",
"self",
".",
"send_getheaders",
"(",
"self",
".",
"first_block_hash",
")"
] | 30.941176 | 14.235294 |
def get_mesh_name(mesh_id, offline=False):
"""Get the MESH label for the given MESH ID.
Uses the mappings table in `indra/resources`; if the MESH ID is not listed
there, falls back on the NLM REST API.
Parameters
----------
mesh_id : str
MESH Identifier, e.g. 'D003094'.
offline : bool
Whether to allow queries to the NLM REST API if the given MESH ID is not
contained in INDRA's internal MESH mappings file. Default is False
(allows REST API queries).
Returns
-------
str
Label for the MESH ID, or None if the query failed or no label was
found.
"""
indra_mesh_mapping = mesh_id_to_name.get(mesh_id)
if offline or indra_mesh_mapping is not None:
return indra_mesh_mapping
# Look up the MESH mapping from NLM if we don't have it locally
return get_mesh_name_from_web(mesh_id) | [
"def",
"get_mesh_name",
"(",
"mesh_id",
",",
"offline",
"=",
"False",
")",
":",
"indra_mesh_mapping",
"=",
"mesh_id_to_name",
".",
"get",
"(",
"mesh_id",
")",
"if",
"offline",
"or",
"indra_mesh_mapping",
"is",
"not",
"None",
":",
"return",
"indra_mesh_mapping",
"# Look up the MESH mapping from NLM if we don't have it locally",
"return",
"get_mesh_name_from_web",
"(",
"mesh_id",
")"
] | 33.423077 | 21.076923 |
def update_file(dk_api, kitchen, recipe_name, message, files_to_update_param):
"""
reutrns a string.
:param dk_api: -- api object
:param kitchen: string
:param recipe_name: string -- kitchen name, string
:param message: string message -- commit message, string
:param files_to_update_param: string -- file system directory where the recipe file lives
:rtype: string
"""
rc = DKReturnCode()
if kitchen is None or recipe_name is None or message is None or files_to_update_param is None:
s = 'ERROR: DKCloudCommandRunner bad input parameters'
rc.set(rc.DK_FAIL, s)
return rc
# Take a simple string or an array
if isinstance(files_to_update_param, basestring):
files_to_update = [files_to_update_param]
else:
files_to_update = files_to_update_param
msg = ''
for file_to_update in files_to_update:
try:
with open(file_to_update, 'r') as f:
file_contents = f.read()
except IOError as e:
if len(msg) != 0:
msg += '\n'
msg += '%s' % (str(e))
rc.set(rc.DK_FAIL, msg)
return rc
except ValueError as e:
if len(msg) != 0:
msg += '\n'
msg += 'ERROR: %s' % e.message
rc.set(rc.DK_FAIL, msg)
return rc
rc = dk_api.update_file(kitchen, recipe_name, message, file_to_update, file_contents)
if not rc.ok():
if len(msg) != 0:
msg += '\n'
msg += 'DKCloudCommand.update_file for %s failed\n\tmessage: %s' % (file_to_update, rc.get_message())
rc.set_message(msg)
return rc
else:
if len(msg) != 0:
msg += '\n'
msg += 'DKCloudCommand.update_file for %s succeeded' % file_to_update
rc.set_message(msg)
return rc | [
"def",
"update_file",
"(",
"dk_api",
",",
"kitchen",
",",
"recipe_name",
",",
"message",
",",
"files_to_update_param",
")",
":",
"rc",
"=",
"DKReturnCode",
"(",
")",
"if",
"kitchen",
"is",
"None",
"or",
"recipe_name",
"is",
"None",
"or",
"message",
"is",
"None",
"or",
"files_to_update_param",
"is",
"None",
":",
"s",
"=",
"'ERROR: DKCloudCommandRunner bad input parameters'",
"rc",
".",
"set",
"(",
"rc",
".",
"DK_FAIL",
",",
"s",
")",
"return",
"rc",
"# Take a simple string or an array",
"if",
"isinstance",
"(",
"files_to_update_param",
",",
"basestring",
")",
":",
"files_to_update",
"=",
"[",
"files_to_update_param",
"]",
"else",
":",
"files_to_update",
"=",
"files_to_update_param",
"msg",
"=",
"''",
"for",
"file_to_update",
"in",
"files_to_update",
":",
"try",
":",
"with",
"open",
"(",
"file_to_update",
",",
"'r'",
")",
"as",
"f",
":",
"file_contents",
"=",
"f",
".",
"read",
"(",
")",
"except",
"IOError",
"as",
"e",
":",
"if",
"len",
"(",
"msg",
")",
"!=",
"0",
":",
"msg",
"+=",
"'\\n'",
"msg",
"+=",
"'%s'",
"%",
"(",
"str",
"(",
"e",
")",
")",
"rc",
".",
"set",
"(",
"rc",
".",
"DK_FAIL",
",",
"msg",
")",
"return",
"rc",
"except",
"ValueError",
"as",
"e",
":",
"if",
"len",
"(",
"msg",
")",
"!=",
"0",
":",
"msg",
"+=",
"'\\n'",
"msg",
"+=",
"'ERROR: %s'",
"%",
"e",
".",
"message",
"rc",
".",
"set",
"(",
"rc",
".",
"DK_FAIL",
",",
"msg",
")",
"return",
"rc",
"rc",
"=",
"dk_api",
".",
"update_file",
"(",
"kitchen",
",",
"recipe_name",
",",
"message",
",",
"file_to_update",
",",
"file_contents",
")",
"if",
"not",
"rc",
".",
"ok",
"(",
")",
":",
"if",
"len",
"(",
"msg",
")",
"!=",
"0",
":",
"msg",
"+=",
"'\\n'",
"msg",
"+=",
"'DKCloudCommand.update_file for %s failed\\n\\tmessage: %s'",
"%",
"(",
"file_to_update",
",",
"rc",
".",
"get_message",
"(",
")",
")",
"rc",
".",
"set_message",
"(",
"msg",
")",
"return",
"rc",
"else",
":",
"if",
"len",
"(",
"msg",
")",
"!=",
"0",
":",
"msg",
"+=",
"'\\n'",
"msg",
"+=",
"'DKCloudCommand.update_file for %s succeeded'",
"%",
"file_to_update",
"rc",
".",
"set_message",
"(",
"msg",
")",
"return",
"rc"
] | 38.773585 | 18.132075 |
def download(url, dest):
"""Download the image to disk."""
path = os.path.join(dest, url.split('/')[-1])
r = requests.get(url, stream=True)
r.raise_for_status()
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return path | [
"def",
"download",
"(",
"url",
",",
"dest",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest",
",",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"r",
".",
"raise_for_status",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"r",
".",
"iter_content",
"(",
"chunk_size",
"=",
"1024",
")",
":",
"if",
"chunk",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"return",
"path"
] | 32.2 | 11.9 |
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms | [
"def",
"get_hull_energy",
"(",
"self",
",",
"comp",
")",
":",
"e",
"=",
"0",
"for",
"k",
",",
"v",
"in",
"self",
".",
"get_decomposition",
"(",
"comp",
")",
".",
"items",
"(",
")",
":",
"e",
"+=",
"k",
".",
"energy_per_atom",
"*",
"v",
"return",
"e",
"*",
"comp",
".",
"num_atoms"
] | 31.384615 | 16.615385 |
def _read_num( # noqa: C901 # pylint: disable=too-many-statements
ctx: ReaderContext
) -> MaybeNumber:
"""Return a numeric (complex, Decimal, float, int, Fraction) from the input stream."""
chars: List[str] = []
reader = ctx.reader
is_complex = False
is_decimal = False
is_float = False
is_integer = False
is_ratio = False
while True:
token = reader.peek()
if token == "-":
following_token = reader.next_token()
if not begin_num_chars.match(following_token):
reader.pushback()
try:
for _ in chars:
reader.pushback()
except IndexError:
raise SyntaxError(
"Requested to pushback too many characters onto StreamReader"
)
return _read_sym(ctx)
chars.append(token)
continue
elif token == ".":
if is_float:
raise SyntaxError("Found extra '.' in float; expected decimal portion")
is_float = True
elif token == "J":
if is_complex:
raise SyntaxError("Found extra 'J' suffix in complex literal")
is_complex = True
elif token == "M":
if is_decimal:
raise SyntaxError("Found extra 'M' suffix in decimal literal")
is_decimal = True
elif token == "N":
if is_integer:
raise SyntaxError("Found extra 'N' suffix in integer literal")
is_integer = True
elif token == "/":
if is_ratio:
raise SyntaxError("Found extra '/' in ratio literal")
is_ratio = True
elif not num_chars.match(token):
break
reader.next_token()
chars.append(token)
assert len(chars) > 0, "Must have at least one digit in integer or float"
s = "".join(chars)
if (
sum(
[
is_complex and is_decimal,
is_complex and is_integer,
is_complex and is_ratio,
is_decimal or is_float,
is_integer,
is_ratio,
]
)
> 1
):
raise SyntaxError(f"Invalid number format: {s}")
if is_complex:
imaginary = float(s[:-1]) if is_float else int(s[:-1])
return complex(0, imaginary)
elif is_decimal:
try:
return decimal.Decimal(s[:-1])
except decimal.InvalidOperation:
raise SyntaxError(f"Invalid number format: {s}") from None
elif is_float:
return float(s)
elif is_ratio:
assert "/" in s, "Ratio must contain one '/' character"
num, denominator = s.split("/")
return Fraction(numerator=int(num), denominator=int(denominator))
elif is_integer:
return int(s[:-1])
return int(s) | [
"def",
"_read_num",
"(",
"# noqa: C901 # pylint: disable=too-many-statements",
"ctx",
":",
"ReaderContext",
")",
"->",
"MaybeNumber",
":",
"chars",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"reader",
"=",
"ctx",
".",
"reader",
"is_complex",
"=",
"False",
"is_decimal",
"=",
"False",
"is_float",
"=",
"False",
"is_integer",
"=",
"False",
"is_ratio",
"=",
"False",
"while",
"True",
":",
"token",
"=",
"reader",
".",
"peek",
"(",
")",
"if",
"token",
"==",
"\"-\"",
":",
"following_token",
"=",
"reader",
".",
"next_token",
"(",
")",
"if",
"not",
"begin_num_chars",
".",
"match",
"(",
"following_token",
")",
":",
"reader",
".",
"pushback",
"(",
")",
"try",
":",
"for",
"_",
"in",
"chars",
":",
"reader",
".",
"pushback",
"(",
")",
"except",
"IndexError",
":",
"raise",
"SyntaxError",
"(",
"\"Requested to pushback too many characters onto StreamReader\"",
")",
"return",
"_read_sym",
"(",
"ctx",
")",
"chars",
".",
"append",
"(",
"token",
")",
"continue",
"elif",
"token",
"==",
"\".\"",
":",
"if",
"is_float",
":",
"raise",
"SyntaxError",
"(",
"\"Found extra '.' in float; expected decimal portion\"",
")",
"is_float",
"=",
"True",
"elif",
"token",
"==",
"\"J\"",
":",
"if",
"is_complex",
":",
"raise",
"SyntaxError",
"(",
"\"Found extra 'J' suffix in complex literal\"",
")",
"is_complex",
"=",
"True",
"elif",
"token",
"==",
"\"M\"",
":",
"if",
"is_decimal",
":",
"raise",
"SyntaxError",
"(",
"\"Found extra 'M' suffix in decimal literal\"",
")",
"is_decimal",
"=",
"True",
"elif",
"token",
"==",
"\"N\"",
":",
"if",
"is_integer",
":",
"raise",
"SyntaxError",
"(",
"\"Found extra 'N' suffix in integer literal\"",
")",
"is_integer",
"=",
"True",
"elif",
"token",
"==",
"\"/\"",
":",
"if",
"is_ratio",
":",
"raise",
"SyntaxError",
"(",
"\"Found extra '/' in ratio literal\"",
")",
"is_ratio",
"=",
"True",
"elif",
"not",
"num_chars",
".",
"match",
"(",
"token",
")",
":",
"break",
"reader",
".",
"next_token",
"(",
")",
"chars",
".",
"append",
"(",
"token",
")",
"assert",
"len",
"(",
"chars",
")",
">",
"0",
",",
"\"Must have at least one digit in integer or float\"",
"s",
"=",
"\"\"",
".",
"join",
"(",
"chars",
")",
"if",
"(",
"sum",
"(",
"[",
"is_complex",
"and",
"is_decimal",
",",
"is_complex",
"and",
"is_integer",
",",
"is_complex",
"and",
"is_ratio",
",",
"is_decimal",
"or",
"is_float",
",",
"is_integer",
",",
"is_ratio",
",",
"]",
")",
">",
"1",
")",
":",
"raise",
"SyntaxError",
"(",
"f\"Invalid number format: {s}\"",
")",
"if",
"is_complex",
":",
"imaginary",
"=",
"float",
"(",
"s",
"[",
":",
"-",
"1",
"]",
")",
"if",
"is_float",
"else",
"int",
"(",
"s",
"[",
":",
"-",
"1",
"]",
")",
"return",
"complex",
"(",
"0",
",",
"imaginary",
")",
"elif",
"is_decimal",
":",
"try",
":",
"return",
"decimal",
".",
"Decimal",
"(",
"s",
"[",
":",
"-",
"1",
"]",
")",
"except",
"decimal",
".",
"InvalidOperation",
":",
"raise",
"SyntaxError",
"(",
"f\"Invalid number format: {s}\"",
")",
"from",
"None",
"elif",
"is_float",
":",
"return",
"float",
"(",
"s",
")",
"elif",
"is_ratio",
":",
"assert",
"\"/\"",
"in",
"s",
",",
"\"Ratio must contain one '/' character\"",
"num",
",",
"denominator",
"=",
"s",
".",
"split",
"(",
"\"/\"",
")",
"return",
"Fraction",
"(",
"numerator",
"=",
"int",
"(",
"num",
")",
",",
"denominator",
"=",
"int",
"(",
"denominator",
")",
")",
"elif",
"is_integer",
":",
"return",
"int",
"(",
"s",
"[",
":",
"-",
"1",
"]",
")",
"return",
"int",
"(",
"s",
")"
] | 32.465909 | 18.488636 |
def _get_resource_raw(
self, cls, id, extra=None, headers=None, stream=False, **filters
):
"""Get an individual REST resource"""
headers = headers or {}
headers.update(self.session.headers)
postfix = "/{}".format(extra) if extra else ""
if cls.api_root != "a":
url = "{}/{}/{}{}".format(self.api_server, cls.collection_name, id, postfix)
else:
url = "{}/a/{}/{}/{}{}".format(
self.api_server, self.app_id, cls.collection_name, id, postfix
)
converted_filters = convert_datetimes_to_timestamps(
filters, cls.datetime_filter_attrs
)
url = str(URLObject(url).add_query_params(converted_filters.items()))
response = self._get_http_session(cls.api_root).get(
url, headers=headers, stream=stream
)
return _validate(response) | [
"def",
"_get_resource_raw",
"(",
"self",
",",
"cls",
",",
"id",
",",
"extra",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"stream",
"=",
"False",
",",
"*",
"*",
"filters",
")",
":",
"headers",
"=",
"headers",
"or",
"{",
"}",
"headers",
".",
"update",
"(",
"self",
".",
"session",
".",
"headers",
")",
"postfix",
"=",
"\"/{}\"",
".",
"format",
"(",
"extra",
")",
"if",
"extra",
"else",
"\"\"",
"if",
"cls",
".",
"api_root",
"!=",
"\"a\"",
":",
"url",
"=",
"\"{}/{}/{}{}\"",
".",
"format",
"(",
"self",
".",
"api_server",
",",
"cls",
".",
"collection_name",
",",
"id",
",",
"postfix",
")",
"else",
":",
"url",
"=",
"\"{}/a/{}/{}/{}{}\"",
".",
"format",
"(",
"self",
".",
"api_server",
",",
"self",
".",
"app_id",
",",
"cls",
".",
"collection_name",
",",
"id",
",",
"postfix",
")",
"converted_filters",
"=",
"convert_datetimes_to_timestamps",
"(",
"filters",
",",
"cls",
".",
"datetime_filter_attrs",
")",
"url",
"=",
"str",
"(",
"URLObject",
"(",
"url",
")",
".",
"add_query_params",
"(",
"converted_filters",
".",
"items",
"(",
")",
")",
")",
"response",
"=",
"self",
".",
"_get_http_session",
"(",
"cls",
".",
"api_root",
")",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"stream",
"=",
"stream",
")",
"return",
"_validate",
"(",
"response",
")"
] | 38.347826 | 21.782609 |
def atmost(cls, lits, bound=1, top_id=None, encoding=EncType.seqcounter):
"""
This method can be used for creating a CNF encoding of an AtMostK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\leq k`. The method
shares the arguments and the return type with method
:meth:`CardEnc.atleast`. Please, see it for details.
"""
if encoding < 0 or encoding > 9:
raise(NoSuchEncodingError(encoding))
if not top_id:
top_id = max(map(lambda x: abs(x), lits))
# we are going to return this formula
ret = CNFPlus()
# MiniCard's native representation is handled separately
if encoding == 9:
ret.atmosts, ret.nv = [(lits, bound)], top_id
return ret
# saving default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
res = pycard.encode_atmost(lits, bound, top_id, encoding)
# recovering default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if res:
ret.clauses, ret.nv = res
return ret | [
"def",
"atmost",
"(",
"cls",
",",
"lits",
",",
"bound",
"=",
"1",
",",
"top_id",
"=",
"None",
",",
"encoding",
"=",
"EncType",
".",
"seqcounter",
")",
":",
"if",
"encoding",
"<",
"0",
"or",
"encoding",
">",
"9",
":",
"raise",
"(",
"NoSuchEncodingError",
"(",
"encoding",
")",
")",
"if",
"not",
"top_id",
":",
"top_id",
"=",
"max",
"(",
"map",
"(",
"lambda",
"x",
":",
"abs",
"(",
"x",
")",
",",
"lits",
")",
")",
"# we are going to return this formula",
"ret",
"=",
"CNFPlus",
"(",
")",
"# MiniCard's native representation is handled separately",
"if",
"encoding",
"==",
"9",
":",
"ret",
".",
"atmosts",
",",
"ret",
".",
"nv",
"=",
"[",
"(",
"lits",
",",
"bound",
")",
"]",
",",
"top_id",
"return",
"ret",
"# saving default SIGINT handler",
"def_sigint_handler",
"=",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_DFL",
")",
"res",
"=",
"pycard",
".",
"encode_atmost",
"(",
"lits",
",",
"bound",
",",
"top_id",
",",
"encoding",
")",
"# recovering default SIGINT handler",
"def_sigint_handler",
"=",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"def_sigint_handler",
")",
"if",
"res",
":",
"ret",
".",
"clauses",
",",
"ret",
".",
"nv",
"=",
"res",
"return",
"ret"
] | 33.617647 | 23.5 |
def lcsubstrings(seq1, seq2, positions=False):
"""Find the longest common substring(s) in the sequences `seq1` and `seq2`.
If positions evaluates to `True` only their positions will be returned,
together with their length, in a tuple:
(length, [(start pos in seq1, start pos in seq2)..])
Otherwise, the substrings themselves will be returned, in a set.
Example:
>>> lcsubstrings("sedentar", "dentist")
{'dent'}
>>> lcsubstrings("sedentar", "dentist", positions=True)
(4, [(2, 0)])
"""
L1, L2 = len(seq1), len(seq2)
ms = []
mlen = last = 0
if L1 < L2:
seq1, seq2 = seq2, seq1
L1, L2 = L2, L1
column = array('L', range(L2))
for i in range(L1):
for j in range(L2):
old = column[j]
if seq1[i] == seq2[j]:
if i == 0 or j == 0:
column[j] = 1
else:
column[j] = last + 1
if column[j] > mlen:
mlen = column[j]
ms = [(i, j)]
elif column[j] == mlen:
ms.append((i, j))
else:
column[j] = 0
last = old
if positions:
return (mlen, tuple((i - mlen + 1, j - mlen + 1) for i, j in ms if ms))
return set(seq1[i - mlen + 1:i + 1] for i, _ in ms if ms) | [
"def",
"lcsubstrings",
"(",
"seq1",
",",
"seq2",
",",
"positions",
"=",
"False",
")",
":",
"L1",
",",
"L2",
"=",
"len",
"(",
"seq1",
")",
",",
"len",
"(",
"seq2",
")",
"ms",
"=",
"[",
"]",
"mlen",
"=",
"last",
"=",
"0",
"if",
"L1",
"<",
"L2",
":",
"seq1",
",",
"seq2",
"=",
"seq2",
",",
"seq1",
"L1",
",",
"L2",
"=",
"L2",
",",
"L1",
"column",
"=",
"array",
"(",
"'L'",
",",
"range",
"(",
"L2",
")",
")",
"for",
"i",
"in",
"range",
"(",
"L1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"L2",
")",
":",
"old",
"=",
"column",
"[",
"j",
"]",
"if",
"seq1",
"[",
"i",
"]",
"==",
"seq2",
"[",
"j",
"]",
":",
"if",
"i",
"==",
"0",
"or",
"j",
"==",
"0",
":",
"column",
"[",
"j",
"]",
"=",
"1",
"else",
":",
"column",
"[",
"j",
"]",
"=",
"last",
"+",
"1",
"if",
"column",
"[",
"j",
"]",
">",
"mlen",
":",
"mlen",
"=",
"column",
"[",
"j",
"]",
"ms",
"=",
"[",
"(",
"i",
",",
"j",
")",
"]",
"elif",
"column",
"[",
"j",
"]",
"==",
"mlen",
":",
"ms",
".",
"append",
"(",
"(",
"i",
",",
"j",
")",
")",
"else",
":",
"column",
"[",
"j",
"]",
"=",
"0",
"last",
"=",
"old",
"if",
"positions",
":",
"return",
"(",
"mlen",
",",
"tuple",
"(",
"(",
"i",
"-",
"mlen",
"+",
"1",
",",
"j",
"-",
"mlen",
"+",
"1",
")",
"for",
"i",
",",
"j",
"in",
"ms",
"if",
"ms",
")",
")",
"return",
"set",
"(",
"seq1",
"[",
"i",
"-",
"mlen",
"+",
"1",
":",
"i",
"+",
"1",
"]",
"for",
"i",
",",
"_",
"in",
"ms",
"if",
"ms",
")"
] | 23.717391 | 22.630435 |
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = [] | [
"def",
"quit",
"(",
"self",
")",
":",
"for",
"c",
"in",
"self",
".",
"channels",
":",
"c",
".",
"users",
".",
"remove",
"(",
"self",
".",
"nick",
")",
"self",
".",
"channels",
"=",
"[",
"]"
] | 29.125 | 12 |
def _sanitize_data(runnable_jobs_data):
"""We receive data from runnable jobs api and return the sanitized data that meets our needs.
This is a loop to remove duplicates (including buildsystem -> * transformations if needed)
By doing this, it allows us to have a single database query
It returns sanitized_list which will contain a subset which excludes:
* jobs that don't specify the platform
* jobs that don't specify the testtype
* if the job appears again, we replace build_system_type with '*'. By doing so, if a job appears
under both 'buildbot' and 'taskcluster', its build_system_type will be '*'
"""
job_build_system_type = {}
sanitized_list = []
for job in runnable_jobs_data:
if not valid_platform(job['platform']):
logger.info('Invalid platform %s', job['platform'])
continue
testtype = parse_testtype(
build_system_type=job['build_system_type'],
job_type_name=job['job_type_name'],
platform_option=job['platform_option'],
ref_data_name=job['ref_data_name']
)
if not testtype:
continue
# NOTE: This is *all* the data we need from the runnable API
new_job = {
'build_system_type': job['build_system_type'], # e.g. {buildbot,taskcluster,*}
'platform': job['platform'], # e.g. windows8-64
'platform_option': job['platform_option'], # e.g. {opt,debug}
'testtype': testtype, # e.g. web-platform-tests-1
}
key = _unique_key(new_job)
# Let's build a map of all the jobs and if duplicated change the build_system_type to *
if key not in job_build_system_type:
job_build_system_type[key] = job['build_system_type']
sanitized_list.append(new_job)
elif new_job['build_system_type'] != job_build_system_type[key]:
new_job['build_system_type'] = job_build_system_type[key]
# This will *replace* the previous build system type with '*'
# This guarantees that we don't have duplicates
sanitized_list[sanitized_list.index(new_job)]['build_system_type'] = '*'
return sanitized_list | [
"def",
"_sanitize_data",
"(",
"runnable_jobs_data",
")",
":",
"job_build_system_type",
"=",
"{",
"}",
"sanitized_list",
"=",
"[",
"]",
"for",
"job",
"in",
"runnable_jobs_data",
":",
"if",
"not",
"valid_platform",
"(",
"job",
"[",
"'platform'",
"]",
")",
":",
"logger",
".",
"info",
"(",
"'Invalid platform %s'",
",",
"job",
"[",
"'platform'",
"]",
")",
"continue",
"testtype",
"=",
"parse_testtype",
"(",
"build_system_type",
"=",
"job",
"[",
"'build_system_type'",
"]",
",",
"job_type_name",
"=",
"job",
"[",
"'job_type_name'",
"]",
",",
"platform_option",
"=",
"job",
"[",
"'platform_option'",
"]",
",",
"ref_data_name",
"=",
"job",
"[",
"'ref_data_name'",
"]",
")",
"if",
"not",
"testtype",
":",
"continue",
"# NOTE: This is *all* the data we need from the runnable API",
"new_job",
"=",
"{",
"'build_system_type'",
":",
"job",
"[",
"'build_system_type'",
"]",
",",
"# e.g. {buildbot,taskcluster,*}",
"'platform'",
":",
"job",
"[",
"'platform'",
"]",
",",
"# e.g. windows8-64",
"'platform_option'",
":",
"job",
"[",
"'platform_option'",
"]",
",",
"# e.g. {opt,debug}",
"'testtype'",
":",
"testtype",
",",
"# e.g. web-platform-tests-1",
"}",
"key",
"=",
"_unique_key",
"(",
"new_job",
")",
"# Let's build a map of all the jobs and if duplicated change the build_system_type to *",
"if",
"key",
"not",
"in",
"job_build_system_type",
":",
"job_build_system_type",
"[",
"key",
"]",
"=",
"job",
"[",
"'build_system_type'",
"]",
"sanitized_list",
".",
"append",
"(",
"new_job",
")",
"elif",
"new_job",
"[",
"'build_system_type'",
"]",
"!=",
"job_build_system_type",
"[",
"key",
"]",
":",
"new_job",
"[",
"'build_system_type'",
"]",
"=",
"job_build_system_type",
"[",
"key",
"]",
"# This will *replace* the previous build system type with '*'",
"# This guarantees that we don't have duplicates",
"sanitized_list",
"[",
"sanitized_list",
".",
"index",
"(",
"new_job",
")",
"]",
"[",
"'build_system_type'",
"]",
"=",
"'*'",
"return",
"sanitized_list"
] | 44.591837 | 23.693878 |
def _get_parser():
"""Parse input options."""
import argparse
parser = argparse.ArgumentParser(description=("Convert between mesh formats."))
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=input_filetypes,
help="input file format",
default=None,
)
parser.add_argument(
"--output-format",
"-o",
type=str,
choices=output_filetypes,
help="output file format",
default=None,
)
parser.add_argument("outfile", type=str, help="mesh file to be written to")
parser.add_argument(
"--prune",
"-p",
action="store_true",
help="remove lower order cells, remove orphaned nodes",
)
parser.add_argument(
"--prune-z-0",
"-z",
action="store_true",
help="remove third (z) dimension if all points are 0",
)
parser.add_argument(
"--version",
"-v",
action="version",
version="%(prog)s {}, Python {}".format(__version__, sys.version),
help="display version information",
)
return parser | [
"def",
"_get_parser",
"(",
")",
":",
"import",
"argparse",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"(",
"\"Convert between mesh formats.\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"infile\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"mesh file to be read from\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--input-format\"",
",",
"\"-i\"",
",",
"type",
"=",
"str",
",",
"choices",
"=",
"input_filetypes",
",",
"help",
"=",
"\"input file format\"",
",",
"default",
"=",
"None",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--output-format\"",
",",
"\"-o\"",
",",
"type",
"=",
"str",
",",
"choices",
"=",
"output_filetypes",
",",
"help",
"=",
"\"output file format\"",
",",
"default",
"=",
"None",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"outfile\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"mesh file to be written to\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--prune\"",
",",
"\"-p\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"remove lower order cells, remove orphaned nodes\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--prune-z-0\"",
",",
"\"-z\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"remove third (z) dimension if all points are 0\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--version\"",
",",
"\"-v\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"\"%(prog)s {}, Python {}\"",
".",
"format",
"(",
"__version__",
",",
"sys",
".",
"version",
")",
",",
"help",
"=",
"\"display version information\"",
",",
")",
"return",
"parser"
] | 23.039216 | 24.647059 |
def image(self):
"""
Returns a PngImageFile instance of the chart
You must have PIL installed for this to work
"""
try:
try:
import Image
except ImportError:
from PIL import Image
except ImportError:
raise ImportError('You must install PIL to fetch image objects')
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return Image.open(StringIO(self.urlopen().read())) | [
"def",
"image",
"(",
"self",
")",
":",
"try",
":",
"try",
":",
"import",
"Image",
"except",
"ImportError",
":",
"from",
"PIL",
"import",
"Image",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'You must install PIL to fetch image objects'",
")",
"try",
":",
"from",
"cStringIO",
"import",
"StringIO",
"except",
"ImportError",
":",
"from",
"StringIO",
"import",
"StringIO",
"return",
"Image",
".",
"open",
"(",
"StringIO",
"(",
"self",
".",
"urlopen",
"(",
")",
".",
"read",
"(",
")",
")",
")"
] | 30.5 | 15.277778 |
def safe_makedirs(path):
"""A safe function for creating a directory tree."""
try:
os.makedirs(path)
except OSError as err:
if err.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise | [
"def",
"safe_makedirs",
"(",
"path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"raise",
"else",
":",
"raise"
] | 26.6 | 15 |
def model_average(X, penalization):
"""Run ModelAverage in default mode (QuicGraphicalLassoCV) to obtain proportion
matrix.
NOTE: This returns precision_ proportions, not cov, prec estimates, so we
return the raw proportions for "cov" and the threshold support
estimate for prec.
"""
n_trials = 100
print("ModelAverage with:")
print(" estimator: QuicGraphicalLasso (default)")
print(" n_trials: {}".format(n_trials))
print(" penalization: {}".format(penalization))
# if penalization is random, first find a decent scalar lam_ to build
# random perturbation matrix around. lam doesn't matter for fully-random.
lam = 0.5
if penalization == "random":
cv_model = QuicGraphicalLassoCV(
cv=2, n_refinements=6, n_jobs=1, init_method="cov", score_metric=metric
)
cv_model.fit(X)
lam = cv_model.lam_
print(" lam: {}".format(lam))
model = ModelAverage(
n_trials=n_trials, penalization=penalization, lam=lam, n_jobs=1
)
model.fit(X)
print(" lam_: {}".format(model.lam_))
return model.proportion_, model.support_, model.lam_ | [
"def",
"model_average",
"(",
"X",
",",
"penalization",
")",
":",
"n_trials",
"=",
"100",
"print",
"(",
"\"ModelAverage with:\"",
")",
"print",
"(",
"\" estimator: QuicGraphicalLasso (default)\"",
")",
"print",
"(",
"\" n_trials: {}\"",
".",
"format",
"(",
"n_trials",
")",
")",
"print",
"(",
"\" penalization: {}\"",
".",
"format",
"(",
"penalization",
")",
")",
"# if penalization is random, first find a decent scalar lam_ to build",
"# random perturbation matrix around. lam doesn't matter for fully-random.",
"lam",
"=",
"0.5",
"if",
"penalization",
"==",
"\"random\"",
":",
"cv_model",
"=",
"QuicGraphicalLassoCV",
"(",
"cv",
"=",
"2",
",",
"n_refinements",
"=",
"6",
",",
"n_jobs",
"=",
"1",
",",
"init_method",
"=",
"\"cov\"",
",",
"score_metric",
"=",
"metric",
")",
"cv_model",
".",
"fit",
"(",
"X",
")",
"lam",
"=",
"cv_model",
".",
"lam_",
"print",
"(",
"\" lam: {}\"",
".",
"format",
"(",
"lam",
")",
")",
"model",
"=",
"ModelAverage",
"(",
"n_trials",
"=",
"n_trials",
",",
"penalization",
"=",
"penalization",
",",
"lam",
"=",
"lam",
",",
"n_jobs",
"=",
"1",
")",
"model",
".",
"fit",
"(",
"X",
")",
"print",
"(",
"\" lam_: {}\"",
".",
"format",
"(",
"model",
".",
"lam_",
")",
")",
"return",
"model",
".",
"proportion_",
",",
"model",
".",
"support_",
",",
"model",
".",
"lam_"
] | 37.129032 | 20.483871 |
def add_hosted_zone_id_for_alias_target_if_missing(self, rs):
"""Add proper hosted zone id to record set alias target if missing."""
alias_target = getattr(rs, "AliasTarget", None)
if alias_target:
hosted_zone_id = getattr(alias_target, "HostedZoneId", None)
if not hosted_zone_id:
dns_name = alias_target.DNSName
if dns_name.endswith(CF_DOMAIN):
alias_target.HostedZoneId = CLOUDFRONT_ZONE_ID
elif dns_name.endswith(ELB_DOMAIN):
region = dns_name.split('.')[-5]
alias_target.HostedZoneId = ELB_ZONE_IDS[region]
elif dns_name in S3_WEBSITE_ZONE_IDS:
alias_target.HostedZoneId = S3_WEBSITE_ZONE_IDS[dns_name]
else:
alias_target.HostedZoneId = self.hosted_zone_id
return rs | [
"def",
"add_hosted_zone_id_for_alias_target_if_missing",
"(",
"self",
",",
"rs",
")",
":",
"alias_target",
"=",
"getattr",
"(",
"rs",
",",
"\"AliasTarget\"",
",",
"None",
")",
"if",
"alias_target",
":",
"hosted_zone_id",
"=",
"getattr",
"(",
"alias_target",
",",
"\"HostedZoneId\"",
",",
"None",
")",
"if",
"not",
"hosted_zone_id",
":",
"dns_name",
"=",
"alias_target",
".",
"DNSName",
"if",
"dns_name",
".",
"endswith",
"(",
"CF_DOMAIN",
")",
":",
"alias_target",
".",
"HostedZoneId",
"=",
"CLOUDFRONT_ZONE_ID",
"elif",
"dns_name",
".",
"endswith",
"(",
"ELB_DOMAIN",
")",
":",
"region",
"=",
"dns_name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"5",
"]",
"alias_target",
".",
"HostedZoneId",
"=",
"ELB_ZONE_IDS",
"[",
"region",
"]",
"elif",
"dns_name",
"in",
"S3_WEBSITE_ZONE_IDS",
":",
"alias_target",
".",
"HostedZoneId",
"=",
"S3_WEBSITE_ZONE_IDS",
"[",
"dns_name",
"]",
"else",
":",
"alias_target",
".",
"HostedZoneId",
"=",
"self",
".",
"hosted_zone_id",
"return",
"rs"
] | 52.411765 | 17.705882 |
def getaddresses (addr):
"""Return list of email addresses from given field value."""
parsed = [mail for name, mail in AddressList(addr).addresslist if mail]
if parsed:
addresses = parsed
elif addr:
# we could not parse any mail addresses, so try with the raw string
addresses = [addr]
else:
addresses = []
return addresses | [
"def",
"getaddresses",
"(",
"addr",
")",
":",
"parsed",
"=",
"[",
"mail",
"for",
"name",
",",
"mail",
"in",
"AddressList",
"(",
"addr",
")",
".",
"addresslist",
"if",
"mail",
"]",
"if",
"parsed",
":",
"addresses",
"=",
"parsed",
"elif",
"addr",
":",
"# we could not parse any mail addresses, so try with the raw string",
"addresses",
"=",
"[",
"addr",
"]",
"else",
":",
"addresses",
"=",
"[",
"]",
"return",
"addresses"
] | 33.545455 | 21.363636 |
def get_assessment_parts_by_banks(self, bank_ids):
"""Gets the list of assessment part corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.authoring.AssessmentPartList) - list of
assessment parts
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bins
assessment_part_list = []
for bank_id in bank_ids:
assessment_part_list += list(
self.get_assessment_parts_by_bank(bank_id))
return objects.AssessmentPartList(assessment_part_list) | [
"def",
"get_assessment_parts_by_banks",
"(",
"self",
",",
"bank_ids",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinSession.get_resources_by_bins",
"assessment_part_list",
"=",
"[",
"]",
"for",
"bank_id",
"in",
"bank_ids",
":",
"assessment_part_list",
"+=",
"list",
"(",
"self",
".",
"get_assessment_parts_by_bank",
"(",
"bank_id",
")",
")",
"return",
"objects",
".",
"AssessmentPartList",
"(",
"assessment_part_list",
")"
] | 46.210526 | 16.526316 |
def edit_message_reply_markup(self, chat_id, message_id, reply_markup, **options):
"""
Edit a reply markup of message in a chat
:param int chat_id: ID of the chat the message to edit is in
:param int message_id: ID of the message to edit
:param str reply_markup: New inline keyboard markup for the message
:param options: Additional API options
"""
return self.api_call(
"editMessageReplyMarkup",
chat_id=chat_id,
message_id=message_id,
reply_markup=reply_markup,
**options
) | [
"def",
"edit_message_reply_markup",
"(",
"self",
",",
"chat_id",
",",
"message_id",
",",
"reply_markup",
",",
"*",
"*",
"options",
")",
":",
"return",
"self",
".",
"api_call",
"(",
"\"editMessageReplyMarkup\"",
",",
"chat_id",
"=",
"chat_id",
",",
"message_id",
"=",
"message_id",
",",
"reply_markup",
"=",
"reply_markup",
",",
"*",
"*",
"options",
")"
] | 37.0625 | 16.1875 |
def get_manifest_and_response(self, alias):
"""
Request the manifest for an alias and return the manifest and the
response.
:param alias: Alias name.
:type alias: str
:rtype: tuple
:returns: Tuple containing the manifest as a string (JSON) and the `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
"""
r = self._request('get',
'manifests/' + alias,
headers={'Accept': _schema2_mimetype + ', ' +
_schema1_mimetype})
return r.content.decode('utf-8'), r | [
"def",
"get_manifest_and_response",
"(",
"self",
",",
"alias",
")",
":",
"r",
"=",
"self",
".",
"_request",
"(",
"'get'",
",",
"'manifests/'",
"+",
"alias",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"_schema2_mimetype",
"+",
"', '",
"+",
"_schema1_mimetype",
"}",
")",
"return",
"r",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"r"
] | 40.75 | 23.5 |
def _rsadp(self, c):
"""
Internal method providing raw RSA decryption, i.e. simple modular
exponentiation of the given ciphertext representative 'c', a long
between 0 and n-1.
This is the decryption primitive RSADP described in PKCS#1 v2.1,
i.e. RFC 3447 Sect. 5.1.2.
Input:
c: ciphertest representative, a long between 0 and n-1, where
n is the key modulus.
Output:
ciphertext representative, a long between 0 and n-1
Not intended to be used directly. Please, see encrypt() method.
"""
n = self.modulus
if type(c) is int:
c = long(c)
if type(c) is not long or c > n-1:
warning("Key._rsaep() expects a long between 0 and n-1")
return None
return self.key.decrypt(c) | [
"def",
"_rsadp",
"(",
"self",
",",
"c",
")",
":",
"n",
"=",
"self",
".",
"modulus",
"if",
"type",
"(",
"c",
")",
"is",
"int",
":",
"c",
"=",
"long",
"(",
"c",
")",
"if",
"type",
"(",
"c",
")",
"is",
"not",
"long",
"or",
"c",
">",
"n",
"-",
"1",
":",
"warning",
"(",
"\"Key._rsaep() expects a long between 0 and n-1\"",
")",
"return",
"None",
"return",
"self",
".",
"key",
".",
"decrypt",
"(",
"c",
")"
] | 31 | 22.62963 |
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos | [
"def",
"new_pos",
"(",
"self",
",",
"html_div",
")",
":",
"pos",
"=",
"self",
".",
"Position",
"(",
"self",
",",
"html_div",
")",
"pos",
".",
"bind_mov",
"(",
")",
"self",
".",
"positions",
".",
"append",
"(",
"pos",
")",
"return",
"pos"
] | 30.166667 | 10.166667 |
def _start_ssh_agent(cls):
"""Starts ssh-agent and returns the environment variables related to it"""
env = dict()
stdout = ClHelper.run_command('ssh-agent -s')
lines = stdout.split('\n')
for line in lines:
if not line or line.startswith('echo '):
continue
line = line.split(';')[0]
parts = line.split('=')
if len(parts) == 2:
env[parts[0]] = parts[1]
return env | [
"def",
"_start_ssh_agent",
"(",
"cls",
")",
":",
"env",
"=",
"dict",
"(",
")",
"stdout",
"=",
"ClHelper",
".",
"run_command",
"(",
"'ssh-agent -s'",
")",
"lines",
"=",
"stdout",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"not",
"line",
"or",
"line",
".",
"startswith",
"(",
"'echo '",
")",
":",
"continue",
"line",
"=",
"line",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
"parts",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"env",
"[",
"parts",
"[",
"0",
"]",
"]",
"=",
"parts",
"[",
"1",
"]",
"return",
"env"
] | 36.769231 | 10.307692 |
def getAttributeValueType(self, index):
"""
Return the type of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE] | [
"def",
"getAttributeValueType",
"(",
"self",
",",
"index",
")",
":",
"offset",
"=",
"self",
".",
"_get_attribute_offset",
"(",
"index",
")",
"return",
"self",
".",
"m_attributes",
"[",
"offset",
"+",
"const",
".",
"ATTRIBUTE_IX_VALUE_TYPE",
"]"
] | 35.75 | 13.25 |
def preset_ls(remote):
"""List presets
\b
Usage:
$ be preset ls
- ad
- game
- film
"""
if self.isactive():
lib.echo("ERROR: Exit current project first")
sys.exit(lib.USER_ERROR)
if remote:
presets = _extern.github_presets()
else:
presets = _extern.local_presets()
if not presets:
lib.echo("No presets found")
sys.exit(lib.NORMAL)
for preset in sorted(presets):
lib.echo("- %s" % preset)
sys.exit(lib.NORMAL) | [
"def",
"preset_ls",
"(",
"remote",
")",
":",
"if",
"self",
".",
"isactive",
"(",
")",
":",
"lib",
".",
"echo",
"(",
"\"ERROR: Exit current project first\"",
")",
"sys",
".",
"exit",
"(",
"lib",
".",
"USER_ERROR",
")",
"if",
"remote",
":",
"presets",
"=",
"_extern",
".",
"github_presets",
"(",
")",
"else",
":",
"presets",
"=",
"_extern",
".",
"local_presets",
"(",
")",
"if",
"not",
"presets",
":",
"lib",
".",
"echo",
"(",
"\"No presets found\"",
")",
"sys",
".",
"exit",
"(",
"lib",
".",
"NORMAL",
")",
"for",
"preset",
"in",
"sorted",
"(",
"presets",
")",
":",
"lib",
".",
"echo",
"(",
"\"- %s\"",
"%",
"preset",
")",
"sys",
".",
"exit",
"(",
"lib",
".",
"NORMAL",
")"
] | 19.037037 | 20.148148 |
def __setUpTrakers(self):
''' set symbols '''
for symbol in self.symbols:
self.__trakers[symbol]=OneTraker(symbol, self, self.buyingRatio) | [
"def",
"__setUpTrakers",
"(",
"self",
")",
":",
"for",
"symbol",
"in",
"self",
".",
"symbols",
":",
"self",
".",
"__trakers",
"[",
"symbol",
"]",
"=",
"OneTraker",
"(",
"symbol",
",",
"self",
",",
"self",
".",
"buyingRatio",
")"
] | 40.75 | 17.25 |
def _get_image_information(self):
"""
:returns: Dictionary information about the container image
"""
result = yield from self.manager.query("GET", "images/{}/json".format(self._image))
return result | [
"def",
"_get_image_information",
"(",
"self",
")",
":",
"result",
"=",
"yield",
"from",
"self",
".",
"manager",
".",
"query",
"(",
"\"GET\"",
",",
"\"images/{}/json\"",
".",
"format",
"(",
"self",
".",
"_image",
")",
")",
"return",
"result"
] | 38.833333 | 17.166667 |
def load_table(self, table):
"""
Load the file contents into the supplied Table using the
specified key and filetype. The input table should have the
filenames as values which will be replaced by the loaded
data. If data_key is specified, this key will be used to index
the loaded data to retrive the specified item.
"""
items, data_keys = [], None
for key, filename in table.items():
data_dict = self.filetype.data(filename[0])
current_keys = tuple(sorted(data_dict.keys()))
values = [data_dict[k] for k in current_keys]
if data_keys is None:
data_keys = current_keys
elif data_keys != current_keys:
raise Exception("Data keys are inconsistent")
items.append((key, values))
return Table(items, kdims=table.kdims, vdims=data_keys) | [
"def",
"load_table",
"(",
"self",
",",
"table",
")",
":",
"items",
",",
"data_keys",
"=",
"[",
"]",
",",
"None",
"for",
"key",
",",
"filename",
"in",
"table",
".",
"items",
"(",
")",
":",
"data_dict",
"=",
"self",
".",
"filetype",
".",
"data",
"(",
"filename",
"[",
"0",
"]",
")",
"current_keys",
"=",
"tuple",
"(",
"sorted",
"(",
"data_dict",
".",
"keys",
"(",
")",
")",
")",
"values",
"=",
"[",
"data_dict",
"[",
"k",
"]",
"for",
"k",
"in",
"current_keys",
"]",
"if",
"data_keys",
"is",
"None",
":",
"data_keys",
"=",
"current_keys",
"elif",
"data_keys",
"!=",
"current_keys",
":",
"raise",
"Exception",
"(",
"\"Data keys are inconsistent\"",
")",
"items",
".",
"append",
"(",
"(",
"key",
",",
"values",
")",
")",
"return",
"Table",
"(",
"items",
",",
"kdims",
"=",
"table",
".",
"kdims",
",",
"vdims",
"=",
"data_keys",
")"
] | 44.85 | 14.15 |
def getiddfile(versionid):
"""find the IDD file of the E+ installation"""
vlist = versionid.split('.')
if len(vlist) == 1:
vlist = vlist + ['0', '0']
elif len(vlist) == 2:
vlist = vlist + ['0']
ver_str = '-'.join(vlist)
eplus_exe, _ = eppy.runner.run_functions.install_paths(ver_str)
eplusfolder = os.path.dirname(eplus_exe)
iddfile = '{}/Energy+.idd'.format(eplusfolder, )
return iddfile | [
"def",
"getiddfile",
"(",
"versionid",
")",
":",
"vlist",
"=",
"versionid",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"vlist",
")",
"==",
"1",
":",
"vlist",
"=",
"vlist",
"+",
"[",
"'0'",
",",
"'0'",
"]",
"elif",
"len",
"(",
"vlist",
")",
"==",
"2",
":",
"vlist",
"=",
"vlist",
"+",
"[",
"'0'",
"]",
"ver_str",
"=",
"'-'",
".",
"join",
"(",
"vlist",
")",
"eplus_exe",
",",
"_",
"=",
"eppy",
".",
"runner",
".",
"run_functions",
".",
"install_paths",
"(",
"ver_str",
")",
"eplusfolder",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"eplus_exe",
")",
"iddfile",
"=",
"'{}/Energy+.idd'",
".",
"format",
"(",
"eplusfolder",
",",
")",
"return",
"iddfile"
] | 35.916667 | 12.25 |
def get_weights_from_kmodel(kmodel):
"""
Convert kmodel's weights to bigdl format.
We are supposing the order is the same as the execution order.
:param kmodel: keras model
:return: list of ndarray
"""
layers_with_weights = [layer for layer in kmodel.layers if layer.weights]
bweights = []
for klayer in layers_with_weights:
# bws would be [weights, bias] or [weights]
bws = WeightsConverter.get_bigdl_weights_from_klayer(klayer)
for w in bws:
bweights.append(w)
return bweights | [
"def",
"get_weights_from_kmodel",
"(",
"kmodel",
")",
":",
"layers_with_weights",
"=",
"[",
"layer",
"for",
"layer",
"in",
"kmodel",
".",
"layers",
"if",
"layer",
".",
"weights",
"]",
"bweights",
"=",
"[",
"]",
"for",
"klayer",
"in",
"layers_with_weights",
":",
"# bws would be [weights, bias] or [weights]",
"bws",
"=",
"WeightsConverter",
".",
"get_bigdl_weights_from_klayer",
"(",
"klayer",
")",
"for",
"w",
"in",
"bws",
":",
"bweights",
".",
"append",
"(",
"w",
")",
"return",
"bweights"
] | 39.733333 | 13.6 |
def spkw10(handle, body, center, inframe, first, last, segid, consts, n, elems,
epochs):
"""
Write an SPK type 10 segment to the DAF open and attached to
the input handle.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw10_c.html
:param handle: The handle of a DAF file open for writing.
:type handle: int
:param body: The NAIF ID code for the body of the segment.
:type body: int
:param center: The center of motion for body.
:type center: int
:param inframe: The reference frame for this segment.
:type inframe: str
:param first: The first epoch for which the segment is valid.
:type first: float
:param last: The last epoch for which the segment is valid.
:type last: float
:param segid: The string to use for segment identifier.
:type segid: str
:param consts: The array of geophysical constants for the segment.
:type consts: 8-Element Array of floats
:param n: The number of element/epoch pairs to be stored.
:type n: int
:param elems: The collection of "two-line" element sets.
:type elems: Array of floats
:param epochs: The epochs associated with the element sets.
:type epochs: Array of floats
"""
handle = ctypes.c_int(handle)
body = ctypes.c_int(body)
center = ctypes.c_int(center)
inframe = stypes.stringToCharP(inframe)
first = ctypes.c_double(first)
last = ctypes.c_double(last)
segid = stypes.stringToCharP(segid)
consts = stypes.toDoubleVector(consts)
n = ctypes.c_int(n)
elems = stypes.toDoubleVector(elems)
epochs = stypes.toDoubleVector(epochs)
libspice.spkw10_c(handle, body, center, inframe, first, last, segid, consts,
n, elems, epochs) | [
"def",
"spkw10",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"consts",
",",
"n",
",",
"elems",
",",
"epochs",
")",
":",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"body",
"=",
"ctypes",
".",
"c_int",
"(",
"body",
")",
"center",
"=",
"ctypes",
".",
"c_int",
"(",
"center",
")",
"inframe",
"=",
"stypes",
".",
"stringToCharP",
"(",
"inframe",
")",
"first",
"=",
"ctypes",
".",
"c_double",
"(",
"first",
")",
"last",
"=",
"ctypes",
".",
"c_double",
"(",
"last",
")",
"segid",
"=",
"stypes",
".",
"stringToCharP",
"(",
"segid",
")",
"consts",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"consts",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"elems",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"elems",
")",
"epochs",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"epochs",
")",
"libspice",
".",
"spkw10_c",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"consts",
",",
"n",
",",
"elems",
",",
"epochs",
")"
] | 39.090909 | 16.545455 |
def _col_type_set(self, col, df):
"""
Determines the set of types present in a DataFrame column.
:param str col: A column name.
:param pandas.DataFrame df: The dataset. Usually ``self._data``.
:return: A set of Types.
"""
type_set = set()
if df[col].dtype == np.dtype(object):
unindexed_col = list(df[col])
for i in range(0, len(df[col])):
if unindexed_col[i] == np.nan:
continue
else:
type_set.add(type(unindexed_col[i]))
return type_set
else:
type_set.add(df[col].dtype)
return type_set | [
"def",
"_col_type_set",
"(",
"self",
",",
"col",
",",
"df",
")",
":",
"type_set",
"=",
"set",
"(",
")",
"if",
"df",
"[",
"col",
"]",
".",
"dtype",
"==",
"np",
".",
"dtype",
"(",
"object",
")",
":",
"unindexed_col",
"=",
"list",
"(",
"df",
"[",
"col",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"df",
"[",
"col",
"]",
")",
")",
":",
"if",
"unindexed_col",
"[",
"i",
"]",
"==",
"np",
".",
"nan",
":",
"continue",
"else",
":",
"type_set",
".",
"add",
"(",
"type",
"(",
"unindexed_col",
"[",
"i",
"]",
")",
")",
"return",
"type_set",
"else",
":",
"type_set",
".",
"add",
"(",
"df",
"[",
"col",
"]",
".",
"dtype",
")",
"return",
"type_set"
] | 33.7 | 12.4 |
def accepts_contributor_roles(func):
"""
Decorator that accepts only contributor roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_contributor_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_CONTRIBUTOR)(func)(*args, **kwargs)
return decorator | [
"def",
"accepts_contributor_roles",
"(",
"func",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"func",
")",
":",
"apply_function_to_members",
"(",
"func",
",",
"accepts_contributor_roles",
")",
"return",
"func",
"else",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"accepts_roles",
"(",
"*",
"ROLES_CONTRIBUTOR",
")",
"(",
"func",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator"
] | 27.866667 | 17.066667 |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Checksum(key)
if key not in Checksum._member_map_:
extend_enum(Checksum, key, default)
return Checksum[key] | [
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"Checksum",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"Checksum",
".",
"_member_map_",
":",
"extend_enum",
"(",
"Checksum",
",",
"key",
",",
"default",
")",
"return",
"Checksum",
"[",
"key",
"]"
] | 36.857143 | 7.714286 |
def body_echo(cls, request,
foo: (Ptypes.body, String('A body parameter'))) -> [
(200, 'Ok', String)]:
'''Echo the body parameter.'''
log.info('Echoing body param, value is: {}'.format(foo))
for i in range(randint(0, MAX_LOOP_DURATION)):
yield
msg = 'The value sent was: {}'.format(foo)
Respond(200, msg) | [
"def",
"body_echo",
"(",
"cls",
",",
"request",
",",
"foo",
":",
"(",
"Ptypes",
".",
"body",
",",
"String",
"(",
"'A body parameter'",
")",
")",
")",
"->",
"[",
"(",
"200",
",",
"'Ok'",
",",
"String",
")",
"]",
":",
"log",
".",
"info",
"(",
"'Echoing body param, value is: {}'",
".",
"format",
"(",
"foo",
")",
")",
"for",
"i",
"in",
"range",
"(",
"randint",
"(",
"0",
",",
"MAX_LOOP_DURATION",
")",
")",
":",
"yield",
"msg",
"=",
"'The value sent was: {}'",
".",
"format",
"(",
"foo",
")",
"Respond",
"(",
"200",
",",
"msg",
")"
] | 42 | 15.333333 |
def extract_archive(archive, verbosity=0, outdir=None, program=None, interactive=True):
"""Extract given archive."""
util.check_existing_filename(archive)
if verbosity >= 0:
util.log_info("Extracting %s ..." % archive)
return _extract_archive(archive, verbosity=verbosity, interactive=interactive, outdir=outdir, program=program) | [
"def",
"extract_archive",
"(",
"archive",
",",
"verbosity",
"=",
"0",
",",
"outdir",
"=",
"None",
",",
"program",
"=",
"None",
",",
"interactive",
"=",
"True",
")",
":",
"util",
".",
"check_existing_filename",
"(",
"archive",
")",
"if",
"verbosity",
">=",
"0",
":",
"util",
".",
"log_info",
"(",
"\"Extracting %s ...\"",
"%",
"archive",
")",
"return",
"_extract_archive",
"(",
"archive",
",",
"verbosity",
"=",
"verbosity",
",",
"interactive",
"=",
"interactive",
",",
"outdir",
"=",
"outdir",
",",
"program",
"=",
"program",
")"
] | 58 | 25.333333 |
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = Response()
response.headers['Allow'] = ', '.join(self.allowed_methods)
response.headers['Content-Length'] = '0'
return response | [
"def",
"options",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"Response",
"(",
")",
"response",
".",
"headers",
"[",
"'Allow'",
"]",
"=",
"', '",
".",
"join",
"(",
"self",
".",
"allowed_methods",
")",
"response",
".",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"'0'",
"return",
"response"
] | 27.090909 | 19.272727 |
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result | [
"def",
"get_slot_value",
"(",
"payload",
",",
"slot_name",
")",
":",
"if",
"not",
"'slots'",
"in",
"payload",
":",
"return",
"[",
"]",
"slots",
"=",
"[",
"]",
"for",
"candidate",
"in",
"payload",
"[",
"'slots'",
"]",
":",
"if",
"'slotName'",
"in",
"candidate",
"and",
"candidate",
"[",
"'slotName'",
"]",
"==",
"slot_name",
":",
"slots",
".",
"append",
"(",
"candidate",
")",
"result",
"=",
"[",
"]",
"for",
"slot",
"in",
"slots",
":",
"kind",
"=",
"IntentParser",
".",
"get_dict_value",
"(",
"slot",
",",
"[",
"'value'",
",",
"'kind'",
"]",
")",
"if",
"kind",
"==",
"\"InstantTime\"",
":",
"result",
".",
"append",
"(",
"IntentParser",
".",
"parse_instant_time",
"(",
"slot",
")",
")",
"elif",
"kind",
"==",
"\"TimeInterval\"",
":",
"result",
".",
"append",
"(",
"IntentParser",
".",
"parse_time_interval",
"(",
"slot",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"IntentParser",
".",
"get_dict_value",
"(",
"slot",
",",
"[",
"'value'",
",",
"'value'",
",",
"'value'",
"]",
")",
"or",
"IntentParser",
".",
"get_dict_value",
"(",
"slot",
",",
"[",
"'value'",
",",
"'value'",
"]",
")",
")",
"return",
"result"
] | 33.4 | 23.253333 |
def Toeplitz(x, d=None, D=None, kind='F'):
""" Creates multilevel Toeplitz TT-matrix with ``D`` levels.
Possible _matrix types:
* 'F' - full Toeplitz _matrix, size(x) = 2^{d+1}
* 'C' - circulant _matrix, size(x) = 2^d
* 'L' - lower triangular Toeplitz _matrix, size(x) = 2^d
* 'U' - upper triangular Toeplitz _matrix, size(x) = 2^d
Sample calls:
>>> # one-level Toeplitz _matrix:
>>> T = tt.Toeplitz(x)
>>> # one-level circulant _matrix:
>>> T = tt.Toeplitz(x, kind='C')
>>> # three-level upper-triangular Toeplitz _matrix:
>>> T = tt.Toeplitz(x, D=3, kind='U')
>>> # two-level mixed-type Toeplitz _matrix:
>>> T = tt.Toeplitz(x, kind=['L', 'U'])
>>> # two-level mixed-size Toeplitz _matrix:
>>> T = tt.Toeplitz(x, [3, 4], kind='C')
"""
# checking for arguments consistency
def check_kinds(D, kind):
if D % len(kind) == 0:
kind.extend(kind * (D // len(kind) - 1))
if len(kind) != D:
raise ValueError(
"Must give proper amount of _matrix kinds (one or D, for example)")
kind = list(kind)
if not set(kind).issubset(['F', 'C', 'L', 'U']):
raise ValueError("Toeplitz _matrix kind must be one of F, C, L, U.")
if d is None:
if D is None:
D = len(kind)
if x.d % D:
raise ValueError(
"x.d must be divisible by D when d is not specified!")
if len(kind) == 1:
d = _np.array([x.d // D - (1 if kind[0] == 'F' else 0)]
* D, dtype=_np.int32)
kind = kind * D
else:
check_kinds(D, kind)
if set(kind).issubset(['F']):
d = _np.array([x.d // D - 1] * D, dtype=_np.int32)
elif set(kind).issubset(['C', 'L', 'U']):
d = _np.array([x.d // D] * D, dtype=_np.int32)
else:
raise ValueError(
"Only similar _matrix kinds (only F or only C, L and U) are accepted when d is not specified!")
elif d is not None:
d = _np.asarray(d, dtype=_np.int32).flatten()
if D is None:
D = d.size
elif d.size == 1:
d = _np.array([d[0]] * D, dtype=_np.int32)
if D != d.size:
raise ValueError("D must be equal to len(d)")
check_kinds(D, kind)
if _np.sum(d) + _np.sum([(1 if knd == 'F' else 0)
for knd in kind]) != x.d:
raise ValueError(
"Dimensions inconsistency: x.d != d_1 + d_2 + ... + d_D")
# predefined matrices and tensors:
I = [[1, 0], [0, 1]]
J = [[0, 1], [0, 0]]
JT = [[0, 0], [1, 0]]
H = [[0, 1], [1, 0]]
S = _np.array([[[0], [1]], [[1], [0]]]).transpose() # 2 x 2 x 1
P = _np.zeros((2, 2, 2, 2))
P[:, :, 0, 0] = I
P[:, :, 1, 0] = H
P[:, :, 0, 1] = H
P[:, :, 1, 1] = I
P = _np.transpose(P) # 2 x 2! x 2 x 2 x '1'
Q = _np.zeros((2, 2, 2, 2))
Q[:, :, 0, 0] = I
Q[:, :, 1, 0] = JT
Q[:, :, 0, 1] = JT
Q = _np.transpose(Q) # 2 x 2! x 2 x 2 x '1'
R = _np.zeros((2, 2, 2, 2))
R[:, :, 1, 0] = J
R[:, :, 0, 1] = J
R[:, :, 1, 1] = I
R = _np.transpose(R) # 2 x 2! x 2 x 2 x '1'
W = _np.zeros([2] * 5) # 2 x 2! x 2 x 2 x 2
W[0, :, :, 0, 0] = W[1, :, :, 1, 1] = I
W[0, :, :, 1, 0] = W[0, :, :, 0, 1] = JT
W[1, :, :, 1, 0] = W[1, :, :, 0, 1] = J
W = _np.transpose(W) # 2 x 2! x 2 x 2 x 2
V = _np.zeros((2, 2, 2, 2))
V[0, :, :, 0] = I
V[0, :, :, 1] = JT
V[1, :, :, 1] = J
V = _np.transpose(V) # '1' x 2! x 2 x 2 x 2
crs = []
xcrs = _vector.vector.to_list(x)
dp = 0 # dimensions passed
for j in xrange(D):
currd = d[j]
xcr = xcrs[dp]
cr = _np.tensordot(V, xcr, (0, 1))
cr = cr.transpose(3, 0, 1, 2, 4) # <r_dp| x 2 x 2 x |2> x |r_{dp+1}>
cr = cr.reshape((x.r[dp], 2, 2, 2 * x.r[dp + 1]),
order='F') # <r_dp| x 2 x 2 x |2r_{dp+1}>
dp += 1
crs.append(cr)
for i in xrange(1, currd - 1):
xcr = xcrs[dp]
# (<2| x 2 x 2 x |2>) x <r_dp| x |r_{dp+1}>
cr = _np.tensordot(W, xcr, (1, 1))
# <2| x <r_dp| x 2 x 2 x |2> x |r_{dp+1}>
cr = cr.transpose([0, 4, 1, 2, 3, 5])
# <2r_dp| x 2 x 2 x |2r_{dp+1}>
cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F')
dp += 1
crs.append(cr)
if kind[j] == 'F':
xcr = xcrs[dp] # r_dp x 2 x r_{dp+1}
cr = _np.tensordot(W, xcr, (1, 1)).transpose([0, 4, 1, 2, 3, 5])
# <2r_dp| x 2 x 2 x |2r_{dp+1}>
cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F')
dp += 1
xcr = xcrs[dp] # r_dp x 2 x r_{dp+1}
# <2| x |1> x <r_dp| x |r_{dp+1}>
tmp = _np.tensordot(S, xcr, (1, 1))
# tmp = tmp.transpose([0, 2, 1, 3]) # TODO: figure out WHY THE HELL
# this spoils everything
# <2r_dp| x |r_{dp+1}>
tmp = tmp.reshape((2 * x.r[dp], x.r[dp + 1]), order='F')
# <2r_{dp-1}| x 2 x 2 x |r_{dp+1}>
cr = _np.tensordot(cr, tmp, (3, 0))
dp += 1
crs.append(cr)
else:
dotcore = None
if kind[j] == 'C':
dotcore = P
elif kind[j] == 'L':
dotcore = Q
elif kind[j] == 'U':
dotcore = R
xcr = xcrs[dp] # r_dp x 2 x r_{dp+1}
# <2| x 2 x 2 x |'1'> x <r_dp| x |r_{dp+1}>
cr = _np.tensordot(dotcore, xcr, (1, 1))
# <2| x <r_dp| x 2 x 2 x |r_{dp+1}>
cr = cr.transpose([0, 3, 1, 2, 4])
cr = cr.reshape((2 * x.r[dp], 2, 2, x.r[dp + 1]), order='F')
dp += 1
crs.append(cr)
return _matrix.matrix.from_list(crs) | [
"def",
"Toeplitz",
"(",
"x",
",",
"d",
"=",
"None",
",",
"D",
"=",
"None",
",",
"kind",
"=",
"'F'",
")",
":",
"# checking for arguments consistency",
"def",
"check_kinds",
"(",
"D",
",",
"kind",
")",
":",
"if",
"D",
"%",
"len",
"(",
"kind",
")",
"==",
"0",
":",
"kind",
".",
"extend",
"(",
"kind",
"*",
"(",
"D",
"//",
"len",
"(",
"kind",
")",
"-",
"1",
")",
")",
"if",
"len",
"(",
"kind",
")",
"!=",
"D",
":",
"raise",
"ValueError",
"(",
"\"Must give proper amount of _matrix kinds (one or D, for example)\"",
")",
"kind",
"=",
"list",
"(",
"kind",
")",
"if",
"not",
"set",
"(",
"kind",
")",
".",
"issubset",
"(",
"[",
"'F'",
",",
"'C'",
",",
"'L'",
",",
"'U'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Toeplitz _matrix kind must be one of F, C, L, U.\"",
")",
"if",
"d",
"is",
"None",
":",
"if",
"D",
"is",
"None",
":",
"D",
"=",
"len",
"(",
"kind",
")",
"if",
"x",
".",
"d",
"%",
"D",
":",
"raise",
"ValueError",
"(",
"\"x.d must be divisible by D when d is not specified!\"",
")",
"if",
"len",
"(",
"kind",
")",
"==",
"1",
":",
"d",
"=",
"_np",
".",
"array",
"(",
"[",
"x",
".",
"d",
"//",
"D",
"-",
"(",
"1",
"if",
"kind",
"[",
"0",
"]",
"==",
"'F'",
"else",
"0",
")",
"]",
"*",
"D",
",",
"dtype",
"=",
"_np",
".",
"int32",
")",
"kind",
"=",
"kind",
"*",
"D",
"else",
":",
"check_kinds",
"(",
"D",
",",
"kind",
")",
"if",
"set",
"(",
"kind",
")",
".",
"issubset",
"(",
"[",
"'F'",
"]",
")",
":",
"d",
"=",
"_np",
".",
"array",
"(",
"[",
"x",
".",
"d",
"//",
"D",
"-",
"1",
"]",
"*",
"D",
",",
"dtype",
"=",
"_np",
".",
"int32",
")",
"elif",
"set",
"(",
"kind",
")",
".",
"issubset",
"(",
"[",
"'C'",
",",
"'L'",
",",
"'U'",
"]",
")",
":",
"d",
"=",
"_np",
".",
"array",
"(",
"[",
"x",
".",
"d",
"//",
"D",
"]",
"*",
"D",
",",
"dtype",
"=",
"_np",
".",
"int32",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Only similar _matrix kinds (only F or only C, L and U) are accepted when d is not specified!\"",
")",
"elif",
"d",
"is",
"not",
"None",
":",
"d",
"=",
"_np",
".",
"asarray",
"(",
"d",
",",
"dtype",
"=",
"_np",
".",
"int32",
")",
".",
"flatten",
"(",
")",
"if",
"D",
"is",
"None",
":",
"D",
"=",
"d",
".",
"size",
"elif",
"d",
".",
"size",
"==",
"1",
":",
"d",
"=",
"_np",
".",
"array",
"(",
"[",
"d",
"[",
"0",
"]",
"]",
"*",
"D",
",",
"dtype",
"=",
"_np",
".",
"int32",
")",
"if",
"D",
"!=",
"d",
".",
"size",
":",
"raise",
"ValueError",
"(",
"\"D must be equal to len(d)\"",
")",
"check_kinds",
"(",
"D",
",",
"kind",
")",
"if",
"_np",
".",
"sum",
"(",
"d",
")",
"+",
"_np",
".",
"sum",
"(",
"[",
"(",
"1",
"if",
"knd",
"==",
"'F'",
"else",
"0",
")",
"for",
"knd",
"in",
"kind",
"]",
")",
"!=",
"x",
".",
"d",
":",
"raise",
"ValueError",
"(",
"\"Dimensions inconsistency: x.d != d_1 + d_2 + ... + d_D\"",
")",
"# predefined matrices and tensors:",
"I",
"=",
"[",
"[",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
"]",
"]",
"J",
"=",
"[",
"[",
"0",
",",
"1",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"JT",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"1",
",",
"0",
"]",
"]",
"H",
"=",
"[",
"[",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"0",
"]",
"]",
"S",
"=",
"_np",
".",
"array",
"(",
"[",
"[",
"[",
"0",
"]",
",",
"[",
"1",
"]",
"]",
",",
"[",
"[",
"1",
"]",
",",
"[",
"0",
"]",
"]",
"]",
")",
".",
"transpose",
"(",
")",
"# 2 x 2 x 1",
"P",
"=",
"_np",
".",
"zeros",
"(",
"(",
"2",
",",
"2",
",",
"2",
",",
"2",
")",
")",
"P",
"[",
":",
",",
":",
",",
"0",
",",
"0",
"]",
"=",
"I",
"P",
"[",
":",
",",
":",
",",
"1",
",",
"0",
"]",
"=",
"H",
"P",
"[",
":",
",",
":",
",",
"0",
",",
"1",
"]",
"=",
"H",
"P",
"[",
":",
",",
":",
",",
"1",
",",
"1",
"]",
"=",
"I",
"P",
"=",
"_np",
".",
"transpose",
"(",
"P",
")",
"# 2 x 2! x 2 x 2 x '1'",
"Q",
"=",
"_np",
".",
"zeros",
"(",
"(",
"2",
",",
"2",
",",
"2",
",",
"2",
")",
")",
"Q",
"[",
":",
",",
":",
",",
"0",
",",
"0",
"]",
"=",
"I",
"Q",
"[",
":",
",",
":",
",",
"1",
",",
"0",
"]",
"=",
"JT",
"Q",
"[",
":",
",",
":",
",",
"0",
",",
"1",
"]",
"=",
"JT",
"Q",
"=",
"_np",
".",
"transpose",
"(",
"Q",
")",
"# 2 x 2! x 2 x 2 x '1'",
"R",
"=",
"_np",
".",
"zeros",
"(",
"(",
"2",
",",
"2",
",",
"2",
",",
"2",
")",
")",
"R",
"[",
":",
",",
":",
",",
"1",
",",
"0",
"]",
"=",
"J",
"R",
"[",
":",
",",
":",
",",
"0",
",",
"1",
"]",
"=",
"J",
"R",
"[",
":",
",",
":",
",",
"1",
",",
"1",
"]",
"=",
"I",
"R",
"=",
"_np",
".",
"transpose",
"(",
"R",
")",
"# 2 x 2! x 2 x 2 x '1'",
"W",
"=",
"_np",
".",
"zeros",
"(",
"[",
"2",
"]",
"*",
"5",
")",
"# 2 x 2! x 2 x 2 x 2",
"W",
"[",
"0",
",",
":",
",",
":",
",",
"0",
",",
"0",
"]",
"=",
"W",
"[",
"1",
",",
":",
",",
":",
",",
"1",
",",
"1",
"]",
"=",
"I",
"W",
"[",
"0",
",",
":",
",",
":",
",",
"1",
",",
"0",
"]",
"=",
"W",
"[",
"0",
",",
":",
",",
":",
",",
"0",
",",
"1",
"]",
"=",
"JT",
"W",
"[",
"1",
",",
":",
",",
":",
",",
"1",
",",
"0",
"]",
"=",
"W",
"[",
"1",
",",
":",
",",
":",
",",
"0",
",",
"1",
"]",
"=",
"J",
"W",
"=",
"_np",
".",
"transpose",
"(",
"W",
")",
"# 2 x 2! x 2 x 2 x 2",
"V",
"=",
"_np",
".",
"zeros",
"(",
"(",
"2",
",",
"2",
",",
"2",
",",
"2",
")",
")",
"V",
"[",
"0",
",",
":",
",",
":",
",",
"0",
"]",
"=",
"I",
"V",
"[",
"0",
",",
":",
",",
":",
",",
"1",
"]",
"=",
"JT",
"V",
"[",
"1",
",",
":",
",",
":",
",",
"1",
"]",
"=",
"J",
"V",
"=",
"_np",
".",
"transpose",
"(",
"V",
")",
"# '1' x 2! x 2 x 2 x 2",
"crs",
"=",
"[",
"]",
"xcrs",
"=",
"_vector",
".",
"vector",
".",
"to_list",
"(",
"x",
")",
"dp",
"=",
"0",
"# dimensions passed",
"for",
"j",
"in",
"xrange",
"(",
"D",
")",
":",
"currd",
"=",
"d",
"[",
"j",
"]",
"xcr",
"=",
"xcrs",
"[",
"dp",
"]",
"cr",
"=",
"_np",
".",
"tensordot",
"(",
"V",
",",
"xcr",
",",
"(",
"0",
",",
"1",
")",
")",
"cr",
"=",
"cr",
".",
"transpose",
"(",
"3",
",",
"0",
",",
"1",
",",
"2",
",",
"4",
")",
"# <r_dp| x 2 x 2 x |2> x |r_{dp+1}>",
"cr",
"=",
"cr",
".",
"reshape",
"(",
"(",
"x",
".",
"r",
"[",
"dp",
"]",
",",
"2",
",",
"2",
",",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"+",
"1",
"]",
")",
",",
"order",
"=",
"'F'",
")",
"# <r_dp| x 2 x 2 x |2r_{dp+1}>",
"dp",
"+=",
"1",
"crs",
".",
"append",
"(",
"cr",
")",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"currd",
"-",
"1",
")",
":",
"xcr",
"=",
"xcrs",
"[",
"dp",
"]",
"# (<2| x 2 x 2 x |2>) x <r_dp| x |r_{dp+1}>",
"cr",
"=",
"_np",
".",
"tensordot",
"(",
"W",
",",
"xcr",
",",
"(",
"1",
",",
"1",
")",
")",
"# <2| x <r_dp| x 2 x 2 x |2> x |r_{dp+1}>",
"cr",
"=",
"cr",
".",
"transpose",
"(",
"[",
"0",
",",
"4",
",",
"1",
",",
"2",
",",
"3",
",",
"5",
"]",
")",
"# <2r_dp| x 2 x 2 x |2r_{dp+1}>",
"cr",
"=",
"cr",
".",
"reshape",
"(",
"(",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"]",
",",
"2",
",",
"2",
",",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"+",
"1",
"]",
")",
",",
"order",
"=",
"'F'",
")",
"dp",
"+=",
"1",
"crs",
".",
"append",
"(",
"cr",
")",
"if",
"kind",
"[",
"j",
"]",
"==",
"'F'",
":",
"xcr",
"=",
"xcrs",
"[",
"dp",
"]",
"# r_dp x 2 x r_{dp+1}",
"cr",
"=",
"_np",
".",
"tensordot",
"(",
"W",
",",
"xcr",
",",
"(",
"1",
",",
"1",
")",
")",
".",
"transpose",
"(",
"[",
"0",
",",
"4",
",",
"1",
",",
"2",
",",
"3",
",",
"5",
"]",
")",
"# <2r_dp| x 2 x 2 x |2r_{dp+1}>",
"cr",
"=",
"cr",
".",
"reshape",
"(",
"(",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"]",
",",
"2",
",",
"2",
",",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"+",
"1",
"]",
")",
",",
"order",
"=",
"'F'",
")",
"dp",
"+=",
"1",
"xcr",
"=",
"xcrs",
"[",
"dp",
"]",
"# r_dp x 2 x r_{dp+1}",
"# <2| x |1> x <r_dp| x |r_{dp+1}>",
"tmp",
"=",
"_np",
".",
"tensordot",
"(",
"S",
",",
"xcr",
",",
"(",
"1",
",",
"1",
")",
")",
"# tmp = tmp.transpose([0, 2, 1, 3]) # TODO: figure out WHY THE HELL",
"# this spoils everything",
"# <2r_dp| x |r_{dp+1}>",
"tmp",
"=",
"tmp",
".",
"reshape",
"(",
"(",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"]",
",",
"x",
".",
"r",
"[",
"dp",
"+",
"1",
"]",
")",
",",
"order",
"=",
"'F'",
")",
"# <2r_{dp-1}| x 2 x 2 x |r_{dp+1}>",
"cr",
"=",
"_np",
".",
"tensordot",
"(",
"cr",
",",
"tmp",
",",
"(",
"3",
",",
"0",
")",
")",
"dp",
"+=",
"1",
"crs",
".",
"append",
"(",
"cr",
")",
"else",
":",
"dotcore",
"=",
"None",
"if",
"kind",
"[",
"j",
"]",
"==",
"'C'",
":",
"dotcore",
"=",
"P",
"elif",
"kind",
"[",
"j",
"]",
"==",
"'L'",
":",
"dotcore",
"=",
"Q",
"elif",
"kind",
"[",
"j",
"]",
"==",
"'U'",
":",
"dotcore",
"=",
"R",
"xcr",
"=",
"xcrs",
"[",
"dp",
"]",
"# r_dp x 2 x r_{dp+1}",
"# <2| x 2 x 2 x |'1'> x <r_dp| x |r_{dp+1}>",
"cr",
"=",
"_np",
".",
"tensordot",
"(",
"dotcore",
",",
"xcr",
",",
"(",
"1",
",",
"1",
")",
")",
"# <2| x <r_dp| x 2 x 2 x |r_{dp+1}>",
"cr",
"=",
"cr",
".",
"transpose",
"(",
"[",
"0",
",",
"3",
",",
"1",
",",
"2",
",",
"4",
"]",
")",
"cr",
"=",
"cr",
".",
"reshape",
"(",
"(",
"2",
"*",
"x",
".",
"r",
"[",
"dp",
"]",
",",
"2",
",",
"2",
",",
"x",
".",
"r",
"[",
"dp",
"+",
"1",
"]",
")",
",",
"order",
"=",
"'F'",
")",
"dp",
"+=",
"1",
"crs",
".",
"append",
"(",
"cr",
")",
"return",
"_matrix",
".",
"matrix",
".",
"from_list",
"(",
"crs",
")"
] | 37.348101 | 16.506329 |
def transform(grammar, text):
"""Transform text by replacing matches to grammar."""
results = []
intervals = []
for result, start, stop in all_matches(grammar, text):
if result is not ignore_transform:
internal_assert(isinstance(result, str), "got non-string transform result", result)
if start == 0 and stop == len(text):
return result
results.append(result)
intervals.append((start, stop))
if not results:
return None
split_indices = [0]
split_indices.extend(start for start, _ in intervals)
split_indices.extend(stop for _, stop in intervals)
split_indices.sort()
split_indices.append(None)
out = []
for i in range(len(split_indices) - 1):
if i % 2 == 0:
start, stop = split_indices[i], split_indices[i + 1]
out.append(text[start:stop])
else:
out.append(results[i // 2])
if i // 2 < len(results) - 1:
raise CoconutInternalException("unused transform results", results[i // 2 + 1:])
if stop is not None:
raise CoconutInternalException("failed to properly split text to be transformed")
return "".join(out) | [
"def",
"transform",
"(",
"grammar",
",",
"text",
")",
":",
"results",
"=",
"[",
"]",
"intervals",
"=",
"[",
"]",
"for",
"result",
",",
"start",
",",
"stop",
"in",
"all_matches",
"(",
"grammar",
",",
"text",
")",
":",
"if",
"result",
"is",
"not",
"ignore_transform",
":",
"internal_assert",
"(",
"isinstance",
"(",
"result",
",",
"str",
")",
",",
"\"got non-string transform result\"",
",",
"result",
")",
"if",
"start",
"==",
"0",
"and",
"stop",
"==",
"len",
"(",
"text",
")",
":",
"return",
"result",
"results",
".",
"append",
"(",
"result",
")",
"intervals",
".",
"append",
"(",
"(",
"start",
",",
"stop",
")",
")",
"if",
"not",
"results",
":",
"return",
"None",
"split_indices",
"=",
"[",
"0",
"]",
"split_indices",
".",
"extend",
"(",
"start",
"for",
"start",
",",
"_",
"in",
"intervals",
")",
"split_indices",
".",
"extend",
"(",
"stop",
"for",
"_",
",",
"stop",
"in",
"intervals",
")",
"split_indices",
".",
"sort",
"(",
")",
"split_indices",
".",
"append",
"(",
"None",
")",
"out",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"split_indices",
")",
"-",
"1",
")",
":",
"if",
"i",
"%",
"2",
"==",
"0",
":",
"start",
",",
"stop",
"=",
"split_indices",
"[",
"i",
"]",
",",
"split_indices",
"[",
"i",
"+",
"1",
"]",
"out",
".",
"append",
"(",
"text",
"[",
"start",
":",
"stop",
"]",
")",
"else",
":",
"out",
".",
"append",
"(",
"results",
"[",
"i",
"//",
"2",
"]",
")",
"if",
"i",
"//",
"2",
"<",
"len",
"(",
"results",
")",
"-",
"1",
":",
"raise",
"CoconutInternalException",
"(",
"\"unused transform results\"",
",",
"results",
"[",
"i",
"//",
"2",
"+",
"1",
":",
"]",
")",
"if",
"stop",
"is",
"not",
"None",
":",
"raise",
"CoconutInternalException",
"(",
"\"failed to properly split text to be transformed\"",
")",
"return",
"\"\"",
".",
"join",
"(",
"out",
")"
] | 35.939394 | 19.242424 |
def format_vars(args):
"""Format the given vars in the form: 'flag=value'"""
variables = []
for key, value in args.items():
if value:
variables += ['{0}={1}'.format(key, value)]
return variables | [
"def",
"format_vars",
"(",
"args",
")",
":",
"variables",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"args",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"variables",
"+=",
"[",
"'{0}={1}'",
".",
"format",
"(",
"key",
",",
"value",
")",
"]",
"return",
"variables"
] | 32 | 14.714286 |
def from_message(cls, message):
"""Creates an instance from CSP report message.
If the message is not valid, the result will still have as much fields set as possible.
@param message: JSON encoded CSP report.
@type message: text
"""
self = cls(json=message)
try:
decoded_data = json.loads(message)
except ValueError:
# Message is not a valid JSON. Return as invalid.
return self
try:
report_data = decoded_data['csp-report']
except KeyError:
# Message is not a valid CSP report. Return as invalid.
return self
# Extract individual fields
for report_name, field_name in REQUIRED_FIELD_MAP + OPTIONAL_FIELD_MAP:
setattr(self, field_name, report_data.get(report_name))
# Extract integer fields
for report_name, field_name in INTEGER_FIELD_MAP:
value = report_data.get(report_name)
field = self._meta.get_field(field_name)
min_value, max_value = connection.ops.integer_field_range(field.get_internal_type())
if min_value is None:
min_value = 0
# All these fields are possitive. Value can't be negative.
min_value = max(min_value, 0)
if value is not None and min_value <= value and (max_value is None or value <= max_value):
setattr(self, field_name, value)
# Extract disposition
disposition = report_data.get('disposition')
if disposition in dict(DISPOSITIONS).keys():
self.disposition = disposition
# Check if report is valid
is_valid = True
for field_name in dict(REQUIRED_FIELD_MAP).values():
if getattr(self, field_name) is None:
is_valid = False
break
self.is_valid = is_valid
return self | [
"def",
"from_message",
"(",
"cls",
",",
"message",
")",
":",
"self",
"=",
"cls",
"(",
"json",
"=",
"message",
")",
"try",
":",
"decoded_data",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"except",
"ValueError",
":",
"# Message is not a valid JSON. Return as invalid.",
"return",
"self",
"try",
":",
"report_data",
"=",
"decoded_data",
"[",
"'csp-report'",
"]",
"except",
"KeyError",
":",
"# Message is not a valid CSP report. Return as invalid.",
"return",
"self",
"# Extract individual fields",
"for",
"report_name",
",",
"field_name",
"in",
"REQUIRED_FIELD_MAP",
"+",
"OPTIONAL_FIELD_MAP",
":",
"setattr",
"(",
"self",
",",
"field_name",
",",
"report_data",
".",
"get",
"(",
"report_name",
")",
")",
"# Extract integer fields",
"for",
"report_name",
",",
"field_name",
"in",
"INTEGER_FIELD_MAP",
":",
"value",
"=",
"report_data",
".",
"get",
"(",
"report_name",
")",
"field",
"=",
"self",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"min_value",
",",
"max_value",
"=",
"connection",
".",
"ops",
".",
"integer_field_range",
"(",
"field",
".",
"get_internal_type",
"(",
")",
")",
"if",
"min_value",
"is",
"None",
":",
"min_value",
"=",
"0",
"# All these fields are possitive. Value can't be negative.",
"min_value",
"=",
"max",
"(",
"min_value",
",",
"0",
")",
"if",
"value",
"is",
"not",
"None",
"and",
"min_value",
"<=",
"value",
"and",
"(",
"max_value",
"is",
"None",
"or",
"value",
"<=",
"max_value",
")",
":",
"setattr",
"(",
"self",
",",
"field_name",
",",
"value",
")",
"# Extract disposition",
"disposition",
"=",
"report_data",
".",
"get",
"(",
"'disposition'",
")",
"if",
"disposition",
"in",
"dict",
"(",
"DISPOSITIONS",
")",
".",
"keys",
"(",
")",
":",
"self",
".",
"disposition",
"=",
"disposition",
"# Check if report is valid",
"is_valid",
"=",
"True",
"for",
"field_name",
"in",
"dict",
"(",
"REQUIRED_FIELD_MAP",
")",
".",
"values",
"(",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"field_name",
")",
"is",
"None",
":",
"is_valid",
"=",
"False",
"break",
"self",
".",
"is_valid",
"=",
"is_valid",
"return",
"self"
] | 39.145833 | 19.0625 |
def _init_data_map(self):
""" OVERRIDDEN: Initialize required ISO-19115 data map with XPATHS and specialized functions """
if self._data_map is not None:
return # Initiation happens once
# Parse and validate the ISO metadata root
if self._xml_tree is None:
iso_root = ISO_ROOTS[0]
else:
iso_root = get_element_name(self._xml_tree)
if iso_root not in ISO_ROOTS:
raise InvalidContent('Invalid XML root for ISO-19115 standard: {root}', root=iso_root)
iso_data_map = {'_root': iso_root}
iso_data_map.update(_iso_tag_roots)
iso_data_map.update(_iso_tag_formats)
iso_data_structures = {}
# Capture and format complex XPATHs
ad_format = iso_data_map[ATTRIBUTES]
ft_source = iso_data_map['_attr_src'].replace('/carrierOfCharacteristics/FC_FeatureAttribute', '')
iso_data_structures[ATTRIBUTES] = format_xpaths(
_iso_definitions[ATTRIBUTES],
label=ad_format.format(ad_path='memberName/LocalName'),
aliases=ad_format.format(ad_path='aliases/LocalName'), # Not in spec
definition=ad_format.format(ad_path='definition/CharacterString'),
# First try to populate attribute definition source from FC_FeatureAttribute
definition_src=iso_data_map['_attr_src'] + '/organisationName/CharacterString',
_definition_src=iso_data_map['_attr_src'] + '/individualName/CharacterString',
# Then assume feature type source is the same as attribute: populate from FC_FeatureType
__definition_src=ft_source + '/organisationName/CharacterString',
___definition_src=ft_source + '/individualName/CharacterString'
)
bb_format = iso_data_map[BOUNDING_BOX]
iso_data_structures[BOUNDING_BOX] = format_xpaths(
_iso_definitions[BOUNDING_BOX],
east=bb_format.format(bbox_path='eastBoundLongitude/Decimal'),
south=bb_format.format(bbox_path='southBoundLatitude/Decimal'),
west=bb_format.format(bbox_path='westBoundLongitude/Decimal'),
north=bb_format.format(bbox_path='northBoundLatitude/Decimal')
)
ct_format = iso_data_map[CONTACTS]
iso_data_structures[CONTACTS] = format_xpaths(
_iso_definitions[CONTACTS],
name=ct_format.format(ct_path='individualName/CharacterString'),
organization=ct_format.format(ct_path='organisationName/CharacterString'),
position=ct_format.format(ct_path='positionName/CharacterString'),
email=ct_format.format(
ct_path='contactInfo/CI_Contact/address/CI_Address/electronicMailAddress/CharacterString'
)
)
dt_format = iso_data_map[DATES]
iso_data_structures[DATES] = {
DATE_TYPE_MULTIPLE: dt_format.format(type_path='TimeInstant/timePosition'),
DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path='TimePeriod/begin/TimeInstant/timePosition'),
DATE_TYPE_RANGE_END: dt_format.format(type_path='TimePeriod/end/TimeInstant/timePosition'),
DATE_TYPE_SINGLE: dt_format.format(type_path='TimeInstant/timePosition') # Same as multiple
}
iso_data_structures[DATES][DATE_TYPE_RANGE] = [
iso_data_structures[DATES][DATE_TYPE_RANGE_BEGIN],
iso_data_structures[DATES][DATE_TYPE_RANGE_END]
]
df_format = iso_data_map[DIGITAL_FORMS]
iso_data_structures[DIGITAL_FORMS] = format_xpaths(
_iso_definitions[DIGITAL_FORMS],
name=df_format.format(df_path='name/CharacterString'),
content='', # Not supported in ISO-19115 (appending to spec)
decompression=df_format.format(df_path='fileDecompressionTechnique/CharacterString'),
version=df_format.format(df_path='version/CharacterString'),
specification=df_format.format(df_path='specification/CharacterString'),
access_desc=iso_data_map['_access_desc'],
access_instrs=iso_data_map['_access_instrs'],
network_resource=iso_data_map['_network_resource']
)
keywords_structure = {
'keyword_root': 'MD_Keywords/keyword',
'keyword_type': 'MD_Keywords/type/MD_KeywordTypeCode',
'keyword': 'MD_Keywords/keyword/CharacterString'
}
for keyword_prop in KEYWORD_PROPS:
iso_data_structures[keyword_prop] = deepcopy(keywords_structure)
lw_format = iso_data_map[LARGER_WORKS]
iso_data_structures[LARGER_WORKS] = format_xpaths(
_iso_definitions[LARGER_WORKS],
title=lw_format.format(lw_path='title/CharacterString'),
edition=lw_format.format(lw_path='edition/CharacterString'),
origin=iso_data_map['_lw_citation'].format(lw_path='individualName/CharacterString'),
online_linkage=iso_data_map['_lw_linkage'].format(lw_path='linkage/URL'),
other_citation=lw_format.format(lw_path='otherCitationDetails/CharacterString'),
date=lw_format.format(lw_path='editionDate/Date'),
place=iso_data_map['_lw_contact'].format(lw_path='address/CI_Address/city/CharacterString'),
info=iso_data_map['_lw_citation'].format(lw_path='organisationName/CharacterString')
)
ps_format = iso_data_map[PROCESS_STEPS]
iso_data_structures[PROCESS_STEPS] = format_xpaths(
_iso_definitions[PROCESS_STEPS],
description=ps_format.format(ps_path='description/CharacterString'),
date=ps_format.format(ps_path='dateTime/DateTime'),
sources=ps_format.format(
ps_path='source/LI_Source/sourceCitation/CI_Citation/alternateTitle/CharacterString'
)
)
ri_format = iso_data_map[RASTER_INFO]
iso_data_structures[RASTER_INFO] = format_xpaths(
_iso_definitions[RASTER_DIMS],
type=ri_format.format(ri_path='dimensionName/MD_DimensionNameTypeCode'),
_type=ri_format.format(ri_path='dimensionName/MD_DimensionNameTypeCode/@codeListValue'),
size=ri_format.format(ri_path='dimensionSize/Integer'),
value=ri_format.format(ri_path='resolution/Measure'),
units=ri_format.format(ri_path='resolution/Measure/@uom')
)
# Assign XPATHS and gis_metadata.utils.ParserProperties to data map
for prop, xpath in iteritems(dict(iso_data_map)):
if prop == ATTRIBUTES:
iso_data_map[prop] = ParserProperty(self._parse_attribute_details, self._update_attribute_details)
elif prop in (CONTACTS, PROCESS_STEPS):
iso_data_map[prop] = ParserProperty(self._parse_complex_list, self._update_complex_list)
elif prop in (BOUNDING_BOX, LARGER_WORKS):
iso_data_map[prop] = ParserProperty(self._parse_complex, self._update_complex)
elif prop == DATES:
iso_data_map[prop] = ParserProperty(self._parse_dates, self._update_dates)
elif prop == DIGITAL_FORMS:
iso_data_map[prop] = ParserProperty(self._parse_digital_forms, self._update_digital_forms)
elif prop in KEYWORD_PROPS:
iso_data_map[prop] = ParserProperty(self._parse_keywords, self._update_keywords)
elif prop == RASTER_INFO:
iso_data_map[prop] = ParserProperty(self._parse_raster_info, self._update_raster_info)
else:
iso_data_map[prop] = xpath
self._data_map = iso_data_map
self._data_structures = iso_data_structures | [
"def",
"_init_data_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data_map",
"is",
"not",
"None",
":",
"return",
"# Initiation happens once",
"# Parse and validate the ISO metadata root",
"if",
"self",
".",
"_xml_tree",
"is",
"None",
":",
"iso_root",
"=",
"ISO_ROOTS",
"[",
"0",
"]",
"else",
":",
"iso_root",
"=",
"get_element_name",
"(",
"self",
".",
"_xml_tree",
")",
"if",
"iso_root",
"not",
"in",
"ISO_ROOTS",
":",
"raise",
"InvalidContent",
"(",
"'Invalid XML root for ISO-19115 standard: {root}'",
",",
"root",
"=",
"iso_root",
")",
"iso_data_map",
"=",
"{",
"'_root'",
":",
"iso_root",
"}",
"iso_data_map",
".",
"update",
"(",
"_iso_tag_roots",
")",
"iso_data_map",
".",
"update",
"(",
"_iso_tag_formats",
")",
"iso_data_structures",
"=",
"{",
"}",
"# Capture and format complex XPATHs",
"ad_format",
"=",
"iso_data_map",
"[",
"ATTRIBUTES",
"]",
"ft_source",
"=",
"iso_data_map",
"[",
"'_attr_src'",
"]",
".",
"replace",
"(",
"'/carrierOfCharacteristics/FC_FeatureAttribute'",
",",
"''",
")",
"iso_data_structures",
"[",
"ATTRIBUTES",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"ATTRIBUTES",
"]",
",",
"label",
"=",
"ad_format",
".",
"format",
"(",
"ad_path",
"=",
"'memberName/LocalName'",
")",
",",
"aliases",
"=",
"ad_format",
".",
"format",
"(",
"ad_path",
"=",
"'aliases/LocalName'",
")",
",",
"# Not in spec",
"definition",
"=",
"ad_format",
".",
"format",
"(",
"ad_path",
"=",
"'definition/CharacterString'",
")",
",",
"# First try to populate attribute definition source from FC_FeatureAttribute",
"definition_src",
"=",
"iso_data_map",
"[",
"'_attr_src'",
"]",
"+",
"'/organisationName/CharacterString'",
",",
"_definition_src",
"=",
"iso_data_map",
"[",
"'_attr_src'",
"]",
"+",
"'/individualName/CharacterString'",
",",
"# Then assume feature type source is the same as attribute: populate from FC_FeatureType",
"__definition_src",
"=",
"ft_source",
"+",
"'/organisationName/CharacterString'",
",",
"___definition_src",
"=",
"ft_source",
"+",
"'/individualName/CharacterString'",
")",
"bb_format",
"=",
"iso_data_map",
"[",
"BOUNDING_BOX",
"]",
"iso_data_structures",
"[",
"BOUNDING_BOX",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"BOUNDING_BOX",
"]",
",",
"east",
"=",
"bb_format",
".",
"format",
"(",
"bbox_path",
"=",
"'eastBoundLongitude/Decimal'",
")",
",",
"south",
"=",
"bb_format",
".",
"format",
"(",
"bbox_path",
"=",
"'southBoundLatitude/Decimal'",
")",
",",
"west",
"=",
"bb_format",
".",
"format",
"(",
"bbox_path",
"=",
"'westBoundLongitude/Decimal'",
")",
",",
"north",
"=",
"bb_format",
".",
"format",
"(",
"bbox_path",
"=",
"'northBoundLatitude/Decimal'",
")",
")",
"ct_format",
"=",
"iso_data_map",
"[",
"CONTACTS",
"]",
"iso_data_structures",
"[",
"CONTACTS",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"CONTACTS",
"]",
",",
"name",
"=",
"ct_format",
".",
"format",
"(",
"ct_path",
"=",
"'individualName/CharacterString'",
")",
",",
"organization",
"=",
"ct_format",
".",
"format",
"(",
"ct_path",
"=",
"'organisationName/CharacterString'",
")",
",",
"position",
"=",
"ct_format",
".",
"format",
"(",
"ct_path",
"=",
"'positionName/CharacterString'",
")",
",",
"email",
"=",
"ct_format",
".",
"format",
"(",
"ct_path",
"=",
"'contactInfo/CI_Contact/address/CI_Address/electronicMailAddress/CharacterString'",
")",
")",
"dt_format",
"=",
"iso_data_map",
"[",
"DATES",
"]",
"iso_data_structures",
"[",
"DATES",
"]",
"=",
"{",
"DATE_TYPE_MULTIPLE",
":",
"dt_format",
".",
"format",
"(",
"type_path",
"=",
"'TimeInstant/timePosition'",
")",
",",
"DATE_TYPE_RANGE_BEGIN",
":",
"dt_format",
".",
"format",
"(",
"type_path",
"=",
"'TimePeriod/begin/TimeInstant/timePosition'",
")",
",",
"DATE_TYPE_RANGE_END",
":",
"dt_format",
".",
"format",
"(",
"type_path",
"=",
"'TimePeriod/end/TimeInstant/timePosition'",
")",
",",
"DATE_TYPE_SINGLE",
":",
"dt_format",
".",
"format",
"(",
"type_path",
"=",
"'TimeInstant/timePosition'",
")",
"# Same as multiple",
"}",
"iso_data_structures",
"[",
"DATES",
"]",
"[",
"DATE_TYPE_RANGE",
"]",
"=",
"[",
"iso_data_structures",
"[",
"DATES",
"]",
"[",
"DATE_TYPE_RANGE_BEGIN",
"]",
",",
"iso_data_structures",
"[",
"DATES",
"]",
"[",
"DATE_TYPE_RANGE_END",
"]",
"]",
"df_format",
"=",
"iso_data_map",
"[",
"DIGITAL_FORMS",
"]",
"iso_data_structures",
"[",
"DIGITAL_FORMS",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"DIGITAL_FORMS",
"]",
",",
"name",
"=",
"df_format",
".",
"format",
"(",
"df_path",
"=",
"'name/CharacterString'",
")",
",",
"content",
"=",
"''",
",",
"# Not supported in ISO-19115 (appending to spec)",
"decompression",
"=",
"df_format",
".",
"format",
"(",
"df_path",
"=",
"'fileDecompressionTechnique/CharacterString'",
")",
",",
"version",
"=",
"df_format",
".",
"format",
"(",
"df_path",
"=",
"'version/CharacterString'",
")",
",",
"specification",
"=",
"df_format",
".",
"format",
"(",
"df_path",
"=",
"'specification/CharacterString'",
")",
",",
"access_desc",
"=",
"iso_data_map",
"[",
"'_access_desc'",
"]",
",",
"access_instrs",
"=",
"iso_data_map",
"[",
"'_access_instrs'",
"]",
",",
"network_resource",
"=",
"iso_data_map",
"[",
"'_network_resource'",
"]",
")",
"keywords_structure",
"=",
"{",
"'keyword_root'",
":",
"'MD_Keywords/keyword'",
",",
"'keyword_type'",
":",
"'MD_Keywords/type/MD_KeywordTypeCode'",
",",
"'keyword'",
":",
"'MD_Keywords/keyword/CharacterString'",
"}",
"for",
"keyword_prop",
"in",
"KEYWORD_PROPS",
":",
"iso_data_structures",
"[",
"keyword_prop",
"]",
"=",
"deepcopy",
"(",
"keywords_structure",
")",
"lw_format",
"=",
"iso_data_map",
"[",
"LARGER_WORKS",
"]",
"iso_data_structures",
"[",
"LARGER_WORKS",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"LARGER_WORKS",
"]",
",",
"title",
"=",
"lw_format",
".",
"format",
"(",
"lw_path",
"=",
"'title/CharacterString'",
")",
",",
"edition",
"=",
"lw_format",
".",
"format",
"(",
"lw_path",
"=",
"'edition/CharacterString'",
")",
",",
"origin",
"=",
"iso_data_map",
"[",
"'_lw_citation'",
"]",
".",
"format",
"(",
"lw_path",
"=",
"'individualName/CharacterString'",
")",
",",
"online_linkage",
"=",
"iso_data_map",
"[",
"'_lw_linkage'",
"]",
".",
"format",
"(",
"lw_path",
"=",
"'linkage/URL'",
")",
",",
"other_citation",
"=",
"lw_format",
".",
"format",
"(",
"lw_path",
"=",
"'otherCitationDetails/CharacterString'",
")",
",",
"date",
"=",
"lw_format",
".",
"format",
"(",
"lw_path",
"=",
"'editionDate/Date'",
")",
",",
"place",
"=",
"iso_data_map",
"[",
"'_lw_contact'",
"]",
".",
"format",
"(",
"lw_path",
"=",
"'address/CI_Address/city/CharacterString'",
")",
",",
"info",
"=",
"iso_data_map",
"[",
"'_lw_citation'",
"]",
".",
"format",
"(",
"lw_path",
"=",
"'organisationName/CharacterString'",
")",
")",
"ps_format",
"=",
"iso_data_map",
"[",
"PROCESS_STEPS",
"]",
"iso_data_structures",
"[",
"PROCESS_STEPS",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"PROCESS_STEPS",
"]",
",",
"description",
"=",
"ps_format",
".",
"format",
"(",
"ps_path",
"=",
"'description/CharacterString'",
")",
",",
"date",
"=",
"ps_format",
".",
"format",
"(",
"ps_path",
"=",
"'dateTime/DateTime'",
")",
",",
"sources",
"=",
"ps_format",
".",
"format",
"(",
"ps_path",
"=",
"'source/LI_Source/sourceCitation/CI_Citation/alternateTitle/CharacterString'",
")",
")",
"ri_format",
"=",
"iso_data_map",
"[",
"RASTER_INFO",
"]",
"iso_data_structures",
"[",
"RASTER_INFO",
"]",
"=",
"format_xpaths",
"(",
"_iso_definitions",
"[",
"RASTER_DIMS",
"]",
",",
"type",
"=",
"ri_format",
".",
"format",
"(",
"ri_path",
"=",
"'dimensionName/MD_DimensionNameTypeCode'",
")",
",",
"_type",
"=",
"ri_format",
".",
"format",
"(",
"ri_path",
"=",
"'dimensionName/MD_DimensionNameTypeCode/@codeListValue'",
")",
",",
"size",
"=",
"ri_format",
".",
"format",
"(",
"ri_path",
"=",
"'dimensionSize/Integer'",
")",
",",
"value",
"=",
"ri_format",
".",
"format",
"(",
"ri_path",
"=",
"'resolution/Measure'",
")",
",",
"units",
"=",
"ri_format",
".",
"format",
"(",
"ri_path",
"=",
"'resolution/Measure/@uom'",
")",
")",
"# Assign XPATHS and gis_metadata.utils.ParserProperties to data map",
"for",
"prop",
",",
"xpath",
"in",
"iteritems",
"(",
"dict",
"(",
"iso_data_map",
")",
")",
":",
"if",
"prop",
"==",
"ATTRIBUTES",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_attribute_details",
",",
"self",
".",
"_update_attribute_details",
")",
"elif",
"prop",
"in",
"(",
"CONTACTS",
",",
"PROCESS_STEPS",
")",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_complex_list",
",",
"self",
".",
"_update_complex_list",
")",
"elif",
"prop",
"in",
"(",
"BOUNDING_BOX",
",",
"LARGER_WORKS",
")",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_complex",
",",
"self",
".",
"_update_complex",
")",
"elif",
"prop",
"==",
"DATES",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_dates",
",",
"self",
".",
"_update_dates",
")",
"elif",
"prop",
"==",
"DIGITAL_FORMS",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_digital_forms",
",",
"self",
".",
"_update_digital_forms",
")",
"elif",
"prop",
"in",
"KEYWORD_PROPS",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_keywords",
",",
"self",
".",
"_update_keywords",
")",
"elif",
"prop",
"==",
"RASTER_INFO",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"ParserProperty",
"(",
"self",
".",
"_parse_raster_info",
",",
"self",
".",
"_update_raster_info",
")",
"else",
":",
"iso_data_map",
"[",
"prop",
"]",
"=",
"xpath",
"self",
".",
"_data_map",
"=",
"iso_data_map",
"self",
".",
"_data_structures",
"=",
"iso_data_structures"
] | 47.797468 | 28.886076 |
def binarize(obj, threshold=0):
"""
Return a copy of the object with binarized piano-roll(s).
Parameters
----------
threshold : int or float
Threshold to binarize the piano-roll(s). Default to zero.
"""
_check_supported(obj)
copied = deepcopy(obj)
copied.binarize(threshold)
return copied | [
"def",
"binarize",
"(",
"obj",
",",
"threshold",
"=",
"0",
")",
":",
"_check_supported",
"(",
"obj",
")",
"copied",
"=",
"deepcopy",
"(",
"obj",
")",
"copied",
".",
"binarize",
"(",
"threshold",
")",
"return",
"copied"
] | 23.214286 | 18.642857 |
def webhooks(request):
"""
Handles all known webhooks from stripe, and calls signals.
Plug in as you need.
"""
if request.method != "POST":
return HttpResponse("Invalid Request.", status=400)
json = simplejson.loads(request.POST["json"])
if json["event"] == "recurring_payment_failed":
zebra_webhook_recurring_payment_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "invoice_ready":
zebra_webhook_invoice_ready.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "recurring_payment_succeeded":
zebra_webhook_recurring_payment_succeeded.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "subscription_trial_ending":
zebra_webhook_subscription_trial_ending.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "subscription_final_payment_attempt_failed":
zebra_webhook_subscription_final_payment_attempt_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "ping":
zebra_webhook_subscription_ping_sent.send(sender=None)
else:
return HttpResponse(status=400)
return HttpResponse(status=200) | [
"def",
"webhooks",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"!=",
"\"POST\"",
":",
"return",
"HttpResponse",
"(",
"\"Invalid Request.\"",
",",
"status",
"=",
"400",
")",
"json",
"=",
"simplejson",
".",
"loads",
"(",
"request",
".",
"POST",
"[",
"\"json\"",
"]",
")",
"if",
"json",
"[",
"\"event\"",
"]",
"==",
"\"recurring_payment_failed\"",
":",
"zebra_webhook_recurring_payment_failed",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"customer",
"=",
"_try_to_get_customer_from_customer_id",
"(",
"json",
"[",
"\"customer\"",
"]",
")",
",",
"full_json",
"=",
"json",
")",
"elif",
"json",
"[",
"\"event\"",
"]",
"==",
"\"invoice_ready\"",
":",
"zebra_webhook_invoice_ready",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"customer",
"=",
"_try_to_get_customer_from_customer_id",
"(",
"json",
"[",
"\"customer\"",
"]",
")",
",",
"full_json",
"=",
"json",
")",
"elif",
"json",
"[",
"\"event\"",
"]",
"==",
"\"recurring_payment_succeeded\"",
":",
"zebra_webhook_recurring_payment_succeeded",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"customer",
"=",
"_try_to_get_customer_from_customer_id",
"(",
"json",
"[",
"\"customer\"",
"]",
")",
",",
"full_json",
"=",
"json",
")",
"elif",
"json",
"[",
"\"event\"",
"]",
"==",
"\"subscription_trial_ending\"",
":",
"zebra_webhook_subscription_trial_ending",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"customer",
"=",
"_try_to_get_customer_from_customer_id",
"(",
"json",
"[",
"\"customer\"",
"]",
")",
",",
"full_json",
"=",
"json",
")",
"elif",
"json",
"[",
"\"event\"",
"]",
"==",
"\"subscription_final_payment_attempt_failed\"",
":",
"zebra_webhook_subscription_final_payment_attempt_failed",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"customer",
"=",
"_try_to_get_customer_from_customer_id",
"(",
"json",
"[",
"\"customer\"",
"]",
")",
",",
"full_json",
"=",
"json",
")",
"elif",
"json",
"[",
"\"event\"",
"]",
"==",
"\"ping\"",
":",
"zebra_webhook_subscription_ping_sent",
".",
"send",
"(",
"sender",
"=",
"None",
")",
"else",
":",
"return",
"HttpResponse",
"(",
"status",
"=",
"400",
")",
"return",
"HttpResponse",
"(",
"status",
"=",
"200",
")"
] | 44.030303 | 35.484848 |
async def find(self, **kwargs):
"""Find all entries with given search key.
Accepts named parameter key and arbitrary values.
Returns list of entry id`s.
find(**kwargs) => document (if exist)
find(**kwargs) => {"error":404,"reason":"Not found"} (if does not exist)
find() => {"error":400, "reason":"Missed required fields"}
"""
if not isinstance(kwargs, dict) and len(kwargs) != 1:
return {"error":400,
"reason":"Bad request"}
document = await self.collection.find_one(kwargs)
if document:
return document
else:
return {"error":404, "reason":"Not found"} | [
"async",
"def",
"find",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"kwargs",
",",
"dict",
")",
"and",
"len",
"(",
"kwargs",
")",
"!=",
"1",
":",
"return",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Bad request\"",
"}",
"document",
"=",
"await",
"self",
".",
"collection",
".",
"find_one",
"(",
"kwargs",
")",
"if",
"document",
":",
"return",
"document",
"else",
":",
"return",
"{",
"\"error\"",
":",
"404",
",",
"\"reason\"",
":",
"\"Not found\"",
"}"
] | 33.823529 | 15.647059 |
def resolve_widget(self, field):
"""
Given a Field or BoundField, return widget instance.
Todo:
Raise an exception if given field object does not have a
widget.
Arguments:
field (Field or BoundField): A field instance.
Returns:
django.forms.widgets.Widget: Retrieved widget from given field.
"""
# When filter is used within template we have to reach the field
# instance through the BoundField.
if hasattr(field, 'field'):
widget = field.field.widget
# When used out of template, we have a direct field instance
else:
widget = field.widget
return widget | [
"def",
"resolve_widget",
"(",
"self",
",",
"field",
")",
":",
"# When filter is used within template we have to reach the field",
"# instance through the BoundField.",
"if",
"hasattr",
"(",
"field",
",",
"'field'",
")",
":",
"widget",
"=",
"field",
".",
"field",
".",
"widget",
"# When used out of template, we have a direct field instance",
"else",
":",
"widget",
"=",
"field",
".",
"widget",
"return",
"widget"
] | 30.608696 | 21.043478 |
def cookietostr(self):
"Cookie values are bytes in Python3. This function Convert bytes to string with env.encoding(default to utf-8)."
self.cookies = dict((k, (v.decode(self.encoding) if not isinstance(v, str) else v)) for k,v in self.cookies.items())
return self.cookies | [
"def",
"cookietostr",
"(",
"self",
")",
":",
"self",
".",
"cookies",
"=",
"dict",
"(",
"(",
"k",
",",
"(",
"v",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
"if",
"not",
"isinstance",
"(",
"v",
",",
"str",
")",
"else",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"cookies",
".",
"items",
"(",
")",
")",
"return",
"self",
".",
"cookies"
] | 73.25 | 48.75 |
def clear_tab_stop(self, how=0):
"""Clear a horizontal tab stop.
:param int how: defines a way the tab stop should be cleared:
* ``0`` or nothing -- Clears a horizontal tab stop at cursor
position.
* ``3`` -- Clears all horizontal tab stops.
"""
if how == 0:
# Clears a horizontal tab stop at cursor position, if it's
# present, or silently fails if otherwise.
self.tabstops.discard(self.cursor.x)
elif how == 3:
self.tabstops = set() | [
"def",
"clear_tab_stop",
"(",
"self",
",",
"how",
"=",
"0",
")",
":",
"if",
"how",
"==",
"0",
":",
"# Clears a horizontal tab stop at cursor position, if it's",
"# present, or silently fails if otherwise.",
"self",
".",
"tabstops",
".",
"discard",
"(",
"self",
".",
"cursor",
".",
"x",
")",
"elif",
"how",
"==",
"3",
":",
"self",
".",
"tabstops",
"=",
"set",
"(",
")"
] | 36.533333 | 18.533333 |
def get_election(self, row, race):
"""
Gets the Election object for the given row of election results.
Depends on knowing the Race object.
If this is the presidential election, this will determine the
Division attached to the election based on the row's statename.
This function depends on knowing the Race object from `get_race`.
"""
election_day = election.ElectionDay.objects.get(
date=row["electiondate"]
)
if row["racetypeid"] in ["D", "E"]:
party = government.Party.objects.get(ap_code="Dem")
elif row["racetypeid"] in ["R", "S"]:
party = government.Party.objects.get(ap_code="GOP")
else:
party = None
if row["racetype"] == "Runoff" and party:
election_type = election.ElectionType.objects.get_or_create(
slug=election.ElectionType.PRIMARY_RUNOFF,
label="Primary Runoff",
number_of_winners=1,
)[0]
return election.Election.objects.get_or_create(
election_type=election_type,
election_day=election_day,
division=race.office.division,
race=race,
party=party,
)[0]
try:
return election.Election.objects.get(
election_day=election_day,
division=race.office.division,
race=race,
party=party,
)
except ObjectDoesNotExist:
print(
"Could not find election for {0} {1} {2}".format(
race, row["party"], row["last"]
)
)
return None | [
"def",
"get_election",
"(",
"self",
",",
"row",
",",
"race",
")",
":",
"election_day",
"=",
"election",
".",
"ElectionDay",
".",
"objects",
".",
"get",
"(",
"date",
"=",
"row",
"[",
"\"electiondate\"",
"]",
")",
"if",
"row",
"[",
"\"racetypeid\"",
"]",
"in",
"[",
"\"D\"",
",",
"\"E\"",
"]",
":",
"party",
"=",
"government",
".",
"Party",
".",
"objects",
".",
"get",
"(",
"ap_code",
"=",
"\"Dem\"",
")",
"elif",
"row",
"[",
"\"racetypeid\"",
"]",
"in",
"[",
"\"R\"",
",",
"\"S\"",
"]",
":",
"party",
"=",
"government",
".",
"Party",
".",
"objects",
".",
"get",
"(",
"ap_code",
"=",
"\"GOP\"",
")",
"else",
":",
"party",
"=",
"None",
"if",
"row",
"[",
"\"racetype\"",
"]",
"==",
"\"Runoff\"",
"and",
"party",
":",
"election_type",
"=",
"election",
".",
"ElectionType",
".",
"objects",
".",
"get_or_create",
"(",
"slug",
"=",
"election",
".",
"ElectionType",
".",
"PRIMARY_RUNOFF",
",",
"label",
"=",
"\"Primary Runoff\"",
",",
"number_of_winners",
"=",
"1",
",",
")",
"[",
"0",
"]",
"return",
"election",
".",
"Election",
".",
"objects",
".",
"get_or_create",
"(",
"election_type",
"=",
"election_type",
",",
"election_day",
"=",
"election_day",
",",
"division",
"=",
"race",
".",
"office",
".",
"division",
",",
"race",
"=",
"race",
",",
"party",
"=",
"party",
",",
")",
"[",
"0",
"]",
"try",
":",
"return",
"election",
".",
"Election",
".",
"objects",
".",
"get",
"(",
"election_day",
"=",
"election_day",
",",
"division",
"=",
"race",
".",
"office",
".",
"division",
",",
"race",
"=",
"race",
",",
"party",
"=",
"party",
",",
")",
"except",
"ObjectDoesNotExist",
":",
"print",
"(",
"\"Could not find election for {0} {1} {2}\"",
".",
"format",
"(",
"race",
",",
"row",
"[",
"\"party\"",
"]",
",",
"row",
"[",
"\"last\"",
"]",
")",
")",
"return",
"None"
] | 34.06 | 18.38 |
def visit_functiondef(self, node):
'''
Verifies no logger statements inside __virtual__
'''
if (not isinstance(node, astroid.FunctionDef) or
node.is_method()
or node.type != 'function'
or not node.body
):
# only process functions
return
try:
if not node.name == '__virtual__':
# only need to process the __virtual__ function
return
except AttributeError:
return
# walk contents of __virtual__ function
for child in node.get_children():
for functions in child.get_children():
if isinstance(functions, astroid.Call):
if isinstance(functions.func, astroid.Attribute):
try:
# Inspect each statement for an instance of 'logging'
for inferred in functions.func.expr.infer():
try:
instance_type = inferred.pytype().split('.')[0]
except TypeError:
continue
if instance_type == 'logging':
self.add_message(
self.VIRT_LOG, node=functions
)
# Found logger, don't need to keep processing this line
break
except AttributeError:
# Not a log function
return | [
"def",
"visit_functiondef",
"(",
"self",
",",
"node",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"node",
",",
"astroid",
".",
"FunctionDef",
")",
"or",
"node",
".",
"is_method",
"(",
")",
"or",
"node",
".",
"type",
"!=",
"'function'",
"or",
"not",
"node",
".",
"body",
")",
":",
"# only process functions",
"return",
"try",
":",
"if",
"not",
"node",
".",
"name",
"==",
"'__virtual__'",
":",
"# only need to process the __virtual__ function",
"return",
"except",
"AttributeError",
":",
"return",
"# walk contents of __virtual__ function",
"for",
"child",
"in",
"node",
".",
"get_children",
"(",
")",
":",
"for",
"functions",
"in",
"child",
".",
"get_children",
"(",
")",
":",
"if",
"isinstance",
"(",
"functions",
",",
"astroid",
".",
"Call",
")",
":",
"if",
"isinstance",
"(",
"functions",
".",
"func",
",",
"astroid",
".",
"Attribute",
")",
":",
"try",
":",
"# Inspect each statement for an instance of 'logging'",
"for",
"inferred",
"in",
"functions",
".",
"func",
".",
"expr",
".",
"infer",
"(",
")",
":",
"try",
":",
"instance_type",
"=",
"inferred",
".",
"pytype",
"(",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"except",
"TypeError",
":",
"continue",
"if",
"instance_type",
"==",
"'logging'",
":",
"self",
".",
"add_message",
"(",
"self",
".",
"VIRT_LOG",
",",
"node",
"=",
"functions",
")",
"# Found logger, don't need to keep processing this line",
"break",
"except",
"AttributeError",
":",
"# Not a log function",
"return"
] | 41.4 | 17.7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.