text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _set_scan_parameters(self, interval=2100, window=2100, active=False):
"""
Set the scan interval and window in units of ms and set whether active scanning is performed
"""
active_num = 0
if bool(active):
active_num = 1
interval_num = int(interval*1000/625)
window_num = int(window*1000/625)
payload = struct.pack("<HHB", interval_num, window_num, active_num)
try:
response = self._send_command(6, 7, payload)
if response.payload[0] != 0:
return False, {'reason': "Could not set scanning parameters", 'error': response.payload[0]}
except InternalTimeoutError:
return False, {'reason': 'Timeout waiting for response'}
return True, None | [
"def",
"_set_scan_parameters",
"(",
"self",
",",
"interval",
"=",
"2100",
",",
"window",
"=",
"2100",
",",
"active",
"=",
"False",
")",
":",
"active_num",
"=",
"0",
"if",
"bool",
"(",
"active",
")",
":",
"active_num",
"=",
"1",
"interval_num",
"=",
"int",
"(",
"interval",
"*",
"1000",
"/",
"625",
")",
"window_num",
"=",
"int",
"(",
"window",
"*",
"1000",
"/",
"625",
")",
"payload",
"=",
"struct",
".",
"pack",
"(",
"\"<HHB\"",
",",
"interval_num",
",",
"window_num",
",",
"active_num",
")",
"try",
":",
"response",
"=",
"self",
".",
"_send_command",
"(",
"6",
",",
"7",
",",
"payload",
")",
"if",
"response",
".",
"payload",
"[",
"0",
"]",
"!=",
"0",
":",
"return",
"False",
",",
"{",
"'reason'",
":",
"\"Could not set scanning parameters\"",
",",
"'error'",
":",
"response",
".",
"payload",
"[",
"0",
"]",
"}",
"except",
"InternalTimeoutError",
":",
"return",
"False",
",",
"{",
"'reason'",
":",
"'Timeout waiting for response'",
"}",
"return",
"True",
",",
"None"
] | 35.090909 | 24.545455 |
def sample(self, X, y, quantity='y', sample_at_X=None,
weights=None, n_draws=100, n_bootstraps=5, objective='auto'):
"""Simulate from the posterior of the coefficients and smoothing params.
Samples are drawn from the posterior of the coefficients and smoothing
parameters given the response in an approximate way. The GAM must
already be fitted before calling this method; if the model has not
been fitted, then an exception is raised. Moreover, it is recommended
that the model and its hyperparameters be chosen with `gridsearch`
(with the parameter `keep_best=True`) before calling `sample`, so that
the result of that gridsearch can be used to generate useful response
data and so that the model's coefficients (and their covariance matrix)
can be used as the first bootstrap sample.
These samples are drawn as follows. Details are in the reference below.
1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are
simulated by drawing random samples from the model's distribution
evaluated at the expected values (``mu``) for each sample in ``X``.
2. A copy of the model is fitted to each of those bootstrap samples of
the response. The result is an approximation of the distribution over
the smoothing parameter ``lam`` given the response data ``y``.
3. Samples of the coefficients are simulated from a multivariate normal
using the bootstrap samples of the coefficients and their covariance
matrices.
Notes
-----
A ``gridsearch`` is done ``n_bootstraps`` many times, so keep
``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
empirical input data
y : array of shape (n_samples,)
empirical response vector
quantity : {'y', 'coef', 'mu'}, default: 'y'
What quantity to return pseudorandom samples of.
If `sample_at_X` is not None and `quantity` is either `'y'` or
`'mu'`, then samples are drawn at the values of `X` specified in
`sample_at_X`.
sample_at_X : array of shape (n_samples_to_simulate, m_features) or
None, optional
Input data at which to draw new samples.
Only applies for `quantity` equal to `'y'` or to `'mu`'.
If `None`, then `sample_at_X` is replaced by `X`.
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100)
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=5)
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameter is used, and the distribution over the
smoothing parameters is not estimated using bootstrap sampling.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
draws : 2D array of length n_draws
Simulations of the given `quantity` using samples from the
posterior distribution of the coefficients and smoothing parameter
given the response data. Each row is a pseudorandom sample.
If `quantity == 'coef'`, then the number of columns of `draws` is
the number of coefficients (`len(self.coef_)`).
Otherwise, the number of columns of `draws` is the number of
rows of `sample_at_X` if `sample_at_X` is not `None` or else
the number of rows of `X`.
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
"""
if quantity not in {'mu', 'coef', 'y'}:
raise ValueError("`quantity` must be one of 'mu', 'coef', 'y';"
" got {}".format(quantity))
coef_draws = self._sample_coef(
X, y, weights=weights, n_draws=n_draws,
n_bootstraps=n_bootstraps, objective=objective)
if quantity == 'coef':
return coef_draws
if sample_at_X is None:
sample_at_X = X
linear_predictor = self._modelmat(sample_at_X).dot(coef_draws.T)
mu_shape_n_draws_by_n_samples = self.link.mu(
linear_predictor, self.distribution).T
if quantity == 'mu':
return mu_shape_n_draws_by_n_samples
else:
return self.distribution.sample(mu_shape_n_draws_by_n_samples) | [
"def",
"sample",
"(",
"self",
",",
"X",
",",
"y",
",",
"quantity",
"=",
"'y'",
",",
"sample_at_X",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"n_draws",
"=",
"100",
",",
"n_bootstraps",
"=",
"5",
",",
"objective",
"=",
"'auto'",
")",
":",
"if",
"quantity",
"not",
"in",
"{",
"'mu'",
",",
"'coef'",
",",
"'y'",
"}",
":",
"raise",
"ValueError",
"(",
"\"`quantity` must be one of 'mu', 'coef', 'y';\"",
"\" got {}\"",
".",
"format",
"(",
"quantity",
")",
")",
"coef_draws",
"=",
"self",
".",
"_sample_coef",
"(",
"X",
",",
"y",
",",
"weights",
"=",
"weights",
",",
"n_draws",
"=",
"n_draws",
",",
"n_bootstraps",
"=",
"n_bootstraps",
",",
"objective",
"=",
"objective",
")",
"if",
"quantity",
"==",
"'coef'",
":",
"return",
"coef_draws",
"if",
"sample_at_X",
"is",
"None",
":",
"sample_at_X",
"=",
"X",
"linear_predictor",
"=",
"self",
".",
"_modelmat",
"(",
"sample_at_X",
")",
".",
"dot",
"(",
"coef_draws",
".",
"T",
")",
"mu_shape_n_draws_by_n_samples",
"=",
"self",
".",
"link",
".",
"mu",
"(",
"linear_predictor",
",",
"self",
".",
"distribution",
")",
".",
"T",
"if",
"quantity",
"==",
"'mu'",
":",
"return",
"mu_shape_n_draws_by_n_samples",
"else",
":",
"return",
"self",
".",
"distribution",
".",
"sample",
"(",
"mu_shape_n_draws_by_n_samples",
")"
] | 45.017241 | 26.87069 |
def reset(self):
"""
Clear all cell and segment activity.
"""
super(ApicalTiebreakSequenceMemory, self).reset()
self.prevApicalInput = np.empty(0, dtype="uint32")
self.prevApicalGrowthCandidates = np.empty(0, dtype="uint32")
self.prevPredictedCells = np.empty(0, dtype="uint32") | [
"def",
"reset",
"(",
"self",
")",
":",
"super",
"(",
"ApicalTiebreakSequenceMemory",
",",
"self",
")",
".",
"reset",
"(",
")",
"self",
".",
"prevApicalInput",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"\"uint32\"",
")",
"self",
".",
"prevApicalGrowthCandidates",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"\"uint32\"",
")",
"self",
".",
"prevPredictedCells",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"\"uint32\"",
")"
] | 33.222222 | 14.777778 |
def nacm_enable_external_groups(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
enable_external_groups = ET.SubElement(nacm, "enable-external-groups")
enable_external_groups.text = kwargs.pop('enable_external_groups')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"nacm_enable_external_groups",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"nacm",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"nacm\"",
",",
"xmlns",
"=",
"\"urn:ietf:params:xml:ns:yang:ietf-netconf-acm\"",
")",
"enable_external_groups",
"=",
"ET",
".",
"SubElement",
"(",
"nacm",
",",
"\"enable-external-groups\"",
")",
"enable_external_groups",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'enable_external_groups'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 46.4 | 20.7 |
def multiply(self, other):
"""Return the operator self + other.
Args:
other (complex): a complex number.
Returns:
Operator: the operator other * self.
Raises:
QiskitError: if other is not a valid complex number.
"""
if not isinstance(other, Number):
raise QiskitError("other is not a number")
return Operator(other * self.data, self.input_dims(),
self.output_dims()) | [
"def",
"multiply",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"raise",
"QiskitError",
"(",
"\"other is not a number\"",
")",
"return",
"Operator",
"(",
"other",
"*",
"self",
".",
"data",
",",
"self",
".",
"input_dims",
"(",
")",
",",
"self",
".",
"output_dims",
"(",
")",
")"
] | 30.125 | 17.9375 |
def get_template_vars(self, slides):
""" Computes template vars from slides html source code.
"""
try:
head_title = slides[0]['title']
except (IndexError, TypeError):
head_title = "Untitled Presentation"
for slide_index, slide_vars in enumerate(slides):
if not slide_vars:
continue
self.num_slides += 1
slide_number = slide_vars['number'] = self.num_slides
if slide_vars['level'] and slide_vars['level'] <= TOC_MAX_LEVEL:
self.add_toc_entry(slide_vars['title'], slide_vars['level'],
slide_number)
else:
# Put something in the TOC even if it doesn't have a title or level
self.add_toc_entry(u"-", 1, slide_number)
return {'head_title': head_title, 'num_slides': str(self.num_slides),
'slides': slides, 'toc': self.toc, 'embed': self.embed,
'css': self.get_css(), 'js': self.get_js(),
'user_css': self.user_css, 'user_js': self.user_js,
'math_output': self.math_output} | [
"def",
"get_template_vars",
"(",
"self",
",",
"slides",
")",
":",
"try",
":",
"head_title",
"=",
"slides",
"[",
"0",
"]",
"[",
"'title'",
"]",
"except",
"(",
"IndexError",
",",
"TypeError",
")",
":",
"head_title",
"=",
"\"Untitled Presentation\"",
"for",
"slide_index",
",",
"slide_vars",
"in",
"enumerate",
"(",
"slides",
")",
":",
"if",
"not",
"slide_vars",
":",
"continue",
"self",
".",
"num_slides",
"+=",
"1",
"slide_number",
"=",
"slide_vars",
"[",
"'number'",
"]",
"=",
"self",
".",
"num_slides",
"if",
"slide_vars",
"[",
"'level'",
"]",
"and",
"slide_vars",
"[",
"'level'",
"]",
"<=",
"TOC_MAX_LEVEL",
":",
"self",
".",
"add_toc_entry",
"(",
"slide_vars",
"[",
"'title'",
"]",
",",
"slide_vars",
"[",
"'level'",
"]",
",",
"slide_number",
")",
"else",
":",
"# Put something in the TOC even if it doesn't have a title or level",
"self",
".",
"add_toc_entry",
"(",
"u\"-\"",
",",
"1",
",",
"slide_number",
")",
"return",
"{",
"'head_title'",
":",
"head_title",
",",
"'num_slides'",
":",
"str",
"(",
"self",
".",
"num_slides",
")",
",",
"'slides'",
":",
"slides",
",",
"'toc'",
":",
"self",
".",
"toc",
",",
"'embed'",
":",
"self",
".",
"embed",
",",
"'css'",
":",
"self",
".",
"get_css",
"(",
")",
",",
"'js'",
":",
"self",
".",
"get_js",
"(",
")",
",",
"'user_css'",
":",
"self",
".",
"user_css",
",",
"'user_js'",
":",
"self",
".",
"user_js",
",",
"'math_output'",
":",
"self",
".",
"math_output",
"}"
] | 45.6 | 19.4 |
def __make_points_for_label(self, ts, data, label, prefix, gun_stats):
"""x
Make a set of points for `this` label
overall_quantiles, overall_meta, net_codes, proto_codes, histograms
"""
label_points = list()
label_points.extend(
(
# overall quantiles for label
self.__make_points(
prefix + "overall_quantiles",
{"label": label},
ts,
self.__make_quantile_fields(data)
),
# overall meta (gun status) for label
self.__make_points(
prefix + "overall_meta",
{"label": label},
ts,
self.__make_overall_meta_fields(data, gun_stats)
),
# net codes for label
self.__make_points(
prefix + "net_codes",
{"label": label},
ts,
self.__make_netcodes_fields(data)
),
# proto codes for label
self.__make_points(
prefix + "proto_codes",
{"label": label},
ts,
self.__make_protocodes_fields(data)
)
)
)
# histograms, one row for each bin
if self.histograms:
for bin_, count in zip(data["interval_real"]["hist"]["bins"],
data["interval_real"]["hist"]["data"]):
label_points.append(
self.__make_points(
prefix + "histograms",
{"label": label},
ts,
{"bin": bin_, "count": count}
)
)
return label_points | [
"def",
"__make_points_for_label",
"(",
"self",
",",
"ts",
",",
"data",
",",
"label",
",",
"prefix",
",",
"gun_stats",
")",
":",
"label_points",
"=",
"list",
"(",
")",
"label_points",
".",
"extend",
"(",
"(",
"# overall quantiles for label",
"self",
".",
"__make_points",
"(",
"prefix",
"+",
"\"overall_quantiles\"",
",",
"{",
"\"label\"",
":",
"label",
"}",
",",
"ts",
",",
"self",
".",
"__make_quantile_fields",
"(",
"data",
")",
")",
",",
"# overall meta (gun status) for label",
"self",
".",
"__make_points",
"(",
"prefix",
"+",
"\"overall_meta\"",
",",
"{",
"\"label\"",
":",
"label",
"}",
",",
"ts",
",",
"self",
".",
"__make_overall_meta_fields",
"(",
"data",
",",
"gun_stats",
")",
")",
",",
"# net codes for label",
"self",
".",
"__make_points",
"(",
"prefix",
"+",
"\"net_codes\"",
",",
"{",
"\"label\"",
":",
"label",
"}",
",",
"ts",
",",
"self",
".",
"__make_netcodes_fields",
"(",
"data",
")",
")",
",",
"# proto codes for label",
"self",
".",
"__make_points",
"(",
"prefix",
"+",
"\"proto_codes\"",
",",
"{",
"\"label\"",
":",
"label",
"}",
",",
"ts",
",",
"self",
".",
"__make_protocodes_fields",
"(",
"data",
")",
")",
")",
")",
"# histograms, one row for each bin",
"if",
"self",
".",
"histograms",
":",
"for",
"bin_",
",",
"count",
"in",
"zip",
"(",
"data",
"[",
"\"interval_real\"",
"]",
"[",
"\"hist\"",
"]",
"[",
"\"bins\"",
"]",
",",
"data",
"[",
"\"interval_real\"",
"]",
"[",
"\"hist\"",
"]",
"[",
"\"data\"",
"]",
")",
":",
"label_points",
".",
"append",
"(",
"self",
".",
"__make_points",
"(",
"prefix",
"+",
"\"histograms\"",
",",
"{",
"\"label\"",
":",
"label",
"}",
",",
"ts",
",",
"{",
"\"bin\"",
":",
"bin_",
",",
"\"count\"",
":",
"count",
"}",
")",
")",
"return",
"label_points"
] | 35.075472 | 13.773585 |
def icmp(a, b):
"Like cmp(), but for any iterator."
for xa in a:
try:
xb = next(b)
d = cmp(xa, xb)
if d: return d
except StopIteration:
return 1
try:
next(b)
return -1
except StopIteration:
return 0 | [
"def",
"icmp",
"(",
"a",
",",
"b",
")",
":",
"for",
"xa",
"in",
"a",
":",
"try",
":",
"xb",
"=",
"next",
"(",
"b",
")",
"d",
"=",
"cmp",
"(",
"xa",
",",
"xb",
")",
"if",
"d",
":",
"return",
"d",
"except",
"StopIteration",
":",
"return",
"1",
"try",
":",
"next",
"(",
"b",
")",
"return",
"-",
"1",
"except",
"StopIteration",
":",
"return",
"0"
] | 20.642857 | 19.357143 |
def format_time(x):
"""Formats date values
This function formats :class:`datetime.datetime` and
:class:`datetime.timedelta` objects (and the corresponding numpy objects)
using the :func:`xarray.core.formatting.format_timestamp` and the
:func:`xarray.core.formatting.format_timedelta` functions.
Parameters
----------
x: object
The value to format. If not a time object, the value is returned
Returns
-------
str or `x`
Either the formatted time object or the initial `x`"""
if isinstance(x, (datetime64, datetime)):
return format_timestamp(x)
elif isinstance(x, (timedelta64, timedelta)):
return format_timedelta(x)
elif isinstance(x, ndarray):
return list(x) if x.ndim else x[()]
return x | [
"def",
"format_time",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"(",
"datetime64",
",",
"datetime",
")",
")",
":",
"return",
"format_timestamp",
"(",
"x",
")",
"elif",
"isinstance",
"(",
"x",
",",
"(",
"timedelta64",
",",
"timedelta",
")",
")",
":",
"return",
"format_timedelta",
"(",
"x",
")",
"elif",
"isinstance",
"(",
"x",
",",
"ndarray",
")",
":",
"return",
"list",
"(",
"x",
")",
"if",
"x",
".",
"ndim",
"else",
"x",
"[",
"(",
")",
"]",
"return",
"x"
] | 32.041667 | 21.041667 |
def apply(self, func, axis='major', **kwargs):
"""
Apply function along axis (or axes) of the Panel.
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
result : Panel, DataFrame, or Series
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4, 3, 2)) # doctest: +SKIP
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1) # doctest: +SKIP
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='major') # doctest: +SKIP
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1)) # doctest: +SKIP
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis) | [
"def",
"apply",
"(",
"self",
",",
"func",
",",
"axis",
"=",
"'major'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
"and",
"not",
"isinstance",
"(",
"func",
",",
"np",
".",
"ufunc",
")",
":",
"f",
"=",
"lambda",
"x",
":",
"func",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"f",
"=",
"func",
"# 2d-slabs",
"if",
"isinstance",
"(",
"axis",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"axis",
")",
"==",
"2",
":",
"return",
"self",
".",
"_apply_2d",
"(",
"f",
",",
"axis",
"=",
"axis",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"# try ufunc like",
"if",
"isinstance",
"(",
"f",
",",
"np",
".",
"ufunc",
")",
":",
"try",
":",
"with",
"np",
".",
"errstate",
"(",
"all",
"=",
"'ignore'",
")",
":",
"result",
"=",
"np",
".",
"apply_along_axis",
"(",
"func",
",",
"axis",
",",
"self",
".",
"values",
")",
"return",
"self",
".",
"_wrap_result",
"(",
"result",
",",
"axis",
"=",
"axis",
")",
"except",
"(",
"AttributeError",
")",
":",
"pass",
"# 1d",
"return",
"self",
".",
"_apply_1d",
"(",
"f",
",",
"axis",
"=",
"axis",
")"
] | 30.59375 | 23.96875 |
def toLily(self):
'''
Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code
'''
lilystring = ""
if self.item is not None:
if not isinstance(self.GetChild(0), NoteNode):
if hasattr(self.item, "chord") and self.item.chord:
self.item.chord = "stop"
if isinstance(self.GetChild(0), NoteNode):
if not hasattr(self.item, "chord") or not self.item.chord:
self.item.chord = "start"
lilystring += self.item.toLily()
children = self.GetChildrenIndexes()
written = False
for child in children:
if self.GetChild(child) is not None:
if isinstance(self.GetChild(child), NoteNode):
lilystring += " "
return_val = self.GetChild(child).toLily()
if isinstance(return_val, str):
lilystring += return_val
else:
lilystring = return_val[0] + lilystring + return_val[1]
if isinstance(child, OtherNodes.ExpressionNode):
written = True
lilystring += self.item.GetClosingNotationLilies()
if len(children) == 0 or not written:
lilystring += self.item.GetClosingNotationLilies()
return lilystring | [
"def",
"toLily",
"(",
"self",
")",
":",
"lilystring",
"=",
"\"\"",
"if",
"self",
".",
"item",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"GetChild",
"(",
"0",
")",
",",
"NoteNode",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"item",
",",
"\"chord\"",
")",
"and",
"self",
".",
"item",
".",
"chord",
":",
"self",
".",
"item",
".",
"chord",
"=",
"\"stop\"",
"if",
"isinstance",
"(",
"self",
".",
"GetChild",
"(",
"0",
")",
",",
"NoteNode",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
".",
"item",
",",
"\"chord\"",
")",
"or",
"not",
"self",
".",
"item",
".",
"chord",
":",
"self",
".",
"item",
".",
"chord",
"=",
"\"start\"",
"lilystring",
"+=",
"self",
".",
"item",
".",
"toLily",
"(",
")",
"children",
"=",
"self",
".",
"GetChildrenIndexes",
"(",
")",
"written",
"=",
"False",
"for",
"child",
"in",
"children",
":",
"if",
"self",
".",
"GetChild",
"(",
"child",
")",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"self",
".",
"GetChild",
"(",
"child",
")",
",",
"NoteNode",
")",
":",
"lilystring",
"+=",
"\" \"",
"return_val",
"=",
"self",
".",
"GetChild",
"(",
"child",
")",
".",
"toLily",
"(",
")",
"if",
"isinstance",
"(",
"return_val",
",",
"str",
")",
":",
"lilystring",
"+=",
"return_val",
"else",
":",
"lilystring",
"=",
"return_val",
"[",
"0",
"]",
"+",
"lilystring",
"+",
"return_val",
"[",
"1",
"]",
"if",
"isinstance",
"(",
"child",
",",
"OtherNodes",
".",
"ExpressionNode",
")",
":",
"written",
"=",
"True",
"lilystring",
"+=",
"self",
".",
"item",
".",
"GetClosingNotationLilies",
"(",
")",
"if",
"len",
"(",
"children",
")",
"==",
"0",
"or",
"not",
"written",
":",
"lilystring",
"+=",
"self",
".",
"item",
".",
"GetClosingNotationLilies",
"(",
")",
"return",
"lilystring"
] | 42.848485 | 18.484848 |
def encrypt(base_field, key=None, ttl=None):
"""
A decorator for creating encrypted model fields.
:type base_field: models.Field[T]
:param bytes key: This is an optional argument.
Allows for specifying an instance specific encryption key.
:param int ttl: This is an optional argument.
The amount of time in seconds that a value can be stored for. If the
time to live of the data has passed, it will become unreadable.
The expired value will return an :class:`Expired` object.
:rtype: models.Field[EncryptedMixin, T]
"""
if not isinstance(base_field, models.Field):
assert key is None
assert ttl is None
return get_encrypted_field(base_field)
name, path, args, kwargs = base_field.deconstruct()
kwargs.update({'key': key, 'ttl': ttl})
return get_encrypted_field(base_field.__class__)(*args, **kwargs) | [
"def",
"encrypt",
"(",
"base_field",
",",
"key",
"=",
"None",
",",
"ttl",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"base_field",
",",
"models",
".",
"Field",
")",
":",
"assert",
"key",
"is",
"None",
"assert",
"ttl",
"is",
"None",
"return",
"get_encrypted_field",
"(",
"base_field",
")",
"name",
",",
"path",
",",
"args",
",",
"kwargs",
"=",
"base_field",
".",
"deconstruct",
"(",
")",
"kwargs",
".",
"update",
"(",
"{",
"'key'",
":",
"key",
",",
"'ttl'",
":",
"ttl",
"}",
")",
"return",
"get_encrypted_field",
"(",
"base_field",
".",
"__class__",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 38.304348 | 17.782609 |
def upload_file(filename, session):
""" Uploads a file """
print('Uploading file %s' % filename)
outfilesource = os.path.join(os.getcwd(), filename)
outfiletarget = 'sftp://' + ADDRESS + WORKING_DIR
out = saga.filesystem.File(outfilesource, session=session, flags=OVERWRITE)
out.copy(outfiletarget)
print('Transfer of `%s` to `%s` successful' % (filename, outfiletarget)) | [
"def",
"upload_file",
"(",
"filename",
",",
"session",
")",
":",
"print",
"(",
"'Uploading file %s'",
"%",
"filename",
")",
"outfilesource",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"filename",
")",
"outfiletarget",
"=",
"'sftp://'",
"+",
"ADDRESS",
"+",
"WORKING_DIR",
"out",
"=",
"saga",
".",
"filesystem",
".",
"File",
"(",
"outfilesource",
",",
"session",
"=",
"session",
",",
"flags",
"=",
"OVERWRITE",
")",
"out",
".",
"copy",
"(",
"outfiletarget",
")",
"print",
"(",
"'Transfer of `%s` to `%s` successful'",
"%",
"(",
"filename",
",",
"outfiletarget",
")",
")"
] | 49 | 15.25 |
def _remove_observation(self, x_to_remove, y_to_remove):
"""Remove observation from window, updating means/variance efficiently."""
self._remove_observation_from_variances(x_to_remove, y_to_remove)
self._remove_observation_from_means(x_to_remove, y_to_remove)
self.window_size -= 1 | [
"def",
"_remove_observation",
"(",
"self",
",",
"x_to_remove",
",",
"y_to_remove",
")",
":",
"self",
".",
"_remove_observation_from_variances",
"(",
"x_to_remove",
",",
"y_to_remove",
")",
"self",
".",
"_remove_observation_from_means",
"(",
"x_to_remove",
",",
"y_to_remove",
")",
"self",
".",
"window_size",
"-=",
"1"
] | 61.8 | 17.8 |
def relativeAreaSTE(self):
'''
return STE area - relative to image area
'''
s = self.noSTE.shape
return np.sum(self.mask_STE) / (s[0] * s[1]) | [
"def",
"relativeAreaSTE",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"noSTE",
".",
"shape",
"return",
"np",
".",
"sum",
"(",
"self",
".",
"mask_STE",
")",
"/",
"(",
"s",
"[",
"0",
"]",
"*",
"s",
"[",
"1",
"]",
")"
] | 30.166667 | 16.833333 |
def _set_fill_word(self, v, load=False):
"""
Setter method for fill_word, mapped from YANG variable /interface/fc_port/fill_word (fc-fillword-cfg-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_fill_word is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fill_word() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode("idle-idle"), is_leaf=True, yang_name="fill-word", rest_name="fill-word", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fill_word must be of a type compatible with fc-fillword-cfg-type""",
'defined-type': "brocade-interface:fc-fillword-cfg-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode("idle-idle"), is_leaf=True, yang_name="fill-word", rest_name="fill-word", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)""",
})
self.__fill_word = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_fill_word",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'idle-idle'",
":",
"{",
"'value'",
":",
"0",
"}",
",",
"u'arbff-arbff'",
":",
"{",
"'value'",
":",
"1",
"}",
",",
"u'idle-arbff'",
":",
"{",
"'value'",
":",
"2",
"}",
",",
"u'aa-then-ia'",
":",
"{",
"'value'",
":",
"3",
"}",
"}",
",",
")",
",",
"default",
"=",
"unicode",
"(",
"\"idle-idle\"",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"fill-word\"",
",",
"rest_name",
"=",
"\"fill-word\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure Fill Word'",
",",
"u'hidden'",
":",
"u'full'",
",",
"u'cli-suppress-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-interface'",
",",
"defining_module",
"=",
"'brocade-interface'",
",",
"yang_type",
"=",
"'fc-fillword-cfg-type'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"fill_word must be of a type compatible with fc-fillword-cfg-type\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-interface:fc-fillword-cfg-type\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode(\"idle-idle\"), is_leaf=True, yang_name=\"fill-word\", rest_name=\"fill-word\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__fill_word",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 103 | 49.136364 |
def get_attachment_model():
"""
Returns the Attachment model that is active in this project.
"""
try:
from .models import AbstractAttachment
klass = apps.get_model(config["attachment_model"])
if not issubclass(klass, AbstractAttachment):
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not "
"inherited from 'django_summernote.models.AbstractAttachment'" % config["attachment_model"]
)
return klass
except ValueError:
raise ImproperlyConfigured("SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed" % config["attachment_model"]
) | [
"def",
"get_attachment_model",
"(",
")",
":",
"try",
":",
"from",
".",
"models",
"import",
"AbstractAttachment",
"klass",
"=",
"apps",
".",
"get_model",
"(",
"config",
"[",
"\"attachment_model\"",
"]",
")",
"if",
"not",
"issubclass",
"(",
"klass",
",",
"AbstractAttachment",
")",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not \"",
"\"inherited from 'django_summernote.models.AbstractAttachment'\"",
"%",
"config",
"[",
"\"attachment_model\"",
"]",
")",
"return",
"klass",
"except",
"ValueError",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'\"",
")",
"except",
"LookupError",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed\"",
"%",
"config",
"[",
"\"attachment_model\"",
"]",
")"
] | 43.7 | 27.4 |
def _Open(self, path_spec, mode='rb'):
"""Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)
encryption_method = getattr(path_spec, 'encryption_method', None)
if not encryption_method:
raise errors.PathSpecError(
'Unsupported path specification without encryption method.')
self._encryption_method = encryption_method | [
"def",
"_Open",
"(",
"self",
",",
"path_spec",
",",
"mode",
"=",
"'rb'",
")",
":",
"if",
"not",
"path_spec",
".",
"HasParent",
"(",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unsupported path specification without parent.'",
")",
"resolver",
".",
"Resolver",
".",
"key_chain",
".",
"ExtractCredentialsFromPathSpec",
"(",
"path_spec",
")",
"encryption_method",
"=",
"getattr",
"(",
"path_spec",
",",
"'encryption_method'",
",",
"None",
")",
"if",
"not",
"encryption_method",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unsupported path specification without encryption method.'",
")",
"self",
".",
"_encryption_method",
"=",
"encryption_method"
] | 36.846154 | 20.153846 |
def main (args):
"""Usage: create_sedml2 output-filename
"""
if (len(args) != 2):
print(main.__doc__)
sys.exit(1);
# create the document
doc = libsedml.SedDocument();
doc.setLevel(1);
doc.setVersion(3);
# create a data description
ddesc = doc.createDataDescription()
ddesc.setId('data1')
ddesc.setName('Oscli Timecourse data')
ddesc.setSource('foo.numl')
# create data source
dsource = ddesc.createDataSource()
dsource.setId('dataS1')
# create slice
slice = dsource.createSlice()
slice.setReference('SpeciesIds')
slice.setValue('S1')
# specify mapping
timeDesc = libsedml.CompositeDescription()
timeDesc.setIndexType('double')
timeDesc.setId('time')
timeDesc.setName('time')
speciesDesc = timeDesc.createCompositeDescription()
speciesDesc.setIndexType('string')
speciesDesc.setId('SpeciesIds')
speciesDesc.setName('SpeciesIds')
concentrationDesc = speciesDesc.createAtomicDescription()
concentrationDesc.setValueType("double")
concentrationDesc.setName("Concentrations")
dimDesc = ddesc.createDimensionDescription()
dimDesc.append(timeDesc)
# write the document
libsedml.writeSedML(doc, args[1]); | [
"def",
"main",
"(",
"args",
")",
":",
"if",
"(",
"len",
"(",
"args",
")",
"!=",
"2",
")",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# create the document",
"doc",
"=",
"libsedml",
".",
"SedDocument",
"(",
")",
"doc",
".",
"setLevel",
"(",
"1",
")",
"doc",
".",
"setVersion",
"(",
"3",
")",
"# create a data description",
"ddesc",
"=",
"doc",
".",
"createDataDescription",
"(",
")",
"ddesc",
".",
"setId",
"(",
"'data1'",
")",
"ddesc",
".",
"setName",
"(",
"'Oscli Timecourse data'",
")",
"ddesc",
".",
"setSource",
"(",
"'foo.numl'",
")",
"# create data source ",
"dsource",
"=",
"ddesc",
".",
"createDataSource",
"(",
")",
"dsource",
".",
"setId",
"(",
"'dataS1'",
")",
"# create slice ",
"slice",
"=",
"dsource",
".",
"createSlice",
"(",
")",
"slice",
".",
"setReference",
"(",
"'SpeciesIds'",
")",
"slice",
".",
"setValue",
"(",
"'S1'",
")",
"# specify mapping",
"timeDesc",
"=",
"libsedml",
".",
"CompositeDescription",
"(",
")",
"timeDesc",
".",
"setIndexType",
"(",
"'double'",
")",
"timeDesc",
".",
"setId",
"(",
"'time'",
")",
"timeDesc",
".",
"setName",
"(",
"'time'",
")",
"speciesDesc",
"=",
"timeDesc",
".",
"createCompositeDescription",
"(",
")",
"speciesDesc",
".",
"setIndexType",
"(",
"'string'",
")",
"speciesDesc",
".",
"setId",
"(",
"'SpeciesIds'",
")",
"speciesDesc",
".",
"setName",
"(",
"'SpeciesIds'",
")",
"concentrationDesc",
"=",
"speciesDesc",
".",
"createAtomicDescription",
"(",
")",
"concentrationDesc",
".",
"setValueType",
"(",
"\"double\"",
")",
"concentrationDesc",
".",
"setName",
"(",
"\"Concentrations\"",
")",
"dimDesc",
"=",
"ddesc",
".",
"createDimensionDescription",
"(",
")",
"dimDesc",
".",
"append",
"(",
"timeDesc",
")",
"# write the document",
"libsedml",
".",
"writeSedML",
"(",
"doc",
",",
"args",
"[",
"1",
"]",
")"
] | 24.0625 | 17.270833 |
def admin(self):
"""points to the adminstrative side of ArcGIS Server"""
if self._securityHandler is None:
raise Exception("Cannot connect to adminstrative server without authentication")
from ..manageags import AGSAdministration
return AGSAdministration(url=self._adminUrl,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False) | [
"def",
"admin",
"(",
"self",
")",
":",
"if",
"self",
".",
"_securityHandler",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Cannot connect to adminstrative server without authentication\"",
")",
"from",
".",
".",
"manageags",
"import",
"AGSAdministration",
"return",
"AGSAdministration",
"(",
"url",
"=",
"self",
".",
"_adminUrl",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
",",
"initialize",
"=",
"False",
")"
] | 55.4 | 17.9 |
def delete_blacklist_entry(self, blacklist_entry_id):
"""Delete an existing blacklist entry.
Keyword arguments:
blacklist_entry_id -- The unique identifier of the blacklist entry to delete.
"""
delete_blacklist_endpoint = Template("${rest_root}/blacklist/${public_key}/${blacklist_entry_id}/delete")
url = delete_blacklist_endpoint.substitute(rest_root=self._rest_root, public_key=self._public_key, blacklist_entry_id=blacklist_entry_id)
self.__post_request(url, {}) | [
"def",
"delete_blacklist_entry",
"(",
"self",
",",
"blacklist_entry_id",
")",
":",
"delete_blacklist_endpoint",
"=",
"Template",
"(",
"\"${rest_root}/blacklist/${public_key}/${blacklist_entry_id}/delete\"",
")",
"url",
"=",
"delete_blacklist_endpoint",
".",
"substitute",
"(",
"rest_root",
"=",
"self",
".",
"_rest_root",
",",
"public_key",
"=",
"self",
".",
"_public_key",
",",
"blacklist_entry_id",
"=",
"blacklist_entry_id",
")",
"self",
".",
"__post_request",
"(",
"url",
",",
"{",
"}",
")"
] | 53.1 | 31.8 |
def find_vpid(self, url, res=None):
"""
Find the Video Packet ID in the HTML for the provided URL
:param url: URL to download, if res is not provided.
:param res: Provide a cached version of the HTTP response to search
:type url: string
:type res: requests.Response
:return: Video Packet ID for a Programme in iPlayer
:rtype: string
"""
log.debug("Looking for vpid on {0}", url)
# Use pre-fetched page if available
res = res or self.session.http.get(url)
m = self.mediator_re.search(res.text)
vpid = m and parse_json(m.group(1), schema=self.mediator_schema)
return vpid | [
"def",
"find_vpid",
"(",
"self",
",",
"url",
",",
"res",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\"Looking for vpid on {0}\"",
",",
"url",
")",
"# Use pre-fetched page if available",
"res",
"=",
"res",
"or",
"self",
".",
"session",
".",
"http",
".",
"get",
"(",
"url",
")",
"m",
"=",
"self",
".",
"mediator_re",
".",
"search",
"(",
"res",
".",
"text",
")",
"vpid",
"=",
"m",
"and",
"parse_json",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"schema",
"=",
"self",
".",
"mediator_schema",
")",
"return",
"vpid"
] | 39.647059 | 15.176471 |
def create_raw(self, create_missing=None):
"""Create an entity.
Possibly call :meth:`create_missing`. Then make an HTTP POST call to
``self.path('base')``. The request payload consists of whatever is
returned by :meth:`create_payload`. Return the response.
:param create_missing: Should :meth:`create_missing` be called? In
other words, should values be generated for required, empty fields?
Defaults to :data:`nailgun.entity_mixins.CREATE_MISSING`.
:return: A ``requests.response`` object.
"""
if create_missing is None:
create_missing = CREATE_MISSING
if create_missing is True:
self.create_missing()
return client.post(
self.path('base'),
self.create_payload(),
**self._server_config.get_client_kwargs()
) | [
"def",
"create_raw",
"(",
"self",
",",
"create_missing",
"=",
"None",
")",
":",
"if",
"create_missing",
"is",
"None",
":",
"create_missing",
"=",
"CREATE_MISSING",
"if",
"create_missing",
"is",
"True",
":",
"self",
".",
"create_missing",
"(",
")",
"return",
"client",
".",
"post",
"(",
"self",
".",
"path",
"(",
"'base'",
")",
",",
"self",
".",
"create_payload",
"(",
")",
",",
"*",
"*",
"self",
".",
"_server_config",
".",
"get_client_kwargs",
"(",
")",
")"
] | 39.181818 | 19.136364 |
def parse_xml_string(self, xml, id_generator=None):
"""Parse a string of XML, returning a usage id."""
if id_generator is not None:
warnings.warn(
"Passing an id_generator directly is deprecated "
"in favor of constructing the Runtime with the id_generator",
DeprecationWarning,
stacklevel=2,
)
id_generator = id_generator or self.id_generator
if isinstance(xml, six.binary_type):
io_type = BytesIO
else:
io_type = StringIO
return self.parse_xml_file(io_type(xml), id_generator) | [
"def",
"parse_xml_string",
"(",
"self",
",",
"xml",
",",
"id_generator",
"=",
"None",
")",
":",
"if",
"id_generator",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"Passing an id_generator directly is deprecated \"",
"\"in favor of constructing the Runtime with the id_generator\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"id_generator",
"=",
"id_generator",
"or",
"self",
".",
"id_generator",
"if",
"isinstance",
"(",
"xml",
",",
"six",
".",
"binary_type",
")",
":",
"io_type",
"=",
"BytesIO",
"else",
":",
"io_type",
"=",
"StringIO",
"return",
"self",
".",
"parse_xml_file",
"(",
"io_type",
"(",
"xml",
")",
",",
"id_generator",
")"
] | 39 | 16.5 |
def casefold_parts(self, parts):
"""Return the lower-case version of parts for a Windows filesystem."""
if self.filesystem.is_windows_fs:
return [p.lower() for p in parts]
return parts | [
"def",
"casefold_parts",
"(",
"self",
",",
"parts",
")",
":",
"if",
"self",
".",
"filesystem",
".",
"is_windows_fs",
":",
"return",
"[",
"p",
".",
"lower",
"(",
")",
"for",
"p",
"in",
"parts",
"]",
"return",
"parts"
] | 43.2 | 6.8 |
def resend_presence(self):
"""
Re-send the currently configured presence.
:return: Stanza token of the presence stanza or :data:`None` if the
stream is not established.
:rtype: :class:`~.stream.StanzaToken`
.. note::
:meth:`set_presence` automatically broadcasts the new presence if
any of the parameters changed.
"""
if self.client.established:
return self.client.enqueue(self.make_stanza()) | [
"def",
"resend_presence",
"(",
"self",
")",
":",
"if",
"self",
".",
"client",
".",
"established",
":",
"return",
"self",
".",
"client",
".",
"enqueue",
"(",
"self",
".",
"make_stanza",
"(",
")",
")"
] | 30.5 | 19.375 |
def get_extra_imts(self, imts):
"""
Returns the extra IMTs in the risk functions, i.e. the ones not in
the `imts` set (the set of IMTs for which there is hazard).
"""
extra_imts = set()
for taxonomy in self.taxonomies:
for (lt, kind), rf in self[taxonomy].risk_functions.items():
if rf.imt not in imts:
extra_imts.add(rf.imt)
return extra_imts | [
"def",
"get_extra_imts",
"(",
"self",
",",
"imts",
")",
":",
"extra_imts",
"=",
"set",
"(",
")",
"for",
"taxonomy",
"in",
"self",
".",
"taxonomies",
":",
"for",
"(",
"lt",
",",
"kind",
")",
",",
"rf",
"in",
"self",
"[",
"taxonomy",
"]",
".",
"risk_functions",
".",
"items",
"(",
")",
":",
"if",
"rf",
".",
"imt",
"not",
"in",
"imts",
":",
"extra_imts",
".",
"add",
"(",
"rf",
".",
"imt",
")",
"return",
"extra_imts"
] | 39.727273 | 12.272727 |
def yview(self, *args):
"""Update inplace widgets position when doing vertical scroll"""
self.after_idle(self.__updateWnds)
ttk.Treeview.yview(self, *args) | [
"def",
"yview",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"after_idle",
"(",
"self",
".",
"__updateWnds",
")",
"ttk",
".",
"Treeview",
".",
"yview",
"(",
"self",
",",
"*",
"args",
")"
] | 44 | 5 |
def concat(self, tailvec):
'''Returns the result of concatenating tailvec to the implicit
parameter'''
newvec = ImmutableVector()
vallist = [(i + self._length, tailvec[i]) \
for i in range(0, tailvec._length)]
newvec.tree = self.tree.multi_assoc(vallist)
newvec._length = self._length + tailvec._length
return newvec | [
"def",
"concat",
"(",
"self",
",",
"tailvec",
")",
":",
"newvec",
"=",
"ImmutableVector",
"(",
")",
"vallist",
"=",
"[",
"(",
"i",
"+",
"self",
".",
"_length",
",",
"tailvec",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"tailvec",
".",
"_length",
")",
"]",
"newvec",
".",
"tree",
"=",
"self",
".",
"tree",
".",
"multi_assoc",
"(",
"vallist",
")",
"newvec",
".",
"_length",
"=",
"self",
".",
"_length",
"+",
"tailvec",
".",
"_length",
"return",
"newvec"
] | 42.222222 | 15.333333 |
def _enumerator(opener, entry_cls, format_code=None, filter_code=None):
"""Return an archive enumerator from a user-defined source, using a user-
defined entry type.
"""
archive_res = _archive_read_new()
try:
r = _set_read_context(archive_res, format_code, filter_code)
opener(archive_res)
def it():
while 1:
with _archive_read_next_header(archive_res) as entry_res:
if entry_res is None:
break
e = entry_cls(archive_res, entry_res)
yield e
if e.is_consumed is False:
_archive_read_data_skip(archive_res)
yield it()
finally:
_archive_read_free(archive_res) | [
"def",
"_enumerator",
"(",
"opener",
",",
"entry_cls",
",",
"format_code",
"=",
"None",
",",
"filter_code",
"=",
"None",
")",
":",
"archive_res",
"=",
"_archive_read_new",
"(",
")",
"try",
":",
"r",
"=",
"_set_read_context",
"(",
"archive_res",
",",
"format_code",
",",
"filter_code",
")",
"opener",
"(",
"archive_res",
")",
"def",
"it",
"(",
")",
":",
"while",
"1",
":",
"with",
"_archive_read_next_header",
"(",
"archive_res",
")",
"as",
"entry_res",
":",
"if",
"entry_res",
"is",
"None",
":",
"break",
"e",
"=",
"entry_cls",
"(",
"archive_res",
",",
"entry_res",
")",
"yield",
"e",
"if",
"e",
".",
"is_consumed",
"is",
"False",
":",
"_archive_read_data_skip",
"(",
"archive_res",
")",
"yield",
"it",
"(",
")",
"finally",
":",
"_archive_read_free",
"(",
"archive_res",
")"
] | 31.541667 | 19.958333 |
def load_secret(self, secret):
"""
Ask YubiHSM to load a pre-existing YubiKey secret.
The data is stored internally in the YubiHSM in temporary memory -
this operation would typically be followed by one or more L{generate_aead}
commands to actually retreive the generated secret (in encrypted form).
@param secret: YubiKey secret to load
@type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string
@returns: Number of bytes in YubiHSM internal buffer after load
@rtype: integer
@see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
"""
if isinstance(secret, pyhsm.aead_cmd.YHSM_YubiKeySecret):
secret = secret.pack()
return pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, secret).execute() | [
"def",
"load_secret",
"(",
"self",
",",
"secret",
")",
":",
"if",
"isinstance",
"(",
"secret",
",",
"pyhsm",
".",
"aead_cmd",
".",
"YHSM_YubiKeySecret",
")",
":",
"secret",
"=",
"secret",
".",
"pack",
"(",
")",
"return",
"pyhsm",
".",
"buffer_cmd",
".",
"YHSM_Cmd_Buffer_Load",
"(",
"self",
".",
"stick",
",",
"secret",
")",
".",
"execute",
"(",
")"
] | 41.421053 | 24.789474 |
def _replay_index(replay_dir):
"""Output information for a directory of replays."""
run_config = run_configs.get()
replay_dir = run_config.abs_replay_path(replay_dir)
print("Checking: ", replay_dir)
with run_config.start(want_rgb=False) as controller:
print("-" * 60)
print(",".join((
"filename",
"build",
"map_name",
"game_duration_loops",
"players",
"P1-outcome",
"P1-race",
"P1-apm",
"P2-race",
"P2-apm",
)))
try:
bad_replays = []
for file_path in run_config.replay_paths(replay_dir):
file_name = os.path.basename(file_path)
try:
info = controller.replay_info(run_config.replay_data(file_path))
except remote_controller.RequestError as e:
bad_replays.append("%s: %s" % (file_name, e))
continue
if info.HasField("error"):
print("failed:", file_name, info.error, info.error_details)
bad_replays.append(file_name)
else:
out = [
file_name,
info.base_build,
info.map_name,
info.game_duration_loops,
len(info.player_info),
sc_pb.Result.Name(info.player_info[0].player_result.result),
sc_common.Race.Name(info.player_info[0].player_info.race_actual),
info.player_info[0].player_apm,
]
if len(info.player_info) >= 2:
out += [
sc_common.Race.Name(
info.player_info[1].player_info.race_actual),
info.player_info[1].player_apm,
]
print(u",".join(str(s) for s in out))
except KeyboardInterrupt:
pass
finally:
if bad_replays:
print("\n")
print("Replays with errors:")
print("\n".join(bad_replays)) | [
"def",
"_replay_index",
"(",
"replay_dir",
")",
":",
"run_config",
"=",
"run_configs",
".",
"get",
"(",
")",
"replay_dir",
"=",
"run_config",
".",
"abs_replay_path",
"(",
"replay_dir",
")",
"print",
"(",
"\"Checking: \"",
",",
"replay_dir",
")",
"with",
"run_config",
".",
"start",
"(",
"want_rgb",
"=",
"False",
")",
"as",
"controller",
":",
"print",
"(",
"\"-\"",
"*",
"60",
")",
"print",
"(",
"\",\"",
".",
"join",
"(",
"(",
"\"filename\"",
",",
"\"build\"",
",",
"\"map_name\"",
",",
"\"game_duration_loops\"",
",",
"\"players\"",
",",
"\"P1-outcome\"",
",",
"\"P1-race\"",
",",
"\"P1-apm\"",
",",
"\"P2-race\"",
",",
"\"P2-apm\"",
",",
")",
")",
")",
"try",
":",
"bad_replays",
"=",
"[",
"]",
"for",
"file_path",
"in",
"run_config",
".",
"replay_paths",
"(",
"replay_dir",
")",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
"try",
":",
"info",
"=",
"controller",
".",
"replay_info",
"(",
"run_config",
".",
"replay_data",
"(",
"file_path",
")",
")",
"except",
"remote_controller",
".",
"RequestError",
"as",
"e",
":",
"bad_replays",
".",
"append",
"(",
"\"%s: %s\"",
"%",
"(",
"file_name",
",",
"e",
")",
")",
"continue",
"if",
"info",
".",
"HasField",
"(",
"\"error\"",
")",
":",
"print",
"(",
"\"failed:\"",
",",
"file_name",
",",
"info",
".",
"error",
",",
"info",
".",
"error_details",
")",
"bad_replays",
".",
"append",
"(",
"file_name",
")",
"else",
":",
"out",
"=",
"[",
"file_name",
",",
"info",
".",
"base_build",
",",
"info",
".",
"map_name",
",",
"info",
".",
"game_duration_loops",
",",
"len",
"(",
"info",
".",
"player_info",
")",
",",
"sc_pb",
".",
"Result",
".",
"Name",
"(",
"info",
".",
"player_info",
"[",
"0",
"]",
".",
"player_result",
".",
"result",
")",
",",
"sc_common",
".",
"Race",
".",
"Name",
"(",
"info",
".",
"player_info",
"[",
"0",
"]",
".",
"player_info",
".",
"race_actual",
")",
",",
"info",
".",
"player_info",
"[",
"0",
"]",
".",
"player_apm",
",",
"]",
"if",
"len",
"(",
"info",
".",
"player_info",
")",
">=",
"2",
":",
"out",
"+=",
"[",
"sc_common",
".",
"Race",
".",
"Name",
"(",
"info",
".",
"player_info",
"[",
"1",
"]",
".",
"player_info",
".",
"race_actual",
")",
",",
"info",
".",
"player_info",
"[",
"1",
"]",
".",
"player_apm",
",",
"]",
"print",
"(",
"u\",\"",
".",
"join",
"(",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"out",
")",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"finally",
":",
"if",
"bad_replays",
":",
"print",
"(",
"\"\\n\"",
")",
"print",
"(",
"\"Replays with errors:\"",
")",
"print",
"(",
"\"\\n\"",
".",
"join",
"(",
"bad_replays",
")",
")"
] | 31.241379 | 17.931034 |
def has_membership(self, user, role):
""" checks if user is member of a group"""
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if targetRecord:
return role in [i.role for i in targetRecord.groups]
return False | [
"def",
"has_membership",
"(",
"self",
",",
"user",
",",
"role",
")",
":",
"targetRecord",
"=",
"AuthMembership",
".",
"objects",
"(",
"creator",
"=",
"self",
".",
"client",
",",
"user",
"=",
"user",
")",
".",
"first",
"(",
")",
"if",
"targetRecord",
":",
"return",
"role",
"in",
"[",
"i",
".",
"role",
"for",
"i",
"in",
"targetRecord",
".",
"groups",
"]",
"return",
"False"
] | 46.666667 | 18 |
def parse_tstv_by_qual(self):
""" Create the HTML for the TsTv by quality linegraph plot. """
self.vcftools_tstv_by_qual = dict()
for f in self.find_log_files('vcftools/tstv_by_qual', filehandles=True):
d = {}
for line in f['f'].readlines()[1:]: # don't add the header line (first row)
key = float(line.split()[0]) # taking the first column (QUAL_THRESHOLD) as key
val = float(line.split()[6]) # taking Ts/Tv_GT_QUAL_THRESHOLD as value
if (val == float('inf')) or (val == float('-inf')):
val = float('nan')
d[key] = val
self.vcftools_tstv_by_qual[f['s_name']] = d
# Filter out ignored sample names
self.vcftools_tstv_by_qual = self.ignore_samples(self.vcftools_tstv_by_qual)
if len(self.vcftools_tstv_by_qual) == 0:
return 0
pconfig = {
'id': 'vcftools_tstv_by_qual',
'title': 'VCFTools: TsTv by Qual',
'ylab': 'TsTv Ratio',
'xlab': 'SNP Quality Threshold',
'xmin': 0,
'ymin': 0,
'smooth_points': 400, # this limits huge filesizes and prevents browser crashing
'smooth_points_sumcounts': False
}
helptext = '''
`Transition` is a purine-to-purine or pyrimidine-to-pyrimidine point mutations.
`Transversion` is a purine-to-pyrimidine or pyrimidine-to-purine point mutation.
`Quality` here is the Phred-scaled quality score as given in the QUAL column of VCF.
Note: only bi-allelic SNPs are used (multi-allelic sites and INDELs are skipped.)
Refer to Vcftools's manual (https://vcftools.github.io/man_latest.html) on `--TsTv-by-qual`
'''
self.add_section(
name = 'TsTv by Qual',
anchor = 'vcftools-tstv-by-qual',
description = "Plot of `TSTV-BY-QUAL` - the transition to transversion ratio as a function of SNP quality from the output of vcftools TsTv-by-qual.",
helptext = helptext,
plot = linegraph.plot(self.vcftools_tstv_by_qual,pconfig)
)
return len(self.vcftools_tstv_by_qual) | [
"def",
"parse_tstv_by_qual",
"(",
"self",
")",
":",
"self",
".",
"vcftools_tstv_by_qual",
"=",
"dict",
"(",
")",
"for",
"f",
"in",
"self",
".",
"find_log_files",
"(",
"'vcftools/tstv_by_qual'",
",",
"filehandles",
"=",
"True",
")",
":",
"d",
"=",
"{",
"}",
"for",
"line",
"in",
"f",
"[",
"'f'",
"]",
".",
"readlines",
"(",
")",
"[",
"1",
":",
"]",
":",
"# don't add the header line (first row)",
"key",
"=",
"float",
"(",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"# taking the first column (QUAL_THRESHOLD) as key",
"val",
"=",
"float",
"(",
"line",
".",
"split",
"(",
")",
"[",
"6",
"]",
")",
"# taking Ts/Tv_GT_QUAL_THRESHOLD as value",
"if",
"(",
"val",
"==",
"float",
"(",
"'inf'",
")",
")",
"or",
"(",
"val",
"==",
"float",
"(",
"'-inf'",
")",
")",
":",
"val",
"=",
"float",
"(",
"'nan'",
")",
"d",
"[",
"key",
"]",
"=",
"val",
"self",
".",
"vcftools_tstv_by_qual",
"[",
"f",
"[",
"'s_name'",
"]",
"]",
"=",
"d",
"# Filter out ignored sample names",
"self",
".",
"vcftools_tstv_by_qual",
"=",
"self",
".",
"ignore_samples",
"(",
"self",
".",
"vcftools_tstv_by_qual",
")",
"if",
"len",
"(",
"self",
".",
"vcftools_tstv_by_qual",
")",
"==",
"0",
":",
"return",
"0",
"pconfig",
"=",
"{",
"'id'",
":",
"'vcftools_tstv_by_qual'",
",",
"'title'",
":",
"'VCFTools: TsTv by Qual'",
",",
"'ylab'",
":",
"'TsTv Ratio'",
",",
"'xlab'",
":",
"'SNP Quality Threshold'",
",",
"'xmin'",
":",
"0",
",",
"'ymin'",
":",
"0",
",",
"'smooth_points'",
":",
"400",
",",
"# this limits huge filesizes and prevents browser crashing",
"'smooth_points_sumcounts'",
":",
"False",
"}",
"helptext",
"=",
"'''\n `Transition` is a purine-to-purine or pyrimidine-to-pyrimidine point mutations.\n `Transversion` is a purine-to-pyrimidine or pyrimidine-to-purine point mutation.\n `Quality` here is the Phred-scaled quality score as given in the QUAL column of VCF.\n Note: only bi-allelic SNPs are used (multi-allelic sites and INDELs are skipped.)\n Refer to Vcftools's manual (https://vcftools.github.io/man_latest.html) on `--TsTv-by-qual`\n '''",
"self",
".",
"add_section",
"(",
"name",
"=",
"'TsTv by Qual'",
",",
"anchor",
"=",
"'vcftools-tstv-by-qual'",
",",
"description",
"=",
"\"Plot of `TSTV-BY-QUAL` - the transition to transversion ratio as a function of SNP quality from the output of vcftools TsTv-by-qual.\"",
",",
"helptext",
"=",
"helptext",
",",
"plot",
"=",
"linegraph",
".",
"plot",
"(",
"self",
".",
"vcftools_tstv_by_qual",
",",
"pconfig",
")",
")",
"return",
"len",
"(",
"self",
".",
"vcftools_tstv_by_qual",
")"
] | 45.229167 | 27.458333 |
def rolling_performances(self, timestamp='one_month'):
''' Filters self.perfs '''
# TODO Study the impact of month choice
# TODO Check timestamp in an enumeration
# TODO Implement other benchmarks for perf computation
# (zipline issue, maybe expected)
if self.metrics:
perfs = {}
length = range(len(self.metrics[timestamp]))
index = self._get_index(self.metrics[timestamp])
perf_keys = self.metrics[timestamp][0].keys()
perf_keys.pop(perf_keys.index('period_label'))
perfs['period'] = np.array(
[pd.datetime.date(date) for date in index])
for key in perf_keys:
perfs[key] = self._to_perf_array(timestamp, key, length)
else:
# TODO Get it from DB if it exists
raise NotImplementedError()
return pd.DataFrame(perfs, index=index) | [
"def",
"rolling_performances",
"(",
"self",
",",
"timestamp",
"=",
"'one_month'",
")",
":",
"# TODO Study the impact of month choice",
"# TODO Check timestamp in an enumeration",
"# TODO Implement other benchmarks for perf computation",
"# (zipline issue, maybe expected)",
"if",
"self",
".",
"metrics",
":",
"perfs",
"=",
"{",
"}",
"length",
"=",
"range",
"(",
"len",
"(",
"self",
".",
"metrics",
"[",
"timestamp",
"]",
")",
")",
"index",
"=",
"self",
".",
"_get_index",
"(",
"self",
".",
"metrics",
"[",
"timestamp",
"]",
")",
"perf_keys",
"=",
"self",
".",
"metrics",
"[",
"timestamp",
"]",
"[",
"0",
"]",
".",
"keys",
"(",
")",
"perf_keys",
".",
"pop",
"(",
"perf_keys",
".",
"index",
"(",
"'period_label'",
")",
")",
"perfs",
"[",
"'period'",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"pd",
".",
"datetime",
".",
"date",
"(",
"date",
")",
"for",
"date",
"in",
"index",
"]",
")",
"for",
"key",
"in",
"perf_keys",
":",
"perfs",
"[",
"key",
"]",
"=",
"self",
".",
"_to_perf_array",
"(",
"timestamp",
",",
"key",
",",
"length",
")",
"else",
":",
"# TODO Get it from DB if it exists",
"raise",
"NotImplementedError",
"(",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"perfs",
",",
"index",
"=",
"index",
")"
] | 39.608696 | 16.652174 |
def _dump_multipolygon(obj, big_endian, meta):
"""
Dump a GeoJSON-like `dict` to a multipolygon WKB string.
Input parameters and output are similar to :funct:`_dump_point`.
"""
coords = obj['coordinates']
vertex = coords[0][0][0]
num_dims = len(vertex)
wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder(
'MultiPolygon', num_dims, big_endian, meta
)
poly_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['Polygon']
if big_endian:
poly_type = BIG_ENDIAN + poly_type
else:
poly_type = LITTLE_ENDIAN + poly_type[::-1]
# apped the number of polygons
wkb_string += struct.pack('%sl' % byte_order, len(coords))
for polygon in coords:
# append polygon header
wkb_string += poly_type
# append the number of rings in this polygon
wkb_string += struct.pack('%sl' % byte_order, len(polygon))
for ring in polygon:
# append the number of vertices in this ring
wkb_string += struct.pack('%sl' % byte_order, len(ring))
for vertex in ring:
wkb_string += struct.pack(byte_fmt, *vertex)
return wkb_string | [
"def",
"_dump_multipolygon",
"(",
"obj",
",",
"big_endian",
",",
"meta",
")",
":",
"coords",
"=",
"obj",
"[",
"'coordinates'",
"]",
"vertex",
"=",
"coords",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"num_dims",
"=",
"len",
"(",
"vertex",
")",
"wkb_string",
",",
"byte_fmt",
",",
"byte_order",
"=",
"_header_bytefmt_byteorder",
"(",
"'MultiPolygon'",
",",
"num_dims",
",",
"big_endian",
",",
"meta",
")",
"poly_type",
"=",
"_WKB",
"[",
"_INT_TO_DIM_LABEL",
".",
"get",
"(",
"num_dims",
")",
"]",
"[",
"'Polygon'",
"]",
"if",
"big_endian",
":",
"poly_type",
"=",
"BIG_ENDIAN",
"+",
"poly_type",
"else",
":",
"poly_type",
"=",
"LITTLE_ENDIAN",
"+",
"poly_type",
"[",
":",
":",
"-",
"1",
"]",
"# apped the number of polygons",
"wkb_string",
"+=",
"struct",
".",
"pack",
"(",
"'%sl'",
"%",
"byte_order",
",",
"len",
"(",
"coords",
")",
")",
"for",
"polygon",
"in",
"coords",
":",
"# append polygon header",
"wkb_string",
"+=",
"poly_type",
"# append the number of rings in this polygon",
"wkb_string",
"+=",
"struct",
".",
"pack",
"(",
"'%sl'",
"%",
"byte_order",
",",
"len",
"(",
"polygon",
")",
")",
"for",
"ring",
"in",
"polygon",
":",
"# append the number of vertices in this ring",
"wkb_string",
"+=",
"struct",
".",
"pack",
"(",
"'%sl'",
"%",
"byte_order",
",",
"len",
"(",
"ring",
")",
")",
"for",
"vertex",
"in",
"ring",
":",
"wkb_string",
"+=",
"struct",
".",
"pack",
"(",
"byte_fmt",
",",
"*",
"vertex",
")",
"return",
"wkb_string"
] | 32.685714 | 19.771429 |
def monthly_cooling_design_days_020(self):
"""A list of 12 objects representing monthly 2.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_20 == [] \
or self._monthly_wb_20 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_20, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_20]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'2% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | [
"def",
"monthly_cooling_design_days_020",
"(",
"self",
")",
":",
"if",
"self",
".",
"monthly_found",
"is",
"False",
"or",
"self",
".",
"_monthly_db_20",
"==",
"[",
"]",
"or",
"self",
".",
"_monthly_wb_20",
"==",
"[",
"]",
":",
"return",
"[",
"]",
"else",
":",
"db_conds",
"=",
"[",
"DryBulbCondition",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"_monthly_db_20",
",",
"self",
".",
"_monthly_db_range_50",
")",
"]",
"hu_conds",
"=",
"[",
"HumidityCondition",
"(",
"'Wetbulb'",
",",
"x",
",",
"self",
".",
"_stand_press_at_elev",
")",
"for",
"x",
"in",
"self",
".",
"_monthly_wb_20",
"]",
"ws_conds",
"=",
"self",
".",
"monthly_wind_conditions",
"sky_conds",
"=",
"self",
".",
"monthly_clear_sky_conditions",
"return",
"[",
"DesignDay",
"(",
"'2% Cooling Design Day for {}'",
".",
"format",
"(",
"self",
".",
"_months",
"[",
"i",
"]",
")",
",",
"'SummerDesignDay'",
",",
"self",
".",
"_location",
",",
"db_conds",
"[",
"i",
"]",
",",
"hu_conds",
"[",
"i",
"]",
",",
"ws_conds",
"[",
"i",
"]",
",",
"sky_conds",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"12",
")",
"]"
] | 53 | 16.235294 |
def filter_nremoved(self, filt=True, quiet=False):
"""
Report how many data are removed by the active filters.
"""
rminfo = {}
for n in self.subsets['All_Samples']:
s = self.data[n]
rminfo[n] = s.filt_nremoved(filt)
if not quiet:
maxL = max([len(s) for s in rminfo.keys()])
print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
'{total:4s}'.format(total='tot') +
'{removed:4s}'.format(removed='flt') +
'{percent:4s}'.format(percent='%rm'))
for k, (ntot, nfilt, pcrm) in rminfo.items():
print('{string:{number}s}'.format(string=k, number=maxL + 3) +
'{total:4.0f}'.format(total=ntot) +
'{removed:4.0f}'.format(removed=nfilt) +
'{percent:4.0f}'.format(percent=pcrm))
return rminfo | [
"def",
"filter_nremoved",
"(",
"self",
",",
"filt",
"=",
"True",
",",
"quiet",
"=",
"False",
")",
":",
"rminfo",
"=",
"{",
"}",
"for",
"n",
"in",
"self",
".",
"subsets",
"[",
"'All_Samples'",
"]",
":",
"s",
"=",
"self",
".",
"data",
"[",
"n",
"]",
"rminfo",
"[",
"n",
"]",
"=",
"s",
".",
"filt_nremoved",
"(",
"filt",
")",
"if",
"not",
"quiet",
":",
"maxL",
"=",
"max",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"rminfo",
".",
"keys",
"(",
")",
"]",
")",
"print",
"(",
"'{string:{number}s}'",
".",
"format",
"(",
"string",
"=",
"'Sample '",
",",
"number",
"=",
"maxL",
"+",
"3",
")",
"+",
"'{total:4s}'",
".",
"format",
"(",
"total",
"=",
"'tot'",
")",
"+",
"'{removed:4s}'",
".",
"format",
"(",
"removed",
"=",
"'flt'",
")",
"+",
"'{percent:4s}'",
".",
"format",
"(",
"percent",
"=",
"'%rm'",
")",
")",
"for",
"k",
",",
"(",
"ntot",
",",
"nfilt",
",",
"pcrm",
")",
"in",
"rminfo",
".",
"items",
"(",
")",
":",
"print",
"(",
"'{string:{number}s}'",
".",
"format",
"(",
"string",
"=",
"k",
",",
"number",
"=",
"maxL",
"+",
"3",
")",
"+",
"'{total:4.0f}'",
".",
"format",
"(",
"total",
"=",
"ntot",
")",
"+",
"'{removed:4.0f}'",
".",
"format",
"(",
"removed",
"=",
"nfilt",
")",
"+",
"'{percent:4.0f}'",
".",
"format",
"(",
"percent",
"=",
"pcrm",
")",
")",
"return",
"rminfo"
] | 44.190476 | 17.52381 |
def dec(data, **kwargs):
    '''
    Alias to `{box_type}_decrypt`
    box_type: secretbox, sealedbox(default)
    '''
    # Forward the active Salt configuration to the nacl helper.
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.dec(data, **kwargs)
"def",
"dec",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'opts'",
"]",
"=",
"__opts__",
"return",
"salt",
".",
"utils",
".",
"nacl",
".",
"dec",
"(",
"data",
",",
"*",
"*",
"kwargs",
")"
] | 23.625 | 18.625 |
def decode_timeseries(self, resp, tsobj,
                      convert_timestamp=False):
    """
    Fills an TsObject with the appropriate data and
    metadata from a TsGetResp / TsQueryResp.
    :param resp: the protobuf message from which to process data
    :type resp: riak.pb.riak_ts_pb2.TsQueryRsp or
               riak.pb.riak_ts_pb2.TsGetResp
    :param tsobj: a TsObject
    :type tsobj: TsObject
    :param convert_timestamp: Convert timestamps to datetime objects
    :type convert_timestamp: boolean
    """
    # Column metadata: build parallel lists of names and decoded types,
    # then attach them to the target object as a TsColumns pair.
    if resp.columns is not None:
        col_names = []
        col_types = []
        for col in resp.columns:
            col_names.append(bytes_to_str(col.name))
            col_type = self.decode_timeseries_col_type(col.type)
            col_types.append(col_type)
        tsobj.columns = TsColumns(col_names, col_types)
    # Rows are decoded one by one; resp.columns is passed along so each
    # cell can be interpreted according to its column type.
    tsobj.rows = []
    if resp.rows is not None:
        for row in resp.rows:
            tsobj.rows.append(
                self.decode_timeseries_row(
                    row, resp.columns, convert_timestamp))
"def",
"decode_timeseries",
"(",
"self",
",",
"resp",
",",
"tsobj",
",",
"convert_timestamp",
"=",
"False",
")",
":",
"if",
"resp",
".",
"columns",
"is",
"not",
"None",
":",
"col_names",
"=",
"[",
"]",
"col_types",
"=",
"[",
"]",
"for",
"col",
"in",
"resp",
".",
"columns",
":",
"col_names",
".",
"append",
"(",
"bytes_to_str",
"(",
"col",
".",
"name",
")",
")",
"col_type",
"=",
"self",
".",
"decode_timeseries_col_type",
"(",
"col",
".",
"type",
")",
"col_types",
".",
"append",
"(",
"col_type",
")",
"tsobj",
".",
"columns",
"=",
"TsColumns",
"(",
"col_names",
",",
"col_types",
")",
"tsobj",
".",
"rows",
"=",
"[",
"]",
"if",
"resp",
".",
"rows",
"is",
"not",
"None",
":",
"for",
"row",
"in",
"resp",
".",
"rows",
":",
"tsobj",
".",
"rows",
".",
"append",
"(",
"self",
".",
"decode_timeseries_row",
"(",
"row",
",",
"resp",
".",
"columns",
",",
"convert_timestamp",
")",
")"
] | 38.896552 | 13.586207 |
def DbImportEvent(self, argin):
    """Fetch export information (e.g. the IOR) for an event channel.

    :param argin: name of the event channel or factory
    :type argin: tango.DevString
    :return: export information e.g. IOR
    :rtype: tango.DevVarLongStringArray
    """
    self._log.debug("In DbImportEvent()")
    # Lookups are case-insensitive; shell-style wildcards are translated
    # by replace_wildcard before the database query.
    pattern = replace_wildcard(argin.lower())
    return self.db.import_event(pattern)
"def",
"DbImportEvent",
"(",
"self",
",",
"argin",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"In DbImportEvent()\"",
")",
"argin",
"=",
"replace_wildcard",
"(",
"argin",
".",
"lower",
"(",
")",
")",
"return",
"self",
".",
"db",
".",
"import_event",
"(",
"argin",
")"
] | 38.8 | 9.1 |
def _renderResource(resource, request):
    """
    Render a given resource.
    See `IResource.render <twisted:twisted.web.resource.IResource.render>`.
    """
    handler = getattr(resource, 'render_' + nativeString(request.method), None)
    if handler is not None:
        return handler(request)
    # No render_<METHOD> handler: report which HTTP methods are allowed.
    try:
        allowed = resource.allowedMethods
    except AttributeError:
        allowed = _computeAllowedMethods(resource)
    raise UnsupportedMethod(allowed)
"def",
"_renderResource",
"(",
"resource",
",",
"request",
")",
":",
"meth",
"=",
"getattr",
"(",
"resource",
",",
"'render_'",
"+",
"nativeString",
"(",
"request",
".",
"method",
")",
",",
"None",
")",
"if",
"meth",
"is",
"None",
":",
"try",
":",
"allowedMethods",
"=",
"resource",
".",
"allowedMethods",
"except",
"AttributeError",
":",
"allowedMethods",
"=",
"_computeAllowedMethods",
"(",
"resource",
")",
"raise",
"UnsupportedMethod",
"(",
"allowedMethods",
")",
"return",
"meth",
"(",
"request",
")"
] | 34.142857 | 17 |
def seqToKV(seq, strict=False):
    """Represent a sequence of pairs of strings as newline-terminated
    key:value pairs. The pairs are generated in the order given.
    @param seq: The pairs
    @type seq: [(str, (unicode|str))]
    @param strict: If true, malformed input raises KVFormError instead
        of only logging a warning.
    @type strict: bool
    @return: A string representation of the sequence
    @rtype: str
    """
    def err(msg):
        # Report a problem with the input: fatal in strict mode,
        # otherwise just a logged warning.
        formatted = 'seqToKV warning: %s: %r' % (msg, seq)
        if strict:
            raise KVFormError(formatted)
        else:
            logging.warn(formatted)
    lines = []
    for k, v in seq:
        # Python 2 semantics: byte strings are decoded as UTF-8; any
        # other non-unicode type is coerced via str() with a warning.
        if isinstance(k, types.StringType):
            k = k.decode('UTF8')
        elif not isinstance(k, types.UnicodeType):
            err('Converting key to string: %r' % k)
            k = str(k)
        # A newline or colon inside a key would corrupt the KV framing,
        # so these are always fatal.
        if '\n' in k:
            raise KVFormError(
                'Invalid input for seqToKV: key contains newline: %r' % (k,))
        if ':' in k:
            raise KVFormError(
                'Invalid input for seqToKV: key contains colon: %r' % (k,))
        if k.strip() != k:
            err('Key has whitespace at beginning or end: %r' % (k,))
        if isinstance(v, types.StringType):
            v = v.decode('UTF8')
        elif not isinstance(v, types.UnicodeType):
            err('Converting value to string: %r' % (v,))
            v = str(v)
        # Values may contain colons, but a newline would end the pair early.
        if '\n' in v:
            raise KVFormError(
                'Invalid input for seqToKV: value contains newline: %r' % (v,))
        if v.strip() != v:
            err('Value has whitespace at beginning or end: %r' % (v,))
        lines.append(k + ':' + v + '\n')
    return ''.join(lines).encode('UTF8')
"def",
"seqToKV",
"(",
"seq",
",",
"strict",
"=",
"False",
")",
":",
"def",
"err",
"(",
"msg",
")",
":",
"formatted",
"=",
"'seqToKV warning: %s: %r'",
"%",
"(",
"msg",
",",
"seq",
")",
"if",
"strict",
":",
"raise",
"KVFormError",
"(",
"formatted",
")",
"else",
":",
"logging",
".",
"warn",
"(",
"formatted",
")",
"lines",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"seq",
":",
"if",
"isinstance",
"(",
"k",
",",
"types",
".",
"StringType",
")",
":",
"k",
"=",
"k",
".",
"decode",
"(",
"'UTF8'",
")",
"elif",
"not",
"isinstance",
"(",
"k",
",",
"types",
".",
"UnicodeType",
")",
":",
"err",
"(",
"'Converting key to string: %r'",
"%",
"k",
")",
"k",
"=",
"str",
"(",
"k",
")",
"if",
"'\\n'",
"in",
"k",
":",
"raise",
"KVFormError",
"(",
"'Invalid input for seqToKV: key contains newline: %r'",
"%",
"(",
"k",
",",
")",
")",
"if",
"':'",
"in",
"k",
":",
"raise",
"KVFormError",
"(",
"'Invalid input for seqToKV: key contains colon: %r'",
"%",
"(",
"k",
",",
")",
")",
"if",
"k",
".",
"strip",
"(",
")",
"!=",
"k",
":",
"err",
"(",
"'Key has whitespace at beginning or end: %r'",
"%",
"(",
"k",
",",
")",
")",
"if",
"isinstance",
"(",
"v",
",",
"types",
".",
"StringType",
")",
":",
"v",
"=",
"v",
".",
"decode",
"(",
"'UTF8'",
")",
"elif",
"not",
"isinstance",
"(",
"v",
",",
"types",
".",
"UnicodeType",
")",
":",
"err",
"(",
"'Converting value to string: %r'",
"%",
"(",
"v",
",",
")",
")",
"v",
"=",
"str",
"(",
"v",
")",
"if",
"'\\n'",
"in",
"v",
":",
"raise",
"KVFormError",
"(",
"'Invalid input for seqToKV: value contains newline: %r'",
"%",
"(",
"v",
",",
")",
")",
"if",
"v",
".",
"strip",
"(",
")",
"!=",
"v",
":",
"err",
"(",
"'Value has whitespace at beginning or end: %r'",
"%",
"(",
"v",
",",
")",
")",
"lines",
".",
"append",
"(",
"k",
"+",
"':'",
"+",
"v",
"+",
"'\\n'",
")",
"return",
"''",
".",
"join",
"(",
"lines",
")",
".",
"encode",
"(",
"'UTF8'",
")"
] | 30.173077 | 20.365385 |
def autocomplete(self):
    """
    Output completion suggestions for BASH.
    The output of this function is passed to BASH's `COMREPLY` variable and
    treated as completion suggestions. `COMREPLY` expects a space
    separated string as the result.
    The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
    to get information about the cli input. Please refer to the BASH
    man-page for more information about this variables.
    Subcommand options are saved as pairs. A pair consists of
    the long option string (e.g. '--exclude') and a boolean
    value indicating if the option requires arguments. When printing to
    stdout, an equal sign is appended to options which require arguments.
    Note: If debugging this function, it is recommended to write the debug
    output in a separate file. Otherwise the debug output will be treated
    and formatted as potential completion suggestions.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'DJANGO_AUTO_COMPLETE' not in os.environ:
        return
    # Drop the program name; COMP_CWORD is 1-based, so cwords[cword - 1]
    # is the word currently being completed (may not exist yet).
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        curr = cwords[cword - 1]
    except IndexError:
        curr = ''
    subcommands = list(get_commands()) + ['help']
    options = [('--help', None)]
    # subcommand
    if cword == 1:
        print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
    # subcommand options
    # special case: the 'help' subcommand has no options
    elif cwords[0] in subcommands and cwords[0] != 'help':
        subcommand_cls = self.fetch_command(cwords[0])
        # special case: 'runfcgi' stores additional options as
        # 'key=value' pairs
        if cwords[0] == 'runfcgi':
            from django.core.servers.fastcgi import FASTCGI_OPTIONS
            options += [(k, 1) for k in FASTCGI_OPTIONS]
        # special case: add the names of installed apps to options
        elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
                           'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
            try:
                app_configs = apps.get_app_configs()
                # Get the last part of the dotted path as the app name.
                options += [(app_config.label, 0) for app_config in app_configs]
            except ImportError:
                # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                # user will find out once they execute the command.
                pass
        parser = subcommand_cls.create_parser('', cwords[0])
        if subcommand_cls.use_argparse:
            # argparse path: (long option string, takes-an-argument flag).
            options += [(sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
                        parser._actions if s_opt.option_strings]
        else:
            # legacy optparse path.
            options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                        parser.option_list]
    # filter out previously specified options from available options
    prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
    options = [opt for opt in options if opt[0] not in prev_opts]
    # filter options by current input
    options = sorted((k, v) for k, v in options if k.startswith(curr))
    for option in options:
        opt_label = option[0]
        # append '=' to options which require args
        if option[1]:
            opt_label += '='
        print(opt_label)
    # NOTE(review): always exits with status 1 — presumably the bash
    # completion wrapper treats that as "suggestions were produced";
    # confirm against the shipped bash_completion script.
    sys.exit(1)
"def",
"autocomplete",
"(",
"self",
")",
":",
"# Don't complete if user hasn't sourced bash_completion file.",
"if",
"'DJANGO_AUTO_COMPLETE'",
"not",
"in",
"os",
".",
"environ",
":",
"return",
"cwords",
"=",
"os",
".",
"environ",
"[",
"'COMP_WORDS'",
"]",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"cword",
"=",
"int",
"(",
"os",
".",
"environ",
"[",
"'COMP_CWORD'",
"]",
")",
"try",
":",
"curr",
"=",
"cwords",
"[",
"cword",
"-",
"1",
"]",
"except",
"IndexError",
":",
"curr",
"=",
"''",
"subcommands",
"=",
"list",
"(",
"get_commands",
"(",
")",
")",
"+",
"[",
"'help'",
"]",
"options",
"=",
"[",
"(",
"'--help'",
",",
"None",
")",
"]",
"# subcommand",
"if",
"cword",
"==",
"1",
":",
"print",
"(",
"' '",
".",
"join",
"(",
"sorted",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"startswith",
"(",
"curr",
")",
",",
"subcommands",
")",
")",
")",
")",
"# subcommand options",
"# special case: the 'help' subcommand has no options",
"elif",
"cwords",
"[",
"0",
"]",
"in",
"subcommands",
"and",
"cwords",
"[",
"0",
"]",
"!=",
"'help'",
":",
"subcommand_cls",
"=",
"self",
".",
"fetch_command",
"(",
"cwords",
"[",
"0",
"]",
")",
"# special case: 'runfcgi' stores additional options as",
"# 'key=value' pairs",
"if",
"cwords",
"[",
"0",
"]",
"==",
"'runfcgi'",
":",
"from",
"django",
".",
"core",
".",
"servers",
".",
"fastcgi",
"import",
"FASTCGI_OPTIONS",
"options",
"+=",
"[",
"(",
"k",
",",
"1",
")",
"for",
"k",
"in",
"FASTCGI_OPTIONS",
"]",
"# special case: add the names of installed apps to options",
"elif",
"cwords",
"[",
"0",
"]",
"in",
"(",
"'dumpdata'",
",",
"'sql'",
",",
"'sqlall'",
",",
"'sqlclear'",
",",
"'sqlcustom'",
",",
"'sqlindexes'",
",",
"'sqlsequencereset'",
",",
"'test'",
")",
":",
"try",
":",
"app_configs",
"=",
"apps",
".",
"get_app_configs",
"(",
")",
"# Get the last part of the dotted path as the app name.",
"options",
"+=",
"[",
"(",
"app_config",
".",
"label",
",",
"0",
")",
"for",
"app_config",
"in",
"app_configs",
"]",
"except",
"ImportError",
":",
"# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The",
"# user will find out once they execute the command.",
"pass",
"parser",
"=",
"subcommand_cls",
".",
"create_parser",
"(",
"''",
",",
"cwords",
"[",
"0",
"]",
")",
"if",
"subcommand_cls",
".",
"use_argparse",
":",
"options",
"+=",
"[",
"(",
"sorted",
"(",
"s_opt",
".",
"option_strings",
")",
"[",
"0",
"]",
",",
"s_opt",
".",
"nargs",
"!=",
"0",
")",
"for",
"s_opt",
"in",
"parser",
".",
"_actions",
"if",
"s_opt",
".",
"option_strings",
"]",
"else",
":",
"options",
"+=",
"[",
"(",
"s_opt",
".",
"get_opt_string",
"(",
")",
",",
"s_opt",
".",
"nargs",
")",
"for",
"s_opt",
"in",
"parser",
".",
"option_list",
"]",
"# filter out previously specified options from available options",
"prev_opts",
"=",
"[",
"x",
".",
"split",
"(",
"'='",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"cwords",
"[",
"1",
":",
"cword",
"-",
"1",
"]",
"]",
"options",
"=",
"[",
"opt",
"for",
"opt",
"in",
"options",
"if",
"opt",
"[",
"0",
"]",
"not",
"in",
"prev_opts",
"]",
"# filter options by current input",
"options",
"=",
"sorted",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"options",
"if",
"k",
".",
"startswith",
"(",
"curr",
")",
")",
"for",
"option",
"in",
"options",
":",
"opt_label",
"=",
"option",
"[",
"0",
"]",
"# append '=' to options which require args",
"if",
"option",
"[",
"1",
"]",
":",
"opt_label",
"+=",
"'='",
"print",
"(",
"opt_label",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | 46.278481 | 23.392405 |
async def home_z(self, mount: top_types.Mount = None):
    """ Home the two z-axes """
    # With no mount given, home both Z axes; otherwise only the axis
    # belonging to the requested mount.
    if mount:
        targets = [Axis.by_mount(mount)]
    else:
        targets = [Axis.Z, Axis.A]
    await self.home(targets)
"async",
"def",
"home_z",
"(",
"self",
",",
"mount",
":",
"top_types",
".",
"Mount",
"=",
"None",
")",
":",
"if",
"not",
"mount",
":",
"axes",
"=",
"[",
"Axis",
".",
"Z",
",",
"Axis",
".",
"A",
"]",
"else",
":",
"axes",
"=",
"[",
"Axis",
".",
"by_mount",
"(",
"mount",
")",
"]",
"await",
"self",
".",
"home",
"(",
"axes",
")"
] | 32.571429 | 11 |
def snapshot(opts):
    """snapshot a seqrepo data directory by hardlinking sequence files,
    copying sqlite databases, and remove write permissions from directories

    ``opts`` must provide ``root_directory``, ``instance_name`` and
    ``destination_name`` (absolute, or relative to ``root_directory``).
    """
    seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
    dst_dir = opts.destination_name
    if not dst_dir.startswith("/"):
        # interpret dst_dir as relative to parent dir of seqrepo_dir
        dst_dir = os.path.join(opts.root_directory, dst_dir)
    src_dir = os.path.realpath(seqrepo_dir)
    dst_dir = os.path.realpath(dst_dir)
    # Refuse to snapshot into a subdirectory of the source: the walk
    # below would otherwise recurse into its own output.
    if commonpath([src_dir, dst_dir]).startswith(src_dir):
        raise RuntimeError("Cannot nest seqrepo directories " "({} is within {})".format(dst_dir, src_dir))
    if os.path.exists(dst_dir):
        raise IOError(dst_dir + ": File exists")
    # Build the snapshot in a sibling temp dir, then os.rename() it into
    # place at the end so the destination appears atomically.
    tmp_dir = tempfile.mkdtemp(prefix=dst_dir + ".")
    _logger.debug("src_dir = " + src_dir)
    _logger.debug("dst_dir = " + dst_dir)
    _logger.debug("tmp_dir = " + tmp_dir)
    # TODO: cleanup of tmpdir on failure
    makedirs(tmp_dir, exist_ok=True)
    # NOTE(review): cwd is changed here and only restored at the very end;
    # an exception mid-way leaves the process chdir'd into src_dir —
    # consider try/finally.
    wd = os.getcwd()
    os.chdir(src_dir)
    # make destination directories (walk is top-down)
    for rp in (os.path.join(dirpath, dirname) for dirpath, dirnames, _ in os.walk(".") for dirname in dirnames):
        dp = os.path.join(tmp_dir, rp)
        os.mkdir(dp)
    # hard link sequence files
    for rp in (os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(".") for filename in filenames
               if ".bgz" in filename):
        dp = os.path.join(tmp_dir, rp)
        os.link(rp, dp)
    # copy sqlite databases
    for rp in ["aliases.sqlite3", "sequences/db.sqlite3"]:
        dp = os.path.join(tmp_dir, rp)
        shutil.copyfile(rp, dp)
    # recursively drop write perms on snapshot
    mode_aw = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
    def _drop_write(p):
        # Clear all write bits (user/group/other) on a single path.
        mode = os.lstat(p).st_mode
        new_mode = mode & ~mode_aw
        os.chmod(p, new_mode)
    for dp in (os.path.join(dirpath, dirent)
               for dirpath, dirnames, filenames in os.walk(tmp_dir) for dirent in dirnames + filenames):
        _drop_write(dp)
    _drop_write(tmp_dir)
    os.rename(tmp_dir, dst_dir)
    _logger.info("snapshot created in " + dst_dir)
    os.chdir(wd)
"def",
"snapshot",
"(",
"opts",
")",
":",
"seqrepo_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"opts",
".",
"root_directory",
",",
"opts",
".",
"instance_name",
")",
"dst_dir",
"=",
"opts",
".",
"destination_name",
"if",
"not",
"dst_dir",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"# interpret dst_dir as relative to parent dir of seqrepo_dir",
"dst_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"opts",
".",
"root_directory",
",",
"dst_dir",
")",
"src_dir",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"seqrepo_dir",
")",
"dst_dir",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"dst_dir",
")",
"if",
"commonpath",
"(",
"[",
"src_dir",
",",
"dst_dir",
"]",
")",
".",
"startswith",
"(",
"src_dir",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot nest seqrepo directories \"",
"\"({} is within {})\"",
".",
"format",
"(",
"dst_dir",
",",
"src_dir",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dst_dir",
")",
":",
"raise",
"IOError",
"(",
"dst_dir",
"+",
"\": File exists\"",
")",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"dst_dir",
"+",
"\".\"",
")",
"_logger",
".",
"debug",
"(",
"\"src_dir = \"",
"+",
"src_dir",
")",
"_logger",
".",
"debug",
"(",
"\"dst_dir = \"",
"+",
"dst_dir",
")",
"_logger",
".",
"debug",
"(",
"\"tmp_dir = \"",
"+",
"tmp_dir",
")",
"# TODO: cleanup of tmpdir on failure",
"makedirs",
"(",
"tmp_dir",
",",
"exist_ok",
"=",
"True",
")",
"wd",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"src_dir",
")",
"# make destination directories (walk is top-down)",
"for",
"rp",
"in",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"dirname",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"_",
"in",
"os",
".",
"walk",
"(",
"\".\"",
")",
"for",
"dirname",
"in",
"dirnames",
")",
":",
"dp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"rp",
")",
"os",
".",
"mkdir",
"(",
"dp",
")",
"# hard link sequence files",
"for",
"rp",
"in",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"filename",
")",
"for",
"dirpath",
",",
"_",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"\".\"",
")",
"for",
"filename",
"in",
"filenames",
"if",
"\".bgz\"",
"in",
"filename",
")",
":",
"dp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"rp",
")",
"os",
".",
"link",
"(",
"rp",
",",
"dp",
")",
"# copy sqlite databases",
"for",
"rp",
"in",
"[",
"\"aliases.sqlite3\"",
",",
"\"sequences/db.sqlite3\"",
"]",
":",
"dp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"rp",
")",
"shutil",
".",
"copyfile",
"(",
"rp",
",",
"dp",
")",
"# recursively drop write perms on snapshot",
"mode_aw",
"=",
"stat",
".",
"S_IWUSR",
"|",
"stat",
".",
"S_IWGRP",
"|",
"stat",
".",
"S_IWOTH",
"def",
"_drop_write",
"(",
"p",
")",
":",
"mode",
"=",
"os",
".",
"lstat",
"(",
"p",
")",
".",
"st_mode",
"new_mode",
"=",
"mode",
"&",
"~",
"mode_aw",
"os",
".",
"chmod",
"(",
"p",
",",
"new_mode",
")",
"for",
"dp",
"in",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"dirent",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"tmp_dir",
")",
"for",
"dirent",
"in",
"dirnames",
"+",
"filenames",
")",
":",
"_drop_write",
"(",
"dp",
")",
"_drop_write",
"(",
"tmp_dir",
")",
"os",
".",
"rename",
"(",
"tmp_dir",
",",
"dst_dir",
")",
"_logger",
".",
"info",
"(",
"\"snapshot created in \"",
"+",
"dst_dir",
")",
"os",
".",
"chdir",
"(",
"wd",
")"
] | 34.265625 | 21.390625 |
def run_query(ont, aset, args):
    """
    Basic querying by positive/negative class lists.

    Prints each matching subject together with its label. When
    ``args.plot`` is set, also renders a heatmap of the subject /
    association matrix via plotly.

    :param ont: ontology handle (not used directly here; kept for
        interface parity with the sibling run_* commands)
    :param aset: association set providing ``query``, ``label`` and
        ``query_associations``
    :param args: parsed CLI arguments providing ``query``, ``negative``
        and ``plot``
    """
    subjects = aset.query(args.query, args.negative)
    for s in subjects:
        print("{} {}".format(s, str(aset.label(s))))
    if args.plot:
        import plotly.plotly as py
        import plotly.graph_objs as go
        tups = aset.query_associations(subjects=subjects)
        z, xaxis, yaxis = tuple_to_matrix(tups)
        # Pad axis labels with plain spaces so plotly renders them legibly.
        # (An unused `spacechar` local previously shadowed these literals.)
        xaxis = mk_axis(xaxis, aset, args, spacechar=" ")
        yaxis = mk_axis(yaxis, aset, args, spacechar=" ")
        logging.info("PLOTTING: {} x {} = {}".format(xaxis, yaxis, z))
        trace = go.Heatmap(z=z,
                           x=xaxis,
                           y=yaxis)
        py.plot([trace], filename='labelled-heatmap')
"def",
"run_query",
"(",
"ont",
",",
"aset",
",",
"args",
")",
":",
"subjects",
"=",
"aset",
".",
"query",
"(",
"args",
".",
"query",
",",
"args",
".",
"negative",
")",
"for",
"s",
"in",
"subjects",
":",
"print",
"(",
"\"{} {}\"",
".",
"format",
"(",
"s",
",",
"str",
"(",
"aset",
".",
"label",
"(",
"s",
")",
")",
")",
")",
"if",
"args",
".",
"plot",
":",
"import",
"plotly",
".",
"plotly",
"as",
"py",
"import",
"plotly",
".",
"graph_objs",
"as",
"go",
"tups",
"=",
"aset",
".",
"query_associations",
"(",
"subjects",
"=",
"subjects",
")",
"z",
",",
"xaxis",
",",
"yaxis",
"=",
"tuple_to_matrix",
"(",
"tups",
")",
"spacechar",
"=",
"\" \"",
"xaxis",
"=",
"mk_axis",
"(",
"xaxis",
",",
"aset",
",",
"args",
",",
"spacechar",
"=",
"\" \"",
")",
"yaxis",
"=",
"mk_axis",
"(",
"yaxis",
",",
"aset",
",",
"args",
",",
"spacechar",
"=",
"\" \"",
")",
"logging",
".",
"info",
"(",
"\"PLOTTING: {} x {} = {}\"",
".",
"format",
"(",
"xaxis",
",",
"yaxis",
",",
"z",
")",
")",
"trace",
"=",
"go",
".",
"Heatmap",
"(",
"z",
"=",
"z",
",",
"x",
"=",
"xaxis",
",",
"y",
"=",
"yaxis",
")",
"data",
"=",
"[",
"trace",
"]",
"py",
".",
"plot",
"(",
"data",
",",
"filename",
"=",
"'labelled-heatmap'",
")"
] | 36.045455 | 13.045455 |
def _create_dictionary_of_ned_d(
        self):
    """create a list of dictionaries containing all the rows in the ned_d catalogue
    **Return:**
        - ``dictList`` - a list of dictionaries containing all the rows in the ned_d catalogue
    .. todo ::
        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check redendering of this docstring
    """
    self.log.debug(
        'starting the ``_create_dictionary_of_ned_d`` method')
    count = 0
    # First pass: count the rows so progress can be reported as a
    # percentage during the second pass.
    with open(self.pathToDataFile, 'rb') as csvFile:
        csvReader = csv.reader(
            csvFile, dialect='excel', delimiter=',', quotechar='"')
        totalRows = sum(1 for row in csvReader)
    csvFile.close()
    totalCount = totalRows
    # Second pass: locate the header row, map its raw column titles to
    # database-friendly key names, then convert each data row to a dict.
    with open(self.pathToDataFile, 'rb') as csvFile:
        csvReader = csv.reader(
            csvFile, dialect='excel', delimiter=',', quotechar='"')
        theseKeys = []
        dictList = []
        for row in csvReader:
            if len(theseKeys) == 0:
                totalRows -= 1
            # The header row is identified by these two known titles.
            if "Exclusion Code" in row and "Hubble const." in row:
                for i in row:
                    if i == "redshift (z)":
                        theseKeys.append("redshift")
                    elif i == "Hubble const.":
                        theseKeys.append("hubble_const")
                    elif i == "G":
                        theseKeys.append("galaxy_index_id")
                    elif i == "err":
                        theseKeys.append("dist_mod_err")
                    elif i == "D (Mpc)":
                        theseKeys.append("dist_mpc")
                    elif i == "Date (Yr. - 1980)":
                        theseKeys.append("ref_date")
                    elif i == "REFCODE":
                        theseKeys.append("ref")
                    elif i == "Exclusion Code":
                        theseKeys.append("dist_in_ned_flag")
                    elif i == "Adopted LMC modulus":
                        theseKeys.append("lmc_mod")
                    elif i == "m-M":
                        theseKeys.append("dist_mod")
                    elif i == "Notes":
                        theseKeys.append("notes")
                    elif i == "SN ID":
                        theseKeys.append("dist_derived_from_sn")
                    elif i == "method":
                        theseKeys.append("dist_method")
                    elif i == "Galaxy ID":
                        theseKeys.append("primary_ned_id")
                    elif i == "D":
                        theseKeys.append("dist_index_id")
                    else:
                        theseKeys.append(i)
                continue
            if len(theseKeys):
                count += 1
                if count > 1:
                    # Cursor up one line and clear line
                    sys.stdout.write("\x1b[1A\x1b[2K")
                if count > totalCount:
                    count = totalCount
                percent = (float(count) / float(totalCount)) * 100.
                print "%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory" % locals()
                rowDict = {}
                for t, r in zip(theseKeys, row):
                    rowDict[t] = r
                    # Reference dates are stored as offsets from 1980.
                    if t == "ref_date":
                        try:
                            rowDict[t] = int(r) + 1980
                        except:
                            rowDict[t] = None
                # "999999" appears to flag rows without a usable distance
                # entry; those rows are skipped. TODO confirm against the
                # NED-D catalogue documentation.
                if rowDict["dist_index_id"] != "999999":
                    dictList.append(rowDict)
    csvFile.close()
    self.log.debug(
        'completed the ``_create_dictionary_of_ned_d`` method')
    return dictList
"def",
"_create_dictionary_of_ned_d",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_dictionary_of_ned_d`` method'",
")",
"count",
"=",
"0",
"with",
"open",
"(",
"self",
".",
"pathToDataFile",
",",
"'rb'",
")",
"as",
"csvFile",
":",
"csvReader",
"=",
"csv",
".",
"reader",
"(",
"csvFile",
",",
"dialect",
"=",
"'excel'",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
")",
"totalRows",
"=",
"sum",
"(",
"1",
"for",
"row",
"in",
"csvReader",
")",
"csvFile",
".",
"close",
"(",
")",
"totalCount",
"=",
"totalRows",
"with",
"open",
"(",
"self",
".",
"pathToDataFile",
",",
"'rb'",
")",
"as",
"csvFile",
":",
"csvReader",
"=",
"csv",
".",
"reader",
"(",
"csvFile",
",",
"dialect",
"=",
"'excel'",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
")",
"theseKeys",
"=",
"[",
"]",
"dictList",
"=",
"[",
"]",
"for",
"row",
"in",
"csvReader",
":",
"if",
"len",
"(",
"theseKeys",
")",
"==",
"0",
":",
"totalRows",
"-=",
"1",
"if",
"\"Exclusion Code\"",
"in",
"row",
"and",
"\"Hubble const.\"",
"in",
"row",
":",
"for",
"i",
"in",
"row",
":",
"if",
"i",
"==",
"\"redshift (z)\"",
":",
"theseKeys",
".",
"append",
"(",
"\"redshift\"",
")",
"elif",
"i",
"==",
"\"Hubble const.\"",
":",
"theseKeys",
".",
"append",
"(",
"\"hubble_const\"",
")",
"elif",
"i",
"==",
"\"G\"",
":",
"theseKeys",
".",
"append",
"(",
"\"galaxy_index_id\"",
")",
"elif",
"i",
"==",
"\"err\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_mod_err\"",
")",
"elif",
"i",
"==",
"\"D (Mpc)\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_mpc\"",
")",
"elif",
"i",
"==",
"\"Date (Yr. - 1980)\"",
":",
"theseKeys",
".",
"append",
"(",
"\"ref_date\"",
")",
"elif",
"i",
"==",
"\"REFCODE\"",
":",
"theseKeys",
".",
"append",
"(",
"\"ref\"",
")",
"elif",
"i",
"==",
"\"Exclusion Code\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_in_ned_flag\"",
")",
"elif",
"i",
"==",
"\"Adopted LMC modulus\"",
":",
"theseKeys",
".",
"append",
"(",
"\"lmc_mod\"",
")",
"elif",
"i",
"==",
"\"m-M\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_mod\"",
")",
"elif",
"i",
"==",
"\"Notes\"",
":",
"theseKeys",
".",
"append",
"(",
"\"notes\"",
")",
"elif",
"i",
"==",
"\"SN ID\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_derived_from_sn\"",
")",
"elif",
"i",
"==",
"\"method\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_method\"",
")",
"elif",
"i",
"==",
"\"Galaxy ID\"",
":",
"theseKeys",
".",
"append",
"(",
"\"primary_ned_id\"",
")",
"elif",
"i",
"==",
"\"D\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_index_id\"",
")",
"else",
":",
"theseKeys",
".",
"append",
"(",
"i",
")",
"continue",
"if",
"len",
"(",
"theseKeys",
")",
":",
"count",
"+=",
"1",
"if",
"count",
">",
"1",
":",
"# Cursor up one line and clear line",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"if",
"count",
">",
"totalCount",
":",
"count",
"=",
"totalCount",
"percent",
"=",
"(",
"float",
"(",
"count",
")",
"/",
"float",
"(",
"totalCount",
")",
")",
"*",
"100.",
"print",
"\"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\"",
"%",
"locals",
"(",
")",
"rowDict",
"=",
"{",
"}",
"for",
"t",
",",
"r",
"in",
"zip",
"(",
"theseKeys",
",",
"row",
")",
":",
"rowDict",
"[",
"t",
"]",
"=",
"r",
"if",
"t",
"==",
"\"ref_date\"",
":",
"try",
":",
"rowDict",
"[",
"t",
"]",
"=",
"int",
"(",
"r",
")",
"+",
"1980",
"except",
":",
"rowDict",
"[",
"t",
"]",
"=",
"None",
"if",
"rowDict",
"[",
"\"dist_index_id\"",
"]",
"!=",
"\"999999\"",
":",
"dictList",
".",
"append",
"(",
"rowDict",
")",
"csvFile",
".",
"close",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_dictionary_of_ned_d`` method'",
")",
"return",
"dictList"
] | 42.428571 | 16.306122 |
def signed_session(self, session=None):
    # type: (Optional[requests.Session]) -> requests.Session
    """Return a requests session carrying OAuth2 auth for this credential.

    If *session* is supplied it is configured in place; otherwise a
    fresh session is created and returned.

    :param session: The session to configure for authentication
    :type session: requests.Session
    :rtype: requests.Session
    """
    # Deliberately not calling super(): the "auth" object manages the headers.
    session = session or requests.Session()
    session.auth = oauth.OAuth2(self.id, token=self.token)
    return session
"def",
"signed_session",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"# type: (Optional[requests.Session]) -> requests.Session",
"session",
"=",
"session",
"or",
"requests",
".",
"Session",
"(",
")",
"# Don't call super on purpose, let's \"auth\" manage the headers.",
"session",
".",
"auth",
"=",
"oauth",
".",
"OAuth2",
"(",
"self",
".",
"id",
",",
"token",
"=",
"self",
".",
"token",
")",
"return",
"session"
] | 45.642857 | 20.714286 |
def call(self, args, devnull=False):
    """Call other processes.

    args - list of command args
    devnull - whether to pipe stdout to /dev/null (or equivalent)

    Returns the child's exit code, or False if CalledProcessError is
    raised.
    """
    if self.debug:
        click.echo(subprocess.list2cmdline(args))
        click.confirm('Continue?', default=True, abort=True)
    extra = {}
    if devnull:
        # Pipe to /dev/null (or equivalent).
        extra['stderr'] = subprocess.STDOUT
        extra['stdout'] = self.FNULL
    try:
        return subprocess.call(args, **extra)
    except subprocess.CalledProcessError:
        # NOTE(review): subprocess.call does not raise CalledProcessError
        # (check_call does) — this handler looks vestigial; confirm.
        return False
"def",
"call",
"(",
"self",
",",
"args",
",",
"devnull",
"=",
"False",
")",
":",
"if",
"self",
".",
"debug",
":",
"click",
".",
"echo",
"(",
"subprocess",
".",
"list2cmdline",
"(",
"args",
")",
")",
"click",
".",
"confirm",
"(",
"'Continue?'",
",",
"default",
"=",
"True",
",",
"abort",
"=",
"True",
")",
"try",
":",
"kwargs",
"=",
"{",
"}",
"if",
"devnull",
":",
"# Pipe to /dev/null (or equivalent).",
"kwargs",
"[",
"'stderr'",
"]",
"=",
"subprocess",
".",
"STDOUT",
"kwargs",
"[",
"'stdout'",
"]",
"=",
"self",
".",
"FNULL",
"ret_code",
"=",
"subprocess",
".",
"call",
"(",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"return",
"False",
"return",
"ret_code"
] | 37.5 | 13.111111 |
def get_tripIs_within_range_by_dsut(self,
                                    start_time_ut,
                                    end_time_ut):
    """
    Obtain a list of trip_Is that take place during a time interval.
    The trip needs to be only partially overlapping with the given time interval.
    The grouping by dsut (day_start_ut) is required as same trip_I could
    take place on multiple days.
    Parameters
    ----------
    start_time_ut : int
        start of the time interval in unix time (seconds)
    end_time_ut: int
        end of the time interval in unix time (seconds)
    Returns
    -------
    trip_I_dict: dict
        keys: day_start_times to list of integers (trip_Is)
    """
    cur = self.conn.cursor()
    assert start_time_ut <= end_time_ut
    # Enumerate candidate service-day starts (and the query interval
    # expressed as day-seconds relative to each day start).
    # NOTE(review): the third argument (7) is presumably a day-buffer
    # passed to _get_possible_day_starts — confirm against that helper.
    dst_ut, st_ds, et_ds = \
        self._get_possible_day_starts(start_time_ut, end_time_ut, 7)
    # noinspection PyTypeChecker
    # NOTE(review): len() can never be negative, so this assert is vacuous.
    assert len(dst_ut) >= 0
    trip_I_dict = {}
    for day_start_ut, start_ds, end_ds in \
            zip(dst_ut, st_ds, et_ds):
        # A trip overlaps the interval when it starts before the interval
        # ends AND ends after the interval starts (partial overlap).
        query = """
                    SELECT distinct(trip_I)
                    FROM days
                         JOIN trips
                         USING(trip_I)
                    WHERE
                        (days.day_start_ut == ?)
                        AND (
                                (trips.start_time_ds <= ?)
                                AND
                                (trips.end_time_ds >= ?)
                        )
                    """
        params = (day_start_ut, end_ds, start_ds)
        trip_Is = [el[0] for el in cur.execute(query, params)]
        if len(trip_Is) > 0:
            trip_I_dict[day_start_ut] = trip_Is
    return trip_I_dict
"def",
"get_tripIs_within_range_by_dsut",
"(",
"self",
",",
"start_time_ut",
",",
"end_time_ut",
")",
":",
"cur",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"assert",
"start_time_ut",
"<=",
"end_time_ut",
"dst_ut",
",",
"st_ds",
",",
"et_ds",
"=",
"self",
".",
"_get_possible_day_starts",
"(",
"start_time_ut",
",",
"end_time_ut",
",",
"7",
")",
"# noinspection PyTypeChecker",
"assert",
"len",
"(",
"dst_ut",
")",
">=",
"0",
"trip_I_dict",
"=",
"{",
"}",
"for",
"day_start_ut",
",",
"start_ds",
",",
"end_ds",
"in",
"zip",
"(",
"dst_ut",
",",
"st_ds",
",",
"et_ds",
")",
":",
"query",
"=",
"\"\"\"\n SELECT distinct(trip_I)\n FROM days\n JOIN trips\n USING(trip_I)\n WHERE\n (days.day_start_ut == ?)\n AND (\n (trips.start_time_ds <= ?)\n AND\n (trips.end_time_ds >= ?)\n )\n \"\"\"",
"params",
"=",
"(",
"day_start_ut",
",",
"end_ds",
",",
"start_ds",
")",
"trip_Is",
"=",
"[",
"el",
"[",
"0",
"]",
"for",
"el",
"in",
"cur",
".",
"execute",
"(",
"query",
",",
"params",
")",
"]",
"if",
"len",
"(",
"trip_Is",
")",
">",
"0",
":",
"trip_I_dict",
"[",
"day_start_ut",
"]",
"=",
"trip_Is",
"return",
"trip_I_dict"
] | 38.916667 | 14.25 |
def start_session(self):
    """Start the underlying API sessions.

    Calling this is not required: it is performed automatically when a
    method that needs a session is called.

    @return bool
    """
    # Keep the original order: Android API first, then the manga API.
    for begin in (self._android_api.start_session,
                  self._manga_api.cr_start_session):
        begin()
    return self.session_started
"def",
"start_session",
"(",
"self",
")",
":",
"self",
".",
"_android_api",
".",
"start_session",
"(",
")",
"self",
".",
"_manga_api",
".",
"cr_start_session",
"(",
")",
"return",
"self",
".",
"session_started"
] | 30.636364 | 14.818182 |
def _handle_special_yaml_cases(v):
"""Handle values that pass integer, boolean, list or dictionary values.
"""
if "::" in v:
out = {}
for part in v.split("::"):
k_part, v_part = part.split(":")
out[k_part] = v_part.split(";")
v = out
elif ";" in v:
# split lists and remove accidental empty values
v = [x for x in v.split(";") if x != ""]
elif isinstance(v, list):
v = v
else:
try:
v = int(v)
except ValueError:
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
return v | [
"def",
"_handle_special_yaml_cases",
"(",
"v",
")",
":",
"if",
"\"::\"",
"in",
"v",
":",
"out",
"=",
"{",
"}",
"for",
"part",
"in",
"v",
".",
"split",
"(",
"\"::\"",
")",
":",
"k_part",
",",
"v_part",
"=",
"part",
".",
"split",
"(",
"\":\"",
")",
"out",
"[",
"k_part",
"]",
"=",
"v_part",
".",
"split",
"(",
"\";\"",
")",
"v",
"=",
"out",
"elif",
"\";\"",
"in",
"v",
":",
"# split lists and remove accidental empty values",
"v",
"=",
"[",
"x",
"for",
"x",
"in",
"v",
".",
"split",
"(",
"\";\"",
")",
"if",
"x",
"!=",
"\"\"",
"]",
"elif",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"v",
"=",
"v",
"else",
":",
"try",
":",
"v",
"=",
"int",
"(",
"v",
")",
"except",
"ValueError",
":",
"if",
"v",
".",
"lower",
"(",
")",
"==",
"\"true\"",
":",
"v",
"=",
"True",
"elif",
"v",
".",
"lower",
"(",
")",
"==",
"\"false\"",
":",
"v",
"=",
"False",
"return",
"v"
] | 28.521739 | 14.26087 |
def plot_returns(perf_attrib_data, cost=None, ax=None):
    """
    Plot cumulative total, specific, and common returns on one axes.

    Parameters
    ----------
    perf_attrib_data : pd.DataFrame
        Indexed by datetime, with 'total_returns', 'specific_returns' and
        'common_returns' columns (plus factor columns). The
        'total_returns' column is assumed NOT to be cost adjusted.
    cost : pd.Series, optional
        When given, costs are subtracted from 'total_returns' before
        plotting, and the cumulative cost is drawn as its own curve.
    ax : matplotlib.axes.Axes, optional
        Axes to draw on; defaults to the current axes.

    Returns
    -------
    ax : matplotlib.axes.Axes
    """
    if ax is None:
        ax = plt.gca()

    # Total returns, net of costs when a cost series is supplied.
    total_curve = _cumulative_returns_less_costs(
        perf_attrib_data['total_returns'],
        cost
    )
    total_label = 'Total returns'
    if cost is not None:
        total_label += ' (adjusted)'

    ax.plot(total_curve, color='b', label=total_label)
    ax.plot(ep.cum_returns(perf_attrib_data['specific_returns']),
            color='g', label='Cumulative specific returns')
    ax.plot(ep.cum_returns(perf_attrib_data['common_returns']),
            color='r', label='Cumulative common returns')

    if cost is not None:
        # Costs drag returns down, hence the negated cumulative curve.
        ax.plot(-ep.cum_returns(cost), color='k',
                label='Cumulative cost spent')

    ax.set_title('Time series of cumulative returns')
    ax.set_ylabel('Returns')
    configure_legend(ax)
    return ax
"def",
"plot_returns",
"(",
"perf_attrib_data",
",",
"cost",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"returns",
"=",
"perf_attrib_data",
"[",
"'total_returns'",
"]",
"total_returns_label",
"=",
"'Total returns'",
"cumulative_returns_less_costs",
"=",
"_cumulative_returns_less_costs",
"(",
"returns",
",",
"cost",
")",
"if",
"cost",
"is",
"not",
"None",
":",
"total_returns_label",
"+=",
"' (adjusted)'",
"specific_returns",
"=",
"perf_attrib_data",
"[",
"'specific_returns'",
"]",
"common_returns",
"=",
"perf_attrib_data",
"[",
"'common_returns'",
"]",
"ax",
".",
"plot",
"(",
"cumulative_returns_less_costs",
",",
"color",
"=",
"'b'",
",",
"label",
"=",
"total_returns_label",
")",
"ax",
".",
"plot",
"(",
"ep",
".",
"cum_returns",
"(",
"specific_returns",
")",
",",
"color",
"=",
"'g'",
",",
"label",
"=",
"'Cumulative specific returns'",
")",
"ax",
".",
"plot",
"(",
"ep",
".",
"cum_returns",
"(",
"common_returns",
")",
",",
"color",
"=",
"'r'",
",",
"label",
"=",
"'Cumulative common returns'",
")",
"if",
"cost",
"is",
"not",
"None",
":",
"ax",
".",
"plot",
"(",
"-",
"ep",
".",
"cum_returns",
"(",
"cost",
")",
",",
"color",
"=",
"'k'",
",",
"label",
"=",
"'Cumulative cost spent'",
")",
"ax",
".",
"set_title",
"(",
"'Time series of cumulative returns'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Returns'",
")",
"configure_legend",
"(",
"ax",
")",
"return",
"ax"
] | 30.213115 | 22.508197 |
def id(self):
        """Extract the user id, i.e. the final path segment of the URL.

        :return: the user id, or '' when no URL is set
        :rtype: str
        """
        if self.url is None:
            return ''
        return re.match(r'^.*/([^/]+)/$', self.url).group(1)
"def",
"id",
"(",
"self",
")",
":",
"return",
"re",
".",
"match",
"(",
"r'^.*/([^/]+)/$'",
",",
"self",
".",
"url",
")",
".",
"group",
"(",
"1",
")",
"if",
"self",
".",
"url",
"is",
"not",
"None",
"else",
"''"
] | 24.75 | 16.5 |
def flat_data(self):
        """
        Merge all modified fields into the original data.

        Fields listed in ``__deleted_fields__`` are dropped, nested models
        are flattened recursively, and the modified-data buffer is cleared
        afterwards.
        """
        def _flatten(item):
            # Recurse into nested models; plain values pass through.
            # AttributeError is swallowed, matching best-effort flattening.
            try:
                item.flat_data()
            except AttributeError:
                pass
            return item
        combined = self.__original_data__
        combined.update(self.__modified_data__)
        self.__original_data__ = {
            name: _flatten(field)
            for name, field in combined.items()
            if name not in self.__deleted_fields__
        }
        self.clear_modified_data()
"def",
"flat_data",
"(",
"self",
")",
":",
"def",
"flat_field",
"(",
"value",
")",
":",
"\"\"\"\n Flat field data\n \"\"\"",
"try",
":",
"value",
".",
"flat_data",
"(",
")",
"return",
"value",
"except",
"AttributeError",
":",
"return",
"value",
"modified_dict",
"=",
"self",
".",
"__original_data__",
"modified_dict",
".",
"update",
"(",
"self",
".",
"__modified_data__",
")",
"self",
".",
"__original_data__",
"=",
"{",
"k",
":",
"flat_field",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"modified_dict",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"self",
".",
"__deleted_fields__",
"}",
"self",
".",
"clear_modified_data",
"(",
")"
] | 29.454545 | 15.272727 |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
        """
        Read the data encoding the CapabilityInformation structure and decode
        it into its constituent parts.

        Args:
            input_buffer (stream): A data stream containing encoded object
                data, supporting a read method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.3.

        Raises:
            VersionNotSupported: Raised when a KMIP version is provided that
                does not support the CapabilityInformation structure.
        """
        if kmip_version < enums.KMIPVersion.KMIP_1_3:
            raise exceptions.VersionNotSupported(
                "KMIP {} does not support the CapabilityInformation "
                "object.".format(
                    kmip_version.value
                )
            )
        super(CapabilityInformation, self).read(
            input_buffer,
            kmip_version=kmip_version
        )
        # Restrict decoding to this structure's own encoded payload.
        local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
        # Every sub-field is optional; decode each only when its tag is next.
        if self.is_tag_next(enums.Tags.STREAMING_CAPABILITY, local_buffer):
            streaming_capability = primitives.Boolean(
                tag=enums.Tags.STREAMING_CAPABILITY
            )
            streaming_capability.read(local_buffer, kmip_version=kmip_version)
            self._streaming_capability = streaming_capability
        if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CAPABILITY, local_buffer):
            asynchronous_capability = primitives.Boolean(
                tag=enums.Tags.ASYNCHRONOUS_CAPABILITY
            )
            asynchronous_capability.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._asynchronous_capability = asynchronous_capability
        if self.is_tag_next(enums.Tags.ATTESTATION_CAPABILITY, local_buffer):
            attestation_capability = primitives.Boolean(
                tag=enums.Tags.ATTESTATION_CAPABILITY
            )
            attestation_capability.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._attestation_capability = attestation_capability
        # The fields below are gated on protocol version and are skipped
        # entirely for versions before KMIP 1.4.
        if kmip_version >= enums.KMIPVersion.KMIP_1_4:
            if self.is_tag_next(
                    enums.Tags.BATCH_UNDO_CAPABILITY,
                    local_buffer
            ):
                batch_undo_capability = primitives.Boolean(
                    tag=enums.Tags.BATCH_UNDO_CAPABILITY
                )
                batch_undo_capability.read(
                    local_buffer,
                    kmip_version=kmip_version
                )
                # Bug fix: store the undo flag on its own attribute.
                # Previously this value was assigned to
                # _batch_continue_capability, where it was then clobbered by
                # the batch-continue field decoded below.
                self._batch_undo_capability = batch_undo_capability
            if self.is_tag_next(
                    enums.Tags.BATCH_CONTINUE_CAPABILITY,
                    local_buffer
            ):
                batch_continue_capability = primitives.Boolean(
                    tag=enums.Tags.BATCH_CONTINUE_CAPABILITY
                )
                batch_continue_capability.read(
                    local_buffer,
                    kmip_version=kmip_version
                )
                self._batch_continue_capability = batch_continue_capability
            if self.is_tag_next(enums.Tags.UNWRAP_MODE, local_buffer):
                unwrap_mode = primitives.Enumeration(
                    enums.UnwrapMode,
                    tag=enums.Tags.UNWRAP_MODE
                )
                unwrap_mode.read(local_buffer, kmip_version=kmip_version)
                self._unwrap_mode = unwrap_mode
            if self.is_tag_next(enums.Tags.DESTROY_ACTION, local_buffer):
                destroy_action = primitives.Enumeration(
                    enums.DestroyAction,
                    tag=enums.Tags.DESTROY_ACTION
                )
                destroy_action.read(local_buffer, kmip_version=kmip_version)
                self._destroy_action = destroy_action
            if self.is_tag_next(enums.Tags.SHREDDING_ALGORITHM, local_buffer):
                shredding_algorithm = primitives.Enumeration(
                    enums.ShreddingAlgorithm,
                    tag=enums.Tags.SHREDDING_ALGORITHM
                )
                shredding_algorithm.read(local_buffer, kmip_version=kmip_version)
                self._shredding_algorithm = shredding_algorithm
            if self.is_tag_next(enums.Tags.RNG_MODE, local_buffer):
                rng_mode = primitives.Enumeration(
                    enums.RNGMode,
                    tag=enums.Tags.RNG_MODE
                )
                rng_mode.read(local_buffer, kmip_version=kmip_version)
                self._rng_mode = rng_mode
        # Raise if unread bytes remain in this structure's payload.
        self.is_oversized(local_buffer)
"def",
"read",
"(",
"self",
",",
"input_buffer",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_3",
")",
":",
"if",
"kmip_version",
"<",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_3",
":",
"raise",
"exceptions",
".",
"VersionNotSupported",
"(",
"\"KMIP {} does not support the CapabilityInformation \"",
"\"object.\"",
".",
"format",
"(",
"kmip_version",
".",
"value",
")",
")",
"super",
"(",
"CapabilityInformation",
",",
"self",
")",
".",
"read",
"(",
"input_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_buffer",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_buffer",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"STREAMING_CAPABILITY",
",",
"local_buffer",
")",
":",
"streaming_capability",
"=",
"primitives",
".",
"Boolean",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"STREAMING_CAPABILITY",
")",
"streaming_capability",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_streaming_capability",
"=",
"streaming_capability",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"ASYNCHRONOUS_CAPABILITY",
",",
"local_buffer",
")",
":",
"asynchronous_capability",
"=",
"primitives",
".",
"Boolean",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"ASYNCHRONOUS_CAPABILITY",
")",
"asynchronous_capability",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_asynchronous_capability",
"=",
"asynchronous_capability",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"ATTESTATION_CAPABILITY",
",",
"local_buffer",
")",
":",
"attestation_capability",
"=",
"primitives",
".",
"Boolean",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"ATTESTATION_CAPABILITY",
")",
"attestation_capability",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_attestation_capability",
"=",
"attestation_capability",
"if",
"kmip_version",
">=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_4",
":",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"BATCH_UNDO_CAPABILITY",
",",
"local_buffer",
")",
":",
"batch_undo_capability",
"=",
"primitives",
".",
"Boolean",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"BATCH_UNDO_CAPABILITY",
")",
"batch_undo_capability",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_batch_continue_capability",
"=",
"batch_undo_capability",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"BATCH_CONTINUE_CAPABILITY",
",",
"local_buffer",
")",
":",
"batch_continue_capability",
"=",
"primitives",
".",
"Boolean",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"BATCH_CONTINUE_CAPABILITY",
")",
"batch_continue_capability",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_batch_continue_capability",
"=",
"batch_continue_capability",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"UNWRAP_MODE",
",",
"local_buffer",
")",
":",
"unwrap_mode",
"=",
"primitives",
".",
"Enumeration",
"(",
"enums",
".",
"UnwrapMode",
",",
"tag",
"=",
"enums",
".",
"Tags",
".",
"UNWRAP_MODE",
")",
"unwrap_mode",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_unwrap_mode",
"=",
"unwrap_mode",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"DESTROY_ACTION",
",",
"local_buffer",
")",
":",
"destroy_action",
"=",
"primitives",
".",
"Enumeration",
"(",
"enums",
".",
"DestroyAction",
",",
"tag",
"=",
"enums",
".",
"Tags",
".",
"DESTROY_ACTION",
")",
"destroy_action",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_destroy_action",
"=",
"destroy_action",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"SHREDDING_ALGORITHM",
",",
"local_buffer",
")",
":",
"shredding_algorithm",
"=",
"primitives",
".",
"Enumeration",
"(",
"enums",
".",
"ShreddingAlgorithm",
",",
"tag",
"=",
"enums",
".",
"Tags",
".",
"SHREDDING_ALGORITHM",
")",
"shredding_algorithm",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_shredding_algorithm",
"=",
"shredding_algorithm",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"RNG_MODE",
",",
"local_buffer",
")",
":",
"rng_mode",
"=",
"primitives",
".",
"Enumeration",
"(",
"enums",
".",
"RNGMode",
",",
"tag",
"=",
"enums",
".",
"Tags",
".",
"RNG_MODE",
")",
"rng_mode",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_rng_mode",
"=",
"rng_mode",
"self",
".",
"is_oversized",
"(",
"local_buffer",
")"
] | 39.550847 | 20.228814 |
def histogram2d(self, counts, x_edges, y_edges, type='bw', style=None,
                    bitmap=False, colormap=None):
        """Plot a two-dimensional histogram.
        The user needs to supply the histogram. This method only plots
        the results. You can use NumPy's histogram2d function.
        :param counts: array containing the count values.
        :param x_edges: array containing the x-axis bin edges.
        :param y_edges: array containing the y-axis bin edges.
        :param type: the type of histogram. Allowed values are 'bw' for
            filled squares with shades from black (minimum value) to white
            (maximum value), 'reverse_bw' for filled squares with the
            shades reversed and, 'color' for color mapped histogram
            which uses the 'coolwarm' colormap by default, but can be
            overwritten with the colormap keyword, and 'area' for
            squares where the area of the square is a measure of the
            count in the bin.
        :param style: optional TikZ styles to apply (e.g. 'red'). Note
            that many color styles are overridden by the 'bw' and
            'reverse_bw' types.
        :param bitmap: Export the histogram as an image for better
            performance. This does expect all bins along an axis to have
            equal width. Can not be used in combination with type 'area'.
        :param colormap: A colormap for the 'color' type, as expected by
            the `Image.putpalette` method.
        Example::
            >>> x = np.random.uniform(low=1, high=1000, size=2000)
            >>> y = np.random.uniform(low=0, high=50, size=2000)
            >>> plot = artist.Plot()
            >>> n, xbins, ybins = np.histogram2d(x, y)
            >>> plot.histogram2d(n, xbins, ybins)
        When you desire logarithmic axes and bitmap is set to True special
        care has to be taken with the binning. This is because the bins along
        an axis have to be equal size in the final result. So use logarithmic
        binning for logarithmic axes.
        Example::
            >>> plot = artist.Plot(axis='semilogx')
            >>> xbins = np.logspace(0, 3, 20)
            >>> ybins = np.linspace(-5, 10, 10)
            >>> n, xbins, ybins = np.histogram2d(x, y, bins=[xbins, ybins])
            >>> plot.histogram2d(n, xbins, ybins, bitmap=True)
        For each bin where the counts are nan the value will be set to the
        minimum value (i.e. `np.nanmin(counts)`).
        """
        # counts must hold exactly one value per (x, y) bin.
        if counts.shape != (len(x_edges) - 1, len(y_edges) - 1):
            raise RuntimeError(
                'The length of x_edges and y_edges should match counts')
        if type not in ['bw', 'reverse_bw', 'area', 'color']:
            raise RuntimeError('Histogram type %s not supported' % type)
        # 'area' varies marker size per bin, which has no bitmap equivalent.
        if type == 'area' and bitmap:
            raise RuntimeError('Histogram type %s not supported for bitmap '
                               'output' % type)
        # 'color' relies on an image palette, so it requires bitmap output.
        if type == 'color' and not bitmap:
            raise RuntimeError('Histogram type %s not supported for '
                               'non-bitmapped output' % type)
        if bitmap:
            # Render the histogram as a palette image spanning the bin edges.
            normed_counts = self._normalize_histogram2d(counts, type)
            # Transpose + flipud map array (row, col) onto image coordinates
            # (y increasing upwards, x to the right).
            img = Image.fromarray(np.flipud(normed_counts.T))
            if type == 'color':
                if colormap == 'viridis':
                    img.putpalette(VIRIDIS)
                elif colormap in [None, 'coolwarm']:
                    img.putpalette(COOLWARM)
                else:
                    # Custom palette supplied by the caller.
                    img.putpalette(colormap)
            self.bitmap_list.append({'image': img,
                                     'xmin': min(x_edges),
                                     'xmax': max(x_edges),
                                     'ymin': min(y_edges),
                                     'ymax': max(y_edges)})
        else:
            # Vector output: one marker per bin, placed at the bin center.
            # NOTE(review): the edge arithmetic assumes vectorized operands
            # (e.g. numpy arrays), not plain lists -- confirm at call sites.
            x_centers = (x_edges[:-1] + x_edges[1:]) / 2
            y_centers = (y_edges[:-1] + y_edges[1:]) / 2
            self.histogram2d_list.append({'x_edges': x_edges,
                                          'y_edges': y_edges,
                                          'x_centers': x_centers,
                                          'y_centers': y_centers,
                                          'counts': counts,
                                          'max': np.nanmax(counts),
                                          'min': np.nanmin(counts),
                                          'type': type,
                                          'style': style})
        # Grow the plot limits to include this histogram's extent; limits
        # that are still None are ignored by the min/max filters below.
        xmin = min(x for x in (min(x_edges), self.limits['xmin'])
                   if x is not None)
        ymin = min(y for y in (min(y_edges), self.limits['ymin'])
                   if y is not None)
        xmax = max(x for x in (max(x_edges), self.limits['xmax'])
                   if x is not None)
        ymax = max(y for y in (max(y_edges), self.limits['ymax'])
                   if y is not None)
        self.set_xlimits(xmin, xmax)
        self.set_ylimits(ymin, ymax)
        # 'area' encodes magnitude in marker size; all other types map the
        # count range onto a colormap scale.
        if type != 'area':
            self.set_mlimits(np.nanmin(counts), np.nanmax(counts))
        if type == 'bw':
            self.set_colormap('blackwhite')
        elif type == 'reverse_bw':
            self.set_colormap('whiteblack')
        elif type == 'color':
            if colormap == 'viridis':
                self.set_colormap('viridis')
            elif colormap in [None, 'coolwarm']:
                self.set_colormap('coolwarm')
"def",
"histogram2d",
"(",
"self",
",",
"counts",
",",
"x_edges",
",",
"y_edges",
",",
"type",
"=",
"'bw'",
",",
"style",
"=",
"None",
",",
"bitmap",
"=",
"False",
",",
"colormap",
"=",
"None",
")",
":",
"if",
"counts",
".",
"shape",
"!=",
"(",
"len",
"(",
"x_edges",
")",
"-",
"1",
",",
"len",
"(",
"y_edges",
")",
"-",
"1",
")",
":",
"raise",
"RuntimeError",
"(",
"'The length of x_edges and y_edges should match counts'",
")",
"if",
"type",
"not",
"in",
"[",
"'bw'",
",",
"'reverse_bw'",
",",
"'area'",
",",
"'color'",
"]",
":",
"raise",
"RuntimeError",
"(",
"'Histogram type %s not supported'",
"%",
"type",
")",
"if",
"type",
"==",
"'area'",
"and",
"bitmap",
":",
"raise",
"RuntimeError",
"(",
"'Histogram type %s not supported for bitmap '",
"'output'",
"%",
"type",
")",
"if",
"type",
"==",
"'color'",
"and",
"not",
"bitmap",
":",
"raise",
"RuntimeError",
"(",
"'Histogram type %s not supported for '",
"'non-bitmapped output'",
"%",
"type",
")",
"if",
"bitmap",
":",
"normed_counts",
"=",
"self",
".",
"_normalize_histogram2d",
"(",
"counts",
",",
"type",
")",
"img",
"=",
"Image",
".",
"fromarray",
"(",
"np",
".",
"flipud",
"(",
"normed_counts",
".",
"T",
")",
")",
"if",
"type",
"==",
"'color'",
":",
"if",
"colormap",
"==",
"'viridis'",
":",
"img",
".",
"putpalette",
"(",
"VIRIDIS",
")",
"elif",
"colormap",
"in",
"[",
"None",
",",
"'coolwarm'",
"]",
":",
"img",
".",
"putpalette",
"(",
"COOLWARM",
")",
"else",
":",
"img",
".",
"putpalette",
"(",
"colormap",
")",
"self",
".",
"bitmap_list",
".",
"append",
"(",
"{",
"'image'",
":",
"img",
",",
"'xmin'",
":",
"min",
"(",
"x_edges",
")",
",",
"'xmax'",
":",
"max",
"(",
"x_edges",
")",
",",
"'ymin'",
":",
"min",
"(",
"y_edges",
")",
",",
"'ymax'",
":",
"max",
"(",
"y_edges",
")",
"}",
")",
"else",
":",
"x_centers",
"=",
"(",
"x_edges",
"[",
":",
"-",
"1",
"]",
"+",
"x_edges",
"[",
"1",
":",
"]",
")",
"/",
"2",
"y_centers",
"=",
"(",
"y_edges",
"[",
":",
"-",
"1",
"]",
"+",
"y_edges",
"[",
"1",
":",
"]",
")",
"/",
"2",
"self",
".",
"histogram2d_list",
".",
"append",
"(",
"{",
"'x_edges'",
":",
"x_edges",
",",
"'y_edges'",
":",
"y_edges",
",",
"'x_centers'",
":",
"x_centers",
",",
"'y_centers'",
":",
"y_centers",
",",
"'counts'",
":",
"counts",
",",
"'max'",
":",
"np",
".",
"nanmax",
"(",
"counts",
")",
",",
"'min'",
":",
"np",
".",
"nanmin",
"(",
"counts",
")",
",",
"'type'",
":",
"type",
",",
"'style'",
":",
"style",
"}",
")",
"# Set limits unless lower/higher limits are already set.",
"xmin",
"=",
"min",
"(",
"x",
"for",
"x",
"in",
"(",
"min",
"(",
"x_edges",
")",
",",
"self",
".",
"limits",
"[",
"'xmin'",
"]",
")",
"if",
"x",
"is",
"not",
"None",
")",
"ymin",
"=",
"min",
"(",
"y",
"for",
"y",
"in",
"(",
"min",
"(",
"y_edges",
")",
",",
"self",
".",
"limits",
"[",
"'ymin'",
"]",
")",
"if",
"y",
"is",
"not",
"None",
")",
"xmax",
"=",
"max",
"(",
"x",
"for",
"x",
"in",
"(",
"max",
"(",
"x_edges",
")",
",",
"self",
".",
"limits",
"[",
"'xmax'",
"]",
")",
"if",
"x",
"is",
"not",
"None",
")",
"ymax",
"=",
"max",
"(",
"y",
"for",
"y",
"in",
"(",
"max",
"(",
"y_edges",
")",
",",
"self",
".",
"limits",
"[",
"'ymax'",
"]",
")",
"if",
"y",
"is",
"not",
"None",
")",
"self",
".",
"set_xlimits",
"(",
"xmin",
",",
"xmax",
")",
"self",
".",
"set_ylimits",
"(",
"ymin",
",",
"ymax",
")",
"if",
"type",
"!=",
"'area'",
":",
"self",
".",
"set_mlimits",
"(",
"np",
".",
"nanmin",
"(",
"counts",
")",
",",
"np",
".",
"nanmax",
"(",
"counts",
")",
")",
"if",
"type",
"==",
"'bw'",
":",
"self",
".",
"set_colormap",
"(",
"'blackwhite'",
")",
"elif",
"type",
"==",
"'reverse_bw'",
":",
"self",
".",
"set_colormap",
"(",
"'whiteblack'",
")",
"elif",
"type",
"==",
"'color'",
":",
"if",
"colormap",
"==",
"'viridis'",
":",
"self",
".",
"set_colormap",
"(",
"'viridis'",
")",
"elif",
"colormap",
"in",
"[",
"None",
",",
"'coolwarm'",
"]",
":",
"self",
".",
"set_colormap",
"(",
"'coolwarm'",
")"
] | 47.530435 | 20.2 |
def bss_eval_images(reference_sources, estimated_sources,
                    compute_permutation=True):
    """
    BSS Eval v3 bss_eval_images

    Delegates to ``bss_eval`` configured for image-style evaluation:
    one whole-signal window (no hopping), framewise filtering, and
    length-512 distortion filters.
    """
    fixed_options = dict(
        window=np.inf,
        hop=np.inf,
        filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=False,
    )
    return bss_eval(reference_sources, estimated_sources,
                    compute_permutation=compute_permutation,
                    **fixed_options)
"def",
"bss_eval_images",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"compute_permutation",
"=",
"True",
")",
":",
"return",
"bss_eval",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"window",
"=",
"np",
".",
"inf",
",",
"hop",
"=",
"np",
".",
"inf",
",",
"compute_permutation",
"=",
"compute_permutation",
",",
"filters_len",
"=",
"512",
",",
"framewise_filters",
"=",
"True",
",",
"bsseval_sources_version",
"=",
"False",
")"
] | 31.071429 | 13.785714 |
def get_data_iters_and_vocabs(args: argparse.Namespace,
                              model_folder: Optional[str]) -> Tuple['data_io.BaseParallelSampleIter',
                                                                    List[vocab.Vocab], vocab.Vocab, model.ModelConfig]:
    """
    Loads the data iterators and vocabularies.
    :param args: Arguments as returned by argparse.
    :param model_folder: Output folder.
    :return: The scoring data iterator as well as the source and target vocabularies.
    """
    config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME))

    # Fall back to the training-time sequence lengths unless overridden.
    if args.max_seq_len is None:
        max_len_source = config.config_data.max_seq_len_source
        max_len_target = config.config_data.max_seq_len_target
    else:
        max_len_source, max_len_target = args.max_seq_len

    # One device on CPU; on GPU, a negative id requests that many
    # automatically-acquired devices, a non-negative id is one device.
    if args.use_cpu:
        batch_num_devices = 1
    else:
        batch_num_devices = sum(-di if di < 0 else 1 for di in args.device_ids)

    # Reuse the vocabularies created when the training run started.
    source_vocabs = vocab.load_source_vocabs(model_folder)
    target_vocab = vocab.load_target_vocab(model_folder)

    source_paths = [str(os.path.abspath(path))
                    for path in [args.source] + args.source_factors]

    score_iter = data_io.get_scoring_data_iters(
        sources=source_paths,
        target=os.path.abspath(args.target),
        source_vocabs=source_vocabs,
        target_vocab=target_vocab,
        batch_size=args.batch_size,
        batch_num_devices=batch_num_devices,
        max_seq_len_source=max_len_source,
        max_seq_len_target=max_len_target)

    return score_iter, source_vocabs, target_vocab, config
"def",
"get_data_iters_and_vocabs",
"(",
"args",
":",
"argparse",
".",
"Namespace",
",",
"model_folder",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"Tuple",
"[",
"'data_io.BaseParallelSampleIter'",
",",
"List",
"[",
"vocab",
".",
"Vocab",
"]",
",",
"vocab",
".",
"Vocab",
",",
"model",
".",
"ModelConfig",
"]",
":",
"model_config",
"=",
"model",
".",
"SockeyeModel",
".",
"load_config",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"model",
",",
"C",
".",
"CONFIG_NAME",
")",
")",
"if",
"args",
".",
"max_seq_len",
"is",
"None",
":",
"max_seq_len_source",
"=",
"model_config",
".",
"config_data",
".",
"max_seq_len_source",
"max_seq_len_target",
"=",
"model_config",
".",
"config_data",
".",
"max_seq_len_target",
"else",
":",
"max_seq_len_source",
",",
"max_seq_len_target",
"=",
"args",
".",
"max_seq_len",
"batch_num_devices",
"=",
"1",
"if",
"args",
".",
"use_cpu",
"else",
"sum",
"(",
"-",
"di",
"if",
"di",
"<",
"0",
"else",
"1",
"for",
"di",
"in",
"args",
".",
"device_ids",
")",
"# Load the existing vocabs created when starting the training run.",
"source_vocabs",
"=",
"vocab",
".",
"load_source_vocabs",
"(",
"model_folder",
")",
"target_vocab",
"=",
"vocab",
".",
"load_target_vocab",
"(",
"model_folder",
")",
"sources",
"=",
"[",
"args",
".",
"source",
"]",
"+",
"args",
".",
"source_factors",
"sources",
"=",
"[",
"str",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"source",
")",
")",
"for",
"source",
"in",
"sources",
"]",
"score_iter",
"=",
"data_io",
".",
"get_scoring_data_iters",
"(",
"sources",
"=",
"sources",
",",
"target",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"target",
")",
",",
"source_vocabs",
"=",
"source_vocabs",
",",
"target_vocab",
"=",
"target_vocab",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"batch_num_devices",
"=",
"batch_num_devices",
",",
"max_seq_len_source",
"=",
"max_seq_len_source",
",",
"max_seq_len_target",
"=",
"max_seq_len_target",
")",
"return",
"score_iter",
",",
"source_vocabs",
",",
"target_vocab",
",",
"model_config"
] | 42.769231 | 24.512821 |
def write(self, proto):
    """
    Serialize this instance's state into a proto object.
    :param proto: (DynamicStructBuilder) Proto object
    """
    # geometry / batching
    proto.filterDim = self.filterDim
    proto.outputDim = self.outputDim
    proto.batchSize = self.batchSize
    # loss history: one (iteration, loss) record per entry
    lossHistoryProto = proto.init("losses", len(self.losses))
    for idx, (iteration, loss) in enumerate(self.losses.iteritems()):
      record = lossHistoryProto[idx]
      record.iteration = iteration
      record.loss = float(loss)
    proto.iteration = self._iteration
    # NOTE(review): astype with an ad-hoc float subclass appears to coerce
    # the flattened basis elements for serialization -- original hack
    # preserved verbatim.
    proto.basis = list(
      self.basis.flatten().astype(type('float', (float,), {}))
    )
    # scalar training / LCA / debugging parameters, copied verbatim
    for name in ("learningRate", "decayCycle", "learningRateDecay",
                 "numLcaIterations", "lcaLearningRate", "thresholdDecay",
                 "minThreshold", "thresholdType", "verbosity", "showEvery",
                 "seed"):
      setattr(proto, name, getattr(self, name))
"def",
"write",
"(",
"self",
",",
"proto",
")",
":",
"proto",
".",
"filterDim",
"=",
"self",
".",
"filterDim",
"proto",
".",
"outputDim",
"=",
"self",
".",
"outputDim",
"proto",
".",
"batchSize",
"=",
"self",
".",
"batchSize",
"lossHistoryProto",
"=",
"proto",
".",
"init",
"(",
"\"losses\"",
",",
"len",
"(",
"self",
".",
"losses",
")",
")",
"i",
"=",
"0",
"for",
"iteration",
",",
"loss",
"in",
"self",
".",
"losses",
".",
"iteritems",
"(",
")",
":",
"iterationLossHistoryProto",
"=",
"lossHistoryProto",
"[",
"i",
"]",
"iterationLossHistoryProto",
".",
"iteration",
"=",
"iteration",
"iterationLossHistoryProto",
".",
"loss",
"=",
"float",
"(",
"loss",
")",
"i",
"+=",
"1",
"proto",
".",
"iteration",
"=",
"self",
".",
"_iteration",
"proto",
".",
"basis",
"=",
"list",
"(",
"self",
".",
"basis",
".",
"flatten",
"(",
")",
".",
"astype",
"(",
"type",
"(",
"'float'",
",",
"(",
"float",
",",
")",
",",
"{",
"}",
")",
")",
")",
"# training parameters",
"proto",
".",
"learningRate",
"=",
"self",
".",
"learningRate",
"proto",
".",
"decayCycle",
"=",
"self",
".",
"decayCycle",
"proto",
".",
"learningRateDecay",
"=",
"self",
".",
"learningRateDecay",
"# LCA parameters",
"proto",
".",
"numLcaIterations",
"=",
"self",
".",
"numLcaIterations",
"proto",
".",
"lcaLearningRate",
"=",
"self",
".",
"lcaLearningRate",
"proto",
".",
"thresholdDecay",
"=",
"self",
".",
"thresholdDecay",
"proto",
".",
"minThreshold",
"=",
"self",
".",
"minThreshold",
"proto",
".",
"thresholdType",
"=",
"self",
".",
"thresholdType",
"# debugging",
"proto",
".",
"verbosity",
"=",
"self",
".",
"verbosity",
"proto",
".",
"showEvery",
"=",
"self",
".",
"showEvery",
"proto",
".",
"seed",
"=",
"self",
".",
"seed"
] | 29.4 | 16.4 |
def get(self, tag, default=None):
        """Get a metadata value.

        Tags are short names such as ``'xlen'`` or ``'audit'``; in the
        sidecar file they are stored under the custom
        ``'Xmp.pyctools.'`` XMP namespace prefix.

        :param str tag: The tag name.
        :returns: The metadata value associated with ``tag``, or
            ``default`` when the tag is absent.
        :rtype: :py:class:`str`
        """
        return self.data.get('Xmp.pyctools.' + tag, default)
"def",
"get",
"(",
"self",
",",
"tag",
",",
"default",
"=",
"None",
")",
":",
"full_tag",
"=",
"'Xmp.pyctools.'",
"+",
"tag",
"if",
"full_tag",
"in",
"self",
".",
"data",
":",
"return",
"self",
".",
"data",
"[",
"full_tag",
"]",
"return",
"default"
] | 31.526316 | 19.368421 |
def delete(self, id):
        """Deletes a custom domain.

        Args:
            id (str): The id of the custom domain to delete
        See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/delete_custom_domains_by_id
        """
        # '%s' % id coerces the id to str before it is embedded in the URL.
        url = self._url('%s' % (id))
        return self.client.delete(url)
"def",
"delete",
"(",
"self",
",",
"id",
")",
":",
"url",
"=",
"self",
".",
"_url",
"(",
"'%s'",
"%",
"(",
"id",
")",
")",
"return",
"self",
".",
"client",
".",
"delete",
"(",
"url",
")"
] | 27.454545 | 22.545455 |
def invoke(self, script_hash, params, **kwargs):
        """Invoke a contract with the given parameters and return the result.

        Note that the name of the contract function being invoked must be
        included in the parameter list.

        :param script_hash: contract script hash
        :param params: list of parameters to be passed in to the smart contract
        :type script_hash: str
        :type params: list
        :return: result of the invocation
        :rtype: dictionary
        """
        encoded_params = encode_invocation_params(params)
        raw_response = self._call(JSONRPCMethods.INVOKE.value,
                                  [script_hash, encoded_params],
                                  **kwargs)
        return decode_invocation_result(raw_response)
"def",
"invoke",
"(",
"self",
",",
"script_hash",
",",
"params",
",",
"*",
"*",
"kwargs",
")",
":",
"contract_params",
"=",
"encode_invocation_params",
"(",
"params",
")",
"raw_result",
"=",
"self",
".",
"_call",
"(",
"JSONRPCMethods",
".",
"INVOKE",
".",
"value",
",",
"[",
"script_hash",
",",
"contract_params",
",",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"decode_invocation_result",
"(",
"raw_result",
")"
] | 40.388889 | 20.777778 |
def keywords2marc(self, key, values):
"""Populate the ``695`` MARC field.
Also populates the ``084`` and ``6531`` MARC fields through side effects.
"""
result_695 = self.get('695', [])
result_084 = self.get('084', [])
result_6531 = self.get('6531', [])
for value in values:
schema = value.get('schema')
source = value.get('source')
keyword = value.get('value')
if schema == 'PACS' or schema == 'PDG':
result_084.append({
'2': schema,
'9': source,
'a': keyword,
})
elif schema == 'JACOW':
result_6531.append({
'2': 'JACoW',
'9': source,
'a': keyword,
})
elif schema == 'INSPIRE':
result_695.append({
'2': 'INSPIRE',
'9': source,
'a': keyword,
})
elif schema == 'INIS':
result_695.append({
'2': 'INIS',
'9': source,
'a': keyword,
})
elif source != 'magpie':
result_6531.append({
'9': source,
'a': keyword,
})
self['6531'] = result_6531
self['084'] = result_084
return result_695 | [
"def",
"keywords2marc",
"(",
"self",
",",
"key",
",",
"values",
")",
":",
"result_695",
"=",
"self",
".",
"get",
"(",
"'695'",
",",
"[",
"]",
")",
"result_084",
"=",
"self",
".",
"get",
"(",
"'084'",
",",
"[",
"]",
")",
"result_6531",
"=",
"self",
".",
"get",
"(",
"'6531'",
",",
"[",
"]",
")",
"for",
"value",
"in",
"values",
":",
"schema",
"=",
"value",
".",
"get",
"(",
"'schema'",
")",
"source",
"=",
"value",
".",
"get",
"(",
"'source'",
")",
"keyword",
"=",
"value",
".",
"get",
"(",
"'value'",
")",
"if",
"schema",
"==",
"'PACS'",
"or",
"schema",
"==",
"'PDG'",
":",
"result_084",
".",
"append",
"(",
"{",
"'2'",
":",
"schema",
",",
"'9'",
":",
"source",
",",
"'a'",
":",
"keyword",
",",
"}",
")",
"elif",
"schema",
"==",
"'JACOW'",
":",
"result_6531",
".",
"append",
"(",
"{",
"'2'",
":",
"'JACoW'",
",",
"'9'",
":",
"source",
",",
"'a'",
":",
"keyword",
",",
"}",
")",
"elif",
"schema",
"==",
"'INSPIRE'",
":",
"result_695",
".",
"append",
"(",
"{",
"'2'",
":",
"'INSPIRE'",
",",
"'9'",
":",
"source",
",",
"'a'",
":",
"keyword",
",",
"}",
")",
"elif",
"schema",
"==",
"'INIS'",
":",
"result_695",
".",
"append",
"(",
"{",
"'2'",
":",
"'INIS'",
",",
"'9'",
":",
"source",
",",
"'a'",
":",
"keyword",
",",
"}",
")",
"elif",
"source",
"!=",
"'magpie'",
":",
"result_6531",
".",
"append",
"(",
"{",
"'9'",
":",
"source",
",",
"'a'",
":",
"keyword",
",",
"}",
")",
"self",
"[",
"'6531'",
"]",
"=",
"result_6531",
"self",
"[",
"'084'",
"]",
"=",
"result_084",
"return",
"result_695"
] | 27.276596 | 13.87234 |
def can_mark_block_complete_on_view(self, block):
"""
Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block.
"""
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
) | [
"def",
"can_mark_block_complete_on_view",
"(",
"self",
",",
"block",
")",
":",
"return",
"(",
"XBlockCompletionMode",
".",
"get_mode",
"(",
"block",
")",
"==",
"XBlockCompletionMode",
".",
"COMPLETABLE",
"and",
"not",
"getattr",
"(",
"block",
",",
"'has_custom_completion'",
",",
"False",
")",
"and",
"not",
"getattr",
"(",
"block",
",",
"'has_score'",
",",
"False",
")",
")"
] | 44.2 | 21 |
def set(self):
"""Set the color as current OpenGL color
"""
glColor4f(self.r, self.g, self.b, self.a) | [
"def",
"set",
"(",
"self",
")",
":",
"glColor4f",
"(",
"self",
".",
"r",
",",
"self",
".",
"g",
",",
"self",
".",
"b",
",",
"self",
".",
"a",
")"
] | 30.5 | 8.75 |
def getDistinctPairs(self):
"""
Return a set consisting of unique feature/location pairs across all
objects
"""
distinctPairs = set()
for pairs in self.objects.itervalues():
distinctPairs = distinctPairs.union(set(pairs))
return distinctPairs | [
"def",
"getDistinctPairs",
"(",
"self",
")",
":",
"distinctPairs",
"=",
"set",
"(",
")",
"for",
"pairs",
"in",
"self",
".",
"objects",
".",
"itervalues",
"(",
")",
":",
"distinctPairs",
"=",
"distinctPairs",
".",
"union",
"(",
"set",
"(",
"pairs",
")",
")",
"return",
"distinctPairs"
] | 29.777778 | 13.333333 |
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
# a identifier nesting problem we have in general. It's just more
# likely to happen in loops which is why we work around it. The
# real solution would be "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame | [
"def",
"macro_body",
"(",
"self",
",",
"node",
",",
"frame",
",",
"children",
"=",
"None",
")",
":",
"frame",
"=",
"self",
".",
"function_scoping",
"(",
"node",
",",
"frame",
",",
"children",
")",
"# macros are delayed, they never require output checks",
"frame",
".",
"require_output_check",
"=",
"False",
"args",
"=",
"frame",
".",
"arguments",
"# XXX: this is an ugly fix for the loop nesting bug",
"# (tests.test_old_bugs.test_loop_call_bug). This works around",
"# a identifier nesting problem we have in general. It's just more",
"# likely to happen in loops which is why we work around it. The",
"# real solution would be \"nonlocal\" all the identifiers that are",
"# leaking into a new python frame and might be used both unassigned",
"# and assigned.",
"if",
"'loop'",
"in",
"frame",
".",
"identifiers",
".",
"declared",
":",
"args",
"=",
"args",
"+",
"[",
"'l_loop=l_loop'",
"]",
"self",
".",
"writeline",
"(",
"'def macro(%s):'",
"%",
"', '",
".",
"join",
"(",
"args",
")",
",",
"node",
")",
"self",
".",
"indent",
"(",
")",
"self",
".",
"buffer",
"(",
"frame",
")",
"self",
".",
"pull_locals",
"(",
"frame",
")",
"self",
".",
"blockvisit",
"(",
"node",
".",
"body",
",",
"frame",
")",
"self",
".",
"return_buffer_contents",
"(",
"frame",
")",
"self",
".",
"outdent",
"(",
")",
"return",
"frame"
] | 48.130435 | 16.521739 |
def SlotSentinel(*args):
"""Provides exception handling for all slots"""
# (NOTE) davidlatwe
# Thanks to this answer
# https://stackoverflow.com/questions/18740884
if len(args) == 0 or isinstance(args[0], types.FunctionType):
args = []
@QtCore.pyqtSlot(*args)
def slotdecorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args)
except Exception:
traceback.print_exc()
return wrapper
return slotdecorator | [
"def",
"SlotSentinel",
"(",
"*",
"args",
")",
":",
"# (NOTE) davidlatwe",
"# Thanks to this answer",
"# https://stackoverflow.com/questions/18740884",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"or",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"types",
".",
"FunctionType",
")",
":",
"args",
"=",
"[",
"]",
"@",
"QtCore",
".",
"pyqtSlot",
"(",
"*",
"args",
")",
"def",
"slotdecorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"func",
"(",
"*",
"args",
")",
"except",
"Exception",
":",
"traceback",
".",
"print_exc",
"(",
")",
"return",
"wrapper",
"return",
"slotdecorator"
] | 24.952381 | 18.904762 |
def filter_rows_as_dict(fname, filter_, **kw):
"""Rewrite a dsv file, filtering the rows.
:param fname: Path to dsv file
:param filter_: callable which accepts a `dict` with a row's data as single argument\
returning a `Boolean` indicating whether to keep the row (`True`) or to discard it \
`False`.
:param kw: Keyword arguments to be passed `UnicodeReader` and `UnicodeWriter`.
:return: The number of rows that have been removed.
"""
filter_ = DictFilter(filter_)
rewrite(fname, filter_, **kw)
return filter_.removed | [
"def",
"filter_rows_as_dict",
"(",
"fname",
",",
"filter_",
",",
"*",
"*",
"kw",
")",
":",
"filter_",
"=",
"DictFilter",
"(",
"filter_",
")",
"rewrite",
"(",
"fname",
",",
"filter_",
",",
"*",
"*",
"kw",
")",
"return",
"filter_",
".",
"removed"
] | 42.384615 | 20.153846 |
def make_flow_labels(graph, flow, capac):
"""Generate arc labels for a flow in a graph with capacities.
:param graph: adjacency list or adjacency dictionary
:param flow: flow matrix or adjacency dictionary
:param capac: capacity matrix or adjacency dictionary
:returns: listdic graph representation, with the arc label strings
"""
V = range(len(graph))
arc_label = [{v:"" for v in graph[u]} for u in V]
for u in V:
for v in graph[u]:
if flow[u][v] >= 0:
arc_label[u][v] = "%s/%s" % (flow[u][v], capac[u][v])
else:
arc_label[u][v] = None # do not show negative flow arcs
return arc_label | [
"def",
"make_flow_labels",
"(",
"graph",
",",
"flow",
",",
"capac",
")",
":",
"V",
"=",
"range",
"(",
"len",
"(",
"graph",
")",
")",
"arc_label",
"=",
"[",
"{",
"v",
":",
"\"\"",
"for",
"v",
"in",
"graph",
"[",
"u",
"]",
"}",
"for",
"u",
"in",
"V",
"]",
"for",
"u",
"in",
"V",
":",
"for",
"v",
"in",
"graph",
"[",
"u",
"]",
":",
"if",
"flow",
"[",
"u",
"]",
"[",
"v",
"]",
">=",
"0",
":",
"arc_label",
"[",
"u",
"]",
"[",
"v",
"]",
"=",
"\"%s/%s\"",
"%",
"(",
"flow",
"[",
"u",
"]",
"[",
"v",
"]",
",",
"capac",
"[",
"u",
"]",
"[",
"v",
"]",
")",
"else",
":",
"arc_label",
"[",
"u",
"]",
"[",
"v",
"]",
"=",
"None",
"# do not show negative flow arcs",
"return",
"arc_label"
] | 39.882353 | 17.529412 |
def publish_queue(self):
"""
Publish all messages that have been added to the queue for configured protocol
:return: None
"""
self.last_send_time = time.time()
try:
self._tx_queue_lock.acquire()
start_length = len(self._rx_queue)
publish_amount = len(self._tx_queue)
if self.config.protocol == PublisherConfig.Protocol.GRPC:
self._publish_queue_grpc()
else:
self._publish_queue_wss()
self._tx_queue = []
finally:
self._tx_queue_lock.release()
if self.config.publish_type == self.config.Type.SYNC:
start_time = time.time()
while time.time() - start_time < self.config.sync_timeout and \
len(self._rx_queue) - start_length < publish_amount:
pass
return self._rx_queue | [
"def",
"publish_queue",
"(",
"self",
")",
":",
"self",
".",
"last_send_time",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"self",
".",
"_tx_queue_lock",
".",
"acquire",
"(",
")",
"start_length",
"=",
"len",
"(",
"self",
".",
"_rx_queue",
")",
"publish_amount",
"=",
"len",
"(",
"self",
".",
"_tx_queue",
")",
"if",
"self",
".",
"config",
".",
"protocol",
"==",
"PublisherConfig",
".",
"Protocol",
".",
"GRPC",
":",
"self",
".",
"_publish_queue_grpc",
"(",
")",
"else",
":",
"self",
".",
"_publish_queue_wss",
"(",
")",
"self",
".",
"_tx_queue",
"=",
"[",
"]",
"finally",
":",
"self",
".",
"_tx_queue_lock",
".",
"release",
"(",
")",
"if",
"self",
".",
"config",
".",
"publish_type",
"==",
"self",
".",
"config",
".",
"Type",
".",
"SYNC",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"<",
"self",
".",
"config",
".",
"sync_timeout",
"and",
"len",
"(",
"self",
".",
"_rx_queue",
")",
"-",
"start_length",
"<",
"publish_amount",
":",
"pass",
"return",
"self",
".",
"_rx_queue"
] | 37.958333 | 16.208333 |
def connection_with_anon(credentials, anon=True):
"""
Connect to S3 with automatic handling for anonymous access.
Parameters
----------
credentials : dict
AWS access key ('access') and secret access key ('secret')
anon : boolean, optional, default = True
Whether to make an anonymous connection if credentials fail to authenticate
"""
from boto.s3.connection import S3Connection
from boto.exception import NoAuthHandlerFound
try:
conn = S3Connection(aws_access_key_id=credentials['access'],
aws_secret_access_key=credentials['secret'])
return conn
except NoAuthHandlerFound:
if anon:
conn = S3Connection(anon=True)
return conn
else:
raise | [
"def",
"connection_with_anon",
"(",
"credentials",
",",
"anon",
"=",
"True",
")",
":",
"from",
"boto",
".",
"s3",
".",
"connection",
"import",
"S3Connection",
"from",
"boto",
".",
"exception",
"import",
"NoAuthHandlerFound",
"try",
":",
"conn",
"=",
"S3Connection",
"(",
"aws_access_key_id",
"=",
"credentials",
"[",
"'access'",
"]",
",",
"aws_secret_access_key",
"=",
"credentials",
"[",
"'secret'",
"]",
")",
"return",
"conn",
"except",
"NoAuthHandlerFound",
":",
"if",
"anon",
":",
"conn",
"=",
"S3Connection",
"(",
"anon",
"=",
"True",
")",
"return",
"conn",
"else",
":",
"raise"
] | 29.730769 | 21.807692 |
def ValidateRequiredFieldsAreNotEmpty(gtfs_object, required_field_names,
problems=None):
"""
Validates whether all required fields of an object have a value:
- if value empty adds MissingValue errors (if problems accumulator is
provided)
"""
no_missing_value = True
for name in required_field_names:
if IsEmpty(getattr(gtfs_object, name, None)):
if problems:
problems.MissingValue(name)
no_missing_value = False
return no_missing_value | [
"def",
"ValidateRequiredFieldsAreNotEmpty",
"(",
"gtfs_object",
",",
"required_field_names",
",",
"problems",
"=",
"None",
")",
":",
"no_missing_value",
"=",
"True",
"for",
"name",
"in",
"required_field_names",
":",
"if",
"IsEmpty",
"(",
"getattr",
"(",
"gtfs_object",
",",
"name",
",",
"None",
")",
")",
":",
"if",
"problems",
":",
"problems",
".",
"MissingValue",
"(",
"name",
")",
"no_missing_value",
"=",
"False",
"return",
"no_missing_value"
] | 36.142857 | 15 |
def _connect(self, config):
"""Establish a connection with a MySQL database."""
if 'connection_timeout' not in self._config:
self._config['connection_timeout'] = 480
try:
self._cnx = connect(**config)
self._cursor = self._cnx.cursor()
self._printer('\tMySQL DB connection established with db', config['database'])
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
raise err | [
"def",
"_connect",
"(",
"self",
",",
"config",
")",
":",
"if",
"'connection_timeout'",
"not",
"in",
"self",
".",
"_config",
":",
"self",
".",
"_config",
"[",
"'connection_timeout'",
"]",
"=",
"480",
"try",
":",
"self",
".",
"_cnx",
"=",
"connect",
"(",
"*",
"*",
"config",
")",
"self",
".",
"_cursor",
"=",
"self",
".",
"_cnx",
".",
"cursor",
"(",
")",
"self",
".",
"_printer",
"(",
"'\\tMySQL DB connection established with db'",
",",
"config",
"[",
"'database'",
"]",
")",
"except",
"Error",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errorcode",
".",
"ER_ACCESS_DENIED_ERROR",
":",
"print",
"(",
"\"Something is wrong with your user name or password\"",
")",
"elif",
"err",
".",
"errno",
"==",
"errorcode",
".",
"ER_BAD_DB_ERROR",
":",
"print",
"(",
"\"Database does not exist\"",
")",
"raise",
"err"
] | 47.642857 | 16.571429 |
def plantloopfields(data, commdct):
"""get plantloop fields to diagram it"""
fieldlists = plantloopfieldlists(data)
objkey = 'plantloop'.upper()
return extractfields(data, commdct, objkey, fieldlists) | [
"def",
"plantloopfields",
"(",
"data",
",",
"commdct",
")",
":",
"fieldlists",
"=",
"plantloopfieldlists",
"(",
"data",
")",
"objkey",
"=",
"'plantloop'",
".",
"upper",
"(",
")",
"return",
"extractfields",
"(",
"data",
",",
"commdct",
",",
"objkey",
",",
"fieldlists",
")"
] | 42.4 | 6.8 |
def periodic_callback(self):
"""Periodic cleanup tasks to maintain this adapter, should be called every second. """
if self.stopped:
return
# Check if we should start scanning again
if not self.scanning and len(self.connections.get_connections()) == 0:
self._logger.info("Restarting scan for devices")
self.start_scan(self._active_scan)
self._logger.info("Finished restarting scan for devices") | [
"def",
"periodic_callback",
"(",
"self",
")",
":",
"if",
"self",
".",
"stopped",
":",
"return",
"# Check if we should start scanning again",
"if",
"not",
"self",
".",
"scanning",
"and",
"len",
"(",
"self",
".",
"connections",
".",
"get_connections",
"(",
")",
")",
"==",
"0",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Restarting scan for devices\"",
")",
"self",
".",
"start_scan",
"(",
"self",
".",
"_active_scan",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Finished restarting scan for devices\"",
")"
] | 42.363636 | 21.090909 |
def cross_lists(*sets):
"""Return the cross product of the arguments"""
wheels = [iter(_) for _ in sets]
digits = [next(it) for it in wheels]
while True:
yield digits[:]
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = next(wheels[i])
break
except StopIteration:
wheels[i] = iter(sets[i])
digits[i] = next(wheels[i])
else:
break | [
"def",
"cross_lists",
"(",
"*",
"sets",
")",
":",
"wheels",
"=",
"[",
"iter",
"(",
"_",
")",
"for",
"_",
"in",
"sets",
"]",
"digits",
"=",
"[",
"next",
"(",
"it",
")",
"for",
"it",
"in",
"wheels",
"]",
"while",
"True",
":",
"yield",
"digits",
"[",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"digits",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"try",
":",
"digits",
"[",
"i",
"]",
"=",
"next",
"(",
"wheels",
"[",
"i",
"]",
")",
"break",
"except",
"StopIteration",
":",
"wheels",
"[",
"i",
"]",
"=",
"iter",
"(",
"sets",
"[",
"i",
"]",
")",
"digits",
"[",
"i",
"]",
"=",
"next",
"(",
"wheels",
"[",
"i",
"]",
")",
"else",
":",
"break"
] | 30.733333 | 11.733333 |
def satisfiable(self, extra_constraints=(), exact=None):
"""
This function does a constraint check and checks if the solver is in a sat state.
:param extra_constraints: Extra constraints (as ASTs) to add to s for this solve
:param exact: If False, return approximate solutions.
:return: True if sat, otherwise false
"""
if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
er = self._solver.satisfiable(extra_constraints=self._adjust_constraint_list(extra_constraints))
ar = self._solver.satisfiable(extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
if er is True:
assert ar is True
return ar
return self._solver.satisfiable(extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact) | [
"def",
"satisfiable",
"(",
"self",
",",
"extra_constraints",
"=",
"(",
")",
",",
"exact",
"=",
"None",
")",
":",
"if",
"exact",
"is",
"False",
"and",
"o",
".",
"VALIDATE_APPROXIMATIONS",
"in",
"self",
".",
"state",
".",
"options",
":",
"er",
"=",
"self",
".",
"_solver",
".",
"satisfiable",
"(",
"extra_constraints",
"=",
"self",
".",
"_adjust_constraint_list",
"(",
"extra_constraints",
")",
")",
"ar",
"=",
"self",
".",
"_solver",
".",
"satisfiable",
"(",
"extra_constraints",
"=",
"self",
".",
"_adjust_constraint_list",
"(",
"extra_constraints",
")",
",",
"exact",
"=",
"False",
")",
"if",
"er",
"is",
"True",
":",
"assert",
"ar",
"is",
"True",
"return",
"ar",
"return",
"self",
".",
"_solver",
".",
"satisfiable",
"(",
"extra_constraints",
"=",
"self",
".",
"_adjust_constraint_list",
"(",
"extra_constraints",
")",
",",
"exact",
"=",
"exact",
")"
] | 56.375 | 35 |
def _write(self, string):
"""Helper function to call write_data on the provided FTDI device and
verify it succeeds.
"""
# Get modem status. Useful to enable for debugging.
#ret, status = ftdi.poll_modem_status(self._ctx)
#if ret == 0:
# logger.debug('Modem status {0:02X}'.format(status))
#else:
# logger.debug('Modem status error {0}'.format(ret))
length = len(string)
try:
ret = ftdi.write_data(self._ctx, string, length)
except TypeError:
ret = ftdi.write_data(self._ctx, string); #compatible with libFtdi 1.3
# Log the string that was written in a python hex string format using a very
# ugly one-liner list comprehension for brevity.
#logger.debug('Wrote {0}'.format(''.join(['\\x{0:02X}'.format(ord(x)) for x in string])))
if ret < 0:
raise RuntimeError('ftdi_write_data failed with error {0}: {1}'.format(ret, ftdi.get_error_string(self._ctx)))
if ret != length:
raise RuntimeError('ftdi_write_data expected to write {0} bytes but actually wrote {1}!'.format(length, ret)) | [
"def",
"_write",
"(",
"self",
",",
"string",
")",
":",
"# Get modem status. Useful to enable for debugging.",
"#ret, status = ftdi.poll_modem_status(self._ctx)",
"#if ret == 0:",
"#\tlogger.debug('Modem status {0:02X}'.format(status))",
"#else:",
"#\tlogger.debug('Modem status error {0}'.format(ret))",
"length",
"=",
"len",
"(",
"string",
")",
"try",
":",
"ret",
"=",
"ftdi",
".",
"write_data",
"(",
"self",
".",
"_ctx",
",",
"string",
",",
"length",
")",
"except",
"TypeError",
":",
"ret",
"=",
"ftdi",
".",
"write_data",
"(",
"self",
".",
"_ctx",
",",
"string",
")",
"#compatible with libFtdi 1.3",
"# Log the string that was written in a python hex string format using a very",
"# ugly one-liner list comprehension for brevity.",
"#logger.debug('Wrote {0}'.format(''.join(['\\\\x{0:02X}'.format(ord(x)) for x in string])))",
"if",
"ret",
"<",
"0",
":",
"raise",
"RuntimeError",
"(",
"'ftdi_write_data failed with error {0}: {1}'",
".",
"format",
"(",
"ret",
",",
"ftdi",
".",
"get_error_string",
"(",
"self",
".",
"_ctx",
")",
")",
")",
"if",
"ret",
"!=",
"length",
":",
"raise",
"RuntimeError",
"(",
"'ftdi_write_data expected to write {0} bytes but actually wrote {1}!'",
".",
"format",
"(",
"length",
",",
"ret",
")",
")"
] | 51.909091 | 26.454545 |
def float_field_data(field, **kwargs):
"""
Return random value for FloatField
>>> result = any_form_field(forms.FloatField(max_value=200, min_value=100))
>>> type(result)
<type 'str'>
>>> float(result) >=100, float(result) <=200
(True, True)
"""
min_value = 0
max_value = 100
from django.core.validators import MinValueValidator, MaxValueValidator
for elem in field.validators:
if isinstance(elem, MinValueValidator):
min_value = elem.limit_value
if isinstance(elem, MaxValueValidator):
max_value = elem.limit_value
min_value = kwargs.get('min_value', min_value)
max_value = kwargs.get('max_value', max_value)
precision = kwargs.get('precision', 3)
return str(xunit.any_float(min_value=min_value, max_value=max_value, precision=precision)) | [
"def",
"float_field_data",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
":",
"min_value",
"=",
"0",
"max_value",
"=",
"100",
"from",
"django",
".",
"core",
".",
"validators",
"import",
"MinValueValidator",
",",
"MaxValueValidator",
"for",
"elem",
"in",
"field",
".",
"validators",
":",
"if",
"isinstance",
"(",
"elem",
",",
"MinValueValidator",
")",
":",
"min_value",
"=",
"elem",
".",
"limit_value",
"if",
"isinstance",
"(",
"elem",
",",
"MaxValueValidator",
")",
":",
"max_value",
"=",
"elem",
".",
"limit_value",
"min_value",
"=",
"kwargs",
".",
"get",
"(",
"'min_value'",
",",
"min_value",
")",
"max_value",
"=",
"kwargs",
".",
"get",
"(",
"'max_value'",
",",
"max_value",
")",
"precision",
"=",
"kwargs",
".",
"get",
"(",
"'precision'",
",",
"3",
")",
"return",
"str",
"(",
"xunit",
".",
"any_float",
"(",
"min_value",
"=",
"min_value",
",",
"max_value",
"=",
"max_value",
",",
"precision",
"=",
"precision",
")",
")"
] | 34.291667 | 17.291667 |
def urlencode_params(params):
"""URL encodes the parameters.
:param params: The parameters
:type params: list of key/value tuples.
:rtype: string
"""
# urlencode does not handle unicode strings in Python 2.
# Firstly, normalize the values so they get encoded correctly.
params = [(key, normalize_for_urlencode(val)) for key, val in params]
# Secondly, unquote unreserved chars which are incorrectly quoted
# by urllib.urlencode, causing invalid auth signatures. See GH #72
# for more info.
return requests.utils.unquote_unreserved(urlencode(params)) | [
"def",
"urlencode_params",
"(",
"params",
")",
":",
"# urlencode does not handle unicode strings in Python 2.",
"# Firstly, normalize the values so they get encoded correctly.",
"params",
"=",
"[",
"(",
"key",
",",
"normalize_for_urlencode",
"(",
"val",
")",
")",
"for",
"key",
",",
"val",
"in",
"params",
"]",
"# Secondly, unquote unreserved chars which are incorrectly quoted",
"# by urllib.urlencode, causing invalid auth signatures. See GH #72",
"# for more info.",
"return",
"requests",
".",
"utils",
".",
"unquote_unreserved",
"(",
"urlencode",
"(",
"params",
")",
")"
] | 39 | 20.266667 |
def _set_below(self, v, load=False):
"""
Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_below is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_below() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """below must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
})
self.__below = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_below",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"below",
".",
"below",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"below\"",
",",
"rest_name",
"=",
"\"below\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Below trigger'",
",",
"u'cli-incomplete-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-threshold-monitor'",
",",
"defining_module",
"=",
"'brocade-threshold-monitor'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"below must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=below.below, is_container='container', presence=False, yang_name=\"below\", rest_name=\"below\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__below",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 74.636364 | 35.636364 |
def score(self, X, y, compute=True):
"""Returns the score on the given data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
return self.estimator.score(X, y)
"""
scoring = self.scoring
X = self._check_array(X)
y = self._check_array(y)
if not scoring:
if type(self._postfit_estimator).score == sklearn.base.RegressorMixin.score:
scoring = "r2"
elif (
type(self._postfit_estimator).score
== sklearn.base.ClassifierMixin.score
):
scoring = "accuracy"
else:
scoring = self.scoring
if scoring:
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
scorer = sklearn.metrics.get_scorer(scoring)
else:
scorer = get_scorer(scoring, compute=compute)
return scorer(self, X, y)
else:
return self._postfit_estimator.score(X, y) | [
"def",
"score",
"(",
"self",
",",
"X",
",",
"y",
",",
"compute",
"=",
"True",
")",
":",
"scoring",
"=",
"self",
".",
"scoring",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"y",
"=",
"self",
".",
"_check_array",
"(",
"y",
")",
"if",
"not",
"scoring",
":",
"if",
"type",
"(",
"self",
".",
"_postfit_estimator",
")",
".",
"score",
"==",
"sklearn",
".",
"base",
".",
"RegressorMixin",
".",
"score",
":",
"scoring",
"=",
"\"r2\"",
"elif",
"(",
"type",
"(",
"self",
".",
"_postfit_estimator",
")",
".",
"score",
"==",
"sklearn",
".",
"base",
".",
"ClassifierMixin",
".",
"score",
")",
":",
"scoring",
"=",
"\"accuracy\"",
"else",
":",
"scoring",
"=",
"self",
".",
"scoring",
"if",
"scoring",
":",
"if",
"not",
"dask",
".",
"is_dask_collection",
"(",
"X",
")",
"and",
"not",
"dask",
".",
"is_dask_collection",
"(",
"y",
")",
":",
"scorer",
"=",
"sklearn",
".",
"metrics",
".",
"get_scorer",
"(",
"scoring",
")",
"else",
":",
"scorer",
"=",
"get_scorer",
"(",
"scoring",
",",
"compute",
"=",
"compute",
")",
"return",
"scorer",
"(",
"self",
",",
"X",
",",
"y",
")",
"else",
":",
"return",
"self",
".",
"_postfit_estimator",
".",
"score",
"(",
"X",
",",
"y",
")"
] | 33.780488 | 20.121951 |
def info(self):
"""
Prints out information for the loaded database, namely the available tables and the number of entries for each.
"""
t = self.query("SELECT * FROM sqlite_master WHERE type='table'", fmt='table')
all_tables = t['name'].tolist()
print('\nDatabase path: {} \nSQL path: {}\n'.format(self.dbpath, self.sqlpath))
print('Database Inventory')
print('==================')
for table in ['sources'] + [t for t in all_tables if
t not in ['sources', 'sqlite_sequence']]:
x = self.query('select count() from {}'.format(table), fmt='array', fetch='one')
if x is None: continue
print('{}: {}'.format(table.upper(), x[0])) | [
"def",
"info",
"(",
"self",
")",
":",
"t",
"=",
"self",
".",
"query",
"(",
"\"SELECT * FROM sqlite_master WHERE type='table'\"",
",",
"fmt",
"=",
"'table'",
")",
"all_tables",
"=",
"t",
"[",
"'name'",
"]",
".",
"tolist",
"(",
")",
"print",
"(",
"'\\nDatabase path: {} \\nSQL path: {}\\n'",
".",
"format",
"(",
"self",
".",
"dbpath",
",",
"self",
".",
"sqlpath",
")",
")",
"print",
"(",
"'Database Inventory'",
")",
"print",
"(",
"'=================='",
")",
"for",
"table",
"in",
"[",
"'sources'",
"]",
"+",
"[",
"t",
"for",
"t",
"in",
"all_tables",
"if",
"t",
"not",
"in",
"[",
"'sources'",
",",
"'sqlite_sequence'",
"]",
"]",
":",
"x",
"=",
"self",
".",
"query",
"(",
"'select count() from {}'",
".",
"format",
"(",
"table",
")",
",",
"fmt",
"=",
"'array'",
",",
"fetch",
"=",
"'one'",
")",
"if",
"x",
"is",
"None",
":",
"continue",
"print",
"(",
"'{}: {}'",
".",
"format",
"(",
"table",
".",
"upper",
"(",
")",
",",
"x",
"[",
"0",
"]",
")",
")"
] | 53.928571 | 24.071429 |
def list_events(self, cond, cols, fields):
    """
    Return the list of events, with a specific order and filtered by a condition.

    Each element of the result is a row with ``cols`` entries: the leading
    entries are the key-field values (or double-quoted literals from
    ``fields``, copied verbatim), and the last ``None`` slot of the template
    receives a dictionary mapping a tuple of the remaining field values to
    the number of associated events.

    :param cond: filter of the form ``field=="regex"`` / ``field!="regex"``,
        or ``"*"`` to disable filtering.
    :param cols: number of columns in each output row.
    :param fields: field (gid) names, optionally interleaved with
        double-quoted literal strings.
    :return: list of rows, ordered by the first key field.
    """
    def insert_row():
        """
        Internal function to flush results for a single tabkey to result list.
        """
        row = list(row_template)
        j = 0
        for n in range(cols):
            if row[n] is None:
                if j == keylen:
                    row[n] = tabvalues
                else:
                    row[n] = tabkey[j]
                j += 1
        reslist.append(row)
    if not self.results:
        return []
    # Set local variables
    results = self.results
    pos = [self.key_gids.index(gid) for gid in fields if gid[0] != '"']
    has_cond = cond != "*"
    # If a condition is passed then compile a pattern matching object.
    # NOTE: the pattern is a raw string; the original plain string relied on
    # the deprecated handling of the invalid "\w" escape sequence.
    if has_cond:
        match = re.search(r"(\w+)(!=|==)\"([^\"]*)\"", cond)
        condpos = self.key_gids.index(match.group(1))
        invert = (match.group(2) == '!=')
        recond = re.compile(match.group(3))
    else:
        recond = condpos = None
    # Define the row template with places for values and fixed strings
    row_template = []
    for i in range(cols):
        if fields[i][0] == '"':
            row_template.append(fields[i].strip('"'))
        else:
            row_template.append(None)
    # Set the processing table and reduced key length
    keylen = len(pos) - (len(fields) - cols) - 1
    tabvalues = dict()
    tabkey = None
    reslist = []
    for key in sorted(results, key=lambda x: x[pos[0]]):
        # Skip results that don't satisfy the condition
        if has_cond:
            try:
                match = recond.search(key[condpos])
            except TypeError:
                continue
            if ((match is None) and not invert) or ((match is not None) and invert):
                continue
        new_tabkey = [key[pos[i]] for i in range(keylen)]
        if tabkey is None:
            tabkey = new_tabkey
        elif tabkey != new_tabkey:
            # Key changed: flush the accumulated values for the previous key
            insert_row()
            tabvalues = dict()
            tabkey = [key[pos[i]] for i in range(keylen)]
        value = tuple([key[k] for k in pos[keylen:]])
        if value in tabvalues:
            tabvalues[value] += results[key]
        else:
            tabvalues[value] = results[key]
    if tabvalues:
        insert_row()
    return reslist
"def",
"list_events",
"(",
"self",
",",
"cond",
",",
"cols",
",",
"fields",
")",
":",
"def",
"insert_row",
"(",
")",
":",
"\"\"\"\r\n Internal function to flush results for a single tabkey to result list.\r\n \"\"\"",
"row",
"=",
"list",
"(",
"row_template",
")",
"j",
"=",
"0",
"for",
"n",
"in",
"range",
"(",
"cols",
")",
":",
"if",
"row",
"[",
"n",
"]",
"is",
"None",
":",
"if",
"j",
"==",
"keylen",
":",
"row",
"[",
"n",
"]",
"=",
"tabvalues",
"else",
":",
"row",
"[",
"n",
"]",
"=",
"tabkey",
"[",
"j",
"]",
"j",
"+=",
"1",
"reslist",
".",
"append",
"(",
"row",
")",
"if",
"not",
"self",
".",
"results",
":",
"return",
"[",
"]",
"# Set local variables\r",
"results",
"=",
"self",
".",
"results",
"pos",
"=",
"[",
"self",
".",
"key_gids",
".",
"index",
"(",
"gid",
")",
"for",
"gid",
"in",
"fields",
"if",
"gid",
"[",
"0",
"]",
"!=",
"'\"'",
"]",
"has_cond",
"=",
"cond",
"!=",
"\"*\"",
"# If a condition is passed then compile a pattern matching object\r",
"if",
"has_cond",
":",
"match",
"=",
"re",
".",
"search",
"(",
"\"(\\w+)(!=|==)\\\"([^\\\"]*)\\\"\"",
",",
"cond",
")",
"condpos",
"=",
"self",
".",
"key_gids",
".",
"index",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"invert",
"=",
"(",
"match",
".",
"group",
"(",
"2",
")",
"==",
"'!='",
")",
"recond",
"=",
"re",
".",
"compile",
"(",
"match",
".",
"group",
"(",
"3",
")",
")",
"else",
":",
"recond",
"=",
"condpos",
"=",
"None",
"# Define the row template with places for values and fixed strings\r",
"row_template",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"cols",
")",
":",
"if",
"fields",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"'\"'",
":",
"row_template",
".",
"append",
"(",
"fields",
"[",
"i",
"]",
".",
"strip",
"(",
"'\"'",
")",
")",
"else",
":",
"row_template",
".",
"append",
"(",
"None",
")",
"# Set the processing table and reduced key length\r",
"keylen",
"=",
"len",
"(",
"pos",
")",
"-",
"(",
"len",
"(",
"fields",
")",
"-",
"cols",
")",
"-",
"1",
"tabvalues",
"=",
"dict",
"(",
")",
"tabkey",
"=",
"None",
"reslist",
"=",
"[",
"]",
"for",
"key",
"in",
"sorted",
"(",
"results",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"pos",
"[",
"0",
"]",
"]",
")",
":",
"# Skip results that don't satisfy the condition\r",
"if",
"has_cond",
":",
"try",
":",
"match",
"=",
"recond",
".",
"search",
"(",
"key",
"[",
"condpos",
"]",
")",
"except",
"TypeError",
":",
"continue",
"if",
"(",
"(",
"match",
"is",
"None",
")",
"and",
"not",
"invert",
")",
"or",
"(",
"(",
"match",
"is",
"not",
"None",
")",
"and",
"invert",
")",
":",
"continue",
"new_tabkey",
"=",
"[",
"key",
"[",
"pos",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"keylen",
")",
"]",
"if",
"tabkey",
"is",
"None",
":",
"tabkey",
"=",
"new_tabkey",
"elif",
"tabkey",
"!=",
"new_tabkey",
":",
"insert_row",
"(",
")",
"tabvalues",
"=",
"dict",
"(",
")",
"tabkey",
"=",
"[",
"key",
"[",
"pos",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"keylen",
")",
"]",
"value",
"=",
"tuple",
"(",
"[",
"key",
"[",
"k",
"]",
"for",
"k",
"in",
"pos",
"[",
"keylen",
":",
"]",
"]",
")",
"if",
"value",
"in",
"tabvalues",
":",
"tabvalues",
"[",
"value",
"]",
"+=",
"results",
"[",
"key",
"]",
"else",
":",
"tabvalues",
"[",
"value",
"]",
"=",
"results",
"[",
"key",
"]",
"if",
"tabvalues",
":",
"insert_row",
"(",
")",
"return",
"reslist"
] | 36.54878 | 18.670732 |
def close_client_stream(client_stream, unix_path):
    """ Closes provided client stream """
    try:
        # Politely shut both directions down before closing the descriptor.
        client_stream.shutdown(socket.SHUT_RDWR)
        if not unix_path:
            peer = client_stream.getpeername()
            logger.debug('%s:%s: Connection closed', peer[0], peer[1])
        else:
            logger.debug('%s: Connection closed', unix_path)
    except (socket.error, OSError) as exception:
        # Best-effort: the peer may already be gone; log and carry on.
        logger.debug('Connection closing error: %s', exception)
    client_stream.close()
"def",
"close_client_stream",
"(",
"client_stream",
",",
"unix_path",
")",
":",
"try",
":",
"client_stream",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"if",
"unix_path",
":",
"logger",
".",
"debug",
"(",
"'%s: Connection closed'",
",",
"unix_path",
")",
"else",
":",
"peer",
"=",
"client_stream",
".",
"getpeername",
"(",
")",
"logger",
".",
"debug",
"(",
"'%s:%s: Connection closed'",
",",
"peer",
"[",
"0",
"]",
",",
"peer",
"[",
"1",
"]",
")",
"except",
"(",
"socket",
".",
"error",
",",
"OSError",
")",
"as",
"exception",
":",
"logger",
".",
"debug",
"(",
"'Connection closing error: %s'",
",",
"exception",
")",
"client_stream",
".",
"close",
"(",
")"
] | 41.083333 | 16.5 |
def all_network_files():
    """All network files"""
    # TODO: list explicitly since some are missing?
    network_types = [
        'AND-circle',
        'MAJ-specialized',
        'MAJ-complete',
        'iit-3.0-modular'
    ]
    # One name per (size, type) pair, sizes 5..7, in size-major order.
    return ['{}-{}'.format(size, kind)
            for size in range(5, 8)
            for kind in network_types]
"def",
"all_network_files",
"(",
")",
":",
"# TODO: list explicitly since some are missing?",
"network_types",
"=",
"[",
"'AND-circle'",
",",
"'MAJ-specialized'",
",",
"'MAJ-complete'",
",",
"'iit-3.0-modular'",
"]",
"network_sizes",
"=",
"range",
"(",
"5",
",",
"8",
")",
"network_files",
"=",
"[",
"]",
"for",
"n",
"in",
"network_sizes",
":",
"for",
"t",
"in",
"network_types",
":",
"network_files",
".",
"append",
"(",
"'{}-{}'",
".",
"format",
"(",
"n",
",",
"t",
")",
")",
"return",
"network_files"
] | 27.466667 | 15 |
def get_pmap_from_nrml(oqparam, fname):
    """
    Read hazard curves from a NRML XML file and build a probability map.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance;
        NOTE: mutated in place — ``hazard_imtls`` and
        ``investigation_time`` are set from the file contents
    :param fname:
        an XML file containing hazard curves
    :returns:
        site mesh, curve array
    """
    hcurves_by_imt = {}
    oqparam.hazard_imtls = imtls = {}
    for hcurves in nrml.read(fname):
        imt = hcurves['IMT']
        oqparam.investigation_time = hcurves['investigationTime']
        if imt == 'SA':
            # spectral-acceleration IMTs are keyed by their period, e.g. SA(0.1)
            imt += '(%s)' % hcurves['saPeriod']
        imtls[imt] = ~hcurves.IMLs
        # sort by site coordinates so every IMT section yields the same order
        data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:])
        hcurves_by_imt[imt] = numpy.array([d[1] for d in data])
    lons, lats = [], []
    # `data` is left over from the last IMT section; presumably every section
    # lists the same set of sites — TODO confirm against the NRML schema
    for xy, poes in data:
        lons.append(xy[0])
        lats.append(xy[1])
    mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
    num_levels = sum(len(v) for v in imtls.values())
    array = numpy.zeros((len(mesh), num_levels))
    imtls = DictArray(imtls)
    for imt_ in hcurves_by_imt:
        # place each IMT's curves into its slice of the level axis
        array[:, imtls(imt_)] = hcurves_by_imt[imt_]
    return mesh, ProbabilityMap.from_array(array, range(len(mesh)))
"def",
"get_pmap_from_nrml",
"(",
"oqparam",
",",
"fname",
")",
":",
"hcurves_by_imt",
"=",
"{",
"}",
"oqparam",
".",
"hazard_imtls",
"=",
"imtls",
"=",
"{",
"}",
"for",
"hcurves",
"in",
"nrml",
".",
"read",
"(",
"fname",
")",
":",
"imt",
"=",
"hcurves",
"[",
"'IMT'",
"]",
"oqparam",
".",
"investigation_time",
"=",
"hcurves",
"[",
"'investigationTime'",
"]",
"if",
"imt",
"==",
"'SA'",
":",
"imt",
"+=",
"'(%s)'",
"%",
"hcurves",
"[",
"'saPeriod'",
"]",
"imtls",
"[",
"imt",
"]",
"=",
"~",
"hcurves",
".",
"IMLs",
"data",
"=",
"sorted",
"(",
"(",
"~",
"node",
".",
"Point",
".",
"pos",
",",
"~",
"node",
".",
"poEs",
")",
"for",
"node",
"in",
"hcurves",
"[",
"1",
":",
"]",
")",
"hcurves_by_imt",
"[",
"imt",
"]",
"=",
"numpy",
".",
"array",
"(",
"[",
"d",
"[",
"1",
"]",
"for",
"d",
"in",
"data",
"]",
")",
"lons",
",",
"lats",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"xy",
",",
"poes",
"in",
"data",
":",
"lons",
".",
"append",
"(",
"xy",
"[",
"0",
"]",
")",
"lats",
".",
"append",
"(",
"xy",
"[",
"1",
"]",
")",
"mesh",
"=",
"geo",
".",
"Mesh",
"(",
"numpy",
".",
"array",
"(",
"lons",
")",
",",
"numpy",
".",
"array",
"(",
"lats",
")",
")",
"num_levels",
"=",
"sum",
"(",
"len",
"(",
"v",
")",
"for",
"v",
"in",
"imtls",
".",
"values",
"(",
")",
")",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"len",
"(",
"mesh",
")",
",",
"num_levels",
")",
")",
"imtls",
"=",
"DictArray",
"(",
"imtls",
")",
"for",
"imt_",
"in",
"hcurves_by_imt",
":",
"array",
"[",
":",
",",
"imtls",
"(",
"imt_",
")",
"]",
"=",
"hcurves_by_imt",
"[",
"imt_",
"]",
"return",
"mesh",
",",
"ProbabilityMap",
".",
"from_array",
"(",
"array",
",",
"range",
"(",
"len",
"(",
"mesh",
")",
")",
")"
] | 37.066667 | 14.066667 |
def get_queue(name='default', default_timeout=None, is_async=None,
              autocommit=None, connection=None, queue_class=None, job_class=None, **kwargs):
    """
    Returns an rq Queue using parameters defined in ``RQ_QUEUES``

    :param name: queue section name in the ``RQ_QUEUES`` setting.
    :param default_timeout: default job timeout; falls back to the
        ``DEFAULT_TIMEOUT`` entry of the queue's configuration.
    :param is_async: whether jobs run asynchronously; falls back to the
        ``ASYNC`` configuration entry (default ``True``).
    :param autocommit: forwarded to the queue class.
    :param connection: Redis connection; resolved from ``name`` when absent.
    :param queue_class: optional queue class override.
    :param job_class: optional job class override.
    :param kwargs: extra keyword arguments forwarded to the queue class.
    """
    from .settings import QUEUES
    if kwargs.get('async') is not None:
        # Pop (not just read) the legacy keyword so it is not forwarded to
        # queue_class below alongside `is_async`; the original `.get()` left
        # it inside **kwargs, passing both arguments at once.
        is_async = kwargs.pop('async')
        warnings.warn('The `async` keyword is deprecated. Use `is_async` instead', DeprecationWarning)
    # If is_async is provided, use it, otherwise, get it from the configuration
    if is_async is None:
        is_async = QUEUES[name].get('ASYNC', True)
    # same for job_class
    job_class = get_job_class(job_class)
    if default_timeout is None:
        default_timeout = QUEUES[name].get('DEFAULT_TIMEOUT')
    if connection is None:
        connection = get_connection(name)
    queue_class = get_queue_class(QUEUES[name], queue_class)
    return queue_class(name, default_timeout=default_timeout,
                       connection=connection, is_async=is_async,
                       job_class=job_class, autocommit=autocommit, **kwargs)
"def",
"get_queue",
"(",
"name",
"=",
"'default'",
",",
"default_timeout",
"=",
"None",
",",
"is_async",
"=",
"None",
",",
"autocommit",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"queue_class",
"=",
"None",
",",
"job_class",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"settings",
"import",
"QUEUES",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
"is",
"not",
"None",
":",
"is_async",
"=",
"kwargs",
"[",
"'async'",
"]",
"warnings",
".",
"warn",
"(",
"'The `async` keyword is deprecated. Use `is_async` instead'",
",",
"DeprecationWarning",
")",
"# If is_async is provided, use it, otherwise, get it from the configuration",
"if",
"is_async",
"is",
"None",
":",
"is_async",
"=",
"QUEUES",
"[",
"name",
"]",
".",
"get",
"(",
"'ASYNC'",
",",
"True",
")",
"# same for job_class",
"job_class",
"=",
"get_job_class",
"(",
"job_class",
")",
"if",
"default_timeout",
"is",
"None",
":",
"default_timeout",
"=",
"QUEUES",
"[",
"name",
"]",
".",
"get",
"(",
"'DEFAULT_TIMEOUT'",
")",
"if",
"connection",
"is",
"None",
":",
"connection",
"=",
"get_connection",
"(",
"name",
")",
"queue_class",
"=",
"get_queue_class",
"(",
"QUEUES",
"[",
"name",
"]",
",",
"queue_class",
")",
"return",
"queue_class",
"(",
"name",
",",
"default_timeout",
"=",
"default_timeout",
",",
"connection",
"=",
"connection",
",",
"is_async",
"=",
"is_async",
",",
"job_class",
"=",
"job_class",
",",
"autocommit",
"=",
"autocommit",
",",
"*",
"*",
"kwargs",
")"
] | 43.24 | 21.08 |
def one_to_many(df, unitcol, manycol):
    """
    Assert that a many-to-one relationship is preserved between two
    columns. For example, a retail store will have distinct
    departments, each with several employees. If each employee may
    only work in a single department, then the relationship of the
    department to the employees is one to many.

    Parameters
    ==========
    df : DataFrame
    unitcol : str
        The column that encapsulates the groups in ``manycol``.
    manycol : str
        The column that must remain unique in the distinct pairs
        between ``manycol`` and ``unitcol``

    Returns
    =======
    df : DataFrame

    Raises
    ======
    AssertionError
        If any value of ``manycol`` is paired with more than one
        distinct value of ``unitcol``.
    """
    subset = df[[manycol, unitcol]].drop_duplicates()
    # Vectorized duplicate check replaces the original O(unique * rows)
    # Python loop: after de-duplication, a `manycol` value that appears in
    # more than one row is paired with multiple `unitcol` values.
    offending = subset[subset[manycol].duplicated(keep=False)]
    if not offending.empty:
        # Same value the original loop reported: the duplicated `manycol`
        # value with the earliest first occurrence.
        many = offending[manycol].iloc[0]
        msg = "{} in {} has multiple values for {}".format(many, manycol, unitcol)
        raise AssertionError(msg)
    return df
"def",
"one_to_many",
"(",
"df",
",",
"unitcol",
",",
"manycol",
")",
":",
"subset",
"=",
"df",
"[",
"[",
"manycol",
",",
"unitcol",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
"for",
"many",
"in",
"subset",
"[",
"manycol",
"]",
".",
"unique",
"(",
")",
":",
"if",
"subset",
"[",
"subset",
"[",
"manycol",
"]",
"==",
"many",
"]",
".",
"shape",
"[",
"0",
"]",
">",
"1",
":",
"msg",
"=",
"\"{} in {} has multiple values for {}\"",
".",
"format",
"(",
"many",
",",
"manycol",
",",
"unitcol",
")",
"raise",
"AssertionError",
"(",
"msg",
")",
"return",
"df"
] | 32.275862 | 21.586207 |
def update_firmware(filename,
                    host=None,
                    admin_username=None,
                    admin_password=None):
    '''
    Updates firmware using local firmware file

    .. code-block:: bash

        salt dell dracr.update_firmware firmware.exe

    This executes the following command on your FX2
    (using username and password stored in the pillar data)

    .. code-block:: bash

        racadm update –f firmware.exe -u user –p pass

    :param filename: path to the local firmware image.
    :param host: DRAC host to update.
    :param admin_username: administrator user name for the DRAC.
    :param admin_password: administrator password for the DRAC.
    :raises CommandExecutionError: if ``filename`` does not exist.
    '''
    if os.path.exists(filename):
        # BUG FIX: forward the caller's connection details; the original
        # code hard-coded None for host/admin_username/admin_password,
        # silently ignoring the arguments.
        return _update_firmware('update -f {0}'.format(filename),
                                host=host,
                                admin_username=admin_username,
                                admin_password=admin_password)
    else:
        raise CommandExecutionError('Unable to find firmware file {0}'
                                    .format(filename))
"def",
"update_firmware",
"(",
"filename",
",",
"host",
"=",
"None",
",",
"admin_username",
"=",
"None",
",",
"admin_password",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"_update_firmware",
"(",
"'update -f {0}'",
".",
"format",
"(",
"filename",
")",
",",
"host",
"=",
"None",
",",
"admin_username",
"=",
"None",
",",
"admin_password",
"=",
"None",
")",
"else",
":",
"raise",
"CommandExecutionError",
"(",
"'Unable to find firmware file {0}'",
".",
"format",
"(",
"filename",
")",
")"
] | 31.148148 | 20.62963 |
def related_records_78708(self, key, value):
    """Populate the ``related_records`` key."""
    record = get_record_ref(maybe_int(value.get('w')), 'literature')
    # Guard clause: without a resolvable record reference there is
    # nothing to populate (implicitly yields None, as before).
    if not record:
        return None
    result = {
        'curated_relation': record is not None,
        'record': record,
        'relation_freetext': value.get('i'),
    }
    return result
"def",
"related_records_78708",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"record",
"=",
"get_record_ref",
"(",
"maybe_int",
"(",
"value",
".",
"get",
"(",
"'w'",
")",
")",
",",
"'literature'",
")",
"if",
"record",
":",
"return",
"{",
"'curated_relation'",
":",
"record",
"is",
"not",
"None",
",",
"'record'",
":",
"record",
",",
"'relation_freetext'",
":",
"value",
".",
"get",
"(",
"'i'",
")",
",",
"}"
] | 36.222222 | 15.888889 |
def basic_distance(trig_pin, echo_pin, celsius=20):
    '''Return an unformatted distance in cm's as read directly from
    RPi.GPIO.

    :param trig_pin: pin wired to the sensor's trigger input.
    :param echo_pin: pin wired to the sensor's echo output.
    :param celsius: ambient temperature, used to correct the speed of sound.
    :raises SystemError: if no echo pulse is detected after ~1000 polls.
    '''
    # Speed of sound in m/s, corrected for air temperature.
    speed_of_sound = 331.3 * math.sqrt(1+(celsius / 273.15))
    GPIO.setup(trig_pin, GPIO.OUT)
    GPIO.setup(echo_pin, GPIO.IN)
    GPIO.output(trig_pin, GPIO.LOW)
    time.sleep(0.1)
    # Emit the 10-microsecond trigger pulse.
    GPIO.output(trig_pin, True)
    time.sleep(0.00001)
    GPIO.output(trig_pin, False)
    # BUG FIX: initialise both timestamps so a pathologically fast or
    # missed echo cannot leave them unbound (the original code raised
    # NameError when the echo pin was already high on the first poll).
    sonar_signal_off = sonar_signal_on = time.time()
    echo_status_counter = 1
    while GPIO.input(echo_pin) == 0:
        if echo_status_counter < 1000:
            sonar_signal_off = time.time()
            echo_status_counter += 1
        else:
            raise SystemError('Echo pulse was not received')
    while GPIO.input(echo_pin) == 1:
        sonar_signal_on = time.time()
    time_passed = sonar_signal_on - sonar_signal_off
    # Half the round trip, converted from m/s to cm/s.
    return time_passed * ((speed_of_sound * 100) / 2)
"def",
"basic_distance",
"(",
"trig_pin",
",",
"echo_pin",
",",
"celsius",
"=",
"20",
")",
":",
"speed_of_sound",
"=",
"331.3",
"*",
"math",
".",
"sqrt",
"(",
"1",
"+",
"(",
"celsius",
"/",
"273.15",
")",
")",
"GPIO",
".",
"setup",
"(",
"trig_pin",
",",
"GPIO",
".",
"OUT",
")",
"GPIO",
".",
"setup",
"(",
"echo_pin",
",",
"GPIO",
".",
"IN",
")",
"GPIO",
".",
"output",
"(",
"trig_pin",
",",
"GPIO",
".",
"LOW",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"GPIO",
".",
"output",
"(",
"trig_pin",
",",
"True",
")",
"time",
".",
"sleep",
"(",
"0.00001",
")",
"GPIO",
".",
"output",
"(",
"trig_pin",
",",
"False",
")",
"echo_status_counter",
"=",
"1",
"while",
"GPIO",
".",
"input",
"(",
"echo_pin",
")",
"==",
"0",
":",
"if",
"echo_status_counter",
"<",
"1000",
":",
"sonar_signal_off",
"=",
"time",
".",
"time",
"(",
")",
"echo_status_counter",
"+=",
"1",
"else",
":",
"raise",
"SystemError",
"(",
"'Echo pulse was not received'",
")",
"while",
"GPIO",
".",
"input",
"(",
"echo_pin",
")",
"==",
"1",
":",
"sonar_signal_on",
"=",
"time",
".",
"time",
"(",
")",
"time_passed",
"=",
"sonar_signal_on",
"-",
"sonar_signal_off",
"return",
"time_passed",
"*",
"(",
"(",
"speed_of_sound",
"*",
"100",
")",
"/",
"2",
")"
] | 34.625 | 14.125 |
def find(self, name, menu=None):
    """
    Finds a menu item by name and returns it.

    :param name:
        The menu item name.
    :param menu:
        Optional list of items to search; a falsy value falls back
        to ``self.menu``. Children are searched depth-first.
    """
    entries = menu or self.menu
    for entry in entries:
        if entry.name == name:
            return entry
        if not entry.childs:
            continue
        found = self.find(name, menu=entry.childs)
        if found:
            return found
"def",
"find",
"(",
"self",
",",
"name",
",",
"menu",
"=",
"None",
")",
":",
"menu",
"=",
"menu",
"or",
"self",
".",
"menu",
"for",
"i",
"in",
"menu",
":",
"if",
"i",
".",
"name",
"==",
"name",
":",
"return",
"i",
"else",
":",
"if",
"i",
".",
"childs",
":",
"ret_item",
"=",
"self",
".",
"find",
"(",
"name",
",",
"menu",
"=",
"i",
".",
"childs",
")",
"if",
"ret_item",
":",
"return",
"ret_item"
] | 28.1875 | 12.4375 |
def emails_parse(emails_dict):
    """
    Parse the output of ``SESConnection.list_verified_emails()`` and get
    a list of emails.

    :param emails_dict: raw response dictionary from the SES API.
    :returns: sorted list of verified e-mail addresses.
    """
    response = emails_dict['ListVerifiedEmailAddressesResponse']
    result = response['ListVerifiedEmailAddressesResult']
    return sorted(result['VerifiedEmailAddresses'])
"def",
"emails_parse",
"(",
"emails_dict",
")",
":",
"result",
"=",
"emails_dict",
"[",
"'ListVerifiedEmailAddressesResponse'",
"]",
"[",
"'ListVerifiedEmailAddressesResult'",
"]",
"emails",
"=",
"[",
"email",
"for",
"email",
"in",
"result",
"[",
"'VerifiedEmailAddresses'",
"]",
"]",
"return",
"sorted",
"(",
"emails",
")"
] | 33.4 | 16.8 |
def generate_validation_function(self, uri, name):
    """
    Generate validation function for given uri with given name
    """
    self._validation_functions_done.add(uri)
    self.l('')
    resolver = self._resolver
    with resolver.resolving(uri) as schema:
        with self.l('def {}(data):', name):
            self.generate_func_code_block(schema, 'data', 'data', clear_variables=True)
            self.l('return data')
"def",
"generate_validation_function",
"(",
"self",
",",
"uri",
",",
"name",
")",
":",
"self",
".",
"_validation_functions_done",
".",
"add",
"(",
"uri",
")",
"self",
".",
"l",
"(",
"''",
")",
"with",
"self",
".",
"_resolver",
".",
"resolving",
"(",
"uri",
")",
"as",
"definition",
":",
"with",
"self",
".",
"l",
"(",
"'def {}(data):'",
",",
"name",
")",
":",
"self",
".",
"generate_func_code_block",
"(",
"definition",
",",
"'data'",
",",
"'data'",
",",
"clear_variables",
"=",
"True",
")",
"self",
".",
"l",
"(",
"'return data'",
")"
] | 44 | 14.8 |
def apply_trapping(self, outlets):
    """
    Apply trapping based on algorithm described by Y. Masson [1].

    It is applied as a post-process and runs the percolation algorithm in
    reverse assessing the occupancy of pore neighbors. Consider the
    following scenario when running standard IP without trapping,
    3 situations can happen after each invasion step:

    * The number of defending clusters stays the same and clusters can
      shrink
    * A cluster of size one is suppressed
    * A cluster is split into multiple clusters

    In reverse the following opposite situations can happen:

    * The number of defending clusters stays the same and clusters can
      grow
    * A cluster of size one is created
    * Multiple clusters merge into one cluster

    With trapping the reversed rules are adjusted so that:

    * Only clusters that do not connect to a sink can grow and merge.
    * At the point that a neighbor connected to a sink is touched the
      trapped cluster stops growing as this is the point of trapping in
      forward invasion time.

    Logger info displays the invasion sequence and pore index and a message
    with condition number based on the modified trapping rules and the
    assignment of the pore to a given cluster.

    Initially all invaded pores are given cluster label -1
    Outlets / Sinks are given -2
    New clusters that grow into fully trapped clusters are either
    identified at the point of breakthrough or grow from nothing if the
    full invasion sequence is run, they are assigned numbers from 0 up.

    Ref:
    [1] Masson, Y., 2016. A fast two-step algorithm for invasion
    percolation with trapping. Computers & Geosciences, 90, pp.41-48

    Parameters
    ----------
    outlets : list or array of pore indices for defending fluid to escape
        through

    Returns
    -------
    Creates a throat array called 'pore.clusters' in the Algorithm
    dictionary. Any positive number is a trapped cluster
    Also creates 2 boolean arrays Np and Nt long called '<element>.trapped'
    """
    # First see if network is fully invaded
    net = self.project.network
    invaded_ps = self['pore.invasion_sequence'] > -1
    # NOTE: np.all returns a numpy bool scalar, for which `~` acts as a
    # logical NOT (unlike a plain Python bool)
    if ~np.all(invaded_ps):
        # Put defending phase into clusters
        clusters = net.find_clusters2(~invaded_ps)
        # Identify clusters that are connected to an outlet and set to -2
        # -1 is the invaded fluid
        # -2 is the defender fluid able to escape
        # All others now trapped clusters which grow as invasion is reversed
        out_clusters = sp.unique(clusters[outlets])
        for c in out_clusters:
            if c >= 0:
                clusters[clusters == c] = -2
    else:
        # Fully invaded: go from end; every pore starts as invaded (-1)
        clusters = np.ones(net.Np, dtype=int)*-1
        clusters[outlets] = -2
    # Turn into a list for indexing: rows of (invasion sequence, pore index)
    inv_seq = np.vstack((self['pore.invasion_sequence'].astype(int),
                         np.arange(0, net.Np, dtype=int))).T
    # Reverse sort list (latest invasion first)
    inv_seq = inv_seq[inv_seq[:, 0].argsort()][::-1]
    next_cluster_num = np.max(clusters)+1
    # For all the steps after the inlets are set up to break-through
    # Reverse the sequence and assess the neighbors cluster state
    stopped_clusters = np.zeros(net.Np, dtype=bool)
    all_neighbors = net.find_neighbor_pores(net.pores(), flatten=False,
                                            include_input=True)
    for un_seq, pore in inv_seq:
        if pore not in outlets and un_seq > 0:  # Skip inlets and outlets
            nc = clusters[all_neighbors[pore]]  # Neighboring clusters
            unique_ns = np.unique(nc[nc != -1])  # Unique Neighbors
            seq_pore = "S:"+str(un_seq)+" P:"+str(pore)
            if np.all(nc == -1):
                # This is the start of a new trapped cluster
                clusters[pore] = next_cluster_num
                next_cluster_num += 1
                msg = (seq_pore+" C:1 new cluster number: " +
                       str(clusters[pore]))
                logger.info(msg)
            elif len(unique_ns) == 1:
                # Grow the only connected neighboring cluster
                if not stopped_clusters[unique_ns[0]]:
                    clusters[pore] = unique_ns[0]
                    msg = (seq_pore+" C:2 joins cluster number: " +
                           str(clusters[pore]))
                    logger.info(msg)
                else:
                    clusters[pore] = -2
            elif -2 in unique_ns:
                # We have reached a sink neighbor, stop growing cluster
                msg = (seq_pore+" C:3 joins sink cluster")
                logger.info(msg)
                clusters[pore] = -2
                # Stop growth and merging
                stopped_clusters[unique_ns[unique_ns > -1]] = True
            else:
                # We might be able to do some merging
                # Check if any stopped clusters are neighbors
                if np.any(stopped_clusters[unique_ns]):
                    msg = (seq_pore+" C:4 joins sink cluster")
                    logger.info(msg)
                    clusters[pore] = -2
                    # Stop growing all neighboring clusters
                    stopped_clusters[unique_ns] = True
                else:
                    # Merge multiple un-stopped trapped clusters
                    new_num = unique_ns[0]
                    clusters[pore] = new_num
                    for c in unique_ns:
                        clusters[clusters == c] = new_num
                        msg = (seq_pore + " C:5 merge clusters: " +
                               str(c) + " into "+str(new_num))
                        logger.info(msg)
    # And now return clusters
    self['pore.clusters'] = clusters
    logger.info("Number of trapped clusters" +
                str(np.sum(np.unique(clusters) >= 0)))
    self['pore.trapped'] = self['pore.clusters'] > -1
    trapped_ts = net.find_neighbor_throats(self['pore.trapped'])
    self['throat.trapped'] = np.zeros([net.Nt], dtype=bool)
    self['throat.trapped'][trapped_ts] = True
    # Trapped elements are removed from the invasion sequence entirely
    self['pore.invasion_sequence'][self['pore.trapped']] = -1
    self['throat.invasion_sequence'][self['throat.trapped']] = -1
"def",
"apply_trapping",
"(",
"self",
",",
"outlets",
")",
":",
"# First see if network is fully invaded",
"net",
"=",
"self",
".",
"project",
".",
"network",
"invaded_ps",
"=",
"self",
"[",
"'pore.invasion_sequence'",
"]",
">",
"-",
"1",
"if",
"~",
"np",
".",
"all",
"(",
"invaded_ps",
")",
":",
"# Put defending phase into clusters",
"clusters",
"=",
"net",
".",
"find_clusters2",
"(",
"~",
"invaded_ps",
")",
"# Identify clusters that are connected to an outlet and set to -2",
"# -1 is the invaded fluid",
"# -2 is the defender fluid able to escape",
"# All others now trapped clusters which grow as invasion is reversed",
"out_clusters",
"=",
"sp",
".",
"unique",
"(",
"clusters",
"[",
"outlets",
"]",
")",
"for",
"c",
"in",
"out_clusters",
":",
"if",
"c",
">=",
"0",
":",
"clusters",
"[",
"clusters",
"==",
"c",
"]",
"=",
"-",
"2",
"else",
":",
"# Go from end",
"clusters",
"=",
"np",
".",
"ones",
"(",
"net",
".",
"Np",
",",
"dtype",
"=",
"int",
")",
"*",
"-",
"1",
"clusters",
"[",
"outlets",
"]",
"=",
"-",
"2",
"# Turn into a list for indexing",
"inv_seq",
"=",
"np",
".",
"vstack",
"(",
"(",
"self",
"[",
"'pore.invasion_sequence'",
"]",
".",
"astype",
"(",
"int",
")",
",",
"np",
".",
"arange",
"(",
"0",
",",
"net",
".",
"Np",
",",
"dtype",
"=",
"int",
")",
")",
")",
".",
"T",
"# Reverse sort list",
"inv_seq",
"=",
"inv_seq",
"[",
"inv_seq",
"[",
":",
",",
"0",
"]",
".",
"argsort",
"(",
")",
"]",
"[",
":",
":",
"-",
"1",
"]",
"next_cluster_num",
"=",
"np",
".",
"max",
"(",
"clusters",
")",
"+",
"1",
"# For all the steps after the inlets are set up to break-through",
"# Reverse the sequence and assess the neighbors cluster state",
"stopped_clusters",
"=",
"np",
".",
"zeros",
"(",
"net",
".",
"Np",
",",
"dtype",
"=",
"bool",
")",
"all_neighbors",
"=",
"net",
".",
"find_neighbor_pores",
"(",
"net",
".",
"pores",
"(",
")",
",",
"flatten",
"=",
"False",
",",
"include_input",
"=",
"True",
")",
"for",
"un_seq",
",",
"pore",
"in",
"inv_seq",
":",
"if",
"pore",
"not",
"in",
"outlets",
"and",
"un_seq",
">",
"0",
":",
"# Skip inlets and outlets",
"nc",
"=",
"clusters",
"[",
"all_neighbors",
"[",
"pore",
"]",
"]",
"# Neighboring clusters",
"unique_ns",
"=",
"np",
".",
"unique",
"(",
"nc",
"[",
"nc",
"!=",
"-",
"1",
"]",
")",
"# Unique Neighbors",
"seq_pore",
"=",
"\"S:\"",
"+",
"str",
"(",
"un_seq",
")",
"+",
"\" P:\"",
"+",
"str",
"(",
"pore",
")",
"if",
"np",
".",
"all",
"(",
"nc",
"==",
"-",
"1",
")",
":",
"# This is the start of a new trapped cluster",
"clusters",
"[",
"pore",
"]",
"=",
"next_cluster_num",
"next_cluster_num",
"+=",
"1",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:1 new cluster number: \"",
"+",
"str",
"(",
"clusters",
"[",
"pore",
"]",
")",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"elif",
"len",
"(",
"unique_ns",
")",
"==",
"1",
":",
"# Grow the only connected neighboring cluster",
"if",
"not",
"stopped_clusters",
"[",
"unique_ns",
"[",
"0",
"]",
"]",
":",
"clusters",
"[",
"pore",
"]",
"=",
"unique_ns",
"[",
"0",
"]",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:2 joins cluster number: \"",
"+",
"str",
"(",
"clusters",
"[",
"pore",
"]",
")",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"else",
":",
"clusters",
"[",
"pore",
"]",
"=",
"-",
"2",
"elif",
"-",
"2",
"in",
"unique_ns",
":",
"# We have reached a sink neighbor, stop growing cluster",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:3 joins sink cluster\"",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"clusters",
"[",
"pore",
"]",
"=",
"-",
"2",
"# Stop growth and merging",
"stopped_clusters",
"[",
"unique_ns",
"[",
"unique_ns",
">",
"-",
"1",
"]",
"]",
"=",
"True",
"else",
":",
"# We might be able to do some merging",
"# Check if any stopped clusters are neighbors",
"if",
"np",
".",
"any",
"(",
"stopped_clusters",
"[",
"unique_ns",
"]",
")",
":",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:4 joins sink cluster\"",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"clusters",
"[",
"pore",
"]",
"=",
"-",
"2",
"# Stop growing all neighboring clusters",
"stopped_clusters",
"[",
"unique_ns",
"]",
"=",
"True",
"else",
":",
"# Merge multiple un-stopped trapped clusters",
"new_num",
"=",
"unique_ns",
"[",
"0",
"]",
"clusters",
"[",
"pore",
"]",
"=",
"new_num",
"for",
"c",
"in",
"unique_ns",
":",
"clusters",
"[",
"clusters",
"==",
"c",
"]",
"=",
"new_num",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:5 merge clusters: \"",
"+",
"str",
"(",
"c",
")",
"+",
"\" into \"",
"+",
"str",
"(",
"new_num",
")",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"# And now return clusters",
"self",
"[",
"'pore.clusters'",
"]",
"=",
"clusters",
"logger",
".",
"info",
"(",
"\"Number of trapped clusters\"",
"+",
"str",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"unique",
"(",
"clusters",
")",
">=",
"0",
")",
")",
")",
"self",
"[",
"'pore.trapped'",
"]",
"=",
"self",
"[",
"'pore.clusters'",
"]",
">",
"-",
"1",
"trapped_ts",
"=",
"net",
".",
"find_neighbor_throats",
"(",
"self",
"[",
"'pore.trapped'",
"]",
")",
"self",
"[",
"'throat.trapped'",
"]",
"=",
"np",
".",
"zeros",
"(",
"[",
"net",
".",
"Nt",
"]",
",",
"dtype",
"=",
"bool",
")",
"self",
"[",
"'throat.trapped'",
"]",
"[",
"trapped_ts",
"]",
"=",
"True",
"self",
"[",
"'pore.invasion_sequence'",
"]",
"[",
"self",
"[",
"'pore.trapped'",
"]",
"]",
"=",
"-",
"1",
"self",
"[",
"'throat.invasion_sequence'",
"]",
"[",
"self",
"[",
"'throat.trapped'",
"]",
"]",
"=",
"-",
"1"
] | 49.014815 | 19.888889 |
def write(self, filename=None):
    """Write the template out as an XML file.

    Refreshes the current date, enables the rubber band, and updates
    the start position, well positions, and counts before serializing
    ``self.root`` with lxml.

    Parameters
    ----------
    filename : str
        Destination path; defaults to ``self.filename`` when omitted.
    """
    target = filename if filename else self.filename
    # Refresh bookkeeping fields prior to serialization.
    self.properties.CurrentDate = _current_time()
    self.properties.EnableRubberBand = 'true'
    self.update_start_position()
    self.update_well_positions()
    self.update_counts()
    # Strip py:pytype annotations, then drop namespaces lxml added.
    objectify.deannotate(self.root)
    for node in self.root.iterchildren():
        etree.cleanup_namespaces(node)
    serialized = etree.tostring(self.root, encoding='utf8',
                                xml_declaration=True, pretty_print=True)
    # Format quirks expected by the consuming software:
    # 1) CRLF line endings (decode each byte line from utf8 first).
    decoded = (line.decode(encoding='utf8')
               for line in serialized.splitlines())
    text = u'\r\n'.join(decoded)
    # 2) a space before self-closing tags: "end/>" --> "end />"
    text = re.sub(r'(["a-z])/>', r'\1 />', text)
    # 3) no encoding attribute in the XML declaration
    text = text.replace("version='1.0' encoding='utf8'", 'version="1.0"')
    with open(target, 'wb') as out:
        out.write(text.encode('utf8'))
"def",
"write",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"filename",
"# update time",
"self",
".",
"properties",
".",
"CurrentDate",
"=",
"_current_time",
"(",
")",
"# set rubber band to true",
"self",
".",
"properties",
".",
"EnableRubberBand",
"=",
"'true'",
"# update start position",
"self",
".",
"update_start_position",
"(",
")",
"# update well postions",
"self",
".",
"update_well_positions",
"(",
")",
"# update counts",
"self",
".",
"update_counts",
"(",
")",
"# remove py:pytype attributes",
"objectify",
".",
"deannotate",
"(",
"self",
".",
"root",
")",
"# remove namespaces added by lxml",
"for",
"child",
"in",
"self",
".",
"root",
".",
"iterchildren",
"(",
")",
":",
"etree",
".",
"cleanup_namespaces",
"(",
"child",
")",
"xml",
"=",
"etree",
".",
"tostring",
"(",
"self",
".",
"root",
",",
"encoding",
"=",
"'utf8'",
",",
"xml_declaration",
"=",
"True",
",",
"pretty_print",
"=",
"True",
")",
"# fix format quirks",
"# add carriage return character",
"xml",
"=",
"u'\\r\\n'",
".",
"join",
"(",
"l",
".",
"decode",
"(",
"encoding",
"=",
"'utf8'",
")",
"for",
"l",
"in",
"xml",
".",
"splitlines",
"(",
")",
")",
"# add space at \"end/>\" --> \"end />\"",
"xml",
"=",
"re",
".",
"sub",
"(",
"r'([\"a-z])/>'",
",",
"r'\\1 />'",
",",
"xml",
")",
"xml",
"=",
"xml",
".",
"replace",
"(",
"\"version='1.0' encoding='utf8'\"",
",",
"'version=\"1.0\"'",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"xml",
".",
"encode",
"(",
"'utf8'",
")",
")"
] | 30.586957 | 18.152174 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.