text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def miscellaneous_menu(self, value):
"""
Setter for **self.__miscellaneous_menu** attribute.
:param value: Attribute value.
:type value: QMenu
"""
if value is not None:
assert type(value) is QMenu, "'{0}' attribute: '{1}' type is not 'QMenu'!".format(
"miscellaneous_menu", value)
self.__miscellaneous_menu = value | [
"def",
"miscellaneous_menu",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"QMenu",
",",
"\"'{0}' attribute: '{1}' type is not 'QMenu'!\"",
".",
"format",
"(",
"\"miscellaneous_menu\"",
"... | 32.416667 | 15.75 |
def project(self, point_cloud, round_px=True):
"""Projects a point cloud onto the camera image plane.
Parameters
----------
point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point`
A PointCloud or Point to project onto the camera image plane.
round_px : bool
If True, projections are rounded to the nearest pixel.
Returns
-------
:obj:`autolab_core.ImageCoords` or :obj:`autolab_core.Point`
A corresponding set of image coordinates representing the given
PointCloud's projections onto the camera image plane. If the input
was a single Point, returns a 2D Point in the camera plane.
Raises
------
ValueError
If the input is not a PointCloud or Point in the same reference
frame as the camera.
"""
if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):
raise ValueError('Must provide PointCloud or 3D Point object for projection')
if point_cloud.frame != self._frame:
raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))
points_proj = self._K.dot(point_cloud.data)
if len(points_proj.shape) == 1:
points_proj = points_proj[:, np.newaxis]
point_depths = np.tile(points_proj[2,:], [3, 1])
points_proj = np.divide(points_proj, point_depths)
if round_px:
points_proj = np.round(points_proj)
if isinstance(point_cloud, Point):
return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)
return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame) | [
"def",
"project",
"(",
"self",
",",
"point_cloud",
",",
"round_px",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"point_cloud",
",",
"PointCloud",
")",
"and",
"not",
"(",
"isinstance",
"(",
"point_cloud",
",",
"Point",
")",
"and",
"point_cloud",
... | 44.575 | 27.45 |
def editMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photosets.editMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_dopost(method, auth=True, title=title, \
description=description, photoset_id=self.id)
self.__title = title
self.__description = description
return True | [
"def",
"editMeta",
"(",
"self",
",",
"title",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"method",
"=",
"'flickr.photosets.editMeta'",
"if",
"title",
"is",
"None",
":",
"title",
"=",
"self",
".",
"title",
"if",
"description",
"is",
"None",
"... | 33.2 | 14.066667 |
def csp_header(csp={}):
""" Decorator to include csp header on app.route wrapper """
_csp = csp_default().read()
_csp.update(csp)
_header = ''
if 'report-only' in _csp and _csp['report-only'] is True:
_header = 'Content-Security-Policy-Report-Only'
else:
_header = 'Content-Security-Policy'
if 'report-only' in _csp:
del _csp['report-only']
_headers = {_header: create_csp_header(_csp)}
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
h = resp.headers
for header, value in _headers.items():
h[header] = value
return resp
return decorated_function
return decorator | [
"def",
"csp_header",
"(",
"csp",
"=",
"{",
"}",
")",
":",
"_csp",
"=",
"csp_default",
"(",
")",
".",
"read",
"(",
")",
"_csp",
".",
"update",
"(",
"csp",
")",
"_header",
"=",
"''",
"if",
"'report-only'",
"in",
"_csp",
"and",
"_csp",
"[",
"'report-o... | 31.458333 | 15.541667 |
def _compute_value(power, wg):
"""Return the weight corresponding to single power."""
if power not in wg:
p1, p2 = power
# y power
if p1 == 0:
yy = wg[(0, -1)]
wg[power] = numpy.power(yy, p2 / 2).sum() / len(yy)
# x power
else:
xx = wg[(-1, 0)]
wg[power] = numpy.power(xx, p1 / 2).sum() / len(xx)
return wg[power] | [
"def",
"_compute_value",
"(",
"power",
",",
"wg",
")",
":",
"if",
"power",
"not",
"in",
"wg",
":",
"p1",
",",
"p2",
"=",
"power",
"# y power",
"if",
"p1",
"==",
"0",
":",
"yy",
"=",
"wg",
"[",
"(",
"0",
",",
"-",
"1",
")",
"]",
"wg",
"[",
"... | 30.846154 | 17.615385 |
def send(self, data):
"""
Send encoded instructions to Guacamole guacd server.
"""
self.logger.debug('Sending data: %s' % data)
self.client.sendall(data.encode()) | [
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Sending data: %s'",
"%",
"data",
")",
"self",
".",
"client",
".",
"sendall",
"(",
"data",
".",
"encode",
"(",
")",
")"
] | 32.833333 | 8.833333 |
def _dist_kw_arg(self, k):
"""
Returns a dictionary of keyword arguments
for the k'th distribution.
:param int k: Index of the distribution in question.
:rtype: ``dict``
"""
if self._dist_kw_args is not None:
return {
key:self._dist_kw_args[key][k,:]
for key in self._dist_kw_args.keys()
}
else:
return {} | [
"def",
"_dist_kw_arg",
"(",
"self",
",",
"k",
")",
":",
"if",
"self",
".",
"_dist_kw_args",
"is",
"not",
"None",
":",
"return",
"{",
"key",
":",
"self",
".",
"_dist_kw_args",
"[",
"key",
"]",
"[",
"k",
",",
":",
"]",
"for",
"key",
"in",
"self",
"... | 28.266667 | 14.666667 |
def upgradeUserInfo1to2(oldUserInfo):
"""
Concatenate the I{firstName} and I{lastName} attributes from the old user
info item and set the result as the I{realName} attribute of the upgraded
item.
"""
newUserInfo = oldUserInfo.upgradeVersion(
UserInfo.typeName, 1, 2,
realName=oldUserInfo.firstName + u" " + oldUserInfo.lastName)
return newUserInfo | [
"def",
"upgradeUserInfo1to2",
"(",
"oldUserInfo",
")",
":",
"newUserInfo",
"=",
"oldUserInfo",
".",
"upgradeVersion",
"(",
"UserInfo",
".",
"typeName",
",",
"1",
",",
"2",
",",
"realName",
"=",
"oldUserInfo",
".",
"firstName",
"+",
"u\" \"",
"+",
"oldUserInfo"... | 38.2 | 16.8 |
def epoch_rates_to_pmf(problems, epoch_rates=None):
"""Create a probability-mass-function based on relative epoch rates.
if epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems)
i.e. it takes each problem the same time to go through one epoch.
If epoch_rates is given, then these are the relative numbers of epochs
of each problem to go through in a given amount of time.
Each must have problem.num_training_examples implemented.
Args:
problems: a list of Problem instances.
epoch_rates: an optional list of float
Returns:
a list of floating point values.
"""
if epoch_rates is None:
epoch_rates = [1.0] * len(problems)
example_rates = [epoch_rate * p.num_training_examples
for p, epoch_rate in zip(problems, epoch_rates)]
return example_rates_to_pmf(example_rates) | [
"def",
"epoch_rates_to_pmf",
"(",
"problems",
",",
"epoch_rates",
"=",
"None",
")",
":",
"if",
"epoch_rates",
"is",
"None",
":",
"epoch_rates",
"=",
"[",
"1.0",
"]",
"*",
"len",
"(",
"problems",
")",
"example_rates",
"=",
"[",
"epoch_rate",
"*",
"p",
"."... | 35.869565 | 20.695652 |
def render_heading(self, token):
"""
Overrides super().render_heading; stores rendered heading first,
then returns it.
"""
rendered = super().render_heading(token)
content = self.parse_rendered_heading(rendered)
if not (self.omit_title and token.level == 1
or token.level > self.depth
or any(cond(content) for cond in self.filter_conds)):
self._headings.append((token.level, content))
return rendered | [
"def",
"render_heading",
"(",
"self",
",",
"token",
")",
":",
"rendered",
"=",
"super",
"(",
")",
".",
"render_heading",
"(",
"token",
")",
"content",
"=",
"self",
".",
"parse_rendered_heading",
"(",
"rendered",
")",
"if",
"not",
"(",
"self",
".",
"omit_... | 41.416667 | 13.083333 |
def _validate_cmds(self):
"""
确保 cmd 没有重复
:return:
"""
cmd_list = list(self.rule_map.keys())
for bp in self.blueprints:
cmd_list.extend(bp.rule_map.keys())
duplicate_cmds = (Counter(cmd_list) - Counter(set(cmd_list))).keys()
assert not duplicate_cmds, 'duplicate cmds: %s' % duplicate_cmds | [
"def",
"_validate_cmds",
"(",
"self",
")",
":",
"cmd_list",
"=",
"list",
"(",
"self",
".",
"rule_map",
".",
"keys",
"(",
")",
")",
"for",
"bp",
"in",
"self",
".",
"blueprints",
":",
"cmd_list",
".",
"extend",
"(",
"bp",
".",
"rule_map",
".",
"keys",
... | 25.428571 | 21.857143 |
def setup(self):
"""
NSCF calculations should use the same FFT mesh as the one employed in the GS task
(in principle, it's possible to interpolate inside Abinit but tests revealed some numerical noise
Here we change the input file of the NSCF task to have the same FFT mesh.
"""
for dep in self.deps:
if "DEN" in dep.exts:
parent_task = dep.node
break
else:
raise RuntimeError("Cannot find parent node producing DEN file")
with parent_task.open_gsr() as gsr:
den_mesh = 3 * [None]
den_mesh[0] = gsr.reader.read_dimvalue("number_of_grid_points_vector1")
den_mesh[1] = gsr.reader.read_dimvalue("number_of_grid_points_vector2")
den_mesh[2] = gsr.reader.read_dimvalue("number_of_grid_points_vector3")
if self.ispaw:
self.set_vars(ngfftdg=den_mesh)
else:
self.set_vars(ngfft=den_mesh)
super().setup() | [
"def",
"setup",
"(",
"self",
")",
":",
"for",
"dep",
"in",
"self",
".",
"deps",
":",
"if",
"\"DEN\"",
"in",
"dep",
".",
"exts",
":",
"parent_task",
"=",
"dep",
".",
"node",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot find parent node pr... | 41.916667 | 23.583333 |
def stop(self):
"""
Stop the config change monitoring thread.
"""
self.observer_thread.stop()
self.observer_thread.join()
logging.info("Configfile watcher plugin: Stopped") | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"observer_thread",
".",
"stop",
"(",
")",
"self",
".",
"observer_thread",
".",
"join",
"(",
")",
"logging",
".",
"info",
"(",
"\"Configfile watcher plugin: Stopped\"",
")"
] | 26.75 | 12.75 |
def rotation_matrix(axis, theta):
"""The Euler–Rodrigues formula.
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
Parameters
----------
axis: vector to rotate around
theta: rotation angle, in rad
"""
axis = np.asarray(axis)
axis = axis / np.linalg.norm(axis)
a = np.cos(theta / 2)
b, c, d = -axis * np.sin(theta / 2)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([
[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],
]) | [
"def",
"rotation_matrix",
"(",
"axis",
",",
"theta",
")",
":",
"axis",
"=",
"np",
".",
"asarray",
"(",
"axis",
")",
"axis",
"=",
"axis",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"axis",
")",
"a",
"=",
"np",
".",
"cos",
"(",
"theta",
"/",
"2"... | 33.136364 | 16.772727 |
def flatten(d, key_as_tuple=True, sep='.', list_of_dicts=None, all_iters=None):
""" get nested dict as flat {key:val,...},
where key is tuple/string of all nested keys
Parameters
----------
d : object
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
sep : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
all_iters: str or None
if not None, flatten all lists and tuples using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"}, 2:{"b":"B"}}
>>> pprint(flatten(d))
{(1, 'a'): 'A', (2, 'b'): 'B'}
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> pprint(flatten(d,key_as_tuple=False))
{'1.a': 'A', '2.b': 'B'}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,list_of_dicts='__list__'))
{('__list__0', 'a'): 1, ('__list__1', 'b'): [1, 2]}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,all_iters='__iter__'))
{('__iter__0', 'a'): 1,
('__iter__1', 'b', '__iter__0'): 1,
('__iter__1', 'b', '__iter__1'): 2}
"""
def expand(key, value):
if is_dict_like(value):
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
elif is_iter_non_string(value) and all_iters is not None:
value = {'{0}{1}'.format(all_iters, i): v
for i, v in enumerate(value)}
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
elif is_list_of_dict_like(value) and list_of_dicts is not None:
value = {'{0}{1}'.format(list_of_dicts, i): v
for i, v in enumerate(value)}
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
else:
return [(key, value)]
if is_iter_non_string(d) and all_iters is not None:
d = {'{0}{1}'.format(all_iters, i): v for i, v in enumerate(d)}
elif is_list_of_dict_like(d) and list_of_dicts is not None:
d = {'{0}{1}'.format(list_of_dicts, i): v for i, v in enumerate(d)}
elif not is_dict_like(d):
raise TypeError('d is not dict like: {}'.format(d))
if key_as_tuple:
items = [item for k, v in d.items() for item in expand((k,), v)]
else:
items = [item for k, v in d.items() for item in expand(k, v)]
return dict(items) | [
"def",
"flatten",
"(",
"d",
",",
"key_as_tuple",
"=",
"True",
",",
"sep",
"=",
"'.'",
",",
"list_of_dicts",
"=",
"None",
",",
"all_iters",
"=",
"None",
")",
":",
"def",
"expand",
"(",
"key",
",",
"value",
")",
":",
"if",
"is_dict_like",
"(",
"value",... | 36.906977 | 20.290698 |
def doMove(self, from_path, to_path, overwrite = False, bShareFireCopy = 'false', dummy = 56147):
"""Move a file.
>>> nd.doMove('/Picture/flower.png', '/flower.png')
:param from_path: The path to the file or folder to be moved.
:param to_path: The destination path of the file or folder to be copied. File name should be included in the end of to_path.
:param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.)
:param bShareFireCopy: ???
:return: ``True`` if success to move a file or ``False``.
"""
if overwrite:
overwrite = 'F'
else:
overwrite = 'T'
data = {'orgresource': from_path,
'dstresource': to_path,
'overwrite': overwrite,
'bShareFireCopy': bShareFireCopy,
'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
}
s, metadata = self.POST('doMove', data)
return s | [
"def",
"doMove",
"(",
"self",
",",
"from_path",
",",
"to_path",
",",
"overwrite",
"=",
"False",
",",
"bShareFireCopy",
"=",
"'false'",
",",
"dummy",
"=",
"56147",
")",
":",
"if",
"overwrite",
":",
"overwrite",
"=",
"'F'",
"else",
":",
"overwrite",
"=",
... | 35.965517 | 23.551724 |
def stream_reader_statements(stream_arn):
"""Returns statements to allow Lambda to read from a stream.
Handles both DynamoDB & Kinesis streams. Automatically figures out the
type of stream, and provides the correct actions from the supplied Arn.
Arg:
stream_arn (str): A kinesis or dynamodb stream arn.
Returns:
list: A list of statements.
"""
action_type = get_stream_action_type(stream_arn)
arn_parts = stream_arn.split("/")
# Cut off the last bit and replace it with a wildcard
wildcard_arn_parts = arn_parts[:-1]
wildcard_arn_parts.append("*")
wildcard_arn = "/".join(wildcard_arn_parts)
return [
Statement(
Effect=Allow,
Resource=[stream_arn],
Action=[
action_type("DescribeStream"),
action_type("GetRecords"),
action_type("GetShardIterator"),
]
),
Statement(
Effect=Allow,
Resource=[wildcard_arn],
Action=[action_type("ListStreams")]
)
] | [
"def",
"stream_reader_statements",
"(",
"stream_arn",
")",
":",
"action_type",
"=",
"get_stream_action_type",
"(",
"stream_arn",
")",
"arn_parts",
"=",
"stream_arn",
".",
"split",
"(",
"\"/\"",
")",
"# Cut off the last bit and replace it with a wildcard",
"wildcard_arn_part... | 29.971429 | 18.228571 |
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0) | [
"def",
"word_tokenize",
"(",
"sentence",
")",
":",
"date_pattern",
"=",
"r'\\d\\d(\\d\\d)?[\\\\-]\\d\\d[\\\\-]\\d\\d(\\d\\d)?'",
"number_pattern",
"=",
"r'[\\+-]?(\\d+\\.\\d+|\\d{1,3},(\\d{3},)*\\d{3}|\\d+)'",
"arr_pattern",
"=",
"r'(?: \\w\\.){2,3}|(?:\\A|\\s)(?:\\w\\.){2,3}|[A-Z]\\. [a... | 47.8 | 26 |
def auto_build(self):
"""Auto built tool
"""
options = [
"-a",
"--autobuild"
]
if len(self.args) >= 3 and self.args[0] in options:
AutoBuild(self.args[1], self.args[2:], self.meta.path).run()
else:
usage("") | [
"def",
"auto_build",
"(",
"self",
")",
":",
"options",
"=",
"[",
"\"-a\"",
",",
"\"--autobuild\"",
"]",
"if",
"len",
"(",
"self",
".",
"args",
")",
">=",
"3",
"and",
"self",
".",
"args",
"[",
"0",
"]",
"in",
"options",
":",
"AutoBuild",
"(",
"self"... | 26.636364 | 18.727273 |
def snapshot(name, suffix=None, connection=None, username=None, password=None):
'''
Takes a snapshot of a particular VM or by a UNIX-style wildcard.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.snapshot:
- suffix: periodic
domain*:
virt.snapshot:
- suffix: periodic
'''
return _virt_call(name, 'snapshot', 'saved', 'Snapshot has been taken', suffix=suffix,
connection=connection, username=username, password=password) | [
"def",
"snapshot",
"(",
"name",
",",
"suffix",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"return",
"_virt_call",
"(",
"name",
",",
"'snapshot'",
",",
"'saved'",
",",
"'Snapshot has ... | 28.655172 | 27.689655 |
def make_d2p_id(self):
"""
Make an association id for phenotypic associations with disease
that is defined by:
source of association + disease + relationship + phenotype
+ onset + frequency
:return:
"""
attributes = [self.onset, self.frequency]
assoc_id = self.make_association_id(
self.definedby, self.disease_id, self.rel, self.phenotype_id, attributes)
return assoc_id | [
"def",
"make_d2p_id",
"(",
"self",
")",
":",
"attributes",
"=",
"[",
"self",
".",
"onset",
",",
"self",
".",
"frequency",
"]",
"assoc_id",
"=",
"self",
".",
"make_association_id",
"(",
"self",
".",
"definedby",
",",
"self",
".",
"disease_id",
",",
"self"... | 28.25 | 22.5 |
def enter_maintenance_mode(self):
"""
Put the service in maintenance mode.
@return: Reference to the completed command.
@since: API v2
"""
cmd = self._cmd('enterMaintenanceMode')
if cmd.success:
self._update(_get_service(self._get_resource_root(), self._path()))
return cmd | [
"def",
"enter_maintenance_mode",
"(",
"self",
")",
":",
"cmd",
"=",
"self",
".",
"_cmd",
"(",
"'enterMaintenanceMode'",
")",
"if",
"cmd",
".",
"success",
":",
"self",
".",
"_update",
"(",
"_get_service",
"(",
"self",
".",
"_get_resource_root",
"(",
")",
",... | 27.454545 | 14.545455 |
def oridam_generate_patterns(word_in,cm,ed=1,level=0,pos=0,candidates=None):
""" ed = 1 by default, pos - internal variable for algorithm """
alternates = cm.get(word_in[pos],[])
if not candidates:
candidates = []
assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
if (pos >len(word_in)) or ed == 0:
return candidates
pfx = ''
sfx = ''
curr_candidates = []
for p in range(0,pos):
pfx = pfx + word_in[p]
for p in range(pos+1,len(word_in)):
sfx = sfx + word_in[p]
for alt in alternates:
word_alt = pfx + alt + sfx
if not (word_alt in candidates):
candidates.append( word_alt )
curr_candidates.append( word_alt )
for n_pos in range(pos,len(word_in)):
# already what we have ' candidates ' of this round are edit-distance 1
for word in curr_candidates:
oridam_generate_patterns(word,cm,ed-1,level+1,n_pos,candidates)
if level == 0:
#candidates.append(word_in)
for n_pos in range(pos,len(word_in)):
oridam_generate_patterns(word_in,cm,ed, level+1,n_pos,candidates)
return candidates | [
"def",
"oridam_generate_patterns",
"(",
"word_in",
",",
"cm",
",",
"ed",
"=",
"1",
",",
"level",
"=",
"0",
",",
"pos",
"=",
"0",
",",
"candidates",
"=",
"None",
")",
":",
"alternates",
"=",
"cm",
".",
"get",
"(",
"word_in",
"[",
"pos",
"]",
",",
... | 40.758621 | 15.586207 |
def watch(directory=None, auto_clear=False, extensions=[]):
"""Starts a server to render the specified file or directory containing a README."""
if directory and not os.path.isdir(directory):
raise ValueError('Directory not found: ' + directory)
directory = os.path.abspath(directory)
# Initial run
event_handler = ChangeHandler(directory, auto_clear, extensions)
event_handler.run()
# Setup watchdog
observer = Observer()
observer.schedule(event_handler, path=directory, recursive=True)
observer.start()
# Watch and run tests until interrupted by user
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join() | [
"def",
"watch",
"(",
"directory",
"=",
"None",
",",
"auto_clear",
"=",
"False",
",",
"extensions",
"=",
"[",
"]",
")",
":",
"if",
"directory",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"raise",
"ValueError",
"(",
"'Di... | 32.5 | 20.5 |
def get(self, request):
"""
Called after the user is redirected back to our application.
Tries to:
- Complete the OAuth / OAuth2 flow
- Redirect the user to another view that deals with login, connecting
or user creation.
"""
try:
client = request.session[self.get_client().get_session_key()]
logger.debug("API returned: %s", request.GET)
client.complete(dict(request.GET.items()))
request.session[self.get_client().get_session_key()] = client
return HttpResponseRedirect(self.get_redirect())
except KeyError:
return self.error_to_response(request, {'error': "Session expired."})
except OAuthError, error:
return self.error_to_response(request, {'error': error})
except socket.timeout:
return self.error_to_response(request, {'error':
_('Could not connect to service (timed out)')}) | [
"def",
"get",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"client",
"=",
"request",
".",
"session",
"[",
"self",
".",
"get_client",
"(",
")",
".",
"get_session_key",
"(",
")",
"]",
"logger",
".",
"debug",
"(",
"\"API returned: %s\"",
",",
"reque... | 42.521739 | 21.913043 |
def _connected(self, transport, conn):
"""Login and sync the ElkM1 panel to memory."""
LOG.info("Connected to ElkM1")
self._conn = conn
self._transport = transport
self._connection_retry_timer = 1
if url_scheme_is_secure(self._config['url']):
self._conn.write_data(self._config['userid'], raw=True)
self._conn.write_data(self._config['password'], raw=True)
self.call_sync_handlers()
if not self._config['url'].startswith('serial://'):
self._heartbeat = self.loop.call_later(120, self._reset_connection) | [
"def",
"_connected",
"(",
"self",
",",
"transport",
",",
"conn",
")",
":",
"LOG",
".",
"info",
"(",
"\"Connected to ElkM1\"",
")",
"self",
".",
"_conn",
"=",
"conn",
"self",
".",
"_transport",
"=",
"transport",
"self",
".",
"_connection_retry_timer",
"=",
... | 49.25 | 13.166667 |
def parse(self, data):
"""
Converts a OpenVPN JSON to a NetworkX Graph object
which is then returned.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
server = self._server_common_name
# add server (central node) to graph
graph.add_node(server)
# data may be empty
if data is None:
clients = []
links = []
else:
clients = data.client_list.values()
links = data.routing_table.values()
# add clients in graph as nodes
for client in clients:
if client.common_name == 'UNDEF':
continue
client_properties = {
'label': client.common_name,
'real_address': str(client.real_address.host),
'port': int(client.real_address.port),
'connected_since': client.connected_since.strftime('%Y-%m-%dT%H:%M:%SZ'),
'bytes_received': int(client.bytes_received),
'bytes_sent': int(client.bytes_sent)
}
local_addresses = [
str(route.virtual_address)
for route in data.routing_table.values()
if route.real_address == client.real_address
]
if local_addresses:
client_properties['local_addresses'] = local_addresses
graph.add_node(str(client.real_address.host), **client_properties)
# add links in routing table to graph
for link in links:
if link.common_name == 'UNDEF':
continue
graph.add_edge(server, str(link.real_address.host), weight=1)
return graph | [
"def",
"parse",
"(",
"self",
",",
"data",
")",
":",
"# initialize graph and list of aggregated nodes",
"graph",
"=",
"self",
".",
"_init_graph",
"(",
")",
"server",
"=",
"self",
".",
"_server_common_name",
"# add server (central node) to graph",
"graph",
".",
"add_nod... | 39.511628 | 14.302326 |
def run(addr, *commands, **kwargs):
"""
Non-threaded batch command runner returning output results
"""
results = []
handler = VarnishHandler(addr, **kwargs)
for cmd in commands:
if isinstance(cmd, tuple) and len(cmd)>1:
results.extend([getattr(handler, c[0].replace('.','_'))(*c[1:]) for c in cmd])
else:
results.append(getattr(handler, cmd.replace('.','_'))(*commands[1:]))
break
handler.close()
return results | [
"def",
"run",
"(",
"addr",
",",
"*",
"commands",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"[",
"]",
"handler",
"=",
"VarnishHandler",
"(",
"addr",
",",
"*",
"*",
"kwargs",
")",
"for",
"cmd",
"in",
"commands",
":",
"if",
"isinstance",
"(",... | 34.428571 | 18.857143 |
def times(self, multiplier):
'''
Given an FSM and a multiplier, return the multiplied FSM.
'''
if multiplier < 0:
raise Exception("Can't multiply an FSM by " + repr(multiplier))
alphabet = self.alphabet
# metastate is a set of iterations+states
initial = {(self.initial, 0)}
def final(state):
'''If the initial state is final then multiplying doesn't alter that'''
for (substate, iteration) in state:
if substate == self.initial \
and (self.initial in self.finals or iteration == multiplier):
return True
return False
def follow(current, symbol):
next = []
for (substate, iteration) in current:
if iteration < multiplier \
and substate in self.map \
and symbol in self.map[substate]:
next.append((self.map[substate][symbol], iteration))
# final of self? merge with initial on next iteration
if self.map[substate][symbol] in self.finals:
next.append((self.initial, iteration + 1))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce() | [
"def",
"times",
"(",
"self",
",",
"multiplier",
")",
":",
"if",
"multiplier",
"<",
"0",
":",
"raise",
"Exception",
"(",
"\"Can't multiply an FSM by \"",
"+",
"repr",
"(",
"multiplier",
")",
")",
"alphabet",
"=",
"self",
".",
"alphabet",
"# metastate is a set o... | 30.371429 | 19.8 |
def handle_vcf_calls(vcf_file, data, orig_items):
"""Prioritize VCF calls based on external annotations supplied through GEMINI.
"""
if not _do_prioritize(orig_items):
return vcf_file
else:
ann_vcf = population.run_vcfanno(vcf_file, data)
if ann_vcf:
priority_file = _prep_priority_filter_vcfanno(ann_vcf, data)
return _apply_priority_filter(ann_vcf, priority_file, data)
# No data available for filtering, return original file
else:
return vcf_file | [
"def",
"handle_vcf_calls",
"(",
"vcf_file",
",",
"data",
",",
"orig_items",
")",
":",
"if",
"not",
"_do_prioritize",
"(",
"orig_items",
")",
":",
"return",
"vcf_file",
"else",
":",
"ann_vcf",
"=",
"population",
".",
"run_vcfanno",
"(",
"vcf_file",
",",
"data... | 40.692308 | 17.076923 |
def assign_enterprise_learner_role(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created.
"""
if kwargs['created'] and instance.user:
enterprise_learner_role, __ = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)
SystemWideEnterpriseUserRoleAssignment.objects.get_or_create(
user=instance.user,
role=enterprise_learner_role
) | [
"def",
"assign_enterprise_learner_role",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"[",
"'created'",
"]",
"and",
"instance",
".",
"user",
":",
"enterprise_learner_role",
",",
"__",
"=",
... | 51.8 | 26.4 |
def to_json(self, destination):
"""
Save a dictionnary into a JSON file.
:param destination:
A path to a file where we're going to
write the converted dict into a JSON format.
:type destination: str
"""
try:
with open(destination, "w") as file:
# We open the file we are going to write.
# Note: We always overwrite the destination.
# We save the current dictionnary into a json format.
dump(
self.main_dictionnary,
file,
ensure_ascii=False,
indent=4,
sort_keys=True,
)
except UnicodeEncodeError: # pragma: no cover
with open(destination, "w", encoding="utf-8") as file:
# We open the file we are going to write.
# Note: We always overwrite the destination.
# We save the current dictionnary into a json format.
dump(
self.main_dictionnary,
file,
ensure_ascii=False,
indent=4,
sort_keys=True,
) | [
"def",
"to_json",
"(",
"self",
",",
"destination",
")",
":",
"try",
":",
"with",
"open",
"(",
"destination",
",",
"\"w\"",
")",
"as",
"file",
":",
"# We open the file we are going to write.",
"# Note: We always overwrite the destination.",
"# We save the current dictionna... | 34.083333 | 16.138889 |
def TNE_metric(bpmn_graph):
"""
Returns the value of the TNE metric (Total Number of Events of the Model)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
"""
events_counts = get_events_counts(bpmn_graph)
return sum(
[count for _, count in events_counts.items()]
) | [
"def",
"TNE_metric",
"(",
"bpmn_graph",
")",
":",
"events_counts",
"=",
"get_events_counts",
"(",
"bpmn_graph",
")",
"return",
"sum",
"(",
"[",
"count",
"for",
"_",
",",
"count",
"in",
"events_counts",
".",
"items",
"(",
")",
"]",
")"
] | 27.461538 | 22.538462 |
def state_cpfs(self) -> List[CPF]:
'''Returns list of state-fluent CPFs.'''
_, cpfs = self.cpfs
state_cpfs = []
for cpf in cpfs:
name = utils.rename_next_state_fluent(cpf.name)
if name in self.state_fluents:
state_cpfs.append(cpf)
state_cpfs = sorted(state_cpfs, key=lambda cpf: cpf.name)
return state_cpfs | [
"def",
"state_cpfs",
"(",
"self",
")",
"->",
"List",
"[",
"CPF",
"]",
":",
"_",
",",
"cpfs",
"=",
"self",
".",
"cpfs",
"state_cpfs",
"=",
"[",
"]",
"for",
"cpf",
"in",
"cpfs",
":",
"name",
"=",
"utils",
".",
"rename_next_state_fluent",
"(",
"cpf",
... | 38.5 | 12.3 |
def has_permission(self, perm):
"""
Checks if current user (or role) has the given permission.
Args:
perm: Permmission code or object.
Depends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.
Returns:
Boolean.
"""
return self.user.superuser or self.auth.has_permission(perm) | [
"def",
"has_permission",
"(",
"self",
",",
"perm",
")",
":",
"return",
"self",
".",
"user",
".",
"superuser",
"or",
"self",
".",
"auth",
".",
"has_permission",
"(",
"perm",
")"
] | 30.916667 | 22.416667 |
def population_fraction(self):
"""The filtered/unfiltered ratio for cube response.
This value is required for properly calculating population on a cube
where a filter has been applied. Returns 1.0 for an unfiltered cube.
Returns `np.nan` if the unfiltered count is zero, which would
otherwise result in a divide-by-zero error.
"""
numerator = self._cube_dict["result"].get("filtered", {}).get("weighted_n")
denominator = self._cube_dict["result"].get("unfiltered", {}).get("weighted_n")
try:
return numerator / denominator
except ZeroDivisionError:
return np.nan
except Exception:
return 1.0 | [
"def",
"population_fraction",
"(",
"self",
")",
":",
"numerator",
"=",
"self",
".",
"_cube_dict",
"[",
"\"result\"",
"]",
".",
"get",
"(",
"\"filtered\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"weighted_n\"",
")",
"denominator",
"=",
"self",
".",
"_cube... | 43.8125 | 21.0625 |
def parse_sidebar(self, user_page):
"""Parses the DOM and returns user attributes in the sidebar.
:type user_page: :class:`bs4.BeautifulSoup`
:param user_page: MAL user page's DOM
:rtype: dict
:return: User attributes
:raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError`
"""
user_info = {}
# if MAL says the series doesn't exist, raise an InvalidUserError.
error_tag = user_page.find(u'div', {u'class': u'badresult'})
if error_tag:
raise InvalidUserError(self.username)
try:
username_tag = user_page.find(u'div', {u'id': u'contentWrapper'}).find(u'h1')
if not username_tag.find(u'div'):
# otherwise, raise a MalformedUserPageError.
raise MalformedUserPageError(self.username, user_page, message=u"Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
info_panel_first = user_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'td')
try:
picture_tag = info_panel_first.find(u'img')
user_info[u'picture'] = picture_tag.get(u'src').decode('utf-8')
except:
if not self.session.suppress_parse_exceptions:
raise
try:
# the user ID is always present in the blogfeed link.
all_comments_link = info_panel_first.find(u'a', text=u'Blog Feed')
user_info[u'id'] = int(all_comments_link.get(u'href').split(u'&id=')[1])
except:
if not self.session.suppress_parse_exceptions:
raise
infobar_headers = info_panel_first.find_all(u'div', {u'class': u'normal_header'})
if infobar_headers:
try:
favorite_anime_header = infobar_headers[0]
if u'Favorite Anime' in favorite_anime_header.text:
user_info[u'favorite_anime'] = []
favorite_anime_table = favorite_anime_header.nextSibling.nextSibling
if favorite_anime_table.name == u'table':
for row in favorite_anime_table.find_all(u'tr'):
cols = row.find_all(u'td')
anime_link = cols[1].find(u'a')
link_parts = anime_link.get(u'href').split(u'/')
# of the form /anime/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_anime'].append(self.session.anime(int(link_parts[2])).set({u'title': anime_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_manga_header = infobar_headers[1]
if u'Favorite Manga' in favorite_manga_header.text:
user_info[u'favorite_manga'] = []
favorite_manga_table = favorite_manga_header.nextSibling.nextSibling
if favorite_manga_table.name == u'table':
for row in favorite_manga_table.find_all(u'tr'):
cols = row.find_all(u'td')
manga_link = cols[1].find(u'a')
link_parts = manga_link.get(u'href').split(u'/')
# of the form /manga/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_manga'].append(self.session.manga(int(link_parts[2])).set({u'title': manga_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_character_header = infobar_headers[2]
if u'Favorite Characters' in favorite_character_header.text:
user_info[u'favorite_characters'] = {}
favorite_character_table = favorite_character_header.nextSibling.nextSibling
if favorite_character_table.name == u'table':
for row in favorite_character_table.find_all(u'tr'):
cols = row.find_all(u'td')
character_link = cols[1].find(u'a')
link_parts = character_link.get(u'href').split(u'/')
# of the form /character/467/Ghost_in_the_Shell:_Stand_Alone_Complex
character = self.session.character(int(link_parts[2])).set({u'title': character_link.text})
media_link = cols[1].find(u'div').find(u'a')
link_parts = media_link.get(u'href').split(u'/')
# of the form /anime|manga/467
anime = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': media_link.text})
user_info[u'favorite_characters'][character] = anime
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_people_header = infobar_headers[3]
if u'Favorite People' in favorite_people_header.text:
user_info[u'favorite_people'] = []
favorite_person_table = favorite_people_header.nextSibling.nextSibling
if favorite_person_table.name == u'table':
for row in favorite_person_table.find_all(u'tr'):
cols = row.find_all(u'td')
person_link = cols[1].find(u'a')
link_parts = person_link.get(u'href').split(u'/')
# of the form /person/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_people'].append(self.session.person(int(link_parts[2])).set({u'title': person_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
return user_info | [
"def",
"parse_sidebar",
"(",
"self",
",",
"user_page",
")",
":",
"user_info",
"=",
"{",
"}",
"# if MAL says the series doesn't exist, raise an InvalidUserError.",
"error_tag",
"=",
"user_page",
".",
"find",
"(",
"u'div'",
",",
"{",
"u'class'",
":",
"u'badresult'",
"... | 43.991379 | 25.37069 |
def lookup_linke_turbidity(time, latitude, longitude, filepath=None,
interp_turbidity=True):
"""
Look up the Linke Turibidity from the ``LinkeTurbidities.h5``
data file supplied with pvlib.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
filepath : None or string, default None
The path to the ``.h5`` file.
interp_turbidity : bool, default True
If ``True``, interpolates the monthly Linke turbidity values
found in ``LinkeTurbidities.h5`` to daily values.
Returns
-------
turbidity : Series
"""
# The .h5 file 'LinkeTurbidities.h5' contains a single 2160 x 4320 x 12
# matrix of type uint8 called 'LinkeTurbidity'. The rows represent global
# latitudes from 90 to -90 degrees; the columns represent global longitudes
# from -180 to 180; and the depth (third dimension) represents months of
# the year from January (1) to December (12). To determine the Linke
# turbidity for a position on the Earth's surface for a given month do the
# following: LT = LinkeTurbidity(LatitudeIndex, LongitudeIndex, month).
# Note that the numbers within the matrix are 20 * Linke Turbidity,
# so divide the number from the file by 20 to get the
# turbidity.
# The nodes of the grid are 5' (1/12=0.0833[arcdeg]) apart.
# From Section 8 of Aerosol optical depth and Linke turbidity climatology
# http://www.meteonorm.com/images/uploads/downloads/ieashc36_report_TL_AOD_climatologies.pdf
# 1st row: 89.9583 S, 2nd row: 89.875 S
# 1st column: 179.9583 W, 2nd column: 179.875 W
try:
import tables
except ImportError:
raise ImportError('The Linke turbidity lookup table requires tables. '
'You can still use clearsky.ineichen if you '
'supply your own turbidities.')
if filepath is None:
pvlib_path = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5')
latitude_index = (
np.around(_linearly_scale(latitude, 90, -90, 0, 2160))
.astype(np.int64))
longitude_index = (
np.around(_linearly_scale(longitude, -180, 180, 0, 4320))
.astype(np.int64))
lt_h5_file = tables.open_file(filepath)
try:
lts = lt_h5_file.root.LinkeTurbidity[latitude_index,
longitude_index, :]
except IndexError:
raise IndexError('Latitude should be between 90 and -90, '
'longitude between -180 and 180.')
finally:
lt_h5_file.close()
if interp_turbidity:
linke_turbidity = _interpolate_turbidity(lts, time)
else:
months = time.month - 1
linke_turbidity = pd.Series(lts[months], index=time)
linke_turbidity /= 20.
return linke_turbidity | [
"def",
"lookup_linke_turbidity",
"(",
"time",
",",
"latitude",
",",
"longitude",
",",
"filepath",
"=",
"None",
",",
"interp_turbidity",
"=",
"True",
")",
":",
"# The .h5 file 'LinkeTurbidities.h5' contains a single 2160 x 4320 x 12",
"# matrix of type uint8 called 'LinkeTurbidi... | 35.8125 | 24.9375 |
def start(self, pin, dutycycle, frequency_hz=2000):
"""Enable PWM output on specified pin. Set to intiial percent duty cycle
value (0.0 to 100.0) and frequency (in Hz).
"""
if dutycycle < 0.0 or dutycycle > 100.0:
raise ValueError('Invalid duty cycle value, must be between 0.0 to 100.0 (inclusive).')
# Make pin an output.
self.rpi_gpio.setup(pin, self.rpi_gpio.OUT)
# Create PWM instance and save a reference for later access.
self.pwm[pin] = self.rpi_gpio.PWM(pin, frequency_hz)
# Start the PWM at the specified duty cycle.
self.pwm[pin].start(dutycycle) | [
"def",
"start",
"(",
"self",
",",
"pin",
",",
"dutycycle",
",",
"frequency_hz",
"=",
"2000",
")",
":",
"if",
"dutycycle",
"<",
"0.0",
"or",
"dutycycle",
">",
"100.0",
":",
"raise",
"ValueError",
"(",
"'Invalid duty cycle value, must be between 0.0 to 100.0 (inclus... | 53.25 | 14.416667 |
def request_instance(vm_=None, call=None):
'''
Put together all of the information necessary to request an instance on EC2,
and then fire off the request the instance.
Returns data about the instance
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
location = vm_.get('location', get_location(vm_))
# do we launch a regular vm or a spot instance?
# see http://goo.gl/hYZ13f for more information on EC2 API
spot_config = get_spot_config(vm_)
if spot_config is not None:
if 'spot_price' not in spot_config:
raise SaltCloudSystemExit(
'Spot instance config for {0} requires a spot_price '
'attribute.'.format(vm_['name'])
)
params = {'Action': 'RequestSpotInstances',
'InstanceCount': '1',
'Type': spot_config['type']
if 'type' in spot_config else 'one-time',
'SpotPrice': spot_config['spot_price']}
# All of the necessary launch parameters for a VM when using
# spot instances are the same except for the prefix below
# being tacked on.
spot_prefix = 'LaunchSpecification.'
# regular EC2 instance
else:
# WARNING! EXPERIMENTAL!
# This allows more than one instance to be spun up in a single call.
# The first instance will be called by the name provided, but all other
# instances will be nameless (or more specifically, they will use the
# InstanceId as the name). This interface is expected to change, so
# use at your own risk.
min_instance = config.get_cloud_config_value(
'min_instance', vm_, __opts__, search_global=False, default=1
)
max_instance = config.get_cloud_config_value(
'max_instance', vm_, __opts__, search_global=False, default=1
)
params = {'Action': 'RunInstances',
'MinCount': min_instance,
'MaxCount': max_instance}
# Normal instances should have no prefix.
spot_prefix = ''
image_id = get_imageid(vm_)
params[spot_prefix + 'ImageId'] = image_id
userdata = None
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
if userdata_file is None:
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
else:
log.trace('userdata_file: %s', userdata_file)
if os.path.exists(userdata_file):
with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = salt.utils.stringutils.to_unicode(fh_.read())
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
if userdata is not None:
try:
params[spot_prefix + 'UserData'] = base64.b64encode(
salt.utils.stringutils.to_bytes(userdata)
)
except Exception as exc:
log.exception('Failed to encode userdata: %s', exc)
vm_size = config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
)
params[spot_prefix + 'InstanceType'] = vm_size
ex_keyname = keyname(vm_)
if ex_keyname:
params[spot_prefix + 'KeyName'] = ex_keyname
ex_securitygroup = securitygroup(vm_)
if ex_securitygroup:
if not isinstance(ex_securitygroup, list):
params[spot_prefix + 'SecurityGroup.1'] = ex_securitygroup
else:
for counter, sg_ in enumerate(ex_securitygroup):
params[spot_prefix + 'SecurityGroup.{0}'.format(counter)] = sg_
ex_iam_profile = iam_profile(vm_)
if ex_iam_profile:
try:
if ex_iam_profile.startswith('arn:aws:iam:'):
params[
spot_prefix + 'IamInstanceProfile.Arn'
] = ex_iam_profile
else:
params[
spot_prefix + 'IamInstanceProfile.Name'
] = ex_iam_profile
except AttributeError:
raise SaltCloudConfigError(
'\'iam_profile\' should be a string value.'
)
az_ = get_availability_zone(vm_)
if az_ is not None:
params[spot_prefix + 'Placement.AvailabilityZone'] = az_
tenancy_ = get_tenancy(vm_)
if tenancy_ is not None:
if spot_config is not None:
raise SaltCloudConfigError(
'Spot instance config for {0} does not support '
'specifying tenancy.'.format(vm_['name'])
)
params['Placement.Tenancy'] = tenancy_
subnetid_ = get_subnetid(vm_)
if subnetid_ is not None:
params[spot_prefix + 'SubnetId'] = subnetid_
ex_securitygroupid = securitygroupid(vm_)
if ex_securitygroupid:
if not isinstance(ex_securitygroupid, list):
params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid
else:
for counter, sg_ in enumerate(ex_securitygroupid):
params[
spot_prefix + 'SecurityGroupId.{0}'.format(counter)
] = sg_
placementgroup_ = get_placementgroup(vm_)
if placementgroup_ is not None:
params[spot_prefix + 'Placement.GroupName'] = placementgroup_
blockdevicemappings_holder = block_device_mappings(vm_)
if blockdevicemappings_holder:
for _bd in blockdevicemappings_holder:
if 'tag' in _bd:
_bd.pop('tag')
ex_blockdevicemappings = blockdevicemappings_holder
if ex_blockdevicemappings:
params.update(_param_from_config(spot_prefix + 'BlockDeviceMapping',
ex_blockdevicemappings))
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
eni_devices = []
for interface in network_interfaces:
log.debug('Create network interface: %s', interface)
_new_eni = _create_eni_if_necessary(interface, vm_)
eni_devices.append(_new_eni)
params.update(_param_from_config(spot_prefix + 'NetworkInterface',
eni_devices))
set_ebs_optimized = config.get_cloud_config_value(
'ebs_optimized', vm_, __opts__, search_global=False
)
if set_ebs_optimized is not None:
if not isinstance(set_ebs_optimized, bool):
raise SaltCloudConfigError(
'\'ebs_optimized\' should be a boolean value.'
)
params[spot_prefix + 'EbsOptimized'] = set_ebs_optimized
set_del_root_vol_on_destroy = config.get_cloud_config_value(
'del_root_vol_on_destroy', vm_, __opts__, search_global=False
)
set_termination_protection = config.get_cloud_config_value(
'termination_protection', vm_, __opts__, search_global=False
)
if set_termination_protection is not None:
if not isinstance(set_termination_protection, bool):
raise SaltCloudConfigError(
'\'termination_protection\' should be a boolean value.'
)
params.update(_param_from_config(spot_prefix + 'DisableApiTermination',
set_termination_protection))
if set_del_root_vol_on_destroy and not isinstance(set_del_root_vol_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_root_vol_on_destroy\' should be a boolean value.'
)
vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy
if set_del_root_vol_on_destroy:
# first make sure to look up the root device name
# as Ubuntu and CentOS (and most likely other OSs)
# use different device identifiers
log.info('Attempting to look up root device name for image id %s on '
'VM %s', image_id, vm_['name'])
rd_params = {
'Action': 'DescribeImages',
'ImageId.1': image_id
}
try:
rd_data = aws.query(rd_params,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in rd_data:
return rd_data['error']
log.debug('EC2 Response: \'%s\'', rd_data)
except Exception as exc:
log.error(
'Error getting root device name for image id %s for '
'VM %s: \n%s', image_id, vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise
# make sure we have a response
if not rd_data:
err_msg = 'There was an error querying EC2 for the root device ' \
'of image id {0}. Empty response.'.format(image_id)
raise SaltCloudSystemExit(err_msg)
# pull the root device name from the result and use it when
# launching the new VM
rd_name = None
rd_type = None
if 'blockDeviceMapping' in rd_data[0]:
# Some ami instances do not have a root volume. Ignore such cases
if rd_data[0]['blockDeviceMapping'] is not None:
item = rd_data[0]['blockDeviceMapping']['item']
if isinstance(item, list):
item = item[0]
rd_name = item['deviceName']
# Grab the volume type
rd_type = item['ebs'].get('volumeType', None)
log.info('Found root device name: %s', rd_name)
if rd_name is not None:
if ex_blockdevicemappings:
dev_list = [
dev['DeviceName'] for dev in ex_blockdevicemappings
]
else:
dev_list = []
if rd_name in dev_list:
# Device already listed, just grab the index
dev_index = dev_list.index(rd_name)
else:
dev_index = len(dev_list)
# Add the device name in since it wasn't already there
params[
'{0}BlockDeviceMapping.{1}.DeviceName'.format(
spot_prefix, dev_index
)
] = rd_name
# Set the termination value
termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index)
params[termination_key] = six.text_type(set_del_root_vol_on_destroy).lower()
# Use default volume type if not specified
if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and \
'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
type_key = '{0}BlockDeviceMapping.{1}.Ebs.VolumeType'.format(spot_prefix, dev_index)
params[type_key] = rd_type
set_del_all_vols_on_destroy = config.get_cloud_config_value(
'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
)
if set_del_all_vols_on_destroy and not isinstance(set_del_all_vols_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event'](
'requesting', params, list(params)
),
'location': location,
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
provider = get_provider(vm_)
try:
data = aws.query(params,
'instancesSet',
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if 'error' in data:
return data['error']
except Exception as exc:
log.error(
'Error creating %s on EC2 when trying to run the initial '
'deployment: \n%s', vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise
# if we're using spot instances, we need to wait for the spot request
# to become active before we continue
if spot_config:
sir_id = data[0]['spotInstanceRequestId']
vm_['spotRequestId'] = sir_id
def __query_spot_instance_request(sir_id, location):
params = {'Action': 'DescribeSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for spot instance method
return False
if isinstance(data, dict) and 'error' in data:
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for spot instance method
return False
log.debug('Returned query data: %s', data)
state = data[0].get('state')
if state == 'active':
return data
if state == 'open':
# Still waiting for an active state
log.info('Spot instance status: %s', data[0]['status']['message'])
return None
if state in ['cancelled', 'failed', 'closed']:
# Request will never be active, fail
log.error('Spot instance request resulted in state \'{0}\'. '
'Nothing else we can do here.')
return False
__utils__['cloud.fire_event'](
'event',
'waiting for spot instance',
'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
data = _wait_for_spot_instance(
__query_spot_instance_request,
update_args=(sir_id, location),
timeout=config.get_cloud_config_value(
'wait_for_spot_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_spot_interval', vm_, __opts__, default=30),
interval_multiplier=config.get_cloud_config_value(
'wait_for_spot_interval_multiplier',
vm_,
__opts__,
default=1),
max_failures=config.get_cloud_config_value(
'wait_for_spot_max_failures',
vm_,
__opts__,
default=10),
)
log.debug('wait_for_spot_instance data %s', data)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# Cancel the existing spot instance request
params = {'Action': 'CancelSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
log.debug('Canceled spot instance request %s. Data '
'returned: %s', sir_id, data)
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
return data, vm_ | [
"def",
"request_instance",
"(",
"vm_",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'function'",
":",
"# Technically this function may be called other ways too, but it",
"# definitely cannot be called with --function.",
"raise",
"SaltCloudSystemExit",... | 37.384615 | 20.787879 |
def setup_actions(self):
""" Connects slots to signals """
self.actionOpen.triggered.connect(self.on_open)
self.actionNew.triggered.connect(self.on_new)
self.actionSave.triggered.connect(self.on_save)
self.actionSave_as.triggered.connect(self.on_save_as)
self.actionQuit.triggered.connect(
QtWidgets.QApplication.instance().quit)
self.tabWidget.current_changed.connect(self.on_current_tab_changed)
self.tabWidget.last_tab_closed.connect(self.on_last_tab_closed)
self.actionAbout.triggered.connect(self.on_about)
self.actionRun.triggered.connect(self.on_run)
self.interactiveConsole.process_finished.connect(
self.on_process_finished)
self.actionConfigure_run.triggered.connect(self.on_configure_run) | [
"def",
"setup_actions",
"(",
"self",
")",
":",
"self",
".",
"actionOpen",
".",
"triggered",
".",
"connect",
"(",
"self",
".",
"on_open",
")",
"self",
".",
"actionNew",
".",
"triggered",
".",
"connect",
"(",
"self",
".",
"on_new",
")",
"self",
".",
"act... | 53.666667 | 16.133333 |
def declare(self, exchange='', exchange_type='direct', virtual_host='/',
passive=False, durable=False, auto_delete=False,
internal=False, arguments=None):
"""Declare an Exchange.
:param str exchange: Exchange name
:param str exchange_type: Exchange type
:param str virtual_host: Virtual host name
:param bool passive: Do not create
:param bool durable: Durable exchange
:param bool auto_delete: Automatically delete when not in use
:param bool internal: Is the exchange for use by the broker only.
:param dict|None arguments: Exchange key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
if passive:
return self.get(exchange, virtual_host=virtual_host)
exchange_payload = json.dumps(
{
'durable': durable,
'auto_delete': auto_delete,
'internal': internal,
'type': exchange_type,
'arguments': arguments or {},
'vhost': virtual_host
}
)
return self.http_client.put(API_EXCHANGE %
(
quote(virtual_host, ''),
exchange
),
payload=exchange_payload) | [
"def",
"declare",
"(",
"self",
",",
"exchange",
"=",
"''",
",",
"exchange_type",
"=",
"'direct'",
",",
"virtual_host",
"=",
"'/'",
",",
"passive",
"=",
"False",
",",
"durable",
"=",
"False",
",",
"auto_delete",
"=",
"False",
",",
"internal",
"=",
"False"... | 41.054054 | 16.459459 |
def from_rel_ref(baseURI, relative_ref):
"""
Return a |PackURI| instance containing the absolute pack URI formed by
translating *relative_ref* onto *baseURI*.
"""
joined_uri = posixpath.join(baseURI, relative_ref)
abs_uri = posixpath.abspath(joined_uri)
return PackURI(abs_uri) | [
"def",
"from_rel_ref",
"(",
"baseURI",
",",
"relative_ref",
")",
":",
"joined_uri",
"=",
"posixpath",
".",
"join",
"(",
"baseURI",
",",
"relative_ref",
")",
"abs_uri",
"=",
"posixpath",
".",
"abspath",
"(",
"joined_uri",
")",
"return",
"PackURI",
"(",
"abs_u... | 40.75 | 10.25 |
def extract_features(self, text):
"""Extracts features from a body of text.
:rtype: dictionary of features
"""
# Feature extractor may take one or two arguments
try:
return self.feature_extractor(text, self.train_set)
except (TypeError, AttributeError):
return self.feature_extractor(text) | [
"def",
"extract_features",
"(",
"self",
",",
"text",
")",
":",
"# Feature extractor may take one or two arguments",
"try",
":",
"return",
"self",
".",
"feature_extractor",
"(",
"text",
",",
"self",
".",
"train_set",
")",
"except",
"(",
"TypeError",
",",
"Attribute... | 32.090909 | 15.181818 |
def vor_to_am(vor):
r"""
Given a Voronoi tessellation object from Scipy's ``spatial`` module,
converts to a sparse adjacency matrix network representation in COO format.
Parameters
----------
vor : Voronoi Tessellation object
This object is produced by ``scipy.spatial.Voronoi``
Returns
-------
A sparse adjacency matrix in COO format. The network is undirected
and unweighted, so the adjacency matrix is upper-triangular and all the
weights are set to 1.
"""
# Create adjacency matrix in lil format for quick matrix construction
N = vor.vertices.shape[0]
rc = [[], []]
for ij in vor.ridge_dict.keys():
row = vor.ridge_dict[ij].copy()
# Make sure voronoi cell closes upon itself
row.append(row[0])
# Add connections to rc list
rc[0].extend(row[:-1])
rc[1].extend(row[1:])
rc = sp.vstack(rc).T
# Make adj mat upper triangular
rc = sp.sort(rc, axis=1)
# Remove any pairs with ends at infinity (-1)
keep = ~sp.any(rc == -1, axis=1)
rc = rc[keep]
data = sp.ones_like(rc[:, 0])
# Build adj mat in COO format
M = N = sp.amax(rc) + 1
am = sprs.coo_matrix((data, (rc[:, 0], rc[:, 1])), shape=(M, N))
# Remove diagonal, and convert to csr remove duplicates
am = sp.sparse.triu(A=am, k=1, format='csr')
# The convert back to COO and return
am = am.tocoo()
return am | [
"def",
"vor_to_am",
"(",
"vor",
")",
":",
"# Create adjacency matrix in lil format for quick matrix construction",
"N",
"=",
"vor",
".",
"vertices",
".",
"shape",
"[",
"0",
"]",
"rc",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"for",
"ij",
"in",
"vor",
".",
... | 33.285714 | 17.785714 |
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val) | [
"def",
"get_environ_vars",
"(",
"self",
")",
":",
"for",
"key",
",",
"val",
"in",
"os",
".",
"environ",
".",
"items",
"(",
")",
":",
"if",
"_environ_prefix_re",
".",
"search",
"(",
"key",
")",
":",
"yield",
"(",
"_environ_prefix_re",
".",
"sub",
"(",
... | 52.4 | 10 |
def run_calibration(self, interval, applycal):
"""Runs the calibration operation with the current settings
:param interval: The repetition interval between stimuli presentations (seconds)
:type interval: float
:param applycal: Whether to apply a previous saved calibration to this run
:type applycal: bool
:returns: :py:class:`threading.Thread` -- the acquisition thread
"""
if self.selected_calibration_index == 2:
self.tone_calibrator.apply_calibration(applycal)
self.tone_calibrator.setup(interval)
return self.tone_calibrator.run()
else:
self.bs_calibrator.set_stim_by_index(self.selected_calibration_index)
self.bs_calibrator.apply_calibration(applycal)
self.bs_calibrator.setup(interval)
return self.bs_calibrator.run() | [
"def",
"run_calibration",
"(",
"self",
",",
"interval",
",",
"applycal",
")",
":",
"if",
"self",
".",
"selected_calibration_index",
"==",
"2",
":",
"self",
".",
"tone_calibrator",
".",
"apply_calibration",
"(",
"applycal",
")",
"self",
".",
"tone_calibrator",
... | 48.5 | 17.722222 |
def get_db(cls):
"""Return the database for the collection"""
if cls._db:
return getattr(cls._client, cls._db)
return cls._client.get_default_database() | [
"def",
"get_db",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_db",
":",
"return",
"getattr",
"(",
"cls",
".",
"_client",
",",
"cls",
".",
"_db",
")",
"return",
"cls",
".",
"_client",
".",
"get_default_database",
"(",
")"
] | 36.8 | 12.4 |
def _parseSimpleSelector(self, src):
"""simple_selector
: [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
;
"""
ctxsrc = src.lstrip()
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
name, src = self._getMatchResult(self.re_element_name, src)
if name:
pass # already *successfully* assigned
elif src[:1] in self.SelectorQualifiers:
name = '*'
else:
raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)
name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
selector = self.cssBuilder.selector(name)
while src and src[:1] in self.SelectorQualifiers:
hash_, src = self._getMatchResult(self.re_hash, src)
if hash_ is not None:
selector.addHashId(hash_)
continue
class_, src = self._getMatchResult(self.re_class, src)
if class_ is not None:
selector.addClass(class_)
continue
if src.startswith('['):
src, selector = self._parseSelectorAttribute(src, selector)
elif src.startswith(':'):
src, selector = self._parseSelectorPseudo(src, selector)
else:
break
return src.lstrip(), selector | [
"def",
"_parseSimpleSelector",
"(",
"self",
",",
"src",
")",
":",
"ctxsrc",
"=",
"src",
".",
"lstrip",
"(",
")",
"nsPrefix",
",",
"src",
"=",
"self",
".",
"_getMatchResult",
"(",
"self",
".",
"re_namespace_selector",
",",
"src",
")",
"name",
",",
"src",
... | 38.083333 | 20 |
def manage(settingspath, root_dir, argv):
"""
Manage all processes
"""
# add settings.json to environment variables
os.environ[ENV_VAR_SETTINGS] = settingspath
# add root_dir
os.environ[ENV_VAR_ROOT_DIR] = root_dir
# get datasets list
with open(settingspath) as settings_file:
settings = json.load(settings_file)
# manage args
datasets_list = generate_datasets_list(settings, argv)
if "make-data-file" == argv[1]:
make_data_file(datasets_list, argv)
elif "parse-data" == argv[1]:
parse_data(datasets_list, argv)
elif "do-operations" == argv[1]:
do_operations(datasets_list, argv)
else:
print_help() | [
"def",
"manage",
"(",
"settingspath",
",",
"root_dir",
",",
"argv",
")",
":",
"# add settings.json to environment variables",
"os",
".",
"environ",
"[",
"ENV_VAR_SETTINGS",
"]",
"=",
"settingspath",
"# add root_dir",
"os",
".",
"environ",
"[",
"ENV_VAR_ROOT_DIR",
"]... | 32.285714 | 9.333333 |
def cluster_application_state(self, application_id):
"""
With the application state API, you can obtain the current
state of an application.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/state'.format(
appid=application_id)
return self.request(path) | [
"def",
"cluster_application_state",
"(",
"self",
",",
"application_id",
")",
":",
"path",
"=",
"'/ws/v1/cluster/apps/{appid}/state'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | 35.230769 | 15.384615 |
def masked_middle_mfcc(self):
"""
Return the MFCC speech frames
in the MIDDLE portion of the wave.
:rtype: :class:`numpy.ndarray` (2D)
"""
begin, end = self._masked_middle_begin_end()
return (self.masked_mfcc)[:, begin:end] | [
"def",
"masked_middle_mfcc",
"(",
"self",
")",
":",
"begin",
",",
"end",
"=",
"self",
".",
"_masked_middle_begin_end",
"(",
")",
"return",
"(",
"self",
".",
"masked_mfcc",
")",
"[",
":",
",",
"begin",
":",
"end",
"]"
] | 30.222222 | 8.666667 |
def get_transactions_xml(self, account: SEPAAccount, start_date: datetime.date = None,
                         end_date: datetime.date = None) -> list:
    """
    Fetch the transactions of a bank account in a certain timeframe as
    camt.052.001.02 XML documents.

    :param account: SEPA account to fetch transactions for
    :param start_date: First day to fetch
    :param end_date: Last day to fetch
    :return: A list of bytestrings containing XML documents
    """
    with self._get_dialog() as dialog:
        hkcaz = self._find_highest_supported_command(HKCAZ1)
        logger.info('Start fetching from {} to {}'.format(start_date, end_date))

        def build_request(touchdown):
            # One HKCAZ segment per paging touchdown point.
            return hkcaz(
                account=hkcaz._fields['account'].type.from_sepa_account(account),
                all_accounts=False,
                date_start=start_date,
                date_end=end_date,
                touchdown_point=touchdown,
                supported_camt_messages=SupportedMessageTypes('urn:iso:std:iso:20022:tech:xsd:camt.052.001.02'),
            )

        responses = self._fetch_with_touchdowns(dialog, build_request, 'HICAZ')
        logger.info('Fetching done.')
        # Each response segment carries one booked-statement XML document.
        return [segment.statement_booked for segment in responses]
"def",
"get_transactions_xml",
"(",
"self",
",",
"account",
":",
"SEPAAccount",
",",
"start_date",
":",
"datetime",
".",
"date",
"=",
"None",
",",
"end_date",
":",
"datetime",
".",
"date",
"=",
"None",
")",
"->",
"list",
":",
"with",
"self",
".",
"_get_d... | 41.30303 | 21.181818 |
def lookup_path(bin_name):
    """Resolve *bin_name* against a fixed list of system directories.

    Calls to external binaries can't depend on $PATH, so only these
    well-known locations are searched, in order.

    :return: the fully qualified path of the first executable match,
        or ``False`` when none is found.
    """
    search_dirs = ('/usr/local/sbin/', '/usr/local/bin/', '/usr/sbin/', '/usr/bin/')
    for directory in search_dirs:
        candidate = directory + bin_name
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return False
"def",
"lookup_path",
"(",
"bin_name",
")",
":",
"paths",
"=",
"(",
"'/usr/local/sbin/'",
",",
"'/usr/local/bin/'",
",",
"'/usr/sbin/'",
",",
"'/usr/bin/'",
")",
"for",
"p",
"in",
"paths",
":",
"fq_path",
"=",
"p",
"+",
"bin_name",
"found",
"=",
"os",
".",... | 34.5 | 17.5 |
def nonblocking_input(self):
    '''Context manager body that switches the :class:`Terminal` input file to
    non-blocking reads for the duration of the ``with`` block.

    Reading :attr:`sys.stdin` normally blocks, which is bad for an
    interactive application that must keep updating the screen without
    user input; making ``infile.read`` non-blocking lets it return
    nothing when no input is pending. :meth:`Root.run` relies on this.
    '''
    # FIXME: do we handle restoring this during SIGTSTP?
    if not hasattr(self.infile, 'fileno'):
        # No underlying file descriptor (e.g. a StringIO): nothing to do.
        yield
        return
    # Use fcntl to set stdin to non-blocking. WARNING - this is not
    # particularly portable!
    original_flags = fcntl.fcntl(self.infile, fcntl.F_GETFL)
    fcntl.fcntl(self.infile, fcntl.F_SETFL, original_flags | os.O_NONBLOCK)
    try:
        yield
    finally:
        # Always restore the original flags, even on error.
        fcntl.fcntl(self.infile, fcntl.F_SETFL, original_flags)
"def",
"nonblocking_input",
"(",
"self",
")",
":",
"# FIXME: do we handle restoring this during SIGTSTP?",
"if",
"hasattr",
"(",
"self",
".",
"infile",
",",
"'fileno'",
")",
":",
"# Use fcntl to set stdin to non-blocking. WARNING - this is not",
"# particularly portable!",
"fla... | 44 | 24.08 |
def is_me(self):  # pragma: no cover, seems not to be used anywhere
    """Check whether the configured host name matches the local machine.

    TODO: is it useful?

    :return: True if ``self.host_name`` equals the local FQDN or hostname
    :rtype: bool
    """
    fqdn = socket.getfqdn()
    logger.info("And arbiter is launched with the hostname:%s "
                "from an arbiter point of view of addr:%s", self.host_name, fqdn)
    return self.host_name in (fqdn, socket.gethostname())
"def",
"is_me",
"(",
"self",
")",
":",
"# pragma: no cover, seems not to be used anywhere",
"logger",
".",
"info",
"(",
"\"And arbiter is launched with the hostname:%s \"",
"\"from an arbiter point of view of addr:%s\"",
",",
"self",
".",
"host_name",
",",
"socket",
".",
"get... | 46.272727 | 27 |
def get_field_schema(name, field):
    """Returns a JSON Schema representation of a form field."""
    schema = {
        'type': 'string',
    }
    if field.label:
        schema['title'] = str(field.label)  # force translation
    if field.help_text:
        schema['description'] = str(field.help_text)  # force translation
    # Ordered dispatch table: the FIRST matching entry wins, mirroring the
    # original isinstance/elif chain (e.g. DateTimeField before DateField).
    type_dispatch = (
        ((fields.URLField, fields.FileField), 'format', 'uri'),
        (fields.EmailField, 'format', 'email'),
        (fields.DateTimeField, 'format', 'date-time'),
        (fields.DateField, 'format', 'date'),
        ((fields.DecimalField, fields.FloatField), 'type', 'number'),
        (fields.IntegerField, 'type', 'integer'),
        (fields.NullBooleanField, 'type', 'boolean'),
    )
    for field_types, key, value in type_dispatch:
        if isinstance(field, field_types):
            schema[key] = value
            break
    else:
        # Only consulted when no field type matched above.
        if isinstance(field.widget, widgets.CheckboxInput):
            schema['type'] = 'boolean'
    if getattr(field, 'choices', []):
        schema['enum'] = sorted(choice[0] for choice in field.choices)
    # check for multiple values
    if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)):
        if field.widget.allow_multiple_selected:
            # promote to array of <type>, move details into the items field
            schema['items'] = {
                'type': schema['type'],
            }
            if 'enum' in schema:
                schema['items']['enum'] = schema.pop('enum')
            schema['type'] = 'array'
    return schema
"def",
"get_field_schema",
"(",
"name",
",",
"field",
")",
":",
"field_schema",
"=",
"{",
"'type'",
":",
"'string'",
",",
"}",
"if",
"field",
".",
"label",
":",
"field_schema",
"[",
"'title'",
"]",
"=",
"str",
"(",
"field",
".",
"label",
")",
"# force ... | 38.545455 | 17.068182 |
def do_pickle_ontology(filename, g=None):
    """
    From a valid filename, generate the graph instance and pickle it too.

    Note: option to pass a pre-generated graph instance too.
    2015-09-17: added code to increase recursion limit if cPickle fails
    see http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
    """
    ONTOSPY_LOCAL_MODELS = get_home_location()
    pickledpath = ONTOSPY_LOCAL_CACHE + "/" + filename + ".pickle"
    if not g:
        g = Ontospy(ONTOSPY_LOCAL_MODELS + "/" + filename)
    if GLOBAL_DISABLE_CACHE:
        # Caching disabled: just hand back the (possibly freshly built) graph.
        return g
    try:
        cPickle.dump(g, open(pickledpath, "wb"))
        # print Style.DIM + ".. cached <%s>" % pickledpath + Style.RESET_ALL
    except Exception as e:
        print("\n.. Failed caching <%s>" % filename )
        print(str(e))
        print("\n... attempting to increase the recursion limit from %d to %d" % (sys.getrecursionlimit(), sys.getrecursionlimit()*10))
        # Retry once with a 10x larger recursion limit; on success the raised
        # limit is deliberately left in place (matches historical behaviour).
        try:
            sys.setrecursionlimit(sys.getrecursionlimit()*10)
            cPickle.dump(g, open(pickledpath, "wb"))
            # print(Fore.GREEN + "Cached <%s>" % pickledpath + "..." + Style.RESET_ALL)
        except Exception as e:
            print("\n... Failed caching <%s>... aborting..." % filename )
            print(str(e))
            sys.setrecursionlimit(int(sys.getrecursionlimit()/10))
    return g
"def",
"do_pickle_ontology",
"(",
"filename",
",",
"g",
"=",
"None",
")",
":",
"ONTOSPY_LOCAL_MODELS",
"=",
"get_home_location",
"(",
")",
"pickledpath",
"=",
"ONTOSPY_LOCAL_CACHE",
"+",
"\"/\"",
"+",
"filename",
"+",
"\".pickle\"",
"if",
"not",
"g",
":",
"g",... | 41.133333 | 23.533333 |
def __prepare_resource(data):
        """Prepare the resourcepart of the JID.
        :Parameters:
            - `data`: Resourcepart of the JID
        :raise JIDError: if the resource name is too long.
        :raise pyxmpp.xmppstringprep.StringprepError: if the
            resourcepart fails Resourceprep preparation."""
        # NOTE: Python 2 code (`unicode`, `except X, err` syntax).
        # Empty / None resourcepart is valid: a bare JID has no resource.
        if not data:
            return None
        data = unicode(data)
        try:
            resource = RESOURCEPREP.prepare(data)
        except StringprepError, err:
            # NOTE(review): message says "Local part" but this function
            # prepares the *resource* part - looks copy-pasted; confirm.
            raise JIDError(u"Local part invalid: {0}".format(err))
        # RFC limits each JID part to 1023 bytes of UTF-8, not characters.
        if len(resource.encode("utf-8")) > 1023:
            raise JIDError("Resource name too long")
        return resource
"def",
"__prepare_resource",
"(",
"data",
")",
":",
"if",
"not",
"data",
":",
"return",
"None",
"data",
"=",
"unicode",
"(",
"data",
")",
"try",
":",
"resource",
"=",
"RESOURCEPREP",
".",
"prepare",
"(",
"data",
")",
"except",
"StringprepError",
",",
"er... | 35.526316 | 16.157895 |
def _clean_rec_name(rec):
    """Clean illegal characters in input fasta file which cause problems downstream.

    Every character of ``rec.id`` outside ALLOWED_CONTIG_NAME_CHARS is
    replaced with an underscore; the description is dropped entirely.
    """
    rec.id = "".join(ch if ch in ALLOWED_CONTIG_NAME_CHARS else "_" for ch in rec.id)
    rec.description = ""
    return rec
"def",
"_clean_rec_name",
"(",
"rec",
")",
":",
"out_id",
"=",
"[",
"]",
"for",
"char",
"in",
"list",
"(",
"rec",
".",
"id",
")",
":",
"if",
"char",
"in",
"ALLOWED_CONTIG_NAME_CHARS",
":",
"out_id",
".",
"append",
"(",
"char",
")",
"else",
":",
"out_... | 28.75 | 13 |
def delete_acl_request(request):
    """Submission to remove an ACL."""
    uuid_ = request.matchdict['uuid']
    # Each posted entry names one (uid, permission) pair to revoke.
    permissions = [(entry['uid'], entry['permission']) for entry in request.json]
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            remove_acl(cursor, uuid_, permissions)
    response = request.response
    response.status_int = 200
    return response
"def",
"delete_acl_request",
"(",
"request",
")",
":",
"uuid_",
"=",
"request",
".",
"matchdict",
"[",
"'uuid'",
"]",
"posted",
"=",
"request",
".",
"json",
"permissions",
"=",
"[",
"(",
"x",
"[",
"'uid'",
"]",
",",
"x",
"[",
"'permission'",
"]",
",",
... | 29.692308 | 15.384615 |
def parallelize(mapfunc, workers=None):
    '''
    Wrap ``mapfunc`` so it runs over a list of arguments with a thread pool.

    Each item in the argument list represents one call's arguments; tuples
    are unpacked into multiple positional arguments, anything else is passed
    as a single argument. When ``workers`` is not given it falls back to the
    PYLT_NUM_WORKERS environment variable (default 10 workers).

    Return: func(args_list: list[arg]) => dict[arg -> result]
    '''
    if not workers:
        workers = _get_default_workers()

    def wrapper(args_list):
        collected = {}
        with concurrent.futures.ThreadPoolExecutor(
                max_workers=workers) as pool:
            pending = {}
            for call_args in args_list:
                if isinstance(call_args, tuple):
                    future = pool.submit(mapfunc, *call_args)
                else:
                    future = pool.submit(mapfunc, call_args)
                pending[future] = call_args
            # Key each result by the arguments that produced it.
            for future in concurrent.futures.as_completed(pending):
                collected[pending[future]] = future.result()
        return collected
    return wrapper
"def",
"parallelize",
"(",
"mapfunc",
",",
"workers",
"=",
"None",
")",
":",
"workers",
"=",
"workers",
"if",
"workers",
"else",
"_get_default_workers",
"(",
")",
"def",
"wrapper",
"(",
"args_list",
")",
":",
"result",
"=",
"{",
"}",
"with",
"concurrent",
... | 37.617647 | 20.970588 |
def print_usage():
    """
    Prints usage message.
    """
    # Usage banner: invoking script name followed by the accepted options.
    print('Usage: ' + os.path.basename(sys.argv[0]) + ' [options] dimacs-file')
    print('Options:')
    print(' -h, --help Show this message')
    print(' -m, --model Print model')
    print(' -s, --solver SAT solver to use')
    print(' Available values: g3, g4, m22, mgh (default = g4)')
    print(' -v, --verbose Be verbose')
"def",
"print_usage",
"(",
")",
":",
"print",
"(",
"'Usage: '",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"+",
"' [options] dimacs-file'",
")",
"print",
"(",
"'Options:'",
")",
"print",
"(",
"' -h, --hel... | 37.916667 | 18.75 |
def covariance_matrix(self,x,y,names=None,cov=None):
        """build a pyemu.Cov instance from GeoStruct
        Parameters
        ----------
        x : (iterable of floats)
            x-coordinate locations
        y : (iterable of floats)
            y-coordinate locations
        names : (iterable of str)
            names of location. If None, cov must not be None. Default is None.
        cov : (pyemu.Cov) instance
            an existing Cov instance. The contribution of this GeoStruct is added
            to cov. If cov is None, names must not be None. Default is None
        Returns
        -------
        cov : pyemu.Cov
            the covariance matrix implied by this GeoStruct for the x,y pairs.
            cov has row and column names supplied by the names argument unless
            the "cov" argument was passed.
        Note
        ----
        either "names" or "cov" must be passed. If "cov" is passed, cov.shape
        must equal len(x) and len(y).
        Example
        -------
        ``>>>pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``
        ``>>>cov = gs.covariance_matrix(pp_df.x,pp_df.y,pp_df.name)``
        """
        # Normalize coordinate inputs to numpy arrays.
        if not isinstance(x,np.ndarray):
            x = np.array(x)
        if not isinstance(y,np.ndarray):
            y = np.array(y)
        # x and y must be paired coordinates of equal length.
        assert x.shape[0] == y.shape[0]
        if names is not None:
            assert x.shape[0] == len(names)
            # Start from a dense zero matrix with the nugget on the diagonal.
            c = np.zeros((len(names),len(names)))
            np.fill_diagonal(c,self.nugget)
            cov = Cov(x=c,names=names)
        elif cov is not None:
            # Add this struct's nugget contribution to the existing matrix
            # as a diagonal Cov.
            assert cov.shape[0] == x.shape[0]
            names = cov.row_names
            c = np.zeros((len(names),1))
            c += self.nugget
            cont = Cov(x=c,names=names,isdiagonal=True)
            cov += cont
        else:
            raise Exception("GeoStruct.covariance_matrix() requires either " +
                            "names or cov arg")
        # Each variogram accumulates its contribution into cov in-place.
        for v in self.variograms:
            v.covariance_matrix(x,y,cov=cov)
        return cov
"def",
"covariance_matrix",
"(",
"self",
",",
"x",
",",
"y",
",",
"names",
"=",
"None",
",",
"cov",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
... | 32.901639 | 19.540984 |
def notification_selected_sm_changed(self, model, prop_name, info):
        """If a new state machine is selected, make sure the tab is open"""
        selected_state_machine_id = self.model.selected_state_machine_id
        if selected_state_machine_id is None:
            return
        page_id = self.get_page_num(selected_state_machine_id)
        # to retrieve the current tab colors
        number_of_pages = self.view["notebook"].get_n_pages()
        old_label_colors = list(range(number_of_pages))
        for p in range(number_of_pages):
            page = self.view["notebook"].get_nth_page(p)
            # Tab label widget is the first child of the tab's container box.
            label = self.view["notebook"].get_tab_label(page).get_child().get_children()[0]
            # old_label_colors[p] = label.get_style().fg[Gtk.StateType.NORMAL]
            old_label_colors[p] = label.get_style_context().get_color(Gtk.StateType.NORMAL)
        # Switch to the tab of the newly selected state machine if needed.
        if not self.view.notebook.get_current_page() == page_id:
            self.view.notebook.set_current_page(page_id)
        # set the old colors
        for p in range(number_of_pages):
            page = self.view["notebook"].get_nth_page(p)
            label = self.view["notebook"].get_tab_label(page).get_child().get_children()[0]
            # Gtk TODO
            # NOTE(review): `style` is assigned but never used here; the
            # color-restoring logic appears unfinished (or truncated in this
            # extract) - confirm against the upstream source.
            style = label.get_style_context()
"def",
"notification_selected_sm_changed",
"(",
"self",
",",
"model",
",",
"prop_name",
",",
"info",
")",
":",
"selected_state_machine_id",
"=",
"self",
".",
"model",
".",
"selected_state_machine_id",
"if",
"selected_state_machine_id",
"is",
"None",
":",
"return",
"... | 46.555556 | 23.925926 |
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
  """TPU config for cifar 10."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  # Overrides applied on top of the base config.
  overrides = {
      "batch_size": 2,
      "num_heads": 4,  # heads are expensive on tpu
      "num_decoder_layers": 12,
      "block_length": 128,
      "hidden_size": 256,
      "filter_size": 2048,
      "layer_preprocess_sequence": "none",
      "layer_postprocess_sequence": "dan",
      "layer_prepostprocess_dropout": 0.1,
      "optimizer": "Adafactor",
      "learning_rate_schedule": "rsqrt_decay",
      "learning_rate_warmup_steps": 10000,
  }
  for hparam_name, hparam_value in overrides.items():
    setattr(hparams, hparam_name, hparam_value)
  return hparams
"def",
"imagetransformer_b12l_4h_b128_uncond_dr03_tpu",
"(",
")",
":",
"hparams",
"=",
"imagetransformer_bas8l_8h_big_uncond_dr03_imgnet",
"(",
")",
"update_hparams_for_tpu",
"(",
"hparams",
")",
"hparams",
".",
"batch_size",
"=",
"2",
"hparams",
".",
"num_heads",
"=",
... | 38 | 10 |
def options(self, route: str, callback: object):
    """
    Binds an OPTIONS route with the given callback.

    :param route: URL pattern the callback answers to.
    :param callback: handler invoked for OPTIONS requests on the route.
    :rtype: object
    """
    # ANNOTATION FIX: the original annotated the parameters with the
    # *instances* ``str()`` / ``object()`` instead of the types themselves.
    self.__set_route('options', {route: callback})
    return RouteMapping
"def",
"options",
"(",
"self",
",",
"route",
":",
"str",
"(",
")",
",",
"callback",
":",
"object",
"(",
")",
")",
":",
"self",
".",
"__set_route",
"(",
"'options'",
",",
"{",
"route",
":",
"callback",
"}",
")",
"return",
"RouteMapping"
] | 32.857143 | 10 |
def init_jvm(java_home=None,
             jvm_dll=None,
             jvm_maxmem=None,
             jvm_classpath=None,
             jvm_properties=None,
             jvm_options=None,
             config_file=None,
             config=None):
    """
    Create a configured Java virtual machine which will be used by jpy.

    :param java_home: Java JRE/JDK home directory used to locate the JVM
        shared library when 'jvm_dll' is omitted.
    :param jvm_dll: The JVM shared library file; may be inferred from 'java_home'.
    :param jvm_maxmem: Maximum heap space, e.g. '400M', '8G' (java '-Xmx').
    :param jvm_classpath: Class file search paths, separated by colons (Unix)
        or semicolons (Windows) (java '-cp').
    :param jvm_properties: Dictionary of Java system properties (java '-D').
    :param jvm_options: List of extra JVM options.
    :param config_file: Extra configuration file (e.g. 'jpyconfig.py') loaded
        when 'config' is omitted.
    :param config: Optional default configuration object providing defaults
        for the jvm_* parameters.
    :return: a tuple (cdll, actual_jvm_options) on success, None otherwise.
    """
    if not config:
        config = _get_python_api_config(config_file=config_file)
    # Best effort preload; fail=False means a missing DLL yields cdll=None.
    cdll = preload_jvm_dll(jvm_dll_file=jvm_dll,
                           java_home_dir=java_home,
                           config_file=config_file,
                           config=config,
                           fail=False)
    import jpy
    if jpy.has_jvm():
        # A JVM already exists in this process: no options were applied.
        jvm_options = None
    else:
        jvm_options = get_jvm_options(jvm_maxmem=jvm_maxmem,
                                      jvm_classpath=jvm_classpath,
                                      jvm_properties=jvm_properties,
                                      jvm_options=jvm_options,
                                      config=config)
        logger.debug('Creating JVM with options %s' % repr(jvm_options))
        jpy.create_jvm(options=jvm_options)
    # print('jvm_dll =', jvm_dll)
    # print('jvm_options =', jvm_options)
    return cdll, jvm_options
"def",
"init_jvm",
"(",
"java_home",
"=",
"None",
",",
"jvm_dll",
"=",
"None",
",",
"jvm_maxmem",
"=",
"None",
",",
"jvm_classpath",
"=",
"None",
",",
"jvm_properties",
"=",
"None",
",",
"jvm_options",
"=",
"None",
",",
"config_file",
"=",
"None",
",",
"... | 48.530612 | 26.816327 |
def run(scenario, magicc_version=6, **kwargs):
    """
    Run a MAGICC scenario and return output data and (optionally) config parameters.

    As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
    parameters into ``out/PARAMETERS.OUT`` and they will then be read into
    ``output.metadata["parameters"]`` where ``output`` is the returned object.

    Parameters
    ----------
    scenario : :obj:`pymagicc.io.MAGICCData`
        Scenario to run
    magicc_version : int
        MAGICC version to use for the run (6 or 7)
    **kwargs
        Parameters overwriting default parameters

    Raises
    ------
    ValueError
        If the magicc_version is not available

    Returns
    -------
    output : :obj:`pymagicc.io.MAGICCData`
        Output of the run with the data in the ``df`` attribute and parameters and
        other metadata in the ``metadata`` attribute
    """
    version_to_cls = {6: MAGICC6, 7: MAGICC7}
    if magicc_version not in version_to_cls:
        raise ValueError("MAGICC version {} is not available".format(magicc_version))
    magicc_cls = version_to_cls[magicc_version]
    with magicc_cls() as magicc:
        return magicc.run(scenario=scenario, **kwargs)
"def",
"run",
"(",
"scenario",
",",
"magicc_version",
"=",
"6",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"magicc_version",
"==",
"6",
":",
"magicc_cls",
"=",
"MAGICC6",
"elif",
"magicc_version",
"==",
"7",
":",
"magicc_cls",
"=",
"MAGICC7",
"else",
":",
... | 29.02439 | 24.097561 |
def platform_data_dir():
    """
    Returns path for user-specific data files

    Returns:
        PathLike : path to the data dir used by the current operating system

    Raises:
        NotImplementedError: if the current operating system is unknown
    """
    if LINUX:  # nocover
        dpath_ = os.environ.get('XDG_DATA_HOME', '~/.local/share')
    elif DARWIN:  # nocover
        dpath_ = '~/Library/Application Support'
    elif WIN32:  # nocover
        dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')
    else:  # nocover
        # BUG FIX: the original did ``raise '~/AppData/Local'``. Raising a
        # string is a TypeError in Python 3 - only BaseException subclasses
        # can be raised - so signal the unsupported platform explicitly.
        raise NotImplementedError('unknown operating system')
    dpath = normpath(expanduser(dpath_))
    return dpath
"def",
"platform_data_dir",
"(",
")",
":",
"if",
"LINUX",
":",
"# nocover",
"dpath_",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'XDG_DATA_HOME'",
",",
"'~/.local/share'",
")",
"elif",
"DARWIN",
":",
"# nocover",
"dpath_",
"=",
"'~/Library/Application Support'... | 31.352941 | 16.411765 |
def __get_query_agg_cardinality(cls, field, agg_id=None):
    """
    Build an es_dsl aggregation object for the approximate count of distinct
    values of a field.

    :param field: field from which to count distinct values
    :param agg_id: aggregation identifier; defaults to ``cls.AGGREGATION_ID``
    :return: a tuple (aggregation id, es_dsl aggregation object). Ex:
        {
          "cardinality": {
            "field": <field>,
            "precision_threshold": 3000
          }
        }
    """
    agg_id = agg_id or cls.AGGREGATION_ID
    query_agg = A("cardinality", field=field, precision_threshold=cls.ES_PRECISION)
    return agg_id, query_agg
"def",
"__get_query_agg_cardinality",
"(",
"cls",
",",
"field",
",",
"agg_id",
"=",
"None",
")",
":",
"if",
"not",
"agg_id",
":",
"agg_id",
"=",
"cls",
".",
"AGGREGATION_ID",
"query_agg",
"=",
"A",
"(",
"\"cardinality\"",
",",
"field",
"=",
"field",
",",
... | 42.8125 | 20.8125 |
def listBlockParents(self, block_name=""):
        """
        list parents of a block
        """
        # NOTE: Python 2 code (`basestring`). block_name must be a non-empty
        # str or list, and may not contain SQL-style wildcards ('%' or '*').
        if not block_name:
            msg = " DBSBlock/listBlockParents. Block_name must be provided as a string or a list. \
No wildcards allowed in block_name/s."
            dbsExceptionHandler('dbsException-invalid-input', msg)
        elif isinstance(block_name, basestring):
            try:
                block_name = str(block_name)
                if '%' in block_name or '*' in block_name:
                    dbsExceptionHandler("dbsException-invalid-input", "DBSReaderModel/listBlocksParents: \
                    NO WILDCARDS allowed in block_name.")
            except:
                dbsExceptionHandler("dbsException-invalid-input", "DBSBlock/listBlockParents. Block_name must be \
                    provided as a string or a list. No wildcards allowed in block_name/s .")
        elif type(block_name) is list:
            # Validate each name in the list individually.
            for b in block_name:
                if '%' in b or '*' in b:
                    dbsExceptionHandler("dbsException-invalid-input", "DBSReaderModel/listBlocksParents: \
                    NO WILDCARDS allowed in block_name.")
        else:
            msg = "DBSBlock/listBlockParents. Block_name must be provided as a string or a list. \
            No wildcards allowed in block_name/s ."
            dbsExceptionHandler("dbsException-invalid-input", msg)
        # Only reached when validation passed (dbsExceptionHandler raises).
        conn = self.dbi.connection()
        try:
            results = self.blockparentlist.execute(conn, block_name)
            return results
        finally:
            # Always release the DB connection, even on query failure.
            if conn:
                conn.close()
"def",
"listBlockParents",
"(",
"self",
",",
"block_name",
"=",
"\"\"",
")",
":",
"if",
"not",
"block_name",
":",
"msg",
"=",
"\" DBSBlock/listBlockParents. Block_name must be provided as a string or a list. \\\n No wildcards allowed in block_name/s.\"",
"dbsExceptio... | 48.757576 | 23.060606 |
def rtgen_family(self, value):
    """Family setter: write the family byte into the underlying buffer.

    ``None`` is coerced to 0 before packing into a single unsigned byte.
    """
    packed = bytearray(c_ubyte(value or 0))
    self.bytearray[self._get_slicers(0)] = packed
"def",
"rtgen_family",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"bytearray",
"[",
"self",
".",
"_get_slicers",
"(",
"0",
")",
"]",
"=",
"bytearray",
"(",
"c_ubyte",
"(",
"value",
"or",
"0",
")",
")"
] | 45 | 15.666667 |
def get_item(self, **kwargs):
    """ Get collection item taking into account generated queryset
    of parent view.

    This makes nested resources work properly: the returned item must
    belong to its parent view's queryset, filtering out objects that do
    not belong to the parent object.

    Returns an object from the applicable ACL. If the ACL wasn't applied
    yet, it is applied explicitly here.
    """
    if six.callable(self.context):
        # ACL not applied yet: resolve the context first.
        self.reload_context(es_based=False, **kwargs)
    parent_objects = self._parent_queryset()
    if parent_objects is not None and self.context not in parent_objects:
        raise JHTTPNotFound('{}({}) not found'.format(
            self.Model.__name__,
            self._get_context_key(**kwargs)))
    return self.context
"def",
"get_item",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"six",
".",
"callable",
"(",
"self",
".",
"context",
")",
":",
"self",
".",
"reload_context",
"(",
"es_based",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"objects",
"=",
"sel... | 39.761905 | 20.666667 |
def move_right(self, keep_anchor=False, nb_chars=1):
    """
    Moves the cursor on the right.

    :param keep_anchor: True to keep anchor (to select text) or False to
        move the anchor (no selection)
    :param nb_chars: Number of characters to move.
    """
    cursor = self._editor.textCursor()
    anchor_mode = cursor.KeepAnchor if keep_anchor else cursor.MoveAnchor
    cursor.movePosition(cursor.Right, anchor_mode, nb_chars)
    self._editor.setTextCursor(cursor)
"def",
"move_right",
"(",
"self",
",",
"keep_anchor",
"=",
"False",
",",
"nb_chars",
"=",
"1",
")",
":",
"text_cursor",
"=",
"self",
".",
"_editor",
".",
"textCursor",
"(",
")",
"text_cursor",
".",
"movePosition",
"(",
"text_cursor",
".",
"Right",
",",
"... | 40.692308 | 12.692308 |
def datasets(self):
    """Distinct datasets (``dataset``) in :class:`.models.Entry`

    Distinct datasets are SwissProt or/and TrEMBL.

    :return: all distinct dataset types
    :rtype: list[str]
    """
    rows = self.session.query(distinct(models.Entry.dataset)).all()
    # Each row is a one-element tuple; unwrap to plain values.
    return [row[0] for row in rows]
"def",
"datasets",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"session",
".",
"query",
"(",
"distinct",
"(",
"models",
".",
"Entry",
".",
"dataset",
")",
")",
".",
"all",
"(",
")",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"r",
... | 31.9 | 16.8 |
def _apply_template(template, target, *, checkout, extra_context):
    """Apply a template to a temporary directory and then copy results to target."""
    with tempfile.TemporaryDirectory() as staging:
        rendered_dir = cc_main.cookiecutter(
            template,
            checkout=checkout,
            no_input=True,
            output_dir=staging,
            extra_context=extra_context)
        # Overlay every rendered entry onto the target, replacing any
        # existing file or directory of the same name.
        for entry in os.listdir(rendered_dir):
            src = os.path.join(rendered_dir, entry)
            dst = os.path.join(target, entry)
            if os.path.isdir(src):
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
                continue
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy2(src, dst)
"def",
"_apply_template",
"(",
"template",
",",
"target",
",",
"*",
",",
"checkout",
",",
"extra_context",
")",
":",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"tempdir",
":",
"repo_dir",
"=",
"cc_main",
".",
"cookiecutter",
"(",
"templat... | 39.95 | 7.05 |
def _login(self, max_tries=2):
        """Logs in to Kindle Cloud Reader.
        Args:
            max_tries: The maximum number of login attempts that will be made.
        Raises:
            BrowserError: If method called when browser not at a signin URL.
            LoginError: If login unsuccessful after `max_tries` attempts.
        """
        # Precondition: the browser must already be on the Amazon signin page.
        if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL):
            raise BrowserError(
                'Current url "%s" is not a signin url ("%s")' %
                (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL))
        email_field_loaded = lambda br: br.find_elements_by_id('ap_email')
        self._wait().until(email_field_loaded)
        tries = 0
        while tries < max_tries:
            # Enter the username
            email_elem = self.find_element_by_id('ap_email')
            email_elem.clear()
            email_elem.send_keys(self._uname)
            # Enter the password
            pword_elem = self.find_element_by_id('ap_password')
            pword_elem.clear()
            pword_elem.send_keys(self._pword)
            # Closure over the live elements: re-checks the field contents,
            # guarding against the page swallowing keystrokes.
            def creds_entered(_):
                """Returns whether the credentials were properly entered."""
                email_ok = email_elem.get_attribute('value') == self._uname
                pword_ok = pword_elem.get_attribute('value') == self._pword
                return email_ok and pword_ok
            kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader'
            try:
                self._wait(5).until(creds_entered)
                self.find_element_by_id('signInSubmit-input').click()
                self._wait(5).until(kcr_page_loaded)
            except TimeoutException:
                # Either the form or the post-login page timed out: retry.
                tries += 1
            else:
                return
        # All attempts exhausted without reaching the reader page.
        raise LoginError
"def",
"_login",
"(",
"self",
",",
"max_tries",
"=",
"2",
")",
":",
"if",
"not",
"self",
".",
"current_url",
".",
"startswith",
"(",
"_KindleCloudReaderBrowser",
".",
"_SIGNIN_URL",
")",
":",
"raise",
"BrowserError",
"(",
"'Current url \"%s\" is not a signin url (... | 32.458333 | 21.770833 |
def _get_setup(self, result):
"""Internal method which process the results from the server."""
self.__devices = {}
if ('setup' not in result.keys() or
'devices' not in result['setup'].keys()):
raise Exception(
"Did not find device definition.")
for device_data in result['setup']['devices']:
device = Device(self, device_data)
self.__devices[device.url] = device
self.__location = result['setup']['location']
self.__gateway = result['setup']['gateways'] | [
"def",
"_get_setup",
"(",
"self",
",",
"result",
")",
":",
"self",
".",
"__devices",
"=",
"{",
"}",
"if",
"(",
"'setup'",
"not",
"in",
"result",
".",
"keys",
"(",
")",
"or",
"'devices'",
"not",
"in",
"result",
"[",
"'setup'",
"]",
".",
"keys",
"(",... | 37.2 | 15.866667 |
def _create_options(self, items):
    """Helper method to create options from list, or instance.

    Applies the preprocess method if available to create a uniform
    output keyed by option name.
    """
    normalized = coerce_to_list(items, self.preprocess)
    return OrderedDict((option.name, option) for option in normalized)
"def",
"_create_options",
"(",
"self",
",",
"items",
")",
":",
"return",
"OrderedDict",
"(",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
".",
"name",
",",
"x",
")",
",",
"coerce_to_list",
"(",
"items",
",",
"self",
".",
"preprocess",
")",
")",
")"
] | 38.75 | 17.375 |
def cmd_ip_geolocation(ip_address, verbose):
    """Get the geolocation of an IP adddress from https://ipapi.co/.
    Example:
    \b
    $ habu.ip.geolocation 8.8.8.8
    {
    "ip": "8.8.8.8",
    "city": "Mountain View",
    ...
    "asn": "AS15169",
    "org": "Google LLC"
    }
    """
    # Docstring kept verbatim: click renders it as the command help text.
    if verbose:
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        print("Looking up %s..." % ip_address, file=sys.stderr)
    results = geo_location(ip_address)
    if not results:
        print("[X] %s is not valid IPv4 address" % ip_address)
        return True
    print(json.dumps(results, indent=4))
    return True
"def",
"cmd_ip_geolocation",
"(",
"ip_address",
",",
"verbose",
")",
":",
"if",
"verbose",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"format",
"=",
"'%(message)s'",
")",
"print",
"(",
"\"Looking up %s...\"",
"%",
"ip... | 23.296296 | 22.592593 |
def active_path(context, pattern, css=None):
    """
    Highlight menu item based on path.

    Returns a css class if ``request.path`` matches the given ``pattern``,
    otherwise an empty string.

    :param pattern:
        Regex url pattern.
    :param css:
        Css class returned on a match; defaults to 'active' when unset.
    """
    request = context['request']
    if not re.search(pattern, request.path):
        return ''
    return css if css else 'active'
"def",
"active_path",
"(",
"context",
",",
"pattern",
",",
"css",
"=",
"None",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"#pattern = \"^\" + pattern + \"$\"",
"if",
"re",
".",
"search",
"(",
"pattern",
",",
"request",
".",
"path",
")",
":... | 25.944444 | 18.055556 |
def prepare_encrypted_request(self, session, endpoint, message):
        """
        Creates a prepared request to send to the server with an encrypted message
        and correct headers
        :param session: The handle of the session to prepare requests with
        :param endpoint: The endpoint/server to prepare requests to
        :param message: The unencrypted message to send to the server
        :return: A prepared request that has an encrypted message
        """
        host = urlsplit(endpoint).hostname
        # CredSSP payloads above the 16 KB threshold are split and encrypted
        # chunk by chunk under a multi-part content type.
        if self.protocol == 'credssp' and len(message) > self.SIXTEN_KB:
            content_type = 'multipart/x-multi-encrypted'
            encrypted_message = b''
            message_chunks = [message[i:i+self.SIXTEN_KB] for i in range(0, len(message), self.SIXTEN_KB)]
            for message_chunk in message_chunks:
                encrypted_chunk = self._encrypt_message(message_chunk, host)
                encrypted_message += encrypted_chunk
        else:
            # Small or non-CredSSP payloads are encrypted in one piece.
            content_type = 'multipart/encrypted'
            encrypted_message = self._encrypt_message(message, host)
        # Terminate the body with the closing MIME boundary marker.
        encrypted_message += self.MIME_BOUNDARY + b"--\r\n"
        request = requests.Request('POST', endpoint, data=encrypted_message)
        prepared_request = session.prepare_request(request)
        # Content-Length must reflect the encrypted body, not the plaintext.
        prepared_request.headers['Content-Length'] = str(len(prepared_request.body))
        prepared_request.headers['Content-Type'] = '{0};protocol="{1}";boundary="Encrypted Boundary"'\
            .format(content_type, self.protocol_string.decode())
        return prepared_request
"def",
"prepare_encrypted_request",
"(",
"self",
",",
"session",
",",
"endpoint",
",",
"message",
")",
":",
"host",
"=",
"urlsplit",
"(",
"endpoint",
")",
".",
"hostname",
"if",
"self",
".",
"protocol",
"==",
"'credssp'",
"and",
"len",
"(",
"message",
")",... | 50.354839 | 26.032258 |
def hpforest(self, data: ['SASdata', str] = None,
             freq: str = None,
             id: str = None,
             input: [str, list, dict] = None,
             save: str = None,
             score: [str, bool, 'SASdata'] = True,
             target: [str, list, dict] = None,
             procopts: str = None,
             stmtpassthrough: str = None,
             **kwargs: dict) -> 'SASresults':
    """
    Python method to call the HPFOREST procedure.

    Documentation link:
    https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf

    :param data: SASdata object or string. This parameter is required.
    :param freq: The freq variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param input: The input variable can be a string, list or dict type. It refers to the independent (predictor) variables. This parameter is required.
    :param save: The save variable can only be a string type.
    :param score: The score variable can be a string, bool or SASdata type (per the signature; default True).
    :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
"def",
"hpforest",
"(",
"self",
",",
"data",
":",
"[",
"'SASdata'",
",",
"str",
"]",
"=",
"None",
",",
"freq",
":",
"str",
"=",
"None",
",",
"id",
":",
"str",
"=",
"None",
",",
"input",
":",
"[",
"str",
",",
"list",
",",
"dict",
"]",
"=",
"No... | 57.37037 | 28.481481 |
def from_charmm(cls, path, positions=None, forcefield=None, strict=True, **kwargs):
    """
    Build a SystemHandler from a CHARMM PSF topology at `path`.

    Parameters
    ----------
    path : str
        Path to the PSF file.
    positions : optional
        Initial coordinates; required unless ``strict`` is False.
    forcefield : list of str
        Paths to CHARMM parameter files, such as *.par or *.str;
        required unless ``strict`` is False.
    strict : bool
        When True, a missing ``forcefield`` or ``positions`` raises
        ValueError.

    Returns
    -------
    psf : SystemHandler
        SystemHandler with topology. CHARMM parameters are embedded in
        the `master` attribute.
    """
    psf = CharmmPsfFile(path)
    if strict:
        # Fail early with the same messages callers already expect.
        if forcefield is None:
            raise ValueError('PSF files require key `forcefield`.')
        if positions is None:
            raise ValueError('PSF files require key `positions`.')
    psf.parmset = CharmmParameterSet(*forcefield)
    psf.loadParameters(psf.parmset)
    return cls(master=psf, topology=psf.topology, positions=positions,
               path=path, **kwargs)
"def",
"from_charmm",
"(",
"cls",
",",
"path",
",",
"positions",
"=",
"None",
",",
"forcefield",
"=",
"None",
",",
"strict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"psf",
"=",
"CharmmPsfFile",
"(",
"path",
")",
"if",
"strict",
"and",
"forcef... | 38.153846 | 19.923077 |
def init():
    '''Initialise a WSGI application to be loaded by uWSGI.'''
    # Pull settings from swaggery.ini in the current working directory.
    ini_path = os.path.realpath(os.path.join(os.getcwd(), 'swaggery.ini'))
    parser = configparser.RawConfigParser(allow_no_value=True)
    parser.read(ini_path)
    level_name = parser.get('application', 'logging_level').upper()
    directories = list(parser['apis'])
    # do_checks is the inverse of the "disable" flag in the ini file.
    checks_enabled = parser.get(
        'application', 'disable_boot_checks').lower() == 'false'
    # Configure logging before bootstrapping so debug lines are honoured.
    log.setLevel(getattr(logging, level_name))
    log.debug('Log level set to {}'.format(level_name))
    log.debug('Exploring directories: {}'.format(directories))
    application = Swaggery(api_dirs=directories, do_checks=checks_enabled)
    return application
"def",
"init",
"(",
")",
":",
"# Load values from config file",
"config_file",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'swaggery.ini'",
")",
")",
"config",
"=",
"configparser... | 45.882353 | 17.647059 |
def create(cls, session, record, imported=False, auto_reply=False):
    """Create a conversation via the API.

    A conversation may not be created with more than 100 threads; the
    API responds with HTTP 412 if that limit is exceeded.

    Args:
        session (requests.sessions.Session): Authenticated session.
        record (helpscout.models.Conversation): The conversation to be
            created.
        imported (bool, optional): When ``True``, the conversation is
            created for historical purposes (e.g. migrating from another
            platform) and no outgoing emails or notifications are
            generated.
        auto_reply (bool): When ``True``, an auto reply is sent on
            creation, provided the conversation contains at least one
            ``customer`` thread.

    Returns:
        helpscout.models.Conversation: Newly created conversation.
    """
    parent = super(Conversations, cls)
    return parent.create(session, record, imported=imported,
                         auto_reply=auto_reply)
"def",
"create",
"(",
"cls",
",",
"session",
",",
"record",
",",
"imported",
"=",
"False",
",",
"auto_reply",
"=",
"False",
")",
":",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"create",
"(",
"session",
",",
"record",
",",
"imported... | 45.133333 | 25.133333 |
def index(self, value, start=None, stop=None):
    """
    Return the index of the first occurrence of *value*.

    If *start* or *stop* are provided, return the smallest
    index such that ``s[index] == value`` and ``start <= index < stop``.

    Raises:
        ValueError: if *value* is not present in the selected range.
    """
    def index_trans(pipe):
        # Compare against None explicitly: the previous ``start or 0`` /
        # ``stop or len_self`` idiom silently treated stop=0 as "no stop".
        len_self, normal_start = self._normalize_index(
            0 if start is None else start, pipe)
        __, normal_stop = self._normalize_index(
            len_self if stop is None else stop, pipe)
        for i, v in enumerate(self.__iter__(pipe=pipe)):
            if v == value:
                if i < normal_start:
                    continue
                if i >= normal_stop:
                    # Past the window; later matches cannot qualify.
                    break
                return i
        raise ValueError
    return self._transaction(index_trans)
"def",
"index",
"(",
"self",
",",
"value",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
")",
":",
"def",
"index_trans",
"(",
"pipe",
")",
":",
"len_self",
",",
"normal_start",
"=",
"self",
".",
"_normalize_index",
"(",
"start",
"or",
"0",
","... | 40.947368 | 14.842105 |
def create_model(modelname, fields, indexes=None, basemodel=None, **props):
    """
    Create a Model class dynamically.

    :param modelname: name of the new model; the generated class is named
        ``modelname.title()`` and the table name defaults to ``modelname``.
    :param fields: field definitions, formatted like::

            [{'name': name, 'type': type, ...}, ...]

        ``type`` should be a string, e.g. ``'str'``, ``'int'``; the
        remaining keys are passed to the Property constructor for that
        field type.
    :param indexes: multi-field indexes (a single-field index can be set
        directly with ``index=True`` on the field). Format::

            [{'name': name, 'fields': [...], ...}]

        e.g. ``[{'name': 'audit_idx', 'fields': ['table_id', 'obj_id']}]``;
        extra keys are forwarded to ``Index``.
    :param basemodel: base class for the new Model (a class object or a
        dotted import string), so the new Model inherits its methods.
    :param props: extra Model attributes, such as ``__mapping_only__``
        or ``__replace__``.
    :return: the newly created Model class.
    """
    assert not props or isinstance(props, dict)
    assert not indexes or isinstance(indexes, list)
    props = SortedDict(props or {})
    props['__dynamic__'] = True
    props['__config__'] = False
    for field_def in fields:
        kwargs = field_def.copy()
        name = kwargs.pop('name')
        _type = kwargs.pop('type')
        # Drop keys starting with '_'. Iterate a snapshot: popping while
        # iterating kwargs.keys() raises RuntimeError on Python 3.
        for k in list(kwargs.keys()):
            if k.startswith('_'):
                kwargs.pop(k, None)
        field_type = get_field_type(_type)
        props[name] = field_type(**kwargs)
    # Resolve the base class; a string is imported via import_attr.
    model = import_attr(basemodel) if basemodel else Model
    cls = type(str(modelname.title()), (model,), props)
    tablename = props.get('__tablename__', modelname)
    set_model(cls, tablename, appname=__name__, model_path='')
    get_model(modelname, signal=False, reload=True)
    for index_def in (indexes or []):
        kwargs = index_def.copy()
        name = kwargs.pop('name')
        index_fields = kwargs.pop('fields')
        # Same snapshot-iteration fix as above.
        for k in list(kwargs.keys()):
            if k.startswith('_'):
                kwargs.pop(k, None)
        if not isinstance(index_fields, (list, tuple)):
            raise ValueError("Index value format is not right, the value is %r" % indexes)
        columns = [cls.c[y] for y in index_fields]
        Index(name, *columns, **kwargs)
    return cls
"def",
"create_model",
"(",
"modelname",
",",
"fields",
",",
"indexes",
"=",
"None",
",",
"basemodel",
"=",
"None",
",",
"*",
"*",
"props",
")",
":",
"assert",
"not",
"props",
"or",
"isinstance",
"(",
"props",
",",
"dict",
")",
"assert",
"not",
"indexe... | 31.103448 | 20.781609 |
def log_error(error, result):
    """Record an error event together with its associated result."""
    _log(TYPE_CODES.ERROR, {'error': error, 'result': result})
"def",
"log_error",
"(",
"error",
",",
"result",
")",
":",
"p",
"=",
"{",
"'error'",
":",
"error",
",",
"'result'",
":",
"result",
"}",
"_log",
"(",
"TYPE_CODES",
".",
"ERROR",
",",
"p",
")"
] | 25.2 | 4.6 |
def prefix_indent(prefix, textblock, later_prefix=' '):
    """
    Prefix and indent all lines in *textblock*.

    *prefix* is prepended to the first line. *later_prefix* is prepended
    to all subsequent lines; if it is a single character it is repeated
    to match the length of *prefix*. The result always ends with a
    newline.
    """
    lines = textblock.split('\n')
    if len(later_prefix) == 1:
        # Repeat the character so continuation lines align with the first
        # line. (Previously this substituted spaces, contradicting the
        # documented behavior; the default ' ' is unaffected.)
        later_prefix = later_prefix * len(prefix)
    out = prefix + lines[0] + '\n'
    out = out + '\n'.join(later_prefix + rest for rest in lines[1:])
    if out[-1] != '\n':
        return out + '\n'
    return out
"def",
"prefix_indent",
"(",
"prefix",
",",
"textblock",
",",
"later_prefix",
"=",
"' '",
")",
":",
"textblock",
"=",
"textblock",
".",
"split",
"(",
"'\\n'",
")",
"line",
"=",
"prefix",
"+",
"textblock",
"[",
"0",
"]",
"+",
"'\\n'",
"if",
"len",
"(",
... | 34.470588 | 15.647059 |
async def is_object_synced_to_cn(self, client, pid):
    """Return True if the object identified by ``pid`` exists on the CN.

    CNRead.describe() is used because it is a lightweight HTTP HEAD
    request. Assumes the connection is authenticated with at least read
    access to the object, if it exists.
    """
    try:
        await client.describe(pid)
        return True
    except d1_common.types.exceptions.DataONEException:
        return False
"async",
"def",
"is_object_synced_to_cn",
"(",
"self",
",",
"client",
",",
"pid",
")",
":",
"try",
":",
"await",
"client",
".",
"describe",
"(",
"pid",
")",
"except",
"d1_common",
".",
"types",
".",
"exceptions",
".",
"DataONEException",
":",
"return",
"Fa... | 37.642857 | 24.142857 |
def cleanup():
    """Close every tracked socket at exit, logging (never raising) failures."""
    # Snapshot the registry so closing a socket cannot disturb iteration.
    for sock in tuple(Wdb._sockets):
        try:
            sock.close()
        except Exception:
            log.warn('Error in cleanup', exc_info=True)
"def",
"cleanup",
"(",
")",
":",
"for",
"sck",
"in",
"list",
"(",
"Wdb",
".",
"_sockets",
")",
":",
"try",
":",
"sck",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"log",
".",
"warn",
"(",
"'Error in cleanup'",
",",
"exc_info",
"=",
"True",
... | 28.285714 | 15.285714 |
def create(self, properties):
    """
    Create and configure a storage group.

    The new storage group is associated with the CPC identified by the
    `cpc-uri` input property.

    Authorization requirements:

    * Object-access permission to the CPC that will be associated with
      the new storage group.
    * Task permission to the "Configure Storage - System Programmer" task.

    Parameters:

      properties (dict): Initial property values, as defined in section
        'Request body contents' of 'Create Storage Group' in the
        :term:`HMC API` book. The 'cpc-uri' property is required and
        identifies the CPC the new storage group belongs to.

    Returns:

      :class:`~zhmcclient.StorageGroup`: Resource object for the new
        storage group, with its 'object-uri' property set as returned by
        the HMC plus the input properties.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    body = {} if properties is None else properties
    result = self.session.post(self._base_uri, body=body)
    # Start from the input properties; on any (unexpected) overlap the
    # values returned by the HMC win.
    merged = copy.deepcopy(body)
    merged.update(result)
    sg_name = merged.get(self._name_prop, None)
    sg_uri = merged[self._uri_prop]
    storage_group = StorageGroup(self, sg_uri, sg_name, merged)
    self._name_uri_cache.update(sg_name, sg_uri)
    return storage_group
"def",
"create",
"(",
"self",
",",
"properties",
")",
":",
"if",
"properties",
"is",
"None",
":",
"properties",
"=",
"{",
"}",
"result",
"=",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"_base_uri",
",",
"body",
"=",
"properties",
")",
"# ... | 36.02 | 21.5 |
def end(self, sql=None):
    """Commit the current transaction.

    Delegates to the underlying connection's ``end`` method when it has
    one; otherwise falls back to sending a plain ``end`` query.
    """
    self._transaction = False
    try:
        end_method = self._con.end
    except AttributeError:
        # Connection has no native end(); issue the statement directly.
        return self._con.query(sql or 'end')
    return end_method(sql=sql) if sql else end_method()
"def",
"end",
"(",
"self",
",",
"sql",
"=",
"None",
")",
":",
"self",
".",
"_transaction",
"=",
"False",
"try",
":",
"end",
"=",
"self",
".",
"_con",
".",
"end",
"except",
"AttributeError",
":",
"return",
"self",
".",
"_con",
".",
"query",
"(",
"sq... | 27.916667 | 13.833333 |
def get_arguments(args):
    """Parse the command line.

    :param args: raw argument list, typically ``sys.argv[1:]``
    :return: an ``argparse.Namespace`` holding the parsed options
    """
    usage = "%(prog)s [arguments] [image files]"
    programs_str = ', '.join([prog.__name__ for prog in PROGRAMS])
    description = "Uses "+programs_str+" if they are on the path."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    # NOTE: two help strings below are fixed — a missing space after
    # "the" (rendered as "theimage") and the "specifed" typo.
    parser.add_argument("-r", "--recurse", action="store_true",
                        dest="recurse", default=0,
                        help="Recurse down through directories ignoring the "
                        "image file arguments on the command line")
    parser.add_argument("-v", "--verbose", action="count",
                        dest="verbose", default=0,
                        help="Display more output. -v (default) and -vv "
                        "(noisy)")
    parser.add_argument("-Q", "--quiet", action="store_const",
                        dest="verbose", const=-1,
                        help="Display little to no output")
    parser.add_argument("-a", "--enable_advpng", action="store_true",
                        dest="advpng", default=0,
                        help="Optimize with advpng (disabled by default)")
    parser.add_argument("-c", "--comics", action="store_true",
                        dest="comics", default=0,
                        help="Also optimize comic book archives (cbz & cbr)")
    parser.add_argument("-f", "--formats", action="store", dest="formats",
                        default=DEFAULT_FORMATS,
                        help="Only optimize images of the specified '{}' "
                             "delimited formats from: {}".format(
                                 FORMAT_DELIMETER,
                                 ', '.join(sorted(ALL_FORMATS))))
    parser.add_argument("-O", "--disable_optipng", action="store_false",
                        dest="optipng", default=1,
                        help="Do not optimize with optipng")
    parser.add_argument("-P", "--disable_pngout", action="store_false",
                        dest="pngout", default=1,
                        help="Do not optimize with pngout")
    parser.add_argument("-J", "--disable_jpegrescan", action="store_false",
                        dest="jpegrescan", default=1,
                        help="Do not optimize with jpegrescan")
    parser.add_argument("-E", "--disable_progressive", action="store_false",
                        dest="jpegtran_prog", default=1,
                        help="Don't try to reduce size by making "
                        "progressive JPEGs with jpegtran")
    parser.add_argument("-Z", "--disable_mozjpeg", action="store_false",
                        dest="mozjpeg", default=1,
                        help="Do not optimize with mozjpeg")
    parser.add_argument("-T", "--disable_jpegtran", action="store_false",
                        dest="jpegtran", default=1,
                        help="Do not optimize with jpegtran")
    parser.add_argument("-G", "--disable_gifsicle", action="store_false",
                        dest="gifsicle", default=1,
                        help="disable optimizing animated GIFs")
    parser.add_argument("-Y", "--disable_convert_type", action="store_const",
                        dest="to_png_formats",
                        const=png.FORMATS,
                        default=png.CONVERTABLE_FORMATS,
                        help="Do not convert other lossless formats like "
                             "{} to PNG when optimizing. By default, {}"
                             " does convert these formats to PNG".format(
                                 ', '.join(png.LOSSLESS_FORMATS),
                                 PROGRAM_NAME))
    parser.add_argument("-S", "--disable_follow_symlinks",
                        action="store_false",
                        dest="follow_symlinks", default=1,
                        help="disable following symlinks for files and "
                        "directories")
    parser.add_argument("-b", "--bigger", action="store_true",
                        dest="bigger", default=0,
                        help="Save optimized files that are larger than "
                        "the originals")
    parser.add_argument("-t", "--record_timestamp", action="store_true",
                        dest="record_timestamp", default=0,
                        help="Store the time of the optimization of full "
                        "directories in directory local dotfiles.")
    parser.add_argument("-D", "--optimize_after", action="store",
                        dest="optimize_after", default=None,
                        help="only optimize files after the specified "
                        "timestamp. Supercedes -t")
    parser.add_argument("-N", "--noop", action="store_true",
                        dest="test", default=0,
                        help="Do not replace files with optimized versions")
    parser.add_argument("-l", "--list", action="store_true",
                        dest="list_only", default=0,
                        help="Only list files that would be optimized")
    parser.add_argument("-V", "--version", action="version",
                        version=__version__,
                        help="display the version number")
    parser.add_argument("-M", "--destroy_metadata", action="store_true",
                        dest="destroy_metadata", default=0,
                        help="*Destroy* metadata like EXIF and JFIF")
    parser.add_argument("paths", metavar="path", type=str, nargs="+",
                        help="File or directory paths to optimize")
    parser.add_argument("-j", "--jobs", type=int, action="store",
                        dest="jobs", default=multiprocessing.cpu_count(),
                        help="Number of parallel jobs to run simultaneously.")
    return parser.parse_args(args)
"def",
"get_arguments",
"(",
"args",
")",
":",
"usage",
"=",
"\"%(prog)s [arguments] [image files]\"",
"programs_str",
"=",
"', '",
".",
"join",
"(",
"[",
"prog",
".",
"__name__",
"for",
"prog",
"in",
"PROGRAMS",
"]",
")",
"description",
"=",
"\"Uses \"",
"+",... | 59.541667 | 21.072917 |
def main():
    """
    Main program entry point.

    Parses the command line, queries the Google Books-style API via
    ``Book``, and prints title/author/ISBN for each returned volume.
    Raises AttributeError when the response contains no items.
    """
    args = command_line()
    # TODO: Decouple Book interface and implementation
    query = Book(
        title=args.title,
        author=args.author,
        max_results=args.max,
        language_code=args.language,
        fields=('title', 'authors', 'imageLinks', 'categories', 'description'),
    )
    if args.verbose:
        print('Request URL:')
        print(query.url)
        print(query.__dict__)
    # Temporary Display Interface
    # NOTE(review): query.json is presumably the parsed API response dict
    # with an 'items' list of volumes — confirm against the Book class.
    books = query.json.get('items', None)
    if not books:
        raise AttributeError('Web Request Failed')
    for book in books:
        info = book['volumeInfo']
        # Retrieve ISBN/Reference Info
        if 'industryIdentifiers' in info:
            identifiers = dict([(ref['type'], ref['identifier']) for ref in info['industryIdentifiers']])
            # Prefer ISBN 13 over ISBN 10; otherwise fall back to an
            # arbitrary remaining identifier via popitem().
            if 'ISBN_13' in identifiers:
                isbn_id, isbn = 'ISBN_13', identifiers['ISBN_13']
            else:
                isbn_id, isbn = identifiers.popitem()
        else:
            # No identifiers at all: print placeholders.
            isbn_id, isbn = 'ISBN_##', 'N/A'
        # Format/Print
        display = 'Title: {title}\nAuthor: {authors}\n{isbn_id}: {isbn}\n'.format(
            title=info['title'],
            authors=', '.join(info.get('authors', ['N/A'])),
            isbn_id=isbn_id,
            isbn=isbn,
        )
        print(display)
"def",
"main",
"(",
")",
":",
"args",
"=",
"command_line",
"(",
")",
"# TODO: Decouple Book interface and implementation",
"query",
"=",
"Book",
"(",
"title",
"=",
"args",
".",
"title",
",",
"author",
"=",
"args",
".",
"author",
",",
"max_results",
"=",
"arg... | 27.64 | 20.44 |
def get_plugin_command(plugin_name, command_name, conn=None):
    """
    Query a plugin's table for a single command by name.

    :param plugin_name: <str> PluginName (table to query)
    :param command_name: <str> CommandName to match
    :param conn: optional database connection handle
    :return: <dict> the matching command document, or None if absent
    """
    cursor = RPX.table(plugin_name).filter(
        {COMMAND_NAME_KEY: command_name}).run(conn)
    found = None
    for found in cursor:
        pass  # exhaust the cursor; 'found' keeps the last document
    return found
"def",
"get_plugin_command",
"(",
"plugin_name",
",",
"command_name",
",",
"conn",
"=",
"None",
")",
":",
"commands",
"=",
"RPX",
".",
"table",
"(",
"plugin_name",
")",
".",
"filter",
"(",
"{",
"COMMAND_NAME_KEY",
":",
"command_name",
"}",
")",
".",
"run",... | 31.5 | 12.928571 |
def shutit_method_scope(func):
    """Notifies the ShutIt object whenever we call a shutit module method.

    This allows setting values for the 'scope' of a function. The wrapper
    preserves the wrapped function's metadata (``__name__``, ``__doc__``)
    via functools.wraps so logging and introspection stay accurate.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(self, shutit):
        """Wrapper to call a shutit module method, notifying the ShutIt object.
        """
        ret = func(self, shutit)
        return ret
    return wrapper
"def",
"shutit_method_scope",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"shutit",
")",
":",
"\"\"\"Wrapper to call a shutit module method, notifying the ShutIt object.\n\t\t\"\"\"",
"ret",
"=",
"func",
"(",
"self",
",",
"shutit",
")",
"return",
"ret",... | 32.1 | 10.8 |
def convertloc(candsfile, candloc, memory_limit):
    """Recompute a candidate location after re-segmenting under memory_limit.

    For a given state and location that are too bulky, rebuild the
    pipeline state with the new memory bound and map the candidate's
    absolute integration into the new segmentation.

    :param candsfile: path to a pickled state dict (opened in text mode)
    :param candloc: (scan, segment, candint, dmind, dtind, beamnum)
    :param memory_limit: memory bound forwarded to rt.set_pipeline
    :return: [scan, segment_new, candint_new, dmind, dtind, beamnum]
    """
    scan, segment, candint, dmind, dtind, beamnum = candloc
    # set up state and find absolute integration of candidate
    # NOTE(review): the file handle from open() is never closed, and text
    # mode 'r' for pickle data suggests Python 2 — confirm.
    d0 = pickle.load(open(candsfile, 'r'))
    filename = os.path.basename(d0['filename'])
    readints0 = d0['readints']
    # Integrations skipped before this segment: seconds from start time
    # divided by integration time. Assumes 'segmenttimes' is a numpy
    # array of MJD days (hence .astype) — TODO confirm.
    nskip0 = (24*3600*(d0['segmenttimes'][segment, 0]
                       - d0['starttime_mjd'])
              / d0['inttime']).astype(int)
    candint_abs = nskip0 + candint
    logger.debug('readints0 {} nskip0 {}, candint_abs {}'.format(readints0, nskip0, candint_abs))
    # clean up d0 and resubmit to set_pipeline
    # NOTE(review): popping while iterating d0.keys() raises RuntimeError
    # on Python 3 — Python 2 semantics assumed here.
    params = pp.Params()
    for key in d0.keys():
        if not hasattr(params, key):
            _ = d0.pop(key)
    d0['logfile'] = False
    d0['npix'] = 0
    d0['uvres'] = 0
    d0['nsegments'] = 0
    d0['memory_limit'] = memory_limit
    d = rt.set_pipeline(os.path.basename(filename), scan, **d0)
    # find best segment for new state
    readints = d['readints']
    nskips = [(24*3600*(d['segmenttimes'][segment, 0]
                        - d['starttime_mjd']) / d['inttime']).astype(int)
              for segment in range(d['nsegments'])]
    # Segments that start strictly before the candidate's absolute
    # integration; pick the latest such segment (smallest positive offset).
    posind = [i for i in range(len(nskips)) if candint_abs - nskips[i] > 0]
    segment_new = [seg for seg in posind if candint_abs - nskips[seg] == min([candint_abs - nskips[i] for i in posind])][0]
    candint_new = candint_abs - nskips[segment_new]
    logger.debug('nskips {}, segment_new {}'.format(nskips, segment_new))
    return [scan, segment_new, candint_new, dmind, dtind, beamnum]
"def",
"convertloc",
"(",
"candsfile",
",",
"candloc",
",",
"memory_limit",
")",
":",
"scan",
",",
"segment",
",",
"candint",
",",
"dmind",
",",
"dtind",
",",
"beamnum",
"=",
"candloc",
"# set up state and find absolute integration of candidate",
"d0",
"=",
"pickl... | 39.829268 | 21.560976 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.