text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
async def can_place(self, building: Union[AbilityData, AbilityId, UnitTypeId], position: Point2) -> bool:
"""Tests if a building can be placed in the given location."""
assert isinstance(building, (AbilityData, AbilityId, UnitTypeId))
if isinstance(building, UnitTypeId):
building = self._game_data.units[building.value].creation_ability
elif isinstance(building, AbilityId):
building = self._game_data.abilities[building.value]
r = await self._client.query_building_placement(building, [position])
return r[0] == ActionResult.Success | [
"async",
"def",
"can_place",
"(",
"self",
",",
"building",
":",
"Union",
"[",
"AbilityData",
",",
"AbilityId",
",",
"UnitTypeId",
"]",
",",
"position",
":",
"Point2",
")",
"->",
"bool",
":",
"assert",
"isinstance",
"(",
"building",
",",
"(",
"AbilityData",
",",
"AbilityId",
",",
"UnitTypeId",
")",
")",
"if",
"isinstance",
"(",
"building",
",",
"UnitTypeId",
")",
":",
"building",
"=",
"self",
".",
"_game_data",
".",
"units",
"[",
"building",
".",
"value",
"]",
".",
"creation_ability",
"elif",
"isinstance",
"(",
"building",
",",
"AbilityId",
")",
":",
"building",
"=",
"self",
".",
"_game_data",
".",
"abilities",
"[",
"building",
".",
"value",
"]",
"r",
"=",
"await",
"self",
".",
"_client",
".",
"query_building_placement",
"(",
"building",
",",
"[",
"position",
"]",
")",
"return",
"r",
"[",
"0",
"]",
"==",
"ActionResult",
".",
"Success"
] | 49.833333 | 27.333333 |
def transform_grid_to_reference_frame(self, grid):
"""Transform a grid of (y,x) coordinates to the reference frame of the profile, including a translation to \
its centre.
Parameters
----------
grid : ndarray
The (y, x) coordinates in the original reference frame of the grid.
"""
transformed = np.subtract(grid, self.centre)
return transformed.view(TransformedGrid) | [
"def",
"transform_grid_to_reference_frame",
"(",
"self",
",",
"grid",
")",
":",
"transformed",
"=",
"np",
".",
"subtract",
"(",
"grid",
",",
"self",
".",
"centre",
")",
"return",
"transformed",
".",
"view",
"(",
"TransformedGrid",
")"
] | 39.363636 | 17.454545 |
def pointInsidePolygon(x, y, poly):
"""
Determine if a point is inside a given polygon or not
Polygon is a list of (x,y) pairs.
[code taken from: http://www.ariel.com.au/a/python-point-int-poly.html]
let's make an easy square:
>>> poly = [ (0,0),\
(1,0),\
(1,1),\
(0,1) ]
>>> pointInsidePolygon(0.5,0.5, poly)
True
>>> pointInsidePolygon(1.5,1.5, poly)
False
"""
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside | [
"def",
"pointInsidePolygon",
"(",
"x",
",",
"y",
",",
"poly",
")",
":",
"n",
"=",
"len",
"(",
"poly",
")",
"inside",
"=",
"False",
"p1x",
",",
"p1y",
"=",
"poly",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
"+",
"1",
")",
":",
"p2x",
",",
"p2y",
"=",
"poly",
"[",
"i",
"%",
"n",
"]",
"if",
"y",
">",
"min",
"(",
"p1y",
",",
"p2y",
")",
":",
"if",
"y",
"<=",
"max",
"(",
"p1y",
",",
"p2y",
")",
":",
"if",
"x",
"<=",
"max",
"(",
"p1x",
",",
"p2x",
")",
":",
"if",
"p1y",
"!=",
"p2y",
":",
"xinters",
"=",
"(",
"y",
"-",
"p1y",
")",
"*",
"(",
"p2x",
"-",
"p1x",
")",
"/",
"(",
"p2y",
"-",
"p1y",
")",
"+",
"p1x",
"if",
"p1x",
"==",
"p2x",
"or",
"x",
"<=",
"xinters",
":",
"inside",
"=",
"not",
"inside",
"p1x",
",",
"p1y",
"=",
"p2x",
",",
"p2y",
"return",
"inside"
] | 28.28125 | 16.15625 |
def radiance_to_bt(arr, wc_, a__, b__):
"""Convert to BT.
"""
return a__ + b__ * (C2 * wc_ / (da.log(1 + (C1 * (wc_ ** 3) / arr)))) | [
"def",
"radiance_to_bt",
"(",
"arr",
",",
"wc_",
",",
"a__",
",",
"b__",
")",
":",
"return",
"a__",
"+",
"b__",
"*",
"(",
"C2",
"*",
"wc_",
"/",
"(",
"da",
".",
"log",
"(",
"1",
"+",
"(",
"C1",
"*",
"(",
"wc_",
"**",
"3",
")",
"/",
"arr",
")",
")",
")",
")"
] | 35 | 8.5 |
def diff_sevice_by_text(service_name, service, environment, cf_client, repo_root):
"""
Render the local template and compare it to the template that was last
applied in the target environment.
"""
global ret_code
logger.info('Investigating textual diff for `%s`:`%s` in environment `%s`',
service['type'], service_name, environment)
try:
local_template = render_local_template(service_name, environment,
repo_root, service['template_file'])
current_template = fetch_current_cloudformation_template(
service_name, environment, cf_client)
except Exception as e:
ret_code = 2
logger.error(e)
return
ret = diff_string_templates(local_template, current_template)
if not ret:
logger.info('Deployed service `%s` in environment `%s` matches '
'the local template.', service_name, environment)
else:
ret_code = 1
logger.error('Service `%s` in environment `%s` differs from '
'the local template.',
service_name, environment)
logger.info('Change details:\n %s', indentify(ret)) | [
"def",
"diff_sevice_by_text",
"(",
"service_name",
",",
"service",
",",
"environment",
",",
"cf_client",
",",
"repo_root",
")",
":",
"global",
"ret_code",
"logger",
".",
"info",
"(",
"'Investigating textual diff for `%s`:`%s` in environment `%s`'",
",",
"service",
"[",
"'type'",
"]",
",",
"service_name",
",",
"environment",
")",
"try",
":",
"local_template",
"=",
"render_local_template",
"(",
"service_name",
",",
"environment",
",",
"repo_root",
",",
"service",
"[",
"'template_file'",
"]",
")",
"current_template",
"=",
"fetch_current_cloudformation_template",
"(",
"service_name",
",",
"environment",
",",
"cf_client",
")",
"except",
"Exception",
"as",
"e",
":",
"ret_code",
"=",
"2",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"ret",
"=",
"diff_string_templates",
"(",
"local_template",
",",
"current_template",
")",
"if",
"not",
"ret",
":",
"logger",
".",
"info",
"(",
"'Deployed service `%s` in environment `%s` matches '",
"'the local template.'",
",",
"service_name",
",",
"environment",
")",
"else",
":",
"ret_code",
"=",
"1",
"logger",
".",
"error",
"(",
"'Service `%s` in environment `%s` differs from '",
"'the local template.'",
",",
"service_name",
",",
"environment",
")",
"logger",
".",
"info",
"(",
"'Change details:\\n %s'",
",",
"indentify",
"(",
"ret",
")",
")"
] | 37.53125 | 25.09375 |
def calculate_extents(labels, indexes):
"""Return the area of each object divided by the area of its bounding box"""
fix = fixup_scipy_ndimage_result
areas = fix(scind.sum(np.ones(labels.shape),labels,np.array(indexes, dtype=np.int32)))
y,x = np.mgrid[0:labels.shape[0],0:labels.shape[1]]
xmin = fix(scind.minimum(x, labels, indexes))
xmax = fix(scind.maximum(x, labels, indexes))
ymin = fix(scind.minimum(y, labels, indexes))
ymax = fix(scind.maximum(y, labels, indexes))
bbareas = (xmax-xmin+1)*(ymax-ymin+1)
return areas / bbareas | [
"def",
"calculate_extents",
"(",
"labels",
",",
"indexes",
")",
":",
"fix",
"=",
"fixup_scipy_ndimage_result",
"areas",
"=",
"fix",
"(",
"scind",
".",
"sum",
"(",
"np",
".",
"ones",
"(",
"labels",
".",
"shape",
")",
",",
"labels",
",",
"np",
".",
"array",
"(",
"indexes",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
")",
")",
"y",
",",
"x",
"=",
"np",
".",
"mgrid",
"[",
"0",
":",
"labels",
".",
"shape",
"[",
"0",
"]",
",",
"0",
":",
"labels",
".",
"shape",
"[",
"1",
"]",
"]",
"xmin",
"=",
"fix",
"(",
"scind",
".",
"minimum",
"(",
"x",
",",
"labels",
",",
"indexes",
")",
")",
"xmax",
"=",
"fix",
"(",
"scind",
".",
"maximum",
"(",
"x",
",",
"labels",
",",
"indexes",
")",
")",
"ymin",
"=",
"fix",
"(",
"scind",
".",
"minimum",
"(",
"y",
",",
"labels",
",",
"indexes",
")",
")",
"ymax",
"=",
"fix",
"(",
"scind",
".",
"maximum",
"(",
"y",
",",
"labels",
",",
"indexes",
")",
")",
"bbareas",
"=",
"(",
"xmax",
"-",
"xmin",
"+",
"1",
")",
"*",
"(",
"ymax",
"-",
"ymin",
"+",
"1",
")",
"return",
"areas",
"/",
"bbareas"
] | 51.181818 | 11 |
def _traverse_toc(pdf_base, visitor_fn, log):
"""
Walk the table of contents, calling visitor_fn() at each node
The /Outlines data structure is a messy data structure, but rather than
navigating hierarchically we just track unique nodes. Enqueue nodes when
we find them, and never visit them again. set() is awesome. We look for
the two types of object in the table of contents that can be page bookmarks
and update the page entry.
"""
visited = set()
queue = set()
link_keys = ('/Parent', '/First', '/Last', '/Prev', '/Next')
if not '/Outlines' in pdf_base.root:
return
queue.add(pdf_base.root.Outlines.objgen)
while queue:
objgen = queue.pop()
visited.add(objgen)
node = pdf_base.get_object(objgen)
log.debug('fix toc: exploring outline entries at %r', objgen)
# Enumerate other nodes we could visit from here
for key in link_keys:
if key not in node:
continue
item = node[key]
if not item.is_indirect:
# Direct references are not allowed here, but it's not clear
# what we should do if we find any. Removing them is an option:
# node[key] = pdf_base.make_indirect(None)
continue
objgen = item.objgen
if objgen not in visited:
queue.add(objgen)
if visitor_fn:
visitor_fn(pdf_base, node, log) | [
"def",
"_traverse_toc",
"(",
"pdf_base",
",",
"visitor_fn",
",",
"log",
")",
":",
"visited",
"=",
"set",
"(",
")",
"queue",
"=",
"set",
"(",
")",
"link_keys",
"=",
"(",
"'/Parent'",
",",
"'/First'",
",",
"'/Last'",
",",
"'/Prev'",
",",
"'/Next'",
")",
"if",
"not",
"'/Outlines'",
"in",
"pdf_base",
".",
"root",
":",
"return",
"queue",
".",
"add",
"(",
"pdf_base",
".",
"root",
".",
"Outlines",
".",
"objgen",
")",
"while",
"queue",
":",
"objgen",
"=",
"queue",
".",
"pop",
"(",
")",
"visited",
".",
"add",
"(",
"objgen",
")",
"node",
"=",
"pdf_base",
".",
"get_object",
"(",
"objgen",
")",
"log",
".",
"debug",
"(",
"'fix toc: exploring outline entries at %r'",
",",
"objgen",
")",
"# Enumerate other nodes we could visit from here",
"for",
"key",
"in",
"link_keys",
":",
"if",
"key",
"not",
"in",
"node",
":",
"continue",
"item",
"=",
"node",
"[",
"key",
"]",
"if",
"not",
"item",
".",
"is_indirect",
":",
"# Direct references are not allowed here, but it's not clear",
"# what we should do if we find any. Removing them is an option:",
"# node[key] = pdf_base.make_indirect(None)",
"continue",
"objgen",
"=",
"item",
".",
"objgen",
"if",
"objgen",
"not",
"in",
"visited",
":",
"queue",
".",
"add",
"(",
"objgen",
")",
"if",
"visitor_fn",
":",
"visitor_fn",
"(",
"pdf_base",
",",
"node",
",",
"log",
")"
] | 34.52381 | 20.52381 |
def save_imgs(x, fname):
"""Helper method to save a grid of images to a PNG file.
Args:
x: A numpy array of shape [n_images, height, width].
fname: The filename to write to (including extension).
"""
n = x.shape[0]
fig = figure.Figure(figsize=(n, 1), frameon=False)
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(1, n, i+1)
ax.imshow(x[i].squeeze(),
interpolation="none",
cmap=cm.get_cmap("binary"))
ax.axis("off")
canvas.print_figure(fname, format="png")
print("saved %s" % fname) | [
"def",
"save_imgs",
"(",
"x",
",",
"fname",
")",
":",
"n",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"fig",
"=",
"figure",
".",
"Figure",
"(",
"figsize",
"=",
"(",
"n",
",",
"1",
")",
",",
"frameon",
"=",
"False",
")",
"canvas",
"=",
"backend_agg",
".",
"FigureCanvasAgg",
"(",
"fig",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"n",
",",
"i",
"+",
"1",
")",
"ax",
".",
"imshow",
"(",
"x",
"[",
"i",
"]",
".",
"squeeze",
"(",
")",
",",
"interpolation",
"=",
"\"none\"",
",",
"cmap",
"=",
"cm",
".",
"get_cmap",
"(",
"\"binary\"",
")",
")",
"ax",
".",
"axis",
"(",
"\"off\"",
")",
"canvas",
".",
"print_figure",
"(",
"fname",
",",
"format",
"=",
"\"png\"",
")",
"print",
"(",
"\"saved %s\"",
"%",
"fname",
")"
] | 31.444444 | 13.388889 |
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
"""Add token to the request uri, body or authorization header.
The access token type provides the client with the information
required to successfully utilize the access token to make a protected
resource request (along with type-specific attributes). The client
MUST NOT use an access token if it does not understand the token
type.
For example, the "bearer" token type defined in
[`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
token string in the request:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: Bearer mF_9.B5f-4.1JqM
while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
utilized by issuing a MAC key together with the access token which is
used to sign certain components of the HTTP requests:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: MAC id="h480djs93hd8",
nonce="274312:dj83hs9s",
mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
.. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
.. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = dict(
(k.lower(), v) for k, v in self.token_types.items())
if not self.token_type.lower() in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs) | [
"def",
"add_token",
"(",
"self",
",",
"uri",
",",
"http_method",
"=",
"'GET'",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"token_placement",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_secure_transport",
"(",
"uri",
")",
":",
"raise",
"InsecureTransportError",
"(",
")",
"token_placement",
"=",
"token_placement",
"or",
"self",
".",
"default_token_placement",
"case_insensitive_token_types",
"=",
"dict",
"(",
"(",
"k",
".",
"lower",
"(",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"token_types",
".",
"items",
"(",
")",
")",
"if",
"not",
"self",
".",
"token_type",
".",
"lower",
"(",
")",
"in",
"case_insensitive_token_types",
":",
"raise",
"ValueError",
"(",
"\"Unsupported token type: %s\"",
"%",
"self",
".",
"token_type",
")",
"if",
"not",
"(",
"self",
".",
"access_token",
"or",
"self",
".",
"token",
".",
"get",
"(",
"'access_token'",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Missing access token.\"",
")",
"if",
"self",
".",
"_expires_at",
"and",
"self",
".",
"_expires_at",
"<",
"time",
".",
"time",
"(",
")",
":",
"raise",
"TokenExpiredError",
"(",
")",
"return",
"case_insensitive_token_types",
"[",
"self",
".",
"token_type",
".",
"lower",
"(",
")",
"]",
"(",
"uri",
",",
"http_method",
",",
"body",
",",
"headers",
",",
"token_placement",
",",
"*",
"*",
"kwargs",
")"
] | 43.528302 | 26.45283 |
def convert_permute(builder, layer, input_names, output_names, keras_layer):
"""Convert a softmax layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
keras_dims = keras_layer.dims
# Keras permute layer index begins at 1
if len(keras_dims) == 3:
# Keras input tensor interpret as (H,W,C)
x = list(np.array(keras_dims))
i1, i2, i3 = x.index(1), x.index(2), x.index(3)
x[i1], x[i2], x[i3] = 2, 3, 1
# add a sequence axis
x = [0] + x
dim = tuple(x)
elif len(keras_dims) == 4:
# Here we use Keras converter as a place holder for inserting
# permutations - the values here are not valid Keras dim parameters
# but parameters we need to use to convert to CoreML model
dim = keras_dims
else:
raise NotImplementedError('Supports only 3d permutation.')
builder.add_permute(name = layer, dim=dim, input_name = input_name,
output_name = output_name) | [
"def",
"convert_permute",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"keras_dims",
"=",
"keras_layer",
".",
"dims",
"# Keras permute layer index begins at 1",
"if",
"len",
"(",
"keras_dims",
")",
"==",
"3",
":",
"# Keras input tensor interpret as (H,W,C)",
"x",
"=",
"list",
"(",
"np",
".",
"array",
"(",
"keras_dims",
")",
")",
"i1",
",",
"i2",
",",
"i3",
"=",
"x",
".",
"index",
"(",
"1",
")",
",",
"x",
".",
"index",
"(",
"2",
")",
",",
"x",
".",
"index",
"(",
"3",
")",
"x",
"[",
"i1",
"]",
",",
"x",
"[",
"i2",
"]",
",",
"x",
"[",
"i3",
"]",
"=",
"2",
",",
"3",
",",
"1",
"# add a sequence axis",
"x",
"=",
"[",
"0",
"]",
"+",
"x",
"dim",
"=",
"tuple",
"(",
"x",
")",
"elif",
"len",
"(",
"keras_dims",
")",
"==",
"4",
":",
"# Here we use Keras converter as a place holder for inserting",
"# permutations - the values here are not valid Keras dim parameters",
"# but parameters we need to use to convert to CoreML model",
"dim",
"=",
"keras_dims",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Supports only 3d permutation.'",
")",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"layer",
",",
"dim",
"=",
"dim",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | 34.878788 | 18.606061 |
def _extract_numbers(arg: Message_T) -> List[float]:
"""Extract all numbers (integers and floats) from a message-like object."""
s = str(arg)
return list(map(float, re.findall(r'[+-]?(\d*\.?\d+|\d+\.?\d*)', s))) | [
"def",
"_extract_numbers",
"(",
"arg",
":",
"Message_T",
")",
"->",
"List",
"[",
"float",
"]",
":",
"s",
"=",
"str",
"(",
"arg",
")",
"return",
"list",
"(",
"map",
"(",
"float",
",",
"re",
".",
"findall",
"(",
"r'[+-]?(\\d*\\.?\\d+|\\d+\\.?\\d*)'",
",",
"s",
")",
")",
")"
] | 55 | 17.25 |
def call(self, inputs):
"""Call `Layer`."""
# if context.executing_eagerly():
# if not self.initialized:
# self._data_dep_init(inputs)
self._compute_weights() # Recompute weights for each forward pass
output = self.layer.call(inputs)
return output | [
"def",
"call",
"(",
"self",
",",
"inputs",
")",
":",
"# if context.executing_eagerly():",
"# if not self.initialized:",
"# self._data_dep_init(inputs)",
"self",
".",
"_compute_weights",
"(",
")",
"# Recompute weights for each forward pass",
"output",
"=",
"self",
".",
"layer",
".",
"call",
"(",
"inputs",
")",
"return",
"output"
] | 30.555556 | 14.222222 |
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed | [
"def",
"parse_section_entry_points",
"(",
"self",
",",
"section_options",
")",
":",
"parsed",
"=",
"self",
".",
"_parse_section_to_dict",
"(",
"section_options",
",",
"self",
".",
"_parse_list",
")",
"self",
"[",
"'entry_points'",
"]",
"=",
"parsed"
] | 39.571429 | 14.285714 |
def add_months(self, month_int):
"""
addition of a number of months
:param BusinessDate d:
:param int month_int:
:return bankdate:
"""
month_int += self.month
while month_int > 12:
self = BusinessDate.add_years(self, 1)
month_int -= 12
while month_int < 1:
self = BusinessDate.add_years(self, -1)
month_int += 12
l = monthrange(self.year, month_int)[1]
return BusinessDate.from_ymd(self.year, month_int, min(l, self.day)) | [
"def",
"add_months",
"(",
"self",
",",
"month_int",
")",
":",
"month_int",
"+=",
"self",
".",
"month",
"while",
"month_int",
">",
"12",
":",
"self",
"=",
"BusinessDate",
".",
"add_years",
"(",
"self",
",",
"1",
")",
"month_int",
"-=",
"12",
"while",
"month_int",
"<",
"1",
":",
"self",
"=",
"BusinessDate",
".",
"add_years",
"(",
"self",
",",
"-",
"1",
")",
"month_int",
"+=",
"12",
"l",
"=",
"monthrange",
"(",
"self",
".",
"year",
",",
"month_int",
")",
"[",
"1",
"]",
"return",
"BusinessDate",
".",
"from_ymd",
"(",
"self",
".",
"year",
",",
"month_int",
",",
"min",
"(",
"l",
",",
"self",
".",
"day",
")",
")"
] | 30.111111 | 13.777778 |
def custom_filter_tags(self, value, search):
"""Support tags query."""
if not isinstance(value, list):
value = value.split(',')
filters = [Q('match', **{'tags': item}) for item in value]
search = search.query('bool', must=filters)
return search | [
"def",
"custom_filter_tags",
"(",
"self",
",",
"value",
",",
"search",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"value",
".",
"split",
"(",
"','",
")",
"filters",
"=",
"[",
"Q",
"(",
"'match'",
",",
"*",
"*",
"{",
"'tags'",
":",
"item",
"}",
")",
"for",
"item",
"in",
"value",
"]",
"search",
"=",
"search",
".",
"query",
"(",
"'bool'",
",",
"must",
"=",
"filters",
")",
"return",
"search"
] | 32.222222 | 16.111111 |
def start_trial(self, trial, checkpoint=None):
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
"""
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return # don't retry fatal Tune errors
try:
# This forces the trial to not start from checkpoint.
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg) | [
"def",
"start_trial",
"(",
"self",
",",
"trial",
",",
"checkpoint",
"=",
"None",
")",
":",
"self",
".",
"_commit_resources",
"(",
"trial",
".",
"resources",
")",
"try",
":",
"self",
".",
"_start_trial",
"(",
"trial",
",",
"checkpoint",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"\"Error starting runner for Trial %s\"",
",",
"str",
"(",
"trial",
")",
")",
"error_msg",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"time",
".",
"sleep",
"(",
"2",
")",
"self",
".",
"_stop_trial",
"(",
"trial",
",",
"error",
"=",
"True",
",",
"error_msg",
"=",
"error_msg",
")",
"if",
"isinstance",
"(",
"e",
",",
"AbortTrialExecution",
")",
":",
"return",
"# don't retry fatal Tune errors",
"try",
":",
"# This forces the trial to not start from checkpoint.",
"trial",
".",
"clear_checkpoint",
"(",
")",
"logger",
".",
"info",
"(",
"\"Trying to start runner for Trial %s without checkpoint.\"",
",",
"str",
"(",
"trial",
")",
")",
"self",
".",
"_start_trial",
"(",
"trial",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"\"Error starting runner for Trial %s, aborting!\"",
",",
"str",
"(",
"trial",
")",
")",
"error_msg",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"_stop_trial",
"(",
"trial",
",",
"error",
"=",
"True",
",",
"error_msg",
"=",
"error_msg",
")"
] | 40.029412 | 18.117647 |
def getpeptides(self, chain):
"""If peptide ligand chains are defined via the command line options,
try to extract the underlying ligand formed by all residues in the
given chain without water
"""
all_from_chain = [o for o in pybel.ob.OBResidueIter(
self.proteincomplex.OBMol) if o.GetChain() == chain] # All residues from chain
if len(all_from_chain) == 0:
return None
else:
non_water = [o for o in all_from_chain if not o.GetResidueProperty(9)]
ligand = self.extract_ligand(non_water)
return ligand | [
"def",
"getpeptides",
"(",
"self",
",",
"chain",
")",
":",
"all_from_chain",
"=",
"[",
"o",
"for",
"o",
"in",
"pybel",
".",
"ob",
".",
"OBResidueIter",
"(",
"self",
".",
"proteincomplex",
".",
"OBMol",
")",
"if",
"o",
".",
"GetChain",
"(",
")",
"==",
"chain",
"]",
"# All residues from chain",
"if",
"len",
"(",
"all_from_chain",
")",
"==",
"0",
":",
"return",
"None",
"else",
":",
"non_water",
"=",
"[",
"o",
"for",
"o",
"in",
"all_from_chain",
"if",
"not",
"o",
".",
"GetResidueProperty",
"(",
"9",
")",
"]",
"ligand",
"=",
"self",
".",
"extract_ligand",
"(",
"non_water",
")",
"return",
"ligand"
] | 46.538462 | 18.384615 |
def databasesKEGG(organism,ens_ids):
"""
Finds KEGG database identifiers for a respective organism given example ensembl ids.
:param organism: an organism as listed in organismsKEGG()
:param ens_ids: a list of ensenbl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found
"""
all_genes=urlopen("http://rest.kegg.jp/list/"+organism).read()
all_genes=all_genes.split("\n")
dbs=[]
while len(dbs) == 0:
for g in all_genes:
if len(dbs) == 0:
kid = g.split("\t")[0]
gene=urlopen("http://rest.kegg.jp/get/"+kid).read()
DBLINKS=gene.split("\n")
DBLINKS=[ s for s in DBLINKS if ":" in s ]
for d in DBLINKS:
test=d.split(" ")
test=test[len(test)-1]
if test in ens_ids:
DBLINK=[ s for s in DBLINKS if test in s ]
DBLINK=DBLINK[0].split(":")
DBLINK=DBLINK[len(DBLINK)-2]
dbs.append(DBLINK)
else:
break
ens_db=dbs[0].split(" ")
ens_db=ens_db[len(ens_db)-1]
test_db=urlopen("http://rest.genome.jp/link/"+ens_db+"/"+organism).read()
test_db=test_db.split("\n")
if len(test_db) == 1:
print("For "+organism+" the following db was found: "+ens_db)
print("This database does not seem to be valid KEGG-linked database identifier")
print("For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'")
sys.stdout.flush()
ens_db = None
else:
print("For "+organism+" the following db was found: "+ens_db)
sys.stdout.flush()
return ens_db | [
"def",
"databasesKEGG",
"(",
"organism",
",",
"ens_ids",
")",
":",
"all_genes",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/list/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"all_genes",
"=",
"all_genes",
".",
"split",
"(",
"\"\\n\"",
")",
"dbs",
"=",
"[",
"]",
"while",
"len",
"(",
"dbs",
")",
"==",
"0",
":",
"for",
"g",
"in",
"all_genes",
":",
"if",
"len",
"(",
"dbs",
")",
"==",
"0",
":",
"kid",
"=",
"g",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
"gene",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/get/\"",
"+",
"kid",
")",
".",
"read",
"(",
")",
"DBLINKS",
"=",
"gene",
".",
"split",
"(",
"\"\\n\"",
")",
"DBLINKS",
"=",
"[",
"s",
"for",
"s",
"in",
"DBLINKS",
"if",
"\":\"",
"in",
"s",
"]",
"for",
"d",
"in",
"DBLINKS",
":",
"test",
"=",
"d",
".",
"split",
"(",
"\" \"",
")",
"test",
"=",
"test",
"[",
"len",
"(",
"test",
")",
"-",
"1",
"]",
"if",
"test",
"in",
"ens_ids",
":",
"DBLINK",
"=",
"[",
"s",
"for",
"s",
"in",
"DBLINKS",
"if",
"test",
"in",
"s",
"]",
"DBLINK",
"=",
"DBLINK",
"[",
"0",
"]",
".",
"split",
"(",
"\":\"",
")",
"DBLINK",
"=",
"DBLINK",
"[",
"len",
"(",
"DBLINK",
")",
"-",
"2",
"]",
"dbs",
".",
"append",
"(",
"DBLINK",
")",
"else",
":",
"break",
"ens_db",
"=",
"dbs",
"[",
"0",
"]",
".",
"split",
"(",
"\" \"",
")",
"ens_db",
"=",
"ens_db",
"[",
"len",
"(",
"ens_db",
")",
"-",
"1",
"]",
"test_db",
"=",
"urlopen",
"(",
"\"http://rest.genome.jp/link/\"",
"+",
"ens_db",
"+",
"\"/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"test_db",
"=",
"test_db",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"len",
"(",
"test_db",
")",
"==",
"1",
":",
"print",
"(",
"\"For \"",
"+",
"organism",
"+",
"\" the following db was found: \"",
"+",
"ens_db",
")",
"print",
"(",
"\"This database does not seem to be valid KEGG-linked database identifier\"",
")",
"print",
"(",
"\"For \\n'hsa' use 'ensembl-hsa'\\n'mmu' use 'ensembl-mmu'\\n'cel' use 'EnsemblGenomes-Gn'\\n'dme' use 'FlyBase'\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"ens_db",
"=",
"None",
"else",
":",
"print",
"(",
"\"For \"",
"+",
"organism",
"+",
"\" the following db was found: \"",
"+",
"ens_db",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"return",
"ens_db"
] | 39.666667 | 20.644444 |
async def gantry_position(
self,
mount: top_types.Mount,
critical_point: CriticalPoint = None) -> top_types.Point:
""" Return the position of the critical point as pertains to the gantry
This ignores the plunger position and gives the Z-axis a predictable
name (as :py:attr:`.Point.z`).
`critical_point` specifies an override to the current critical point to
use (see :py:meth:`current_position`).
"""
cur_pos = await self.current_position(mount, critical_point)
return top_types.Point(x=cur_pos[Axis.X],
y=cur_pos[Axis.Y],
z=cur_pos[Axis.by_mount(mount)]) | [
"async",
"def",
"gantry_position",
"(",
"self",
",",
"mount",
":",
"top_types",
".",
"Mount",
",",
"critical_point",
":",
"CriticalPoint",
"=",
"None",
")",
"->",
"top_types",
".",
"Point",
":",
"cur_pos",
"=",
"await",
"self",
".",
"current_position",
"(",
"mount",
",",
"critical_point",
")",
"return",
"top_types",
".",
"Point",
"(",
"x",
"=",
"cur_pos",
"[",
"Axis",
".",
"X",
"]",
",",
"y",
"=",
"cur_pos",
"[",
"Axis",
".",
"Y",
"]",
",",
"z",
"=",
"cur_pos",
"[",
"Axis",
".",
"by_mount",
"(",
"mount",
")",
"]",
")"
] | 44.0625 | 18.9375 |
def create(self, *args, **kwargs):
"""
Allow an 'author' kwarg to automatically fill in the created_by and last_modified_by fields.
"""
if kwargs.has_key('author'):
kwargs['created_by'] = kwargs['author']
kwargs['last_modified_by'] = kwargs['author']
del kwargs['author']
return super(CMSPageManager, self).create(*args, **kwargs) | [
"def",
"create",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"has_key",
"(",
"'author'",
")",
":",
"kwargs",
"[",
"'created_by'",
"]",
"=",
"kwargs",
"[",
"'author'",
"]",
"kwargs",
"[",
"'last_modified_by'",
"]",
"=",
"kwargs",
"[",
"'author'",
"]",
"del",
"kwargs",
"[",
"'author'",
"]",
"return",
"super",
"(",
"CMSPageManager",
",",
"self",
")",
".",
"create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 44.222222 | 14.666667 |
def eval(self, script, numkeys, *keys_and_args):
"""Emulate eval"""
sha = self.script_load(script)
return self.evalsha(sha, numkeys, *keys_and_args) | [
"def",
"eval",
"(",
"self",
",",
"script",
",",
"numkeys",
",",
"*",
"keys_and_args",
")",
":",
"sha",
"=",
"self",
".",
"script_load",
"(",
"script",
")",
"return",
"self",
".",
"evalsha",
"(",
"sha",
",",
"numkeys",
",",
"*",
"keys_and_args",
")"
] | 42.25 | 6.75 |
def adaptive_grid_archiver(random, population, archive, args):
"""Archive only the best individual(s) using a fixed size grid.
This function archives the best solutions by using a fixed-size grid
to determine which existing solutions should be removed in order to
make room for new ones. This archiver is designed specifically for
use with the Pareto Archived Evolution Strategy (PAES).
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *max_archive_size* -- the maximum number of individuals in the archive
(default len(population))
- *num_grid_divisions* -- the number of grid divisions (default 1)
"""
def get_grid_location(fitness, num_grid_divisions, global_smallest, global_largest):
loc = 0
n = 1
num_objectives = len(fitness)
inc = [0 for _ in range(num_objectives)]
width = [0 for _ in range(num_objectives)]
local_smallest = global_smallest[:]
for i, f in enumerate(fitness):
if f < local_smallest[i] or f > local_smallest[i] + global_largest[i] - global_smallest[i]:
return -1
for i in range(num_objectives):
inc[i] = n
n *= 2
width[i] = global_largest[i] - global_smallest[i]
for d in range(num_grid_divisions):
for i, f in enumerate(fitness):
if f < width[i] / 2.0 + local_smallest[i]:
loc += inc[i]
else:
local_smallest[i] += width[i] / 2.0
for i in range(num_objectives):
inc[i] *= num_objectives * 2
width[i] /= 2.0
return loc
def update_grid(individual, archive, num_grid_divisions, global_smallest, global_largest, grid_population):
if len(archive) == 0:
num_objectives = len(individual.fitness)
smallest = [individual.fitness[o] for o in range(num_objectives)]
largest = [individual.fitness[o] for o in range(num_objectives)]
else:
num_objectives = min(min([len(a.fitness) for a in archive]), len(individual.fitness))
smallest = [min(min([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)]
largest = [max(max([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)]
for i in range(num_objectives):
global_smallest[i] = smallest[i] - abs(0.2 * smallest[i])
global_largest[i] = largest[i] + abs(0.2 * largest[i])
for i in range(len(grid_population)):
grid_population[i] = 0
for a in archive:
loc = get_grid_location(a.fitness, num_grid_divisions, global_smallest, global_largest)
a.grid_location = loc
grid_population[loc] += 1
loc = get_grid_location(individual.fitness, num_grid_divisions, global_smallest, global_largest)
individual.grid_location = loc
grid_population[loc] += 1
max_archive_size = args.setdefault('max_archive_size', len(population))
num_grid_divisions = args.setdefault('num_grid_divisions', 1)
if not 'grid_population' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.grid_population = [0 for _ in range(2**(min([len(p.fitness) for p in population]) * num_grid_divisions))]
if not 'global_smallest' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.global_smallest = [0 for _ in range(min([len(p.fitness) for p in population]))]
if not 'global_largest' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.global_largest = [0 for _ in range(min([len(p.fitness) for p in population]))]
new_archive = archive
for ind in population:
update_grid(ind, new_archive, num_grid_divisions, adaptive_grid_archiver.global_smallest,
adaptive_grid_archiver.global_largest, adaptive_grid_archiver.grid_population)
should_be_added = True
for a in new_archive:
if ind == a or a > ind:
should_be_added = False
if should_be_added:
if len(new_archive) == 0:
new_archive.append(ind)
else:
join = False
nondominated = True
removal_set = []
for i, a in enumerate(new_archive):
if ind > a and not join:
new_archive[i] = ind
join = True
elif ind > a:
if not a in removal_set:
removal_set.append(a)
# Otherwise, the individual is nondominated against this archive member.
# We can't use set difference because Individual objects are not hashable.
# We'd like to say...
# new_archive = list(set(new_archive) - set(removal_set))
# So this code gets that same result without using sets.
temp_archive = []
for ind in new_archive:
if ind not in removal_set:
temp_archive.append(ind)
new_archive = temp_archive
if not join and nondominated:
if len(new_archive) == max_archive_size:
replaced_index = 0
found_replacement = False
loc = get_grid_location(ind.fitness, num_grid_divisions,
adaptive_grid_archiver.global_smallest,
adaptive_grid_archiver.global_largest)
ind.grid_location = loc
if ind.grid_location >= 0:
most = adaptive_grid_archiver.grid_population[ind.grid_location]
else:
most = -1
for i, a in enumerate(new_archive):
pop_at_a = adaptive_grid_archiver.grid_population[a.grid_location]
if pop_at_a > most:
most = pop_at_a
replaced_index = i
found_replacement = True
if found_replacement:
new_archive[replaced_index] = ind
else:
new_archive.append(ind)
return new_archive | [
"def",
"adaptive_grid_archiver",
"(",
"random",
",",
"population",
",",
"archive",
",",
"args",
")",
":",
"def",
"get_grid_location",
"(",
"fitness",
",",
"num_grid_divisions",
",",
"global_smallest",
",",
"global_largest",
")",
":",
"loc",
"=",
"0",
"n",
"=",
"1",
"num_objectives",
"=",
"len",
"(",
"fitness",
")",
"inc",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"num_objectives",
")",
"]",
"width",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"num_objectives",
")",
"]",
"local_smallest",
"=",
"global_smallest",
"[",
":",
"]",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fitness",
")",
":",
"if",
"f",
"<",
"local_smallest",
"[",
"i",
"]",
"or",
"f",
">",
"local_smallest",
"[",
"i",
"]",
"+",
"global_largest",
"[",
"i",
"]",
"-",
"global_smallest",
"[",
"i",
"]",
":",
"return",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"num_objectives",
")",
":",
"inc",
"[",
"i",
"]",
"=",
"n",
"n",
"*=",
"2",
"width",
"[",
"i",
"]",
"=",
"global_largest",
"[",
"i",
"]",
"-",
"global_smallest",
"[",
"i",
"]",
"for",
"d",
"in",
"range",
"(",
"num_grid_divisions",
")",
":",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fitness",
")",
":",
"if",
"f",
"<",
"width",
"[",
"i",
"]",
"/",
"2.0",
"+",
"local_smallest",
"[",
"i",
"]",
":",
"loc",
"+=",
"inc",
"[",
"i",
"]",
"else",
":",
"local_smallest",
"[",
"i",
"]",
"+=",
"width",
"[",
"i",
"]",
"/",
"2.0",
"for",
"i",
"in",
"range",
"(",
"num_objectives",
")",
":",
"inc",
"[",
"i",
"]",
"*=",
"num_objectives",
"*",
"2",
"width",
"[",
"i",
"]",
"/=",
"2.0",
"return",
"loc",
"def",
"update_grid",
"(",
"individual",
",",
"archive",
",",
"num_grid_divisions",
",",
"global_smallest",
",",
"global_largest",
",",
"grid_population",
")",
":",
"if",
"len",
"(",
"archive",
")",
"==",
"0",
":",
"num_objectives",
"=",
"len",
"(",
"individual",
".",
"fitness",
")",
"smallest",
"=",
"[",
"individual",
".",
"fitness",
"[",
"o",
"]",
"for",
"o",
"in",
"range",
"(",
"num_objectives",
")",
"]",
"largest",
"=",
"[",
"individual",
".",
"fitness",
"[",
"o",
"]",
"for",
"o",
"in",
"range",
"(",
"num_objectives",
")",
"]",
"else",
":",
"num_objectives",
"=",
"min",
"(",
"min",
"(",
"[",
"len",
"(",
"a",
".",
"fitness",
")",
"for",
"a",
"in",
"archive",
"]",
")",
",",
"len",
"(",
"individual",
".",
"fitness",
")",
")",
"smallest",
"=",
"[",
"min",
"(",
"min",
"(",
"[",
"a",
".",
"fitness",
"[",
"o",
"]",
"for",
"a",
"in",
"archive",
"]",
")",
",",
"individual",
".",
"fitness",
"[",
"o",
"]",
")",
"for",
"o",
"in",
"range",
"(",
"num_objectives",
")",
"]",
"largest",
"=",
"[",
"max",
"(",
"max",
"(",
"[",
"a",
".",
"fitness",
"[",
"o",
"]",
"for",
"a",
"in",
"archive",
"]",
")",
",",
"individual",
".",
"fitness",
"[",
"o",
"]",
")",
"for",
"o",
"in",
"range",
"(",
"num_objectives",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"num_objectives",
")",
":",
"global_smallest",
"[",
"i",
"]",
"=",
"smallest",
"[",
"i",
"]",
"-",
"abs",
"(",
"0.2",
"*",
"smallest",
"[",
"i",
"]",
")",
"global_largest",
"[",
"i",
"]",
"=",
"largest",
"[",
"i",
"]",
"+",
"abs",
"(",
"0.2",
"*",
"largest",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"grid_population",
")",
")",
":",
"grid_population",
"[",
"i",
"]",
"=",
"0",
"for",
"a",
"in",
"archive",
":",
"loc",
"=",
"get_grid_location",
"(",
"a",
".",
"fitness",
",",
"num_grid_divisions",
",",
"global_smallest",
",",
"global_largest",
")",
"a",
".",
"grid_location",
"=",
"loc",
"grid_population",
"[",
"loc",
"]",
"+=",
"1",
"loc",
"=",
"get_grid_location",
"(",
"individual",
".",
"fitness",
",",
"num_grid_divisions",
",",
"global_smallest",
",",
"global_largest",
")",
"individual",
".",
"grid_location",
"=",
"loc",
"grid_population",
"[",
"loc",
"]",
"+=",
"1",
"max_archive_size",
"=",
"args",
".",
"setdefault",
"(",
"'max_archive_size'",
",",
"len",
"(",
"population",
")",
")",
"num_grid_divisions",
"=",
"args",
".",
"setdefault",
"(",
"'num_grid_divisions'",
",",
"1",
")",
"if",
"not",
"'grid_population'",
"in",
"dir",
"(",
"adaptive_grid_archiver",
")",
":",
"adaptive_grid_archiver",
".",
"grid_population",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"2",
"**",
"(",
"min",
"(",
"[",
"len",
"(",
"p",
".",
"fitness",
")",
"for",
"p",
"in",
"population",
"]",
")",
"*",
"num_grid_divisions",
")",
")",
"]",
"if",
"not",
"'global_smallest'",
"in",
"dir",
"(",
"adaptive_grid_archiver",
")",
":",
"adaptive_grid_archiver",
".",
"global_smallest",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"min",
"(",
"[",
"len",
"(",
"p",
".",
"fitness",
")",
"for",
"p",
"in",
"population",
"]",
")",
")",
"]",
"if",
"not",
"'global_largest'",
"in",
"dir",
"(",
"adaptive_grid_archiver",
")",
":",
"adaptive_grid_archiver",
".",
"global_largest",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"min",
"(",
"[",
"len",
"(",
"p",
".",
"fitness",
")",
"for",
"p",
"in",
"population",
"]",
")",
")",
"]",
"new_archive",
"=",
"archive",
"for",
"ind",
"in",
"population",
":",
"update_grid",
"(",
"ind",
",",
"new_archive",
",",
"num_grid_divisions",
",",
"adaptive_grid_archiver",
".",
"global_smallest",
",",
"adaptive_grid_archiver",
".",
"global_largest",
",",
"adaptive_grid_archiver",
".",
"grid_population",
")",
"should_be_added",
"=",
"True",
"for",
"a",
"in",
"new_archive",
":",
"if",
"ind",
"==",
"a",
"or",
"a",
">",
"ind",
":",
"should_be_added",
"=",
"False",
"if",
"should_be_added",
":",
"if",
"len",
"(",
"new_archive",
")",
"==",
"0",
":",
"new_archive",
".",
"append",
"(",
"ind",
")",
"else",
":",
"join",
"=",
"False",
"nondominated",
"=",
"True",
"removal_set",
"=",
"[",
"]",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"new_archive",
")",
":",
"if",
"ind",
">",
"a",
"and",
"not",
"join",
":",
"new_archive",
"[",
"i",
"]",
"=",
"ind",
"join",
"=",
"True",
"elif",
"ind",
">",
"a",
":",
"if",
"not",
"a",
"in",
"removal_set",
":",
"removal_set",
".",
"append",
"(",
"a",
")",
"# Otherwise, the individual is nondominated against this archive member.",
"# We can't use set difference because Individual objects are not hashable.",
"# We'd like to say...",
"# new_archive = list(set(new_archive) - set(removal_set))",
"# So this code gets that same result without using sets.",
"temp_archive",
"=",
"[",
"]",
"for",
"ind",
"in",
"new_archive",
":",
"if",
"ind",
"not",
"in",
"removal_set",
":",
"temp_archive",
".",
"append",
"(",
"ind",
")",
"new_archive",
"=",
"temp_archive",
"if",
"not",
"join",
"and",
"nondominated",
":",
"if",
"len",
"(",
"new_archive",
")",
"==",
"max_archive_size",
":",
"replaced_index",
"=",
"0",
"found_replacement",
"=",
"False",
"loc",
"=",
"get_grid_location",
"(",
"ind",
".",
"fitness",
",",
"num_grid_divisions",
",",
"adaptive_grid_archiver",
".",
"global_smallest",
",",
"adaptive_grid_archiver",
".",
"global_largest",
")",
"ind",
".",
"grid_location",
"=",
"loc",
"if",
"ind",
".",
"grid_location",
">=",
"0",
":",
"most",
"=",
"adaptive_grid_archiver",
".",
"grid_population",
"[",
"ind",
".",
"grid_location",
"]",
"else",
":",
"most",
"=",
"-",
"1",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"new_archive",
")",
":",
"pop_at_a",
"=",
"adaptive_grid_archiver",
".",
"grid_population",
"[",
"a",
".",
"grid_location",
"]",
"if",
"pop_at_a",
">",
"most",
":",
"most",
"=",
"pop_at_a",
"replaced_index",
"=",
"i",
"found_replacement",
"=",
"True",
"if",
"found_replacement",
":",
"new_archive",
"[",
"replaced_index",
"]",
"=",
"ind",
"else",
":",
"new_archive",
".",
"append",
"(",
"ind",
")",
"return",
"new_archive"
] | 48.794118 | 21.691176 |
def delete_namespace(self):
'''Remove all keys from the namespace
'''
conn = redis.Redis(connection_pool=self.pool)
keys = conn.keys("%s*" % self._namespace_str)
for i in xrange(0, len(keys), 10000):
conn.delete(*keys[i:i+10000])
logger.debug('tearing down %r', self._namespace_str) | [
"def",
"delete_namespace",
"(",
"self",
")",
":",
"conn",
"=",
"redis",
".",
"Redis",
"(",
"connection_pool",
"=",
"self",
".",
"pool",
")",
"keys",
"=",
"conn",
".",
"keys",
"(",
"\"%s*\"",
"%",
"self",
".",
"_namespace_str",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"keys",
")",
",",
"10000",
")",
":",
"conn",
".",
"delete",
"(",
"*",
"keys",
"[",
"i",
":",
"i",
"+",
"10000",
"]",
")",
"logger",
".",
"debug",
"(",
"'tearing down %r'",
",",
"self",
".",
"_namespace_str",
")"
] | 37.222222 | 15.444444 |
def wait_for_close(
raiden: 'RaidenService',
payment_network_id: PaymentNetworkID,
token_address: TokenAddress,
channel_ids: List[ChannelID],
retry_timeout: float,
) -> None:
"""Wait until all channels are closed.
Note:
This does not time out, use gevent.Timeout.
"""
return wait_for_channel_in_states(
raiden=raiden,
payment_network_id=payment_network_id,
token_address=token_address,
channel_ids=channel_ids,
retry_timeout=retry_timeout,
target_states=CHANNEL_AFTER_CLOSE_STATES,
) | [
"def",
"wait_for_close",
"(",
"raiden",
":",
"'RaidenService'",
",",
"payment_network_id",
":",
"PaymentNetworkID",
",",
"token_address",
":",
"TokenAddress",
",",
"channel_ids",
":",
"List",
"[",
"ChannelID",
"]",
",",
"retry_timeout",
":",
"float",
",",
")",
"->",
"None",
":",
"return",
"wait_for_channel_in_states",
"(",
"raiden",
"=",
"raiden",
",",
"payment_network_id",
"=",
"payment_network_id",
",",
"token_address",
"=",
"token_address",
",",
"channel_ids",
"=",
"channel_ids",
",",
"retry_timeout",
"=",
"retry_timeout",
",",
"target_states",
"=",
"CHANNEL_AFTER_CLOSE_STATES",
",",
")"
] | 29.05 | 12.5 |
def get_xpath_branch(xroot, xpath):
""" :return: the relative part of an XPATH: that which extends past the root provided """
if xroot and xpath and xpath.startswith(xroot):
xpath = xpath[len(xroot):]
xpath = xpath.lstrip(XPATH_DELIM)
return xpath | [
"def",
"get_xpath_branch",
"(",
"xroot",
",",
"xpath",
")",
":",
"if",
"xroot",
"and",
"xpath",
"and",
"xpath",
".",
"startswith",
"(",
"xroot",
")",
":",
"xpath",
"=",
"xpath",
"[",
"len",
"(",
"xroot",
")",
":",
"]",
"xpath",
"=",
"xpath",
".",
"lstrip",
"(",
"XPATH_DELIM",
")",
"return",
"xpath"
] | 33.75 | 15.875 |
def schema(ds):
"""
Convert the table and column descriptions of a `Dataset` into specifications for the
DB schema.
:param ds:
:return: A pair (tables, reference_tables).
"""
tables, ref_tables = {}, {}
table_lookup = {t.url.string: t for t in ds.tables if ds.get_tabletype(t)}
for table in table_lookup.values():
spec = TableSpec(ds.get_tabletype(table))
spec.primary_key = [
c for c in table.tableSchema.columns if
c.propertyUrl and c.propertyUrl.uri == term_uri('id')][0].name
# Map the column name to the default:
if spec.name in PROPERTY_URL_TO_COL:
spec.primary_key = PROPERTY_URL_TO_COL[spec.name][term_uri('id')]
for c in table.tableSchema.columns:
if c.propertyUrl and c.propertyUrl.uri == term_uri('source'):
# A column referencing sources is replaced by an association table.
otype = ds.get_tabletype(table).replace('Table', '')
ref_tables[ds.get_tabletype(table)] = TableSpec(
'{0}Source'.format(otype), # The name of the association table.
[ColSpec(otype + '_ID'), ColSpec('Source_ID'), ColSpec('Context')],
[
( # The foreign key to the referencing object:
['dataset_ID', otype + '_ID'],
ds.get_tabletype(table),
['dataset_ID', spec.primary_key]),
( # The foreign key to the referenced source:
['dataset_ID', 'Source_ID'],
'SourceTable',
['dataset_ID', 'ID']),
],
c.name)
else:
cname = c.header
if c.propertyUrl and spec.name in PROPERTY_URL_TO_COL:
if c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:
cname = PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri]
spec.columns.append(ColSpec(
cname,
c.datatype.base if c.datatype else c.datatype,
c.separator,
cname == spec.primary_key,
cldf_name=c.header))
for fk in table.tableSchema.foreignKeys:
if fk.reference.schemaReference:
# We only support Foreign Key references between tables!
continue # pragma: no cover
ref = table_lookup[fk.reference.resource.string]
ref_type = ds.get_tabletype(ref)
if ref_type:
colRefs = sorted(fk.columnReference)
if spec.name in PROPERTY_URL_TO_COL:
# Must map foreign keys
colRefs = []
for c in sorted(fk.columnReference):
c = ds[spec.name, c]
if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:
colRefs.append(PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri])
else:
colRefs.append(c.header)
rcolRefs = sorted(fk.reference.columnReference)
if ref_type in PROPERTY_URL_TO_COL:
# Must map foreign key targets!
rcolRefs = []
for c in sorted(fk.reference.columnReference):
c = ds[ref_type, c]
if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[ref_type]:
rcolRefs.append(PROPERTY_URL_TO_COL[ref_type][c.propertyUrl.uri])
else:
rcolRefs.append(c.header)
spec.foreign_keys.append((
tuple(['dataset_ID'] + colRefs),
ds.get_tabletype(table_lookup[fk.reference.resource.string]),
tuple(['dataset_ID'] + rcolRefs)))
tables[spec.name] = spec
# must determine the order in which tables must be created!
ordered = OrderedDict()
i = 0
#
# We loop through the tables repeatedly, and whenever we find one, which has all
# referenced tables already in ordered, we move it from tables to ordered.
#
while tables and i < 100:
i += 1
for table in list(tables.keys()):
if all(ref[1] in ordered for ref in tables[table].foreign_keys):
# All referenced tables are already created.
ordered[table] = tables.pop(table)
break
if tables: # pragma: no cover
raise ValueError('there seem to be cyclic dependencies between the tables')
return list(ordered.values()), ref_tables | [
"def",
"schema",
"(",
"ds",
")",
":",
"tables",
",",
"ref_tables",
"=",
"{",
"}",
",",
"{",
"}",
"table_lookup",
"=",
"{",
"t",
".",
"url",
".",
"string",
":",
"t",
"for",
"t",
"in",
"ds",
".",
"tables",
"if",
"ds",
".",
"get_tabletype",
"(",
"t",
")",
"}",
"for",
"table",
"in",
"table_lookup",
".",
"values",
"(",
")",
":",
"spec",
"=",
"TableSpec",
"(",
"ds",
".",
"get_tabletype",
"(",
"table",
")",
")",
"spec",
".",
"primary_key",
"=",
"[",
"c",
"for",
"c",
"in",
"table",
".",
"tableSchema",
".",
"columns",
"if",
"c",
".",
"propertyUrl",
"and",
"c",
".",
"propertyUrl",
".",
"uri",
"==",
"term_uri",
"(",
"'id'",
")",
"]",
"[",
"0",
"]",
".",
"name",
"# Map the column name to the default:",
"if",
"spec",
".",
"name",
"in",
"PROPERTY_URL_TO_COL",
":",
"spec",
".",
"primary_key",
"=",
"PROPERTY_URL_TO_COL",
"[",
"spec",
".",
"name",
"]",
"[",
"term_uri",
"(",
"'id'",
")",
"]",
"for",
"c",
"in",
"table",
".",
"tableSchema",
".",
"columns",
":",
"if",
"c",
".",
"propertyUrl",
"and",
"c",
".",
"propertyUrl",
".",
"uri",
"==",
"term_uri",
"(",
"'source'",
")",
":",
"# A column referencing sources is replaced by an association table.",
"otype",
"=",
"ds",
".",
"get_tabletype",
"(",
"table",
")",
".",
"replace",
"(",
"'Table'",
",",
"''",
")",
"ref_tables",
"[",
"ds",
".",
"get_tabletype",
"(",
"table",
")",
"]",
"=",
"TableSpec",
"(",
"'{0}Source'",
".",
"format",
"(",
"otype",
")",
",",
"# The name of the association table.",
"[",
"ColSpec",
"(",
"otype",
"+",
"'_ID'",
")",
",",
"ColSpec",
"(",
"'Source_ID'",
")",
",",
"ColSpec",
"(",
"'Context'",
")",
"]",
",",
"[",
"(",
"# The foreign key to the referencing object:",
"[",
"'dataset_ID'",
",",
"otype",
"+",
"'_ID'",
"]",
",",
"ds",
".",
"get_tabletype",
"(",
"table",
")",
",",
"[",
"'dataset_ID'",
",",
"spec",
".",
"primary_key",
"]",
")",
",",
"(",
"# The foreign key to the referenced source:",
"[",
"'dataset_ID'",
",",
"'Source_ID'",
"]",
",",
"'SourceTable'",
",",
"[",
"'dataset_ID'",
",",
"'ID'",
"]",
")",
",",
"]",
",",
"c",
".",
"name",
")",
"else",
":",
"cname",
"=",
"c",
".",
"header",
"if",
"c",
".",
"propertyUrl",
"and",
"spec",
".",
"name",
"in",
"PROPERTY_URL_TO_COL",
":",
"if",
"c",
".",
"propertyUrl",
".",
"uri",
"in",
"PROPERTY_URL_TO_COL",
"[",
"spec",
".",
"name",
"]",
":",
"cname",
"=",
"PROPERTY_URL_TO_COL",
"[",
"spec",
".",
"name",
"]",
"[",
"c",
".",
"propertyUrl",
".",
"uri",
"]",
"spec",
".",
"columns",
".",
"append",
"(",
"ColSpec",
"(",
"cname",
",",
"c",
".",
"datatype",
".",
"base",
"if",
"c",
".",
"datatype",
"else",
"c",
".",
"datatype",
",",
"c",
".",
"separator",
",",
"cname",
"==",
"spec",
".",
"primary_key",
",",
"cldf_name",
"=",
"c",
".",
"header",
")",
")",
"for",
"fk",
"in",
"table",
".",
"tableSchema",
".",
"foreignKeys",
":",
"if",
"fk",
".",
"reference",
".",
"schemaReference",
":",
"# We only support Foreign Key references between tables!",
"continue",
"# pragma: no cover",
"ref",
"=",
"table_lookup",
"[",
"fk",
".",
"reference",
".",
"resource",
".",
"string",
"]",
"ref_type",
"=",
"ds",
".",
"get_tabletype",
"(",
"ref",
")",
"if",
"ref_type",
":",
"colRefs",
"=",
"sorted",
"(",
"fk",
".",
"columnReference",
")",
"if",
"spec",
".",
"name",
"in",
"PROPERTY_URL_TO_COL",
":",
"# Must map foreign keys",
"colRefs",
"=",
"[",
"]",
"for",
"c",
"in",
"sorted",
"(",
"fk",
".",
"columnReference",
")",
":",
"c",
"=",
"ds",
"[",
"spec",
".",
"name",
",",
"c",
"]",
"if",
"c",
".",
"propertyUrl",
"and",
"c",
".",
"propertyUrl",
".",
"uri",
"in",
"PROPERTY_URL_TO_COL",
"[",
"spec",
".",
"name",
"]",
":",
"colRefs",
".",
"append",
"(",
"PROPERTY_URL_TO_COL",
"[",
"spec",
".",
"name",
"]",
"[",
"c",
".",
"propertyUrl",
".",
"uri",
"]",
")",
"else",
":",
"colRefs",
".",
"append",
"(",
"c",
".",
"header",
")",
"rcolRefs",
"=",
"sorted",
"(",
"fk",
".",
"reference",
".",
"columnReference",
")",
"if",
"ref_type",
"in",
"PROPERTY_URL_TO_COL",
":",
"# Must map foreign key targets!",
"rcolRefs",
"=",
"[",
"]",
"for",
"c",
"in",
"sorted",
"(",
"fk",
".",
"reference",
".",
"columnReference",
")",
":",
"c",
"=",
"ds",
"[",
"ref_type",
",",
"c",
"]",
"if",
"c",
".",
"propertyUrl",
"and",
"c",
".",
"propertyUrl",
".",
"uri",
"in",
"PROPERTY_URL_TO_COL",
"[",
"ref_type",
"]",
":",
"rcolRefs",
".",
"append",
"(",
"PROPERTY_URL_TO_COL",
"[",
"ref_type",
"]",
"[",
"c",
".",
"propertyUrl",
".",
"uri",
"]",
")",
"else",
":",
"rcolRefs",
".",
"append",
"(",
"c",
".",
"header",
")",
"spec",
".",
"foreign_keys",
".",
"append",
"(",
"(",
"tuple",
"(",
"[",
"'dataset_ID'",
"]",
"+",
"colRefs",
")",
",",
"ds",
".",
"get_tabletype",
"(",
"table_lookup",
"[",
"fk",
".",
"reference",
".",
"resource",
".",
"string",
"]",
")",
",",
"tuple",
"(",
"[",
"'dataset_ID'",
"]",
"+",
"rcolRefs",
")",
")",
")",
"tables",
"[",
"spec",
".",
"name",
"]",
"=",
"spec",
"# must determine the order in which tables must be created!",
"ordered",
"=",
"OrderedDict",
"(",
")",
"i",
"=",
"0",
"#",
"# We loop through the tables repeatedly, and whenever we find one, which has all",
"# referenced tables already in ordered, we move it from tables to ordered.",
"#",
"while",
"tables",
"and",
"i",
"<",
"100",
":",
"i",
"+=",
"1",
"for",
"table",
"in",
"list",
"(",
"tables",
".",
"keys",
"(",
")",
")",
":",
"if",
"all",
"(",
"ref",
"[",
"1",
"]",
"in",
"ordered",
"for",
"ref",
"in",
"tables",
"[",
"table",
"]",
".",
"foreign_keys",
")",
":",
"# All referenced tables are already created.",
"ordered",
"[",
"table",
"]",
"=",
"tables",
".",
"pop",
"(",
"table",
")",
"break",
"if",
"tables",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"'there seem to be cyclic dependencies between the tables'",
")",
"return",
"list",
"(",
"ordered",
".",
"values",
"(",
")",
")",
",",
"ref_tables"
] | 48.153061 | 20.234694 |
def get_indexes(self, schema, **kwargs):
"""
get all the indexes
schema -- Schema()
return -- dict -- the indexes in {indexname: fields} format
"""
with self.connection(**kwargs) as connection:
kwargs['connection'] = connection
return self._get_indexes(schema, **kwargs) | [
"def",
"get_indexes",
"(",
"self",
",",
"schema",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"connection",
"(",
"*",
"*",
"kwargs",
")",
"as",
"connection",
":",
"kwargs",
"[",
"'connection'",
"]",
"=",
"connection",
"return",
"self",
".",
"_get_indexes",
"(",
"schema",
",",
"*",
"*",
"kwargs",
")"
] | 30.363636 | 15.090909 |
def loc_forecast(self, request, step, isotime=None):
"""
Return location-specific forecast data (including lists of available
sites and time capabilities) for given time step.
request:
metoffer.SITELIST Returns available sites
metoffer.CAPABILITIES Returns available times
site ID, e.g. "3021" Returns forecast data for site
metoffer.ALL Returns forecast data for ALL sites
step:
"" Step not required with SITELIST
or CAPABILITIES
metoffer.DAILY Returns daily forecasts
metoffer.THREE_HOURLY Returns forecast for every three hours
isotime:
An ISO 8601 formatted datetime as string
Returns only data for this time step.
Possible time steps may be obtained
through metoffer.CAPABILITIES
"""
return json.loads(self._query(VAL, FORECAST, ALL, request, step, isotime).decode(errors="replace")) | [
"def",
"loc_forecast",
"(",
"self",
",",
"request",
",",
"step",
",",
"isotime",
"=",
"None",
")",
":",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"_query",
"(",
"VAL",
",",
"FORECAST",
",",
"ALL",
",",
"request",
",",
"step",
",",
"isotime",
")",
".",
"decode",
"(",
"errors",
"=",
"\"replace\"",
")",
")"
] | 52.045455 | 24.409091 |
def delete(self, pk, **kwargs):
"""
Delete the object by primary_key:
.. code-block:: python
DBSession.sacrud(Users).delete(1)
DBSession.sacrud(Users).delete('1')
DBSession.sacrud(User2Groups).delete({'user_id': 4, 'group_id': 2})
JSON support:
.. code-block:: python
DBSession.sacrud(User2Groups).delete(
'{"user_id": 4, "group_id": 2}'
)
Default it run ``session.commit() or transaction.commit()``.
If it is not necessary use attribute ``commit=False``.
"""
pk = unjson(pk)
obj = get_obj(self.session, self.table, pk)
if self._delete(obj, **kwargs):
return {'pk': pk, 'name': obj.__repr__()} | [
"def",
"delete",
"(",
"self",
",",
"pk",
",",
"*",
"*",
"kwargs",
")",
":",
"pk",
"=",
"unjson",
"(",
"pk",
")",
"obj",
"=",
"get_obj",
"(",
"self",
".",
"session",
",",
"self",
".",
"table",
",",
"pk",
")",
"if",
"self",
".",
"_delete",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"{",
"'pk'",
":",
"pk",
",",
"'name'",
":",
"obj",
".",
"__repr__",
"(",
")",
"}"
] | 28.884615 | 19.807692 |
def get_view(name, context=None, request=None):
"""Get the view by name
:param name: The name of the view
:type name: str
:param context: The context to query the view
:type context: ATContentType/DexterityContentType/CatalogBrain
:param request: The request to query the view
:type request: HTTPRequest object
:returns: HTTP Request
:rtype: Products.Five.metaclass View object
"""
context = context or get_portal()
request = request or get_request() or None
return getMultiAdapter((get_object(context), request), name=name) | [
"def",
"get_view",
"(",
"name",
",",
"context",
"=",
"None",
",",
"request",
"=",
"None",
")",
":",
"context",
"=",
"context",
"or",
"get_portal",
"(",
")",
"request",
"=",
"request",
"or",
"get_request",
"(",
")",
"or",
"None",
"return",
"getMultiAdapter",
"(",
"(",
"get_object",
"(",
"context",
")",
",",
"request",
")",
",",
"name",
"=",
"name",
")"
] | 37.533333 | 11.8 |
def _http_request(url,
headers=None,
data=None):
'''
Make the HTTP request and return the body as python object.
'''
if not headers:
headers = _get_headers()
session = requests.session()
log.debug('Querying %s', url)
req = session.post(url,
headers=headers,
data=salt.utils.json.dumps(data))
req_body = req.json()
ret = _default_ret()
log.debug('Status code: %d', req.status_code)
log.debug('Response body:')
log.debug(req_body)
if req.status_code != 200:
if req.status_code == 500:
ret['comment'] = req_body.pop('message', '')
ret['out'] = req_body
return ret
ret.update({
'comment': req_body.get('error', '')
})
return ret
ret.update({
'result': True,
'out': req.json()
})
return ret | [
"def",
"_http_request",
"(",
"url",
",",
"headers",
"=",
"None",
",",
"data",
"=",
"None",
")",
":",
"if",
"not",
"headers",
":",
"headers",
"=",
"_get_headers",
"(",
")",
"session",
"=",
"requests",
".",
"session",
"(",
")",
"log",
".",
"debug",
"(",
"'Querying %s'",
",",
"url",
")",
"req",
"=",
"session",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"req_body",
"=",
"req",
".",
"json",
"(",
")",
"ret",
"=",
"_default_ret",
"(",
")",
"log",
".",
"debug",
"(",
"'Status code: %d'",
",",
"req",
".",
"status_code",
")",
"log",
".",
"debug",
"(",
"'Response body:'",
")",
"log",
".",
"debug",
"(",
"req_body",
")",
"if",
"req",
".",
"status_code",
"!=",
"200",
":",
"if",
"req",
".",
"status_code",
"==",
"500",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"req_body",
".",
"pop",
"(",
"'message'",
",",
"''",
")",
"ret",
"[",
"'out'",
"]",
"=",
"req_body",
"return",
"ret",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"req_body",
".",
"get",
"(",
"'error'",
",",
"''",
")",
"}",
")",
"return",
"ret",
"ret",
".",
"update",
"(",
"{",
"'result'",
":",
"True",
",",
"'out'",
":",
"req",
".",
"json",
"(",
")",
"}",
")",
"return",
"ret"
] | 28.25 | 16.25 |
def group_memberships_destroy_many(self, ids=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships#bulk-delete-memberships"
api_path = "/api/v2/group_memberships/destroy_many.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if ids:
api_query.update({
"ids": ids,
})
return self.call(api_path, query=api_query, method="DELETE", **kwargs) | [
"def",
"group_memberships_destroy_many",
"(",
"self",
",",
"ids",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/group_memberships/destroy_many.json\"",
"api_query",
"=",
"{",
"}",
"if",
"\"query\"",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"api_query",
".",
"update",
"(",
"kwargs",
"[",
"\"query\"",
"]",
")",
"del",
"kwargs",
"[",
"\"query\"",
"]",
"if",
"ids",
":",
"api_query",
".",
"update",
"(",
"{",
"\"ids\"",
":",
"ids",
",",
"}",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"query",
"=",
"api_query",
",",
"method",
"=",
"\"DELETE\"",
",",
"*",
"*",
"kwargs",
")"
] | 43.583333 | 21.083333 |
def loads(self, schema_txt: str) -> ShExJ.Schema:
""" Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema
"""
self.schema_text = schema_txt
if schema_txt.strip()[0] == '{':
# TODO: figure out how to propagate self.base_location into this parse
return cast(ShExJ.Schema, loads(schema_txt, ShExJ))
else:
return generate_shexj.parse(schema_txt, self.base_location) | [
"def",
"loads",
"(",
"self",
",",
"schema_txt",
":",
"str",
")",
"->",
"ShExJ",
".",
"Schema",
":",
"self",
".",
"schema_text",
"=",
"schema_txt",
"if",
"schema_txt",
".",
"strip",
"(",
")",
"[",
"0",
"]",
"==",
"'{'",
":",
"# TODO: figure out how to propagate self.base_location into this parse",
"return",
"cast",
"(",
"ShExJ",
".",
"Schema",
",",
"loads",
"(",
"schema_txt",
",",
"ShExJ",
")",
")",
"else",
":",
"return",
"generate_shexj",
".",
"parse",
"(",
"schema_txt",
",",
"self",
".",
"base_location",
")"
] | 45.416667 | 18.416667 |
def enumerate(vendor_id=0, product_id=0):
""" Enumerate the HID Devices.
Returns a generator that yields all of the HID devices attached to the
system.
:param vendor_id: Only return devices which match this vendor id
:type vendor_id: int
:param product_id: Only return devices which match this product id
:type product_id: int
:return: Generator that yields informations about attached
HID devices
:rval: generator(DeviceInfo)
"""
info = hidapi.hid_enumerate(vendor_id, product_id)
while info:
yield DeviceInfo(info)
info = info.next
hidapi.hid_free_enumeration(info) | [
"def",
"enumerate",
"(",
"vendor_id",
"=",
"0",
",",
"product_id",
"=",
"0",
")",
":",
"info",
"=",
"hidapi",
".",
"hid_enumerate",
"(",
"vendor_id",
",",
"product_id",
")",
"while",
"info",
":",
"yield",
"DeviceInfo",
"(",
"info",
")",
"info",
"=",
"info",
".",
"next",
"hidapi",
".",
"hid_free_enumeration",
"(",
"info",
")"
] | 33.75 | 19.1 |
def _raw(self, msg):
""" Print any command sent in raw format
:param msg: arbitrary code to be printed
:type msg: bytes
"""
self.device.write(self.out_ep, msg, self.timeout) | [
"def",
"_raw",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"device",
".",
"write",
"(",
"self",
".",
"out_ep",
",",
"msg",
",",
"self",
".",
"timeout",
")"
] | 29.714286 | 14.428571 |
def bin_centers(self, axis=None):
"""Return bin centers along an axis, or if axis=None, list of bin_centers along each axis"""
if axis is None:
return np.array([self.bin_centers(axis=i) for i in range(self.dimensions)])
axis = self.get_axis_number(axis)
return 0.5 * (self.bin_edges[axis][1:] + self.bin_edges[axis][:-1]) | [
"def",
"bin_centers",
"(",
"self",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"bin_centers",
"(",
"axis",
"=",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"dimensions",
")",
"]",
")",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"return",
"0.5",
"*",
"(",
"self",
".",
"bin_edges",
"[",
"axis",
"]",
"[",
"1",
":",
"]",
"+",
"self",
".",
"bin_edges",
"[",
"axis",
"]",
"[",
":",
"-",
"1",
"]",
")"
] | 60 | 17.666667 |
def check_subsequent_web_request(self, item_session: ItemSession,
is_redirect: bool=False) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record, is_redirect=is_redirect)
# TODO: provide an option to change this
if item_session.is_virtual:
verdict = True
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason | [
"def",
"check_subsequent_web_request",
"(",
"self",
",",
"item_session",
":",
"ItemSession",
",",
"is_redirect",
":",
"bool",
"=",
"False",
")",
"->",
"Tuple",
"[",
"bool",
",",
"str",
"]",
":",
"verdict",
",",
"reason",
",",
"test_info",
"=",
"self",
".",
"consult_filters",
"(",
"item_session",
".",
"request",
".",
"url_info",
",",
"item_session",
".",
"url_record",
",",
"is_redirect",
"=",
"is_redirect",
")",
"# TODO: provide an option to change this",
"if",
"item_session",
".",
"is_virtual",
":",
"verdict",
"=",
"True",
"verdict",
",",
"reason",
"=",
"self",
".",
"consult_hook",
"(",
"item_session",
",",
"verdict",
",",
"reason",
",",
"test_info",
")",
"return",
"verdict",
",",
"reason"
] | 35.789474 | 22.315789 |
def change_port_speed(self, hardware_id, public, speed):
"""Allows you to change the port speed of a server's NICs.
:param int hardware_id: The ID of the server
:param bool public: Flag to indicate which interface to change.
True (default) means the public interface.
False indicates the private interface.
:param int speed: The port speed to set.
.. warning::
A port speed of 0 will disable the interface.
Example::
#change the Public interface to 10Mbps on instance 12345
result = mgr.change_port_speed(hardware_id=12345,
public=True, speed=10)
# result will be True or an Exception
"""
if public:
return self.client.call('Hardware_Server',
'setPublicNetworkInterfaceSpeed',
speed, id=hardware_id)
else:
return self.client.call('Hardware_Server',
'setPrivateNetworkInterfaceSpeed',
speed, id=hardware_id) | [
"def",
"change_port_speed",
"(",
"self",
",",
"hardware_id",
",",
"public",
",",
"speed",
")",
":",
"if",
"public",
":",
"return",
"self",
".",
"client",
".",
"call",
"(",
"'Hardware_Server'",
",",
"'setPublicNetworkInterfaceSpeed'",
",",
"speed",
",",
"id",
"=",
"hardware_id",
")",
"else",
":",
"return",
"self",
".",
"client",
".",
"call",
"(",
"'Hardware_Server'",
",",
"'setPrivateNetworkInterfaceSpeed'",
",",
"speed",
",",
"id",
"=",
"hardware_id",
")"
] | 43.37037 | 22.148148 |
def _find_scc(self):
"""
Set ``self._num_scc`` and ``self._scc_proj``
by calling ``scipy.sparse.csgraph.connected_components``:
* docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
* github.com/scipy/scipy/blob/master/scipy/sparse/csgraph/_traversal.pyx
``self._scc_proj`` is a list of length `n` that assigns to each node
the label of the strongly connected component to which it belongs.
"""
# Find the strongly connected components
self._num_scc, self._scc_proj = \
csgraph.connected_components(self.csgraph, connection='strong') | [
"def",
"_find_scc",
"(",
"self",
")",
":",
"# Find the strongly connected components",
"self",
".",
"_num_scc",
",",
"self",
".",
"_scc_proj",
"=",
"csgraph",
".",
"connected_components",
"(",
"self",
".",
"csgraph",
",",
"connection",
"=",
"'strong'",
")"
] | 44.071429 | 22.5 |
def identify_and_tag_URLs(line):
"""Given a reference line, identify URLs in the line, record the
information about them, and replace them with a "<cds.URL />" tag.
URLs are identified in 2 forms:
+ Raw: http://invenio-software.org/
+ HTML marked-up: <a href="http://invenio-software.org/">CERN Document
Server Software Consortium</a>
These URLs are considered to have 2 components: The URL itself
(url string); and the URL description. The description is effectively
the text used for the created Hyperlink when the URL is marked-up
in HTML. When an HTML marked-up URL has been recognised, the text
between the anchor tags is therefore taken as the URL description.
In the case of a raw URL recognition, however, the URL itself will
also be used as the URL description.
For example, in the following reference line:
[1] See <a href="http://invenio-software.org/">CERN Document Server
Software Consortium</a>.
...the URL string will be "http://invenio-software.org/" and the URL
description will be
"CERN Document Server Software Consortium".
The line returned from this function will be:
[1] See <cds.URL />
In the following line, however:
[1] See http //invenio-software.org/ for more details.
...the URL string will be "http://invenio-software.org/" and the URL
description will also be "http://invenio-software.org/".
The line returned will be:
[1] See <cds.URL /> for more details.
@param line: (string) the reference line in which to search for URLs.
@return: (tuple) - containing 2 items:
+ the line after URLs have been recognised and removed;
+ a list of 2-item tuples where each tuple represents a recognised URL
and its description:
[(url, url-description), (url, url-description), ... ]
@Exceptions raised:
+ an IndexError if there is a problem with the number of URLs
recognised (this should not happen.)
"""
# Take a copy of the line:
line_pre_url_check = line
# Dictionaries to record details of matched URLs:
found_url_full_matchlen = {}
found_url_urlstring = {}
found_url_urldescr = {}
# List to contain details of all matched URLs:
identified_urls = []
# Attempt to identify and tag all HTML-MARKED-UP URLs in the line:
m_tagged_url_iter = re_html_tagged_url.finditer(line)
for m_tagged_url in m_tagged_url_iter:
startposn = m_tagged_url.start() # start position of matched URL
endposn = m_tagged_url.end() # end position of matched URL
matchlen = len(m_tagged_url.group(0)) # total length of URL match
found_url_full_matchlen[startposn] = matchlen
found_url_urlstring[startposn] = m_tagged_url.group('url')
found_url_urldescr[startposn] = m_tagged_url.group('desc')
# temporarily replace the URL match with underscores so that
# it won't be re-found
line = line[0:startposn] + u"_" * matchlen + line[endposn:]
# Attempt to identify and tag all RAW (i.e. not
# HTML-marked-up) URLs in the line:
m_raw_url_iter = re_raw_url.finditer(line)
for m_raw_url in m_raw_url_iter:
startposn = m_raw_url.start() # start position of matched URL
endposn = m_raw_url.end() # end position of matched URL
matchlen = len(m_raw_url.group(0)) # total length of URL match
matched_url = m_raw_url.group('url')
if len(matched_url) > 0 and matched_url[-1] in (".", ","):
# Strip the full-stop or comma from the end of the url:
matched_url = matched_url[:-1]
found_url_full_matchlen[startposn] = matchlen
found_url_urlstring[startposn] = matched_url
found_url_urldescr[startposn] = matched_url
# temporarily replace the URL match with underscores
# so that it won't be re-found
line = line[0:startposn] + u"_" * matchlen + line[endposn:]
# Now that all URLs have been identified, insert them
# back into the line, tagged:
found_url_positions = found_url_urlstring.keys()
found_url_positions.sort()
found_url_positions.reverse()
for url_position in found_url_positions:
line = line[0:url_position] + "<cds.URL />" \
+ line[url_position + found_url_full_matchlen[url_position]:]
# The line has been rebuilt. Now record the information about the
# matched URLs:
found_url_positions = found_url_urlstring.keys()
found_url_positions.sort()
for url_position in found_url_positions:
identified_urls.append((found_url_urlstring[url_position],
found_url_urldescr[url_position]))
# Somehow the number of URLs found doesn't match the number of
# URLs recorded in "identified_urls". Raise an IndexError.
msg = """Error: The number of URLs found in the reference line """ \
"""does not match the number of URLs recorded in the """ \
"""list of identified URLs!\nLine pre-URL checking: %s\n""" \
"""Line post-URL checking: %s\n""" \
% (line_pre_url_check, line)
assert len(identified_urls) == len(found_url_positions), msg
# return the line containing the tagged URLs:
return line, identified_urls | [
"def",
"identify_and_tag_URLs",
"(",
"line",
")",
":",
"# Take a copy of the line:",
"line_pre_url_check",
"=",
"line",
"# Dictionaries to record details of matched URLs:",
"found_url_full_matchlen",
"=",
"{",
"}",
"found_url_urlstring",
"=",
"{",
"}",
"found_url_urldescr",
"=",
"{",
"}",
"# List to contain details of all matched URLs:",
"identified_urls",
"=",
"[",
"]",
"# Attempt to identify and tag all HTML-MARKED-UP URLs in the line:",
"m_tagged_url_iter",
"=",
"re_html_tagged_url",
".",
"finditer",
"(",
"line",
")",
"for",
"m_tagged_url",
"in",
"m_tagged_url_iter",
":",
"startposn",
"=",
"m_tagged_url",
".",
"start",
"(",
")",
"# start position of matched URL",
"endposn",
"=",
"m_tagged_url",
".",
"end",
"(",
")",
"# end position of matched URL",
"matchlen",
"=",
"len",
"(",
"m_tagged_url",
".",
"group",
"(",
"0",
")",
")",
"# total length of URL match",
"found_url_full_matchlen",
"[",
"startposn",
"]",
"=",
"matchlen",
"found_url_urlstring",
"[",
"startposn",
"]",
"=",
"m_tagged_url",
".",
"group",
"(",
"'url'",
")",
"found_url_urldescr",
"[",
"startposn",
"]",
"=",
"m_tagged_url",
".",
"group",
"(",
"'desc'",
")",
"# temporarily replace the URL match with underscores so that",
"# it won't be re-found",
"line",
"=",
"line",
"[",
"0",
":",
"startposn",
"]",
"+",
"u\"_\"",
"*",
"matchlen",
"+",
"line",
"[",
"endposn",
":",
"]",
"# Attempt to identify and tag all RAW (i.e. not",
"# HTML-marked-up) URLs in the line:",
"m_raw_url_iter",
"=",
"re_raw_url",
".",
"finditer",
"(",
"line",
")",
"for",
"m_raw_url",
"in",
"m_raw_url_iter",
":",
"startposn",
"=",
"m_raw_url",
".",
"start",
"(",
")",
"# start position of matched URL",
"endposn",
"=",
"m_raw_url",
".",
"end",
"(",
")",
"# end position of matched URL",
"matchlen",
"=",
"len",
"(",
"m_raw_url",
".",
"group",
"(",
"0",
")",
")",
"# total length of URL match",
"matched_url",
"=",
"m_raw_url",
".",
"group",
"(",
"'url'",
")",
"if",
"len",
"(",
"matched_url",
")",
">",
"0",
"and",
"matched_url",
"[",
"-",
"1",
"]",
"in",
"(",
"\".\"",
",",
"\",\"",
")",
":",
"# Strip the full-stop or comma from the end of the url:",
"matched_url",
"=",
"matched_url",
"[",
":",
"-",
"1",
"]",
"found_url_full_matchlen",
"[",
"startposn",
"]",
"=",
"matchlen",
"found_url_urlstring",
"[",
"startposn",
"]",
"=",
"matched_url",
"found_url_urldescr",
"[",
"startposn",
"]",
"=",
"matched_url",
"# temporarily replace the URL match with underscores",
"# so that it won't be re-found",
"line",
"=",
"line",
"[",
"0",
":",
"startposn",
"]",
"+",
"u\"_\"",
"*",
"matchlen",
"+",
"line",
"[",
"endposn",
":",
"]",
"# Now that all URLs have been identified, insert them",
"# back into the line, tagged:",
"found_url_positions",
"=",
"found_url_urlstring",
".",
"keys",
"(",
")",
"found_url_positions",
".",
"sort",
"(",
")",
"found_url_positions",
".",
"reverse",
"(",
")",
"for",
"url_position",
"in",
"found_url_positions",
":",
"line",
"=",
"line",
"[",
"0",
":",
"url_position",
"]",
"+",
"\"<cds.URL />\"",
"+",
"line",
"[",
"url_position",
"+",
"found_url_full_matchlen",
"[",
"url_position",
"]",
":",
"]",
"# The line has been rebuilt. Now record the information about the",
"# matched URLs:",
"found_url_positions",
"=",
"found_url_urlstring",
".",
"keys",
"(",
")",
"found_url_positions",
".",
"sort",
"(",
")",
"for",
"url_position",
"in",
"found_url_positions",
":",
"identified_urls",
".",
"append",
"(",
"(",
"found_url_urlstring",
"[",
"url_position",
"]",
",",
"found_url_urldescr",
"[",
"url_position",
"]",
")",
")",
"# Somehow the number of URLs found doesn't match the number of",
"# URLs recorded in \"identified_urls\". Raise an IndexError.",
"msg",
"=",
"\"\"\"Error: The number of URLs found in the reference line \"\"\"",
"\"\"\"does not match the number of URLs recorded in the \"\"\"",
"\"\"\"list of identified URLs!\\nLine pre-URL checking: %s\\n\"\"\"",
"\"\"\"Line post-URL checking: %s\\n\"\"\"",
"%",
"(",
"line_pre_url_check",
",",
"line",
")",
"assert",
"len",
"(",
"identified_urls",
")",
"==",
"len",
"(",
"found_url_positions",
")",
",",
"msg",
"# return the line containing the tagged URLs:",
"return",
"line",
",",
"identified_urls"
] | 47.756757 | 19 |
def iter_blocks(block_list):
"""A generator for blocks contained in a block list.
Yields tuples containing the block name, the depth that the block was
found at, and finally a handle to the block itself.
"""
# queue the block and the depth of the block
queue = [(block, 0) for block in block_list
if isinstance(block, kurt.Block)]
while queue:
block, depth = queue.pop(0)
assert block.type.text
yield block.type.text, depth, block
for arg in block.args:
if hasattr(arg, '__iter__'):
queue[0:0] = [(x, depth + 1) for x in arg
if isinstance(x, kurt.Block)]
elif isinstance(arg, kurt.Block):
queue.append((arg, depth)) | [
"def",
"iter_blocks",
"(",
"block_list",
")",
":",
"# queue the block and the depth of the block",
"queue",
"=",
"[",
"(",
"block",
",",
"0",
")",
"for",
"block",
"in",
"block_list",
"if",
"isinstance",
"(",
"block",
",",
"kurt",
".",
"Block",
")",
"]",
"while",
"queue",
":",
"block",
",",
"depth",
"=",
"queue",
".",
"pop",
"(",
"0",
")",
"assert",
"block",
".",
"type",
".",
"text",
"yield",
"block",
".",
"type",
".",
"text",
",",
"depth",
",",
"block",
"for",
"arg",
"in",
"block",
".",
"args",
":",
"if",
"hasattr",
"(",
"arg",
",",
"'__iter__'",
")",
":",
"queue",
"[",
"0",
":",
"0",
"]",
"=",
"[",
"(",
"x",
",",
"depth",
"+",
"1",
")",
"for",
"x",
"in",
"arg",
"if",
"isinstance",
"(",
"x",
",",
"kurt",
".",
"Block",
")",
"]",
"elif",
"isinstance",
"(",
"arg",
",",
"kurt",
".",
"Block",
")",
":",
"queue",
".",
"append",
"(",
"(",
"arg",
",",
"depth",
")",
")"
] | 41.25 | 14.2 |
def _init_table(self):
"""
Initialize the observation table.
"""
self.observation_table.sm_vector.append(self.epsilon)
self.observation_table.smi_vector = [random.choice(self.alphabet)]
self.observation_table.em_vector.append(self.epsilon)
self._fill_table_entry(self.epsilon, self.epsilon)
for s in self.observation_table.smi_vector:
self._fill_table_entry(s, self.epsilon) | [
"def",
"_init_table",
"(",
"self",
")",
":",
"self",
".",
"observation_table",
".",
"sm_vector",
".",
"append",
"(",
"self",
".",
"epsilon",
")",
"self",
".",
"observation_table",
".",
"smi_vector",
"=",
"[",
"random",
".",
"choice",
"(",
"self",
".",
"alphabet",
")",
"]",
"self",
".",
"observation_table",
".",
"em_vector",
".",
"append",
"(",
"self",
".",
"epsilon",
")",
"self",
".",
"_fill_table_entry",
"(",
"self",
".",
"epsilon",
",",
"self",
".",
"epsilon",
")",
"for",
"s",
"in",
"self",
".",
"observation_table",
".",
"smi_vector",
":",
"self",
".",
"_fill_table_entry",
"(",
"s",
",",
"self",
".",
"epsilon",
")"
] | 40.090909 | 15.909091 |
def silent(duration=1000, frame_rate=11025):
"""
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence.
:param duration: The duration of the returned object in ms.
:param frame_rate: The samples per second of the returned object.
:returns: AudioSegment object filled with pure digital silence.
"""
seg = pydub.AudioSegment.silent(duration=duration, frame_rate=frame_rate)
return AudioSegment(seg, "") | [
"def",
"silent",
"(",
"duration",
"=",
"1000",
",",
"frame_rate",
"=",
"11025",
")",
":",
"seg",
"=",
"pydub",
".",
"AudioSegment",
".",
"silent",
"(",
"duration",
"=",
"duration",
",",
"frame_rate",
"=",
"frame_rate",
")",
"return",
"AudioSegment",
"(",
"seg",
",",
"\"\"",
")"
] | 46.6 | 22.8 |
def remove(self, *args):
"""Remove the instance tied to the field for the given "value" (via `args`) from the index
For the parameters, see ``BaseIndex.remove``
"""
key = self.get_storage_key(*args)
pk = self.instance.pk.get()
logger.debug("removing %s from index %s" % (pk, key))
self.connection.srem(key, pk)
self._deindexed_values.add(tuple(args)) | [
"def",
"remove",
"(",
"self",
",",
"*",
"args",
")",
":",
"key",
"=",
"self",
".",
"get_storage_key",
"(",
"*",
"args",
")",
"pk",
"=",
"self",
".",
"instance",
".",
"pk",
".",
"get",
"(",
")",
"logger",
".",
"debug",
"(",
"\"removing %s from index %s\"",
"%",
"(",
"pk",
",",
"key",
")",
")",
"self",
".",
"connection",
".",
"srem",
"(",
"key",
",",
"pk",
")",
"self",
".",
"_deindexed_values",
".",
"add",
"(",
"tuple",
"(",
"args",
")",
")"
] | 33.833333 | 15.416667 |
def compress_histogram(buckets, bps=NORMAL_HISTOGRAM_BPS):
"""Creates fixed size histogram by adding compression to accumulated state.
This routine transforms a histogram at a particular step by linearly
interpolating its variable number of buckets to represent their cumulative
weight at a constant number of compression points. This significantly reduces
the size of the histogram and makes it suitable for a two-dimensional area
plot where the output of this routine constitutes the ranges for a single x
coordinate.
Args:
buckets: A list of buckets, each of which is a 3-tuple of the form
`(min, max, count)`.
bps: Compression points represented in basis points, 1/100ths of a percent.
Defaults to normal distribution.
Returns:
List of values for each basis point.
"""
# See also: Histogram::Percentile() in core/lib/histogram/histogram.cc
buckets = np.array(buckets)
if not buckets.size:
return [CompressedHistogramValue(b, 0.0) for b in bps]
(minmin, maxmax) = (buckets[0][0], buckets[-1][1])
counts = buckets[:, 2]
right_edges = list(buckets[:, 1])
weights = (counts * bps[-1] / (counts.sum() or 1.0)).cumsum()
result = []
bp_index = 0
while bp_index < len(bps):
i = np.searchsorted(weights, bps[bp_index], side='right')
while i < len(weights):
cumsum = weights[i]
cumsum_prev = weights[i - 1] if i > 0 else 0.0
if cumsum == cumsum_prev: # prevent division-by-zero in `_lerp`
i += 1
continue
if not i or not cumsum_prev:
lhs = minmin
else:
lhs = max(right_edges[i - 1], minmin)
rhs = min(right_edges[i], maxmax)
weight = _lerp(bps[bp_index], cumsum_prev, cumsum, lhs, rhs)
result.append(CompressedHistogramValue(bps[bp_index], weight))
bp_index += 1
break
else:
break
while bp_index < len(bps):
result.append(CompressedHistogramValue(bps[bp_index], maxmax))
bp_index += 1
return result | [
"def",
"compress_histogram",
"(",
"buckets",
",",
"bps",
"=",
"NORMAL_HISTOGRAM_BPS",
")",
":",
"# See also: Histogram::Percentile() in core/lib/histogram/histogram.cc",
"buckets",
"=",
"np",
".",
"array",
"(",
"buckets",
")",
"if",
"not",
"buckets",
".",
"size",
":",
"return",
"[",
"CompressedHistogramValue",
"(",
"b",
",",
"0.0",
")",
"for",
"b",
"in",
"bps",
"]",
"(",
"minmin",
",",
"maxmax",
")",
"=",
"(",
"buckets",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"buckets",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
")",
"counts",
"=",
"buckets",
"[",
":",
",",
"2",
"]",
"right_edges",
"=",
"list",
"(",
"buckets",
"[",
":",
",",
"1",
"]",
")",
"weights",
"=",
"(",
"counts",
"*",
"bps",
"[",
"-",
"1",
"]",
"/",
"(",
"counts",
".",
"sum",
"(",
")",
"or",
"1.0",
")",
")",
".",
"cumsum",
"(",
")",
"result",
"=",
"[",
"]",
"bp_index",
"=",
"0",
"while",
"bp_index",
"<",
"len",
"(",
"bps",
")",
":",
"i",
"=",
"np",
".",
"searchsorted",
"(",
"weights",
",",
"bps",
"[",
"bp_index",
"]",
",",
"side",
"=",
"'right'",
")",
"while",
"i",
"<",
"len",
"(",
"weights",
")",
":",
"cumsum",
"=",
"weights",
"[",
"i",
"]",
"cumsum_prev",
"=",
"weights",
"[",
"i",
"-",
"1",
"]",
"if",
"i",
">",
"0",
"else",
"0.0",
"if",
"cumsum",
"==",
"cumsum_prev",
":",
"# prevent division-by-zero in `_lerp`",
"i",
"+=",
"1",
"continue",
"if",
"not",
"i",
"or",
"not",
"cumsum_prev",
":",
"lhs",
"=",
"minmin",
"else",
":",
"lhs",
"=",
"max",
"(",
"right_edges",
"[",
"i",
"-",
"1",
"]",
",",
"minmin",
")",
"rhs",
"=",
"min",
"(",
"right_edges",
"[",
"i",
"]",
",",
"maxmax",
")",
"weight",
"=",
"_lerp",
"(",
"bps",
"[",
"bp_index",
"]",
",",
"cumsum_prev",
",",
"cumsum",
",",
"lhs",
",",
"rhs",
")",
"result",
".",
"append",
"(",
"CompressedHistogramValue",
"(",
"bps",
"[",
"bp_index",
"]",
",",
"weight",
")",
")",
"bp_index",
"+=",
"1",
"break",
"else",
":",
"break",
"while",
"bp_index",
"<",
"len",
"(",
"bps",
")",
":",
"result",
".",
"append",
"(",
"CompressedHistogramValue",
"(",
"bps",
"[",
"bp_index",
"]",
",",
"maxmax",
")",
")",
"bp_index",
"+=",
"1",
"return",
"result"
] | 36.54717 | 22.283019 |
def unpack_messages(msgs):
import msgpack
""" Deserialize a message to python structures """
for key, msg in msgs:
record = msgpack.unpackb(msg)
record['_key'] = key
yield record | [
"def",
"unpack_messages",
"(",
"msgs",
")",
":",
"import",
"msgpack",
"for",
"key",
",",
"msg",
"in",
"msgs",
":",
"record",
"=",
"msgpack",
".",
"unpackb",
"(",
"msg",
")",
"record",
"[",
"'_key'",
"]",
"=",
"key",
"yield",
"record"
] | 29 | 13.5 |
def handshake(self, protocol='vnc', width=1024, height=768, dpi=96,
audio=None, video=None, image=None, **kwargs):
"""
Establish connection with Guacamole guacd server via handshake.
"""
if protocol not in PROTOCOLS:
self.logger.debug('Invalid protocol: %s' % protocol)
raise GuacamoleError('Cannot start Handshake. Missing protocol.')
if audio is None:
audio = list()
if video is None:
video = list()
if image is None:
image = list()
# 1. Send 'select' instruction
self.logger.debug('Send `select` instruction.')
self.send_instruction(Instruction('select', protocol))
# 2. Receive `args` instruction
instruction = self.read_instruction()
self.logger.debug('Expecting `args` instruction, received: %s'
% str(instruction))
if not instruction:
self.close()
raise GuacamoleError(
'Cannot establish Handshake. Connection Lost!')
if instruction.opcode != 'args':
self.close()
raise GuacamoleError(
'Cannot establish Handshake. Expected opcode `args`, '
'received `%s` instead.' % instruction.opcode)
# 3. Respond with size, audio & video support
self.logger.debug('Send `size` instruction (%s, %s, %s)'
% (width, height, dpi))
self.send_instruction(Instruction('size', width, height, dpi))
self.logger.debug('Send `audio` instruction (%s)' % audio)
self.send_instruction(Instruction('audio', *audio))
self.logger.debug('Send `video` instruction (%s)' % video)
self.send_instruction(Instruction('video', *video))
self.logger.debug('Send `image` instruction (%s)' % image)
self.send_instruction(Instruction('image', *image))
# 4. Send `connect` instruction with proper values
connection_args = [
kwargs.get(arg.replace('-', '_'), '') for arg in instruction.args
]
self.logger.debug('Send `connect` instruction (%s)' % connection_args)
self.send_instruction(Instruction('connect', *connection_args))
# 5. Receive ``ready`` instruction, with client ID.
instruction = self.read_instruction()
self.logger.debug('Expecting `ready` instruction, received: %s'
% str(instruction))
if instruction.opcode != 'ready':
self.logger.warning(
'Expected `ready` instruction, received: %s instead')
if instruction.args:
self._id = instruction.args[0]
self.logger.debug(
'Established connection with client id: %s' % self.id)
self.logger.debug('Handshake completed.')
self.connected = True | [
"def",
"handshake",
"(",
"self",
",",
"protocol",
"=",
"'vnc'",
",",
"width",
"=",
"1024",
",",
"height",
"=",
"768",
",",
"dpi",
"=",
"96",
",",
"audio",
"=",
"None",
",",
"video",
"=",
"None",
",",
"image",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"protocol",
"not",
"in",
"PROTOCOLS",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Invalid protocol: %s'",
"%",
"protocol",
")",
"raise",
"GuacamoleError",
"(",
"'Cannot start Handshake. Missing protocol.'",
")",
"if",
"audio",
"is",
"None",
":",
"audio",
"=",
"list",
"(",
")",
"if",
"video",
"is",
"None",
":",
"video",
"=",
"list",
"(",
")",
"if",
"image",
"is",
"None",
":",
"image",
"=",
"list",
"(",
")",
"# 1. Send 'select' instruction",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send `select` instruction.'",
")",
"self",
".",
"send_instruction",
"(",
"Instruction",
"(",
"'select'",
",",
"protocol",
")",
")",
"# 2. Receive `args` instruction",
"instruction",
"=",
"self",
".",
"read_instruction",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Expecting `args` instruction, received: %s'",
"%",
"str",
"(",
"instruction",
")",
")",
"if",
"not",
"instruction",
":",
"self",
".",
"close",
"(",
")",
"raise",
"GuacamoleError",
"(",
"'Cannot establish Handshake. Connection Lost!'",
")",
"if",
"instruction",
".",
"opcode",
"!=",
"'args'",
":",
"self",
".",
"close",
"(",
")",
"raise",
"GuacamoleError",
"(",
"'Cannot establish Handshake. Expected opcode `args`, '",
"'received `%s` instead.'",
"%",
"instruction",
".",
"opcode",
")",
"# 3. Respond with size, audio & video support",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send `size` instruction (%s, %s, %s)'",
"%",
"(",
"width",
",",
"height",
",",
"dpi",
")",
")",
"self",
".",
"send_instruction",
"(",
"Instruction",
"(",
"'size'",
",",
"width",
",",
"height",
",",
"dpi",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send `audio` instruction (%s)'",
"%",
"audio",
")",
"self",
".",
"send_instruction",
"(",
"Instruction",
"(",
"'audio'",
",",
"*",
"audio",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send `video` instruction (%s)'",
"%",
"video",
")",
"self",
".",
"send_instruction",
"(",
"Instruction",
"(",
"'video'",
",",
"*",
"video",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send `image` instruction (%s)'",
"%",
"image",
")",
"self",
".",
"send_instruction",
"(",
"Instruction",
"(",
"'image'",
",",
"*",
"image",
")",
")",
"# 4. Send `connect` instruction with proper values",
"connection_args",
"=",
"[",
"kwargs",
".",
"get",
"(",
"arg",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
",",
"''",
")",
"for",
"arg",
"in",
"instruction",
".",
"args",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send `connect` instruction (%s)'",
"%",
"connection_args",
")",
"self",
".",
"send_instruction",
"(",
"Instruction",
"(",
"'connect'",
",",
"*",
"connection_args",
")",
")",
"# 5. Receive ``ready`` instruction, with client ID.",
"instruction",
"=",
"self",
".",
"read_instruction",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Expecting `ready` instruction, received: %s'",
"%",
"str",
"(",
"instruction",
")",
")",
"if",
"instruction",
".",
"opcode",
"!=",
"'ready'",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'Expected `ready` instruction, received: %s instead'",
")",
"if",
"instruction",
".",
"args",
":",
"self",
".",
"_id",
"=",
"instruction",
".",
"args",
"[",
"0",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"'Established connection with client id: %s'",
"%",
"self",
".",
"id",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Handshake completed.'",
")",
"self",
".",
"connected",
"=",
"True"
] | 37.184211 | 22.078947 |
def deploy(
config,
name,
bucket,
timeout,
memory,
description,
subnet_ids,
security_group_ids
):
""" Deploy/Update a function from a project directory """
# options should override config if it is there
myname = name or config.name
mybucket = bucket or config.bucket
mytimeout = timeout or config.timeout
mymemory = memory or config.memory
mydescription = description or config.description
mysubnet_ids = subnet_ids or config.subnet_ids
mysecurity_group_ids = security_group_ids or config.security_group_ids
vpc_config = {}
if mysubnet_ids and mysecurity_group_ids:
vpc_config = {
'SubnetIds': mysubnet_ids.split(','),
'SecurityGroupIds': mysecurity_group_ids.split(',')
}
click.echo('Deploying {} to {}'.format(myname, mybucket))
lambder.deploy_function(
myname,
mybucket,
mytimeout,
mymemory,
mydescription,
vpc_config
) | [
"def",
"deploy",
"(",
"config",
",",
"name",
",",
"bucket",
",",
"timeout",
",",
"memory",
",",
"description",
",",
"subnet_ids",
",",
"security_group_ids",
")",
":",
"# options should override config if it is there",
"myname",
"=",
"name",
"or",
"config",
".",
"name",
"mybucket",
"=",
"bucket",
"or",
"config",
".",
"bucket",
"mytimeout",
"=",
"timeout",
"or",
"config",
".",
"timeout",
"mymemory",
"=",
"memory",
"or",
"config",
".",
"memory",
"mydescription",
"=",
"description",
"or",
"config",
".",
"description",
"mysubnet_ids",
"=",
"subnet_ids",
"or",
"config",
".",
"subnet_ids",
"mysecurity_group_ids",
"=",
"security_group_ids",
"or",
"config",
".",
"security_group_ids",
"vpc_config",
"=",
"{",
"}",
"if",
"mysubnet_ids",
"and",
"mysecurity_group_ids",
":",
"vpc_config",
"=",
"{",
"'SubnetIds'",
":",
"mysubnet_ids",
".",
"split",
"(",
"','",
")",
",",
"'SecurityGroupIds'",
":",
"mysecurity_group_ids",
".",
"split",
"(",
"','",
")",
"}",
"click",
".",
"echo",
"(",
"'Deploying {} to {}'",
".",
"format",
"(",
"myname",
",",
"mybucket",
")",
")",
"lambder",
".",
"deploy_function",
"(",
"myname",
",",
"mybucket",
",",
"mytimeout",
",",
"mymemory",
",",
"mydescription",
",",
"vpc_config",
")"
] | 26.833333 | 20.805556 |
def get_all_organization_names(configuration=None, **kwargs):
# type: (Optional[Configuration], Any) -> List[str]
"""Get all organization names in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
sort (str): Sort the search results according to field name and sort-order. Allowed fields are ‘name’, ‘package_count’ and ‘title’. Defaults to 'name asc'.
organizations (List[str]): List of names of the groups to return.
all_fields (bool): Return group dictionaries instead of just names. Only core fields are returned - get some more using the include_* options. Defaults to False.
include_extras (bool): If all_fields, include the group extra fields. Defaults to False.
include_tags (bool): If all_fields, include the group tags. Defaults to False.
include_groups: If all_fields, include the groups the groups are in. Defaults to False.
Returns:
List[str]: List of all organization names in HDX
"""
organization = Organization(configuration=configuration)
organization['id'] = 'all organizations' # only for error message if produced
return organization._write_to_hdx('list', kwargs, 'id') | [
"def",
"get_all_organization_names",
"(",
"configuration",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Optional[Configuration], Any) -> List[str]",
"organization",
"=",
"Organization",
"(",
"configuration",
"=",
"configuration",
")",
"organization",
"[",
"'id'",
"]",
"=",
"'all organizations'",
"# only for error message if produced",
"return",
"organization",
".",
"_write_to_hdx",
"(",
"'list'",
",",
"kwargs",
",",
"'id'",
")"
] | 65.95 | 41.2 |
def get_ipaths(self):
"""
Returns generator of paths from nodes marked as added, changed or
removed.
"""
for node in itertools.chain(self.added, self.changed, self.removed):
yield node.path | [
"def",
"get_ipaths",
"(",
"self",
")",
":",
"for",
"node",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"added",
",",
"self",
".",
"changed",
",",
"self",
".",
"removed",
")",
":",
"yield",
"node",
".",
"path"
] | 33.571429 | 17.857143 |
def get_dataset(self, key, info):
"""Load a dataset."""
logger.debug('Reading %s.', key.name)
variable = self.nc[key.name]
return variable | [
"def",
"get_dataset",
"(",
"self",
",",
"key",
",",
"info",
")",
":",
"logger",
".",
"debug",
"(",
"'Reading %s.'",
",",
"key",
".",
"name",
")",
"variable",
"=",
"self",
".",
"nc",
"[",
"key",
".",
"name",
"]",
"return",
"variable"
] | 27.666667 | 12.166667 |
def fetch_task_to_run(self):
    """
    Return the first task that is ready to run, or ``None`` if no task
    can be submitted at present.

    Raises:
        StopIteration: if every task has already completed.
    """
    # All tasks done: signal the caller that there is nothing left.
    if all(task.is_completed for task in self):
        raise StopIteration("All tasks completed.")
    runnable = next((task for task in self if task.can_run), None)
    if runnable is not None:
        return runnable
    # Nothing is runnable right now; unmet dependencies are the usual cause.
    # Beware of possible deadlocks here!
    logger.warning("Possible deadlock in fetch_task_to_run!")
    return None
"def",
"fetch_task_to_run",
"(",
"self",
")",
":",
"# All the tasks are done so raise an exception",
"# that will be handled by the client code.",
"if",
"all",
"(",
"task",
".",
"is_completed",
"for",
"task",
"in",
"self",
")",
":",
"raise",
"StopIteration",
"(",
"\"All tasks completed.\"",
")",
"for",
"task",
"in",
"self",
":",
"if",
"task",
".",
"can_run",
":",
"return",
"task",
"# No task found, this usually happens when we have dependencies.",
"# Beware of possible deadlocks here!",
"logger",
".",
"warning",
"(",
"\"Possible deadlock in fetch_task_to_run!\"",
")",
"return",
"None"
] | 33.857143 | 17.380952 |
def run(self, timeout=POD_RUN_WAIT_TIMEOUT_SECONDS):
    """
    Forces a K8sCronJob to run immediately.

    - Fail if the K8sCronJob is currently running on-schedule.
    - Suspend the K8sCronJob.
    - Spawn a K8sPod and poll it to completion.
    - Unsuspend the K8sCronJob.

    :param timeout: The timeout, in seconds, after which to kill the K8sPod.
    :raises SyntaxError: If ``timeout`` is not an int.
    :raises CronJobAlreadyRunningException: If the CronJob has active Jobs.
    :raises CronJobRunException: If the ad-hoc Pod fails or times out.
    :return: None.
    """
    if not isinstance(timeout, int):
        # Bug fix: the "{}" placeholder was never interpolated, so the error
        # message always showed a literal "[ {} ]". Include the bad value.
        raise SyntaxError("K8sCronJob.run() timeout: [ {} ] is invalid.".format(timeout))
    if len(self.active):
        raise CronJobAlreadyRunningException(
            "K8sCronJob.run() failed: CronJob: [ {} ] "
            "has [ {} ] active Jobs currently.".format(self.name, len(self.active)))
    # Suspend scheduled runs so the ad-hoc Pod cannot race a scheduled Job.
    self.suspend = True
    self.update()
    pod = self.pod
    if timeout:
        self.POD_RUN_WAIT_TIMEOUT_SECONDS = timeout
    try:
        pod.create()
        start_time = time.time()
        # Poll every 2s until the Pod reaches a terminal phase or the
        # configured timeout trips (raised by _check_timeout).
        while pod.phase not in ['Succeeded', 'Failed']:
            pod.get()
            time.sleep(2)
            self._check_timeout(start_time)
    except Exception as err:
        raise CronJobRunException("K8sCronJob.run() failed: {}".format(err))
    finally:
        # Always clean up the ad-hoc Pod and restore scheduling. Previously
        # a failed run left the CronJob suspended because the unsuspend
        # happened after the re-raise; doing it here fixes that leak.
        pod.delete()
        self.suspend = False
        self.update()
"def",
"run",
"(",
"self",
",",
"timeout",
"=",
"POD_RUN_WAIT_TIMEOUT_SECONDS",
")",
":",
"if",
"not",
"isinstance",
"(",
"timeout",
",",
"int",
")",
":",
"raise",
"SyntaxError",
"(",
"\"K8sCronJob.run() timeout: [ {} ] is invalid.\"",
")",
"if",
"len",
"(",
"self",
".",
"active",
")",
":",
"raise",
"CronJobAlreadyRunningException",
"(",
"\"K8sCronJob.run() failed: CronJob: [ {} ] \"",
"\"has [ {} ] active Jobs currently.\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"len",
"(",
"self",
".",
"active",
")",
")",
")",
"self",
".",
"suspend",
"=",
"True",
"self",
".",
"update",
"(",
")",
"pod",
"=",
"self",
".",
"pod",
"if",
"timeout",
":",
"self",
".",
"POD_RUN_WAIT_TIMEOUT_SECONDS",
"=",
"timeout",
"try",
":",
"pod",
".",
"create",
"(",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"pod",
".",
"phase",
"not",
"in",
"[",
"'Succeeded'",
",",
"'Failed'",
"]",
":",
"pod",
".",
"get",
"(",
")",
"time",
".",
"sleep",
"(",
"2",
")",
"self",
".",
"_check_timeout",
"(",
"start_time",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"CronJobRunException",
"(",
"\"K8sCronJob.run() failed: {}\"",
".",
"format",
"(",
"err",
")",
")",
"finally",
":",
"pod",
".",
"delete",
"(",
")",
"self",
".",
"suspend",
"=",
"False",
"self",
".",
"update",
"(",
")"
] | 29.681818 | 21.681818 |
def parse_image_response(self, response):
    """
    Parse one or more media objects out of a (possibly multipart) RETS
    response. A lot of string methods are used to handle the response
    before encoding it back into bytes for each object.

    :param response: The HTTP response returned by the RETS server.
    :return: list of response objects, one per multipart body part.
    """
    if 'xml' in response.headers.get('Content-Type'):
        # Got an XML response, likely an error code.
        xml = xmltodict.parse(response.text)
        self.analyze_reply_code(xml_response_dict=xml)
    multi_parts = self._get_multiparts(response)
    parsed = []
    # go through each part of the multipart message
    for part in multi_parts:
        # NOTE(review): str.strip() treats its argument as a *set* of
        # characters, so this strips any leading/trailing '\r'/'\n'
        # characters rather than the literal 4-char sequence.
        clean_part = part.strip('\r\n\r\n')
        if '\r\n\r\n' in clean_part:
            # A blank line separates the part's MIME headers from its body.
            header, body = clean_part.split('\r\n\r\n', 1)
        else:
            header = clean_part
            body = None
        # Map of "Name: value" header lines for this part.
        part_header_dict = {k.strip(): v.strip() for k, v in (h.split(':', 1) for h in header.split('\r\n'))}
        # Some multipart requests respond with a text/XML part stating an error
        if 'xml' in part_header_dict.get('Content-Type'):
            # Got an XML response, likely an error code.
            # Some rets servers give characters after the closing brace.
            body = body[:body.index('/>') + 2] if '/>' in body else body
            xml = xmltodict.parse(body)
            try:
                self.analyze_reply_code(xml_response_dict=xml)
            except RETSException as e:
                if e.reply_code == '20403':
                    # The requested object_id was not found.
                    continue
                raise e
        if body:
            # latin-1 keeps a 1:1 char->byte mapping for the binary payload
            # on Python 3; on Python 2 the str body is already bytes.
            obj = self._response_object_from_header(
                obj_head_dict=part_header_dict,
                content=body.encode('latin-1') if six.PY3 else body)
        else:
            obj = self._response_object_from_header(obj_head_dict=part_header_dict)
        parsed.append(obj)
    return parsed
"def",
"parse_image_response",
"(",
"self",
",",
"response",
")",
":",
"if",
"'xml'",
"in",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
":",
"# Got an XML response, likely an error code.",
"xml",
"=",
"xmltodict",
".",
"parse",
"(",
"response",
".",
"text",
")",
"self",
".",
"analyze_reply_code",
"(",
"xml_response_dict",
"=",
"xml",
")",
"multi_parts",
"=",
"self",
".",
"_get_multiparts",
"(",
"response",
")",
"parsed",
"=",
"[",
"]",
"# go through each part of the multipart message",
"for",
"part",
"in",
"multi_parts",
":",
"clean_part",
"=",
"part",
".",
"strip",
"(",
"'\\r\\n\\r\\n'",
")",
"if",
"'\\r\\n\\r\\n'",
"in",
"clean_part",
":",
"header",
",",
"body",
"=",
"clean_part",
".",
"split",
"(",
"'\\r\\n\\r\\n'",
",",
"1",
")",
"else",
":",
"header",
"=",
"clean_part",
"body",
"=",
"None",
"part_header_dict",
"=",
"{",
"k",
".",
"strip",
"(",
")",
":",
"v",
".",
"strip",
"(",
")",
"for",
"k",
",",
"v",
"in",
"(",
"h",
".",
"split",
"(",
"':'",
",",
"1",
")",
"for",
"h",
"in",
"header",
".",
"split",
"(",
"'\\r\\n'",
")",
")",
"}",
"# Some multipart requests respond with a text/XML part stating an error",
"if",
"'xml'",
"in",
"part_header_dict",
".",
"get",
"(",
"'Content-Type'",
")",
":",
"# Got an XML response, likely an error code.",
"# Some rets servers give characters after the closing brace.",
"body",
"=",
"body",
"[",
":",
"body",
".",
"index",
"(",
"'/>'",
")",
"+",
"2",
"]",
"if",
"'/>'",
"in",
"body",
"else",
"body",
"xml",
"=",
"xmltodict",
".",
"parse",
"(",
"body",
")",
"try",
":",
"self",
".",
"analyze_reply_code",
"(",
"xml_response_dict",
"=",
"xml",
")",
"except",
"RETSException",
"as",
"e",
":",
"if",
"e",
".",
"reply_code",
"==",
"'20403'",
":",
"# The requested object_id was not found.",
"continue",
"raise",
"e",
"if",
"body",
":",
"obj",
"=",
"self",
".",
"_response_object_from_header",
"(",
"obj_head_dict",
"=",
"part_header_dict",
",",
"content",
"=",
"body",
".",
"encode",
"(",
"'latin-1'",
")",
"if",
"six",
".",
"PY3",
"else",
"body",
")",
"else",
":",
"obj",
"=",
"self",
".",
"_response_object_from_header",
"(",
"obj_head_dict",
"=",
"part_header_dict",
")",
"parsed",
".",
"append",
"(",
"obj",
")",
"return",
"parsed"
] | 45.456522 | 19.891304 |
def get_certificate(order_id=None, certificate_id=None, minion_id=None, cert_format='pem_all', filename=None):
    '''
    Retrieve a certificate by order_id or certificate_id and write it to stdout or a filename.

    A list of permissible cert_formats is here:
    https://www.digicert.com/services/v2/documentation/appendix-certificate-formats

    CLI Example:

    .. code-block:: bash

        salt-run digicert.get_certificate order_id=48929454 cert_format=apache

    Including a 'filename' will write the certificate to the desired file.
    Note that some cert formats are zipped files, and some are binary.

    If the certificate has not been issued, this function will return the order details
    inside of which will be a status (one of pending, rejected, processing, issued,
    revoked, canceled, needs_csr, and needs_approval)

    If for some reason you want to pipe the output of this command to a file or other
    command you will want to leave off the ``filename`` argument and make sure to include
    ``--no-color`` so there will be no terminal ANSI escape sequences.

    Note: ``minion_id`` is accepted but not referenced anywhere in this
    function body.
    '''
    if order_id:
        # Look up the order first to discover the certificate id/common name.
        order_cert = salt.utils.http.query(
            '{0}/order/certificate/{1}'.format(_base_url(),
                                               order_id),
            method='GET',
            raise_error=False,
            decode=True,
            decode_type='json',
            header_dict={
                'X-DC-DEVKEY': _api_key(),
                'Content-Type': 'application/json',
            }
        )
        # Unissued orders (and API errors) are returned to the caller as-is.
        if order_cert['dict'].get('status') != 'issued':
            return {'certificate': order_cert['dict']}
        if order_cert['dict'].get('errors', False):
            return {'certificate': order_cert['dict']}
        certificate_id = order_cert['dict'].get('certificate').get('id', None)
        common_name = order_cert['dict'].get('certificate').get('common_name')
    if not certificate_id:
        return {'certificate':
                {'errors':
                 {'code': 'unknown',
                  'message': 'Unknown error, no certificate ID passed on command line or in body returned from API'}}}
    if filename:
        # text_out streams the download straight into the requested file.
        ret_cert = salt.utils.http.query(
            '{0}/certificate/{1}/download/format/{2}'.format(_base_url(),
                                                             certificate_id,
                                                             cert_format),
            method='GET',
            decode=False,
            text=False,
            headers=True,
            text_out=filename,
            raise_error=False,
            header_dict={
                'X-DC-DEVKEY': _api_key(),
            }
        )
    else:
        ret_cert = salt.utils.http.query(
            '{0}/certificate/{1}/download/format/{2}'.format(_base_url(),
                                                             certificate_id,
                                                             cert_format),
            method='GET',
            text=False,
            decode=False,
            raise_error=False,
            header_dict={
                'X-DC-DEVKEY': _api_key(),
            }
        )
    if 'errors' in ret_cert:
        return {'certificate': ret_cert}
    # NOTE(review): the 'body not in ret_cert' branch assigns ret/cert but is
    # immediately overwritten by the isinstance() branch below; if ret_cert
    # is a dict without a 'body' key this raises KeyError -- verify intent.
    if 'body' not in ret_cert:
        ret = {'certificate': ret_cert}
        cert = ret_cert
    if isinstance(ret_cert, dict):
        ret = ret_cert['body']
        cert = ret
    else:
        ret = ret_cert
        cert = ret
    tmpfilename = None
    if not filename:
        # Persist the cert to a temp file so openssl can read it below.
        fd, tmpfilename = tempfile.mkstemp()
        filename = tmpfilename
        os.write(fd, cert)
        os.close(fd)
    # Extract the commonName from the cert subject via the openssl CLI.
    cmd = ['openssl', 'x509', '-noout', '-subject', '-nameopt', 'multiline', '-in', filename]
    out = subprocess.check_output(cmd)
    common_name = None
    # NOTE(review): on Python 3 check_output() returns bytes, so splitlines()
    # yields bytes and re.search with this str pattern raises TypeError --
    # confirm whether this runner is Python 2 only.
    for l in out.splitlines():
        common_name_match = re.search(' *commonName *= *(.*)', l)
        if common_name_match:
            common_name = common_name_match.group(1)
            break
    if tmpfilename:
        os.unlink(tmpfilename)
    if common_name:
        # Cache the retrieved certificate under its common name.
        bank = 'digicert/domains'
        cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
        try:
            data = cache.fetch(bank, common_name)
        except TypeError:
            data = {'certificate': cert}
        cache.store(bank, common_name, data)
    if 'headers' in ret_cert:
        return {'certificate': {'filename': filename,
                                'original_filename': ret_cert['headers'].get('Content-Disposition', 'Not provided'),
                                'Content-Type': ret_cert['headers'].get('Content-Type', 'Not provided')
                                }}
    return {'certificate': cert}
"def",
"get_certificate",
"(",
"order_id",
"=",
"None",
",",
"certificate_id",
"=",
"None",
",",
"minion_id",
"=",
"None",
",",
"cert_format",
"=",
"'pem_all'",
",",
"filename",
"=",
"None",
")",
":",
"if",
"order_id",
":",
"order_cert",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"'{0}/order/certificate/{1}'",
".",
"format",
"(",
"_base_url",
"(",
")",
",",
"order_id",
")",
",",
"method",
"=",
"'GET'",
",",
"raise_error",
"=",
"False",
",",
"decode",
"=",
"True",
",",
"decode_type",
"=",
"'json'",
",",
"header_dict",
"=",
"{",
"'X-DC-DEVKEY'",
":",
"_api_key",
"(",
")",
",",
"'Content-Type'",
":",
"'application/json'",
",",
"}",
")",
"if",
"order_cert",
"[",
"'dict'",
"]",
".",
"get",
"(",
"'status'",
")",
"!=",
"'issued'",
":",
"return",
"{",
"'certificate'",
":",
"order_cert",
"[",
"'dict'",
"]",
"}",
"if",
"order_cert",
"[",
"'dict'",
"]",
".",
"get",
"(",
"'errors'",
",",
"False",
")",
":",
"return",
"{",
"'certificate'",
":",
"order_cert",
"[",
"'dict'",
"]",
"}",
"certificate_id",
"=",
"order_cert",
"[",
"'dict'",
"]",
".",
"get",
"(",
"'certificate'",
")",
".",
"get",
"(",
"'id'",
",",
"None",
")",
"common_name",
"=",
"order_cert",
"[",
"'dict'",
"]",
".",
"get",
"(",
"'certificate'",
")",
".",
"get",
"(",
"'common_name'",
")",
"if",
"not",
"certificate_id",
":",
"return",
"{",
"'certificate'",
":",
"{",
"'errors'",
":",
"{",
"'code'",
":",
"'unknown'",
",",
"'message'",
":",
"'Unknown error, no certificate ID passed on command line or in body returned from API'",
"}",
"}",
"}",
"if",
"filename",
":",
"ret_cert",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"'{0}/certificate/{1}/download/format/{2}'",
".",
"format",
"(",
"_base_url",
"(",
")",
",",
"certificate_id",
",",
"cert_format",
")",
",",
"method",
"=",
"'GET'",
",",
"decode",
"=",
"False",
",",
"text",
"=",
"False",
",",
"headers",
"=",
"True",
",",
"text_out",
"=",
"filename",
",",
"raise_error",
"=",
"False",
",",
"header_dict",
"=",
"{",
"'X-DC-DEVKEY'",
":",
"_api_key",
"(",
")",
",",
"}",
")",
"else",
":",
"ret_cert",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"'{0}/certificate/{1}/download/format/{2}'",
".",
"format",
"(",
"_base_url",
"(",
")",
",",
"certificate_id",
",",
"cert_format",
")",
",",
"method",
"=",
"'GET'",
",",
"text",
"=",
"False",
",",
"decode",
"=",
"False",
",",
"raise_error",
"=",
"False",
",",
"header_dict",
"=",
"{",
"'X-DC-DEVKEY'",
":",
"_api_key",
"(",
")",
",",
"}",
")",
"if",
"'errors'",
"in",
"ret_cert",
":",
"return",
"{",
"'certificate'",
":",
"ret_cert",
"}",
"if",
"'body'",
"not",
"in",
"ret_cert",
":",
"ret",
"=",
"{",
"'certificate'",
":",
"ret_cert",
"}",
"cert",
"=",
"ret_cert",
"if",
"isinstance",
"(",
"ret_cert",
",",
"dict",
")",
":",
"ret",
"=",
"ret_cert",
"[",
"'body'",
"]",
"cert",
"=",
"ret",
"else",
":",
"ret",
"=",
"ret_cert",
"cert",
"=",
"ret",
"tmpfilename",
"=",
"None",
"if",
"not",
"filename",
":",
"fd",
",",
"tmpfilename",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"filename",
"=",
"tmpfilename",
"os",
".",
"write",
"(",
"fd",
",",
"cert",
")",
"os",
".",
"close",
"(",
"fd",
")",
"cmd",
"=",
"[",
"'openssl'",
",",
"'x509'",
",",
"'-noout'",
",",
"'-subject'",
",",
"'-nameopt'",
",",
"'multiline'",
",",
"'-in'",
",",
"filename",
"]",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
")",
"common_name",
"=",
"None",
"for",
"l",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"common_name_match",
"=",
"re",
".",
"search",
"(",
"' *commonName *= *(.*)'",
",",
"l",
")",
"if",
"common_name_match",
":",
"common_name",
"=",
"common_name_match",
".",
"group",
"(",
"1",
")",
"break",
"if",
"tmpfilename",
":",
"os",
".",
"unlink",
"(",
"tmpfilename",
")",
"if",
"common_name",
":",
"bank",
"=",
"'digicert/domains'",
"cache",
"=",
"salt",
".",
"cache",
".",
"Cache",
"(",
"__opts__",
",",
"syspaths",
".",
"CACHE_DIR",
")",
"try",
":",
"data",
"=",
"cache",
".",
"fetch",
"(",
"bank",
",",
"common_name",
")",
"except",
"TypeError",
":",
"data",
"=",
"{",
"'certificate'",
":",
"cert",
"}",
"cache",
".",
"store",
"(",
"bank",
",",
"common_name",
",",
"data",
")",
"if",
"'headers'",
"in",
"ret_cert",
":",
"return",
"{",
"'certificate'",
":",
"{",
"'filename'",
":",
"filename",
",",
"'original_filename'",
":",
"ret_cert",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'Content-Disposition'",
",",
"'Not provided'",
")",
",",
"'Content-Type'",
":",
"ret_cert",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'Content-Type'",
",",
"'Not provided'",
")",
"}",
"}",
"return",
"{",
"'certificate'",
":",
"cert",
"}"
] | 35.651163 | 23.899225 |
def help(route):
    r"""Displays help for the given route.

    Args:
        route (str): A route that resolves a member. An empty/falsy route
            requests top-level help.
    """
    # NOTE: Python 2 print statement -- this module predates Python 3.
    # Pass the route split into segments; an empty route means the root.
    help_text = getRouteHelp(route.split('/') if route else [])
    if help_text is None:
        err('Can\'t help :(')
    else:
        print '\n%s' % help_text
"def",
"help",
"(",
"route",
")",
":",
"help_text",
"=",
"getRouteHelp",
"(",
"route",
".",
"split",
"(",
"'/'",
")",
"if",
"route",
"else",
"[",
"]",
")",
"if",
"help_text",
"is",
"None",
":",
"err",
"(",
"'Can\\'t help :('",
")",
"else",
":",
"print",
"'\\n%s'",
"%",
"help_text"
] | 20 | 21.769231 |
def add_edge(self,
             source: Node,
             target: Node,
             weight: float = 1,
             save_to_cache: bool = True) -> None:
    """
    Create an edge between the given nodes and register it in the edge list.

    Arguments:
        source (Node): The source node of the edge.
        target (Node): The target node of the edge.
        weight (float): The weight of the created edge.
        save_to_cache (bool): Whether the edge should be saved to the local database.
    """
    if not isinstance(source, Node):
        raise TypeError("Invalid source: expected Node instance, got {}.".format(source))
    if not isinstance(target, Node):
        raise TypeError("Invalid target: expected Node instance, got {}.".format(target))
    # Silently ignore self-loops and edges that already exist.
    if source.index == target.index:
        return
    if self.get_edge_by_index(source.index, target.index) is not None:
        return
    self._edges[(source.index, target.index)] = Edge(source, target, weight)
    if not save_to_cache:
        return
    # Mirror the edge into the local database, committing only on change.
    database: GraphDatabaseInterface = self._graph.database
    db_edge: DBEdge = database.Edge.find_by_name(source.name, target.name)
    if db_edge is None:
        database.session.add(database.Edge(source.name, target.name, weight))
        database.session.commit()
    elif db_edge.weight != weight:
        db_edge.weight = weight
        database.session.commit()
"def",
"add_edge",
"(",
"self",
",",
"source",
":",
"Node",
",",
"target",
":",
"Node",
",",
"weight",
":",
"float",
"=",
"1",
",",
"save_to_cache",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"if",
"not",
"isinstance",
"(",
"source",
",",
"Node",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid source: expected Node instance, got {}.\"",
".",
"format",
"(",
"source",
")",
")",
"if",
"not",
"isinstance",
"(",
"target",
",",
"Node",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid target: expected Node instance, got {}.\"",
".",
"format",
"(",
"target",
")",
")",
"if",
"source",
".",
"index",
"==",
"target",
".",
"index",
"or",
"self",
".",
"get_edge_by_index",
"(",
"source",
".",
"index",
",",
"target",
".",
"index",
")",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_edges",
"[",
"(",
"source",
".",
"index",
",",
"target",
".",
"index",
")",
"]",
"=",
"Edge",
"(",
"source",
",",
"target",
",",
"weight",
")",
"if",
"save_to_cache",
":",
"should_commit",
":",
"bool",
"=",
"False",
"database",
":",
"GraphDatabaseInterface",
"=",
"self",
".",
"_graph",
".",
"database",
"db_edge",
":",
"DBEdge",
"=",
"database",
".",
"Edge",
".",
"find_by_name",
"(",
"source",
".",
"name",
",",
"target",
".",
"name",
")",
"if",
"db_edge",
"is",
"None",
":",
"database",
".",
"session",
".",
"add",
"(",
"database",
".",
"Edge",
"(",
"source",
".",
"name",
",",
"target",
".",
"name",
",",
"weight",
")",
")",
"should_commit",
"=",
"True",
"elif",
"db_edge",
".",
"weight",
"!=",
"weight",
":",
"db_edge",
".",
"weight",
"=",
"weight",
"should_commit",
"=",
"True",
"if",
"should_commit",
":",
"database",
".",
"session",
".",
"commit",
"(",
")"
] | 42.368421 | 20.578947 |
def get_changelog_date_packager(self):
    """Return the date + packager part of an RPM changelog entry.

    The packager identity comes from the ``rpmdev-packager`` tool when
    available; otherwise a placeholder identity is used and a warning is
    logged.
    """
    try:
        packager = subprocess.Popen(
            'rpmdev-packager', stdout=subprocess.PIPE).communicate(
            )[0].strip()
    except OSError:
        # Hi John Doe, you should install rpmdevtools
        packager = "John Doe <john@doe.com>"
        logger.warn("Package rpmdevtools is missing, using default "
                    "name: {0}.".format(packager))
    with utils.c_time_locale():
        date_str = time.strftime('%a %b %d %Y', time.gmtime())
    encoding = locale.getpreferredencoding()
    # Popen output is bytes; the fallback is already text. Decoding only
    # bytes fixes an AttributeError on Python 3 (str has no .decode) while
    # keeping the original Python 2 behavior, where str *is* bytes.
    if isinstance(packager, bytes):
        packager = packager.decode(encoding)
    return u'{0} {1}'.format(date_str, packager)
"def",
"get_changelog_date_packager",
"(",
"self",
")",
":",
"try",
":",
"packager",
"=",
"subprocess",
".",
"Popen",
"(",
"'rpmdev-packager'",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"except",
"OSError",
":",
"# Hi John Doe, you should install rpmdevtools",
"packager",
"=",
"\"John Doe <john@doe.com>\"",
"logger",
".",
"warn",
"(",
"\"Package rpmdevtools is missing, using default \"",
"\"name: {0}.\"",
".",
"format",
"(",
"packager",
")",
")",
"with",
"utils",
".",
"c_time_locale",
"(",
")",
":",
"date_str",
"=",
"time",
".",
"strftime",
"(",
"'%a %b %d %Y'",
",",
"time",
".",
"gmtime",
"(",
")",
")",
"encoding",
"=",
"locale",
".",
"getpreferredencoding",
"(",
")",
"return",
"u'{0} {1}'",
".",
"format",
"(",
"date_str",
",",
"packager",
".",
"decode",
"(",
"encoding",
")",
")"
] | 46.8125 | 14.3125 |
def update_webhook(self, webhook_url, webhook_id, events=None):
    """Register webhook (if it doesn't exist)."""
    hooks = self._request(MINUT_WEBHOOKS_URL, request_type='GET')['hooks']
    existing = next(
        (hook for hook in hooks if hook['url'] == webhook_url), None)
    if existing is not None:
        self._webhook = existing
        _LOGGER.debug("Webhook: %s", self._webhook)
    else:
        # Not registered yet: default to every known event and register.
        if events is None:
            events = [e for v in EVENTS.values() for e in v if e]
        self._webhook = self._register_webhook(webhook_url, events)
        _LOGGER.debug("Registered hook: %s", self._webhook)
    return self._webhook
"def",
"update_webhook",
"(",
"self",
",",
"webhook_url",
",",
"webhook_id",
",",
"events",
"=",
"None",
")",
":",
"hooks",
"=",
"self",
".",
"_request",
"(",
"MINUT_WEBHOOKS_URL",
",",
"request_type",
"=",
"'GET'",
")",
"[",
"'hooks'",
"]",
"try",
":",
"self",
".",
"_webhook",
"=",
"next",
"(",
"hook",
"for",
"hook",
"in",
"hooks",
"if",
"hook",
"[",
"'url'",
"]",
"==",
"webhook_url",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Webhook: %s\"",
",",
"self",
".",
"_webhook",
")",
"except",
"StopIteration",
":",
"# Not found",
"if",
"events",
"is",
"None",
":",
"events",
"=",
"[",
"e",
"for",
"v",
"in",
"EVENTS",
".",
"values",
"(",
")",
"for",
"e",
"in",
"v",
"if",
"e",
"]",
"self",
".",
"_webhook",
"=",
"self",
".",
"_register_webhook",
"(",
"webhook_url",
",",
"events",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Registered hook: %s\"",
",",
"self",
".",
"_webhook",
")",
"return",
"self",
".",
"_webhook"
] | 51.461538 | 18.692308 |
def addDiscreteOutcomeConstantMean(distribution, x, p, sort = False):
    '''
    Add a discrete outcome x with probability p to an existing distribution,
    rescaling the remaining outcomes so that their relative probabilities and
    the overall mean stay unchanged.

    Parameters
    ----------
    distribution : [np.array]
        Two element list containing a list of probabilities and a list of outcomes.
    x : float
        The new value to be added to the distribution.
    p : float
        The probability of the discrete outcome x occuring.
    sort: bool
        Whether or not to sort X before returning it

    Returns
    -------
    out : [np.array]
        Two element list: the probability mass function first, then the
        corresponding outcome points.
    '''
    X = np.append(x, distribution[1] * (1 - p * x) / (1 - p))
    pmf = np.append(p, distribution[0] * (1 - p))
    if sort:
        order = np.argsort(X)
        X = X[order]
        pmf = pmf[order]
    return [pmf, X]
"def",
"addDiscreteOutcomeConstantMean",
"(",
"distribution",
",",
"x",
",",
"p",
",",
"sort",
"=",
"False",
")",
":",
"X",
"=",
"np",
".",
"append",
"(",
"x",
",",
"distribution",
"[",
"1",
"]",
"*",
"(",
"1",
"-",
"p",
"*",
"x",
")",
"/",
"(",
"1",
"-",
"p",
")",
")",
"pmf",
"=",
"np",
".",
"append",
"(",
"p",
",",
"distribution",
"[",
"0",
"]",
"*",
"(",
"1",
"-",
"p",
")",
")",
"if",
"sort",
":",
"indices",
"=",
"np",
".",
"argsort",
"(",
"X",
")",
"X",
"=",
"X",
"[",
"indices",
"]",
"pmf",
"=",
"pmf",
"[",
"indices",
"]",
"return",
"(",
"[",
"pmf",
",",
"X",
"]",
")"
] | 30.2 | 24.6 |
def clean(matrix):
    """Return a copy of *matrix* with every space-valued entry dropped.

    The copy is a defaultdict whose missing keys read back as a space.
    """
    kept = {key: value for key, value in matrix.items() if value != ' '}
    return defaultdict(lambda: ' ', kept)
"def",
"clean",
"(",
"matrix",
")",
":",
"return",
"defaultdict",
"(",
"lambda",
":",
"' '",
",",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"matrix",
".",
"items",
"(",
")",
"if",
"v",
"!=",
"' '",
"}",
")"
] | 34.333333 | 11.666667 |
def delete_instance(self, *args, **kwargs):
    """Delete this instance, emitting signals around the deletion.

    Sends ``pre_delete`` before delegating to the parent class's
    ``delete_instance`` and ``post_delete`` afterwards; all positional
    and keyword arguments are passed through unchanged.
    """
    self.pre_delete.send(self)
    super(Model, self).delete_instance(*args, **kwargs)
    self.post_delete.send(self)
"def",
"delete_instance",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"pre_delete",
".",
"send",
"(",
"self",
")",
"super",
"(",
"Model",
",",
"self",
")",
".",
"delete_instance",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"post_delete",
".",
"send",
"(",
"self",
")"
] | 39.6 | 6.6 |
def get(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
    '''
    Get value saved in znode

    path
        path to check

    profile
        Configured Zookeeper profile to authenticate with (Default: None)

    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)

    scheme
        Scheme to authenticate with (Default: 'digest')

    username
        Username to authenticate (Default: None)

    password
        Password to authenticate (Default: None)

    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    CLI Example:

    .. code-block:: bash

        salt minion1 zookeeper.get /test/name profile=prod
    '''
    zk_conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
                           username=username, password=password,
                           default_acl=default_acl)
    # conn.get() returns (value, stat); only the value is wanted here.
    value, _stat = zk_conn.get(path)
    return salt.utils.stringutils.to_str(value)
"def",
"get",
"(",
"path",
",",
"profile",
"=",
"None",
",",
"hosts",
"=",
"None",
",",
"scheme",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"default_acl",
"=",
"None",
")",
":",
"conn",
"=",
"_get_zk_conn",
"(",
"profile",
"=",
"profile",
",",
"hosts",
"=",
"hosts",
",",
"scheme",
"=",
"scheme",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"default_acl",
"=",
"default_acl",
")",
"ret",
",",
"_",
"=",
"conn",
".",
"get",
"(",
"path",
")",
"return",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"ret",
")"
] | 25.944444 | 30 |
def create_hosted_service(self, service_name, label, description=None,
                          location=None, affinity_group=None,
                          extended_properties=None):
    '''
    Creates a new hosted service in Windows Azure.

    Exactly one of ``location`` or ``affinity_group`` must be given.

    service_name:
        A name for the hosted service that is unique within Windows Azure.
        This name is the DNS prefix name and can be used to access the
        hosted service.
    label:
        A name for the hosted service, up to 100 characters, used to
        identify the storage account for your tracking purposes.
    description:
        A description for the hosted service, up to 1024 characters.
    location:
        The location where the hosted service will be created.
    affinity_group:
        The name (a GUID, see list_affinity_groups) of an existing affinity
        group associated with this subscription.
    extended_properties:
        Dictionary of name/value pairs of hosted service properties (at most
        50; names up to 64 alphanumeric/underscore characters starting with
        a letter, values up to 255 characters).
    '''
    _validate_not_none('service_name', service_name)
    _validate_not_none('label', label)
    has_location = location is not None
    has_affinity_group = affinity_group is not None
    if not has_location and not has_affinity_group:
        raise ValueError(
            'location or affinity_group must be specified')
    if has_location and has_affinity_group:
        raise ValueError(
            'Only one of location or affinity_group needs to be specified')
    request_body = _XmlSerializer.create_hosted_service_to_xml(
        service_name,
        label,
        description,
        location,
        affinity_group,
        extended_properties)
    return self._perform_post(self._get_hosted_service_path(),
                              request_body,
                              as_async=True)
"def",
"create_hosted_service",
"(",
"self",
",",
"service_name",
",",
"label",
",",
"description",
"=",
"None",
",",
"location",
"=",
"None",
",",
"affinity_group",
"=",
"None",
",",
"extended_properties",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'service_name'",
",",
"service_name",
")",
"_validate_not_none",
"(",
"'label'",
",",
"label",
")",
"if",
"affinity_group",
"is",
"None",
"and",
"location",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'location or affinity_group must be specified'",
")",
"if",
"affinity_group",
"is",
"not",
"None",
"and",
"location",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Only one of location or affinity_group needs to be specified'",
")",
"return",
"self",
".",
"_perform_post",
"(",
"self",
".",
"_get_hosted_service_path",
"(",
")",
",",
"_XmlSerializer",
".",
"create_hosted_service_to_xml",
"(",
"service_name",
",",
"label",
",",
"description",
",",
"location",
",",
"affinity_group",
",",
"extended_properties",
")",
",",
"as_async",
"=",
"True",
")"
] | 52.372549 | 22.45098 |
def _get_available_extensions():
    """Get a list of available file extensions to make it easy for
    tab-completion and exception handling.

    Extensions are discovered from the parser filenames in this package
    and from the extension synonym table; each one is listed both with
    and without its leading dot, and the result is returned sorted.
    """
    extensions = []
    # from filenames
    parsers_dir = os.path.join(os.path.dirname(__file__))
    suffix = _FILENAME_SUFFIX + ".py"
    glob_filename = os.path.join(parsers_dir, "*" + suffix)
    for filename in glob.glob(glob_filename):
        # Slice the extension straight out of the basename. The previous
        # implementation regex-matched the full path built from an
        # unescaped glob pattern: its '.' (and Windows '\') acted as regex
        # metacharacters, the "\w+" escape was in a non-raw string, and a
        # filename that failed to match crashed with AttributeError.
        ext = os.path.basename(filename)[:-len(suffix)]
        if ext:
            extensions.append(ext)
            extensions.append('.' + ext)
    # from relevant synonyms (don't use the '' synonym)
    for ext in EXTENSION_SYNONYMS.keys():
        if ext:
            extensions.append(ext)
            extensions.append(ext.replace('.', '', 1))
    extensions.sort()
    return extensions
"def",
"_get_available_extensions",
"(",
")",
":",
"extensions",
"=",
"[",
"]",
"# from filenames",
"parsers_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"glob_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parsers_dir",
",",
"\"*\"",
"+",
"_FILENAME_SUFFIX",
"+",
"\".py\"",
")",
"ext_re",
"=",
"re",
".",
"compile",
"(",
"glob_filename",
".",
"replace",
"(",
"'*'",
",",
"\"(?P<ext>\\w+)\"",
")",
")",
"for",
"filename",
"in",
"glob",
".",
"glob",
"(",
"glob_filename",
")",
":",
"ext_match",
"=",
"ext_re",
".",
"match",
"(",
"filename",
")",
"ext",
"=",
"ext_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"extensions",
".",
"append",
"(",
"ext",
")",
"extensions",
".",
"append",
"(",
"'.'",
"+",
"ext",
")",
"# from relevant synonyms (don't use the '' synonym)",
"for",
"ext",
"in",
"EXTENSION_SYNONYMS",
".",
"keys",
"(",
")",
":",
"if",
"ext",
":",
"extensions",
".",
"append",
"(",
"ext",
")",
"extensions",
".",
"append",
"(",
"ext",
".",
"replace",
"(",
"'.'",
",",
"''",
",",
"1",
")",
")",
"extensions",
".",
"sort",
"(",
")",
"return",
"extensions"
] | 35.478261 | 14.652174 |
def run(self, conn, tmp, module_name, module_args, inject):
    ''' transfer the given module name, plus the async module, then run it '''
    # shell and command module are the same
    if module_name == 'shell':
        module_name = 'command'
        # presumably a marker the command module looks for to enable shell
        # execution -- TODO confirm against the command module source
        module_args += " #USE_SHELL"
    (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject)
    # Make the transferred module readable and executable on the remote.
    self.runner._low_level_exec_command(conn, "chmod a+rx %s" % module_path, tmp)
    # Execute the real module indirectly through async_wrapper so it runs
    # in the background under the generated job id.
    return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
                                       async_module=module_path,
                                       async_jid=self.runner.generated_jid,
                                       async_limit=self.runner.background,
                                       inject=inject
                                       )
"def",
"run",
"(",
"self",
",",
"conn",
",",
"tmp",
",",
"module_name",
",",
"module_args",
",",
"inject",
")",
":",
"# shell and command module are the same",
"if",
"module_name",
"==",
"'shell'",
":",
"module_name",
"=",
"'command'",
"module_args",
"+=",
"\" #USE_SHELL\"",
"(",
"module_path",
",",
"is_new_style",
",",
"shebang",
")",
"=",
"self",
".",
"runner",
".",
"_copy_module",
"(",
"conn",
",",
"tmp",
",",
"module_name",
",",
"module_args",
",",
"inject",
")",
"self",
".",
"runner",
".",
"_low_level_exec_command",
"(",
"conn",
",",
"\"chmod a+rx %s\"",
"%",
"module_path",
",",
"tmp",
")",
"return",
"self",
".",
"runner",
".",
"_execute_module",
"(",
"conn",
",",
"tmp",
",",
"'async_wrapper'",
",",
"module_args",
",",
"async_module",
"=",
"module_path",
",",
"async_jid",
"=",
"self",
".",
"runner",
".",
"generated_jid",
",",
"async_limit",
"=",
"self",
".",
"runner",
".",
"background",
",",
"inject",
"=",
"inject",
")"
] | 43.705882 | 25.117647 |
def _check_flavors_exist(self, parsed_args):
"""Ensure that selected flavors (--ROLE-flavor) exist in nova."""
compute_client = self.app.client_manager.compute
flavors = {f.name: f for f in compute_client.flavors.list()}
message = "Provided --{}-flavor, '{}', does not exist"
for target, flavor, scale in (
('control', parsed_args.control_flavor,
parsed_args.control_scale),
('compute', parsed_args.compute_flavor,
parsed_args.compute_scale),
('ceph-storage', parsed_args.ceph_storage_flavor,
parsed_args.ceph_storage_scale),
('block-storage', parsed_args.block_storage_flavor,
parsed_args.block_storage_scale),
('swift-storage', parsed_args.swift_storage_flavor,
parsed_args.swift_storage_scale),
):
if flavor is None or scale == 0:
self.log.debug("--{}-flavor not used".format(target))
elif flavor not in flavors:
self.predeploy_errors += 1
self.log.error(message.format(target, flavor)) | [
"def",
"_check_flavors_exist",
"(",
"self",
",",
"parsed_args",
")",
":",
"compute_client",
"=",
"self",
".",
"app",
".",
"client_manager",
".",
"compute",
"flavors",
"=",
"{",
"f",
".",
"name",
":",
"f",
"for",
"f",
"in",
"compute_client",
".",
"flavors",
".",
"list",
"(",
")",
"}",
"message",
"=",
"\"Provided --{}-flavor, '{}', does not exist\"",
"for",
"target",
",",
"flavor",
",",
"scale",
"in",
"(",
"(",
"'control'",
",",
"parsed_args",
".",
"control_flavor",
",",
"parsed_args",
".",
"control_scale",
")",
",",
"(",
"'compute'",
",",
"parsed_args",
".",
"compute_flavor",
",",
"parsed_args",
".",
"compute_scale",
")",
",",
"(",
"'ceph-storage'",
",",
"parsed_args",
".",
"ceph_storage_flavor",
",",
"parsed_args",
".",
"ceph_storage_scale",
")",
",",
"(",
"'block-storage'",
",",
"parsed_args",
".",
"block_storage_flavor",
",",
"parsed_args",
".",
"block_storage_scale",
")",
",",
"(",
"'swift-storage'",
",",
"parsed_args",
".",
"swift_storage_flavor",
",",
"parsed_args",
".",
"swift_storage_scale",
")",
",",
")",
":",
"if",
"flavor",
"is",
"None",
"or",
"scale",
"==",
"0",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"--{}-flavor not used\"",
".",
"format",
"(",
"target",
")",
")",
"elif",
"flavor",
"not",
"in",
"flavors",
":",
"self",
".",
"predeploy_errors",
"+=",
"1",
"self",
".",
"log",
".",
"error",
"(",
"message",
".",
"format",
"(",
"target",
",",
"flavor",
")",
")"
] | 45.12 | 16.04 |
def has_reg(self, reg_name):
"""Check if a register is used in the instruction."""
return any(operand.has_reg(reg_name) for operand in self.operands) | [
"def",
"has_reg",
"(",
"self",
",",
"reg_name",
")",
":",
"return",
"any",
"(",
"operand",
".",
"has_reg",
"(",
"reg_name",
")",
"for",
"operand",
"in",
"self",
".",
"operands",
")"
] | 54.333333 | 15.333333 |
def unmount(self, remove_rw=False, allow_lazy=False):
"""Removes all ties of this disk to the filesystem, so the image can be unmounted successfully.
:raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
:raises CleanupError: when actual cleanup fails. Some are swallowed.
"""
for m in list(sorted(self.volumes, key=lambda v: v.mountpoint or "", reverse=True)):
try:
m.unmount(allow_lazy=allow_lazy)
except ImageMounterError:
logger.warning("Error unmounting volume {0}".format(m.mountpoint))
if self._paths.get('nbd'):
_util.clean_unmount(['qemu-nbd', '-d'], self._paths['nbd'], rmdir=False)
if self.mountpoint:
try:
_util.clean_unmount(['fusermount', '-u'], self.mountpoint)
except SubsystemError:
if not allow_lazy:
raise
_util.clean_unmount(['fusermount', '-uz'], self.mountpoint)
if self._paths.get('avfs'):
try:
_util.clean_unmount(['fusermount', '-u'], self._paths['avfs'])
except SubsystemError:
if not allow_lazy:
raise
_util.clean_unmount(['fusermount', '-uz'], self._paths['avfs'])
if self.rw_active() and remove_rw:
os.remove(self.rwpath)
self.is_mounted = False | [
"def",
"unmount",
"(",
"self",
",",
"remove_rw",
"=",
"False",
",",
"allow_lazy",
"=",
"False",
")",
":",
"for",
"m",
"in",
"list",
"(",
"sorted",
"(",
"self",
".",
"volumes",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
".",
"mountpoint",
"or",
"\"\"",
",",
"reverse",
"=",
"True",
")",
")",
":",
"try",
":",
"m",
".",
"unmount",
"(",
"allow_lazy",
"=",
"allow_lazy",
")",
"except",
"ImageMounterError",
":",
"logger",
".",
"warning",
"(",
"\"Error unmounting volume {0}\"",
".",
"format",
"(",
"m",
".",
"mountpoint",
")",
")",
"if",
"self",
".",
"_paths",
".",
"get",
"(",
"'nbd'",
")",
":",
"_util",
".",
"clean_unmount",
"(",
"[",
"'qemu-nbd'",
",",
"'-d'",
"]",
",",
"self",
".",
"_paths",
"[",
"'nbd'",
"]",
",",
"rmdir",
"=",
"False",
")",
"if",
"self",
".",
"mountpoint",
":",
"try",
":",
"_util",
".",
"clean_unmount",
"(",
"[",
"'fusermount'",
",",
"'-u'",
"]",
",",
"self",
".",
"mountpoint",
")",
"except",
"SubsystemError",
":",
"if",
"not",
"allow_lazy",
":",
"raise",
"_util",
".",
"clean_unmount",
"(",
"[",
"'fusermount'",
",",
"'-uz'",
"]",
",",
"self",
".",
"mountpoint",
")",
"if",
"self",
".",
"_paths",
".",
"get",
"(",
"'avfs'",
")",
":",
"try",
":",
"_util",
".",
"clean_unmount",
"(",
"[",
"'fusermount'",
",",
"'-u'",
"]",
",",
"self",
".",
"_paths",
"[",
"'avfs'",
"]",
")",
"except",
"SubsystemError",
":",
"if",
"not",
"allow_lazy",
":",
"raise",
"_util",
".",
"clean_unmount",
"(",
"[",
"'fusermount'",
",",
"'-uz'",
"]",
",",
"self",
".",
"_paths",
"[",
"'avfs'",
"]",
")",
"if",
"self",
".",
"rw_active",
"(",
")",
"and",
"remove_rw",
":",
"os",
".",
"remove",
"(",
"self",
".",
"rwpath",
")",
"self",
".",
"is_mounted",
"=",
"False"
] | 39.527778 | 23.472222 |
def get_help(obj, env, subcmds):
"""Interpolate complete help doc of given object
Assumption that given object as a specific interface:
obj.__doc__ is the basic help object.
obj.get_actions_titles() returns the subcommand if any.
"""
doc = txt.dedent(obj.__doc__ or "")
env = env.copy() ## get a local copy
doc = doc.strip()
if not re.search(r"^usage:\s*$", doc, flags=re.IGNORECASE | re.MULTILINE):
doc += txt.dedent("""
Usage:
%(std_usage)s
Options:
%(std_options)s""")
help_line = (" %%-%ds %%s"
% (max([5] + [len(a) for a in subcmds]), ))
env["actions"] = "\n".join(
help_line % (
name,
get_help(subcmd, subcmd_env(env, name), {}).split("\n")[0])
for name, subcmd in subcmds.items())
env["actions_help"] = "" if not env["actions"] else (
"ACTION could be one of:\n\n"
"%(actions)s\n\n"
"See '%(surcmd)s help ACTION' for more information "
"on a specific command."
% env)
if "%(std_usage)s" in doc:
env["std_usage"] = txt.indent(
("%(surcmd)s --help\n"
"%(surcmd)s --version" +
(("\n%(surcmd)s help [COMMAND]"
"\n%(surcmd)s ACTION [ARGS...]") if subcmds else ""))
% env,
_find_prefix(doc, "%(std_usage)s"),
first="")
if "%(std_options)s" in doc:
env["std_options"] = txt.indent(
"--help Show this screen.\n"
"--version Show version.",
_find_prefix(doc, "%(std_options)s"),
first="")
if subcmds and "%(actions_help)s" not in doc:
doc += "\n\n%(actions_help)s"
try:
output = doc % env
except KeyError as e:
msg.err("Doc interpolation of %s needed missing key %r"
% (aformat(env["surcmd"], attrs=["bold", ]),
e.args[0]))
exit(1)
except Exception as e:
msg.err(
"Documentation of %s is not valid. Please check it:\n%s"
% (aformat(env["surcmd"], attrs=["bold", ]),
doc))
exit(1)
return output | [
"def",
"get_help",
"(",
"obj",
",",
"env",
",",
"subcmds",
")",
":",
"doc",
"=",
"txt",
".",
"dedent",
"(",
"obj",
".",
"__doc__",
"or",
"\"\"",
")",
"env",
"=",
"env",
".",
"copy",
"(",
")",
"## get a local copy",
"doc",
"=",
"doc",
".",
"strip",
"(",
")",
"if",
"not",
"re",
".",
"search",
"(",
"r\"^usage:\\s*$\"",
",",
"doc",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"MULTILINE",
")",
":",
"doc",
"+=",
"txt",
".",
"dedent",
"(",
"\"\"\"\n\n Usage:\n %(std_usage)s\n\n Options:\n %(std_options)s\"\"\"",
")",
"help_line",
"=",
"(",
"\" %%-%ds %%s\"",
"%",
"(",
"max",
"(",
"[",
"5",
"]",
"+",
"[",
"len",
"(",
"a",
")",
"for",
"a",
"in",
"subcmds",
"]",
")",
",",
")",
")",
"env",
"[",
"\"actions\"",
"]",
"=",
"\"\\n\"",
".",
"join",
"(",
"help_line",
"%",
"(",
"name",
",",
"get_help",
"(",
"subcmd",
",",
"subcmd_env",
"(",
"env",
",",
"name",
")",
",",
"{",
"}",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"0",
"]",
")",
"for",
"name",
",",
"subcmd",
"in",
"subcmds",
".",
"items",
"(",
")",
")",
"env",
"[",
"\"actions_help\"",
"]",
"=",
"\"\"",
"if",
"not",
"env",
"[",
"\"actions\"",
"]",
"else",
"(",
"\"ACTION could be one of:\\n\\n\"",
"\"%(actions)s\\n\\n\"",
"\"See '%(surcmd)s help ACTION' for more information \"",
"\"on a specific command.\"",
"%",
"env",
")",
"if",
"\"%(std_usage)s\"",
"in",
"doc",
":",
"env",
"[",
"\"std_usage\"",
"]",
"=",
"txt",
".",
"indent",
"(",
"(",
"\"%(surcmd)s --help\\n\"",
"\"%(surcmd)s --version\"",
"+",
"(",
"(",
"\"\\n%(surcmd)s help [COMMAND]\"",
"\"\\n%(surcmd)s ACTION [ARGS...]\"",
")",
"if",
"subcmds",
"else",
"\"\"",
")",
")",
"%",
"env",
",",
"_find_prefix",
"(",
"doc",
",",
"\"%(std_usage)s\"",
")",
",",
"first",
"=",
"\"\"",
")",
"if",
"\"%(std_options)s\"",
"in",
"doc",
":",
"env",
"[",
"\"std_options\"",
"]",
"=",
"txt",
".",
"indent",
"(",
"\"--help Show this screen.\\n\"",
"\"--version Show version.\"",
",",
"_find_prefix",
"(",
"doc",
",",
"\"%(std_options)s\"",
")",
",",
"first",
"=",
"\"\"",
")",
"if",
"subcmds",
"and",
"\"%(actions_help)s\"",
"not",
"in",
"doc",
":",
"doc",
"+=",
"\"\\n\\n%(actions_help)s\"",
"try",
":",
"output",
"=",
"doc",
"%",
"env",
"except",
"KeyError",
"as",
"e",
":",
"msg",
".",
"err",
"(",
"\"Doc interpolation of %s needed missing key %r\"",
"%",
"(",
"aformat",
"(",
"env",
"[",
"\"surcmd\"",
"]",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
",",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
"exit",
"(",
"1",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
".",
"err",
"(",
"\"Documentation of %s is not valid. Please check it:\\n%s\"",
"%",
"(",
"aformat",
"(",
"env",
"[",
"\"surcmd\"",
"]",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
",",
"doc",
")",
")",
"exit",
"(",
"1",
")",
"return",
"output"
] | 31.735294 | 17.25 |
def execute(self, stmt, **params):
"""Execute a SQL statement.
The statement may be a string SQL string,
an :func:`sqlalchemy.sql.expression.select` construct, or a
:func:`sqlalchemy.sql.expression.text`
construct.
"""
return self.session.execute(sql.text(stmt, bind=self.bind), **params) | [
"def",
"execute",
"(",
"self",
",",
"stmt",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"session",
".",
"execute",
"(",
"sql",
".",
"text",
"(",
"stmt",
",",
"bind",
"=",
"self",
".",
"bind",
")",
",",
"*",
"*",
"params",
")"
] | 33.9 | 18.9 |
def uninstall_handler(self, event_type, handler, user_handle=None):
"""Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler.
"""
self.visalib.uninstall_visa_handler(self.session, event_type, handler, user_handle) | [
"def",
"uninstall_handler",
"(",
"self",
",",
"event_type",
",",
"handler",
",",
"user_handle",
"=",
"None",
")",
":",
"self",
".",
"visalib",
".",
"uninstall_visa_handler",
"(",
"self",
".",
"session",
",",
"event_type",
",",
"handler",
",",
"user_handle",
")"
] | 54.222222 | 33.111111 |
def validate_cbarpos(value):
"""Validate a colorbar position
Parameters
----------
value: bool or str
A string can be a combination of 'sh|sv|fl|fr|ft|fb|b|r'
Returns
-------
list
list of strings with possible colorbar positions
Raises
------
ValueError"""
patt = 'sh|sv|fl|fr|ft|fb|b|r'
if value is True:
value = {'b'}
elif not value:
value = set()
elif isinstance(value, six.string_types):
for s in re.finditer('[^%s]+' % patt, value):
warn("Unknown colorbar position %s!" % s.group(), RuntimeWarning)
value = set(re.findall(patt, value))
else:
value = validate_stringset(value)
for s in (s for s in value
if not re.match(patt, s)):
warn("Unknown colorbar position %s!" % s)
value.remove(s)
return value | [
"def",
"validate_cbarpos",
"(",
"value",
")",
":",
"patt",
"=",
"'sh|sv|fl|fr|ft|fb|b|r'",
"if",
"value",
"is",
"True",
":",
"value",
"=",
"{",
"'b'",
"}",
"elif",
"not",
"value",
":",
"value",
"=",
"set",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"for",
"s",
"in",
"re",
".",
"finditer",
"(",
"'[^%s]+'",
"%",
"patt",
",",
"value",
")",
":",
"warn",
"(",
"\"Unknown colorbar position %s!\"",
"%",
"s",
".",
"group",
"(",
")",
",",
"RuntimeWarning",
")",
"value",
"=",
"set",
"(",
"re",
".",
"findall",
"(",
"patt",
",",
"value",
")",
")",
"else",
":",
"value",
"=",
"validate_stringset",
"(",
"value",
")",
"for",
"s",
"in",
"(",
"s",
"for",
"s",
"in",
"value",
"if",
"not",
"re",
".",
"match",
"(",
"patt",
",",
"s",
")",
")",
":",
"warn",
"(",
"\"Unknown colorbar position %s!\"",
"%",
"s",
")",
"value",
".",
"remove",
"(",
"s",
")",
"return",
"value"
] | 26.84375 | 19.59375 |
def set_template(path, template, context, defaults, saltenv='base', **kwargs):
'''
Set answers to debconf questions from a template.
path
location of the file containing the package selections
template
template format
context
variables to add to the template environment
default
default values for the template environment
CLI Example:
.. code-block:: bash
salt '*' debconf.set_template salt://pathto/pkg.selections.jinja jinja None None
'''
path = __salt__['cp.get_template'](
path=path,
dest=None,
template=template,
saltenv=saltenv,
context=context,
defaults=defaults,
**kwargs)
return set_file(path, saltenv, **kwargs) | [
"def",
"set_template",
"(",
"path",
",",
"template",
",",
"context",
",",
"defaults",
",",
"saltenv",
"=",
"'base'",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"__salt__",
"[",
"'cp.get_template'",
"]",
"(",
"path",
"=",
"path",
",",
"dest",
"=",
"None",
",",
"template",
"=",
"template",
",",
"saltenv",
"=",
"saltenv",
",",
"context",
"=",
"context",
",",
"defaults",
"=",
"defaults",
",",
"*",
"*",
"kwargs",
")",
"return",
"set_file",
"(",
"path",
",",
"saltenv",
",",
"*",
"*",
"kwargs",
")"
] | 21.735294 | 26.970588 |
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution. If separability == 0 it's a separable_conv."""
def conv_fn(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution, splits into separability-many blocks."""
separability = None
if "separability" in kwargs:
separability = kwargs.pop("separability")
if separability:
parts = []
abs_sep = separability if separability > 0 else -1 * separability
for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
with tf.variable_scope("part_%d" % split_idx):
if separability > 0:
parts.append(
layers().Conv2D(filters // separability, kernel_size,
**kwargs)(split))
else:
parts.append(
layers().SeparableConv2D(filters // abs_sep,
kernel_size, **kwargs)(split))
if separability > 1:
result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3))
elif abs_sep == 1: # If we have just one block, return it.
assert len(parts) == 1
result = parts[0]
else:
result = tf.concat(parts, axis=3)
else:
result = layers().SeparableConv2D(filters, kernel_size,
**kwargs)(inputs)
if separability is not None:
kwargs["separability"] = separability
return result
return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs) | [
"def",
"subseparable_conv",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"conv_fn",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Sub-separable convolution, splits into separability-many blocks.\"\"\"",
"separability",
"=",
"None",
"if",
"\"separability\"",
"in",
"kwargs",
":",
"separability",
"=",
"kwargs",
".",
"pop",
"(",
"\"separability\"",
")",
"if",
"separability",
":",
"parts",
"=",
"[",
"]",
"abs_sep",
"=",
"separability",
"if",
"separability",
">",
"0",
"else",
"-",
"1",
"*",
"separability",
"for",
"split_idx",
",",
"split",
"in",
"enumerate",
"(",
"tf",
".",
"split",
"(",
"inputs",
",",
"abs_sep",
",",
"axis",
"=",
"3",
")",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"part_%d\"",
"%",
"split_idx",
")",
":",
"if",
"separability",
">",
"0",
":",
"parts",
".",
"append",
"(",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"filters",
"//",
"separability",
",",
"kernel_size",
",",
"*",
"*",
"kwargs",
")",
"(",
"split",
")",
")",
"else",
":",
"parts",
".",
"append",
"(",
"layers",
"(",
")",
".",
"SeparableConv2D",
"(",
"filters",
"//",
"abs_sep",
",",
"kernel_size",
",",
"*",
"*",
"kwargs",
")",
"(",
"split",
")",
")",
"if",
"separability",
">",
"1",
":",
"result",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"filters",
",",
"(",
"1",
",",
"1",
")",
")",
"(",
"tf",
".",
"concat",
"(",
"parts",
",",
"axis",
"=",
"3",
")",
")",
"elif",
"abs_sep",
"==",
"1",
":",
"# If we have just one block, return it.",
"assert",
"len",
"(",
"parts",
")",
"==",
"1",
"result",
"=",
"parts",
"[",
"0",
"]",
"else",
":",
"result",
"=",
"tf",
".",
"concat",
"(",
"parts",
",",
"axis",
"=",
"3",
")",
"else",
":",
"result",
"=",
"layers",
"(",
")",
".",
"SeparableConv2D",
"(",
"filters",
",",
"kernel_size",
",",
"*",
"*",
"kwargs",
")",
"(",
"inputs",
")",
"if",
"separability",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"separability\"",
"]",
"=",
"separability",
"return",
"result",
"return",
"conv_internal",
"(",
"conv_fn",
",",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"*",
"*",
"kwargs",
")"
] | 42.027778 | 19.138889 |
def getresponse(self):
"""
Pass-thru method to make this class behave a little like HTTPConnection
"""
resp = self.http.getresponse()
self.log.info("resp is %s", str(resp))
if resp.status < 400:
return resp
else:
errtext = resp.read()
content_type = resp.getheader('Content-Type', 'text/plain')
raise HttpError(code=resp.status, content_type=content_type, content=errtext) | [
"def",
"getresponse",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"http",
".",
"getresponse",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"resp is %s\"",
",",
"str",
"(",
"resp",
")",
")",
"if",
"resp",
".",
"status",
"<",
"400",
":",
"return",
"resp",
"else",
":",
"errtext",
"=",
"resp",
".",
"read",
"(",
")",
"content_type",
"=",
"resp",
".",
"getheader",
"(",
"'Content-Type'",
",",
"'text/plain'",
")",
"raise",
"HttpError",
"(",
"code",
"=",
"resp",
".",
"status",
",",
"content_type",
"=",
"content_type",
",",
"content",
"=",
"errtext",
")"
] | 38.416667 | 17.25 |
def format_api_version(self, api_version):
""" Return QualysGuard API version for api_version specified.
"""
# Convert to int.
if type(api_version) == str:
api_version = api_version.lower()
if api_version[0] == 'v' and api_version[1].isdigit():
# Remove first 'v' in case the user typed 'v1' or 'v2', etc.
api_version = api_version[1:]
# Check for input matching Qualys modules.
if api_version in ('asset management', 'assets', 'tag', 'tagging', 'tags'):
# Convert to Asset Management API.
api_version = 'am'
elif api_version in ('am2'):
# Convert to Asset Management API v2
api_version = 'am2'
elif api_version in ('webapp', 'web application scanning', 'webapp scanning'):
# Convert to WAS API.
api_version = 'was'
elif api_version in ('pol', 'pc'):
# Convert PC module to API number 2.
api_version = 2
else:
api_version = int(api_version)
return api_version | [
"def",
"format_api_version",
"(",
"self",
",",
"api_version",
")",
":",
"# Convert to int.",
"if",
"type",
"(",
"api_version",
")",
"==",
"str",
":",
"api_version",
"=",
"api_version",
".",
"lower",
"(",
")",
"if",
"api_version",
"[",
"0",
"]",
"==",
"'v'",
"and",
"api_version",
"[",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"# Remove first 'v' in case the user typed 'v1' or 'v2', etc.",
"api_version",
"=",
"api_version",
"[",
"1",
":",
"]",
"# Check for input matching Qualys modules.",
"if",
"api_version",
"in",
"(",
"'asset management'",
",",
"'assets'",
",",
"'tag'",
",",
"'tagging'",
",",
"'tags'",
")",
":",
"# Convert to Asset Management API.",
"api_version",
"=",
"'am'",
"elif",
"api_version",
"in",
"(",
"'am2'",
")",
":",
"# Convert to Asset Management API v2",
"api_version",
"=",
"'am2'",
"elif",
"api_version",
"in",
"(",
"'webapp'",
",",
"'web application scanning'",
",",
"'webapp scanning'",
")",
":",
"# Convert to WAS API.",
"api_version",
"=",
"'was'",
"elif",
"api_version",
"in",
"(",
"'pol'",
",",
"'pc'",
")",
":",
"# Convert PC module to API number 2.",
"api_version",
"=",
"2",
"else",
":",
"api_version",
"=",
"int",
"(",
"api_version",
")",
"return",
"api_version"
] | 44.115385 | 13.653846 |
def base_dict_to_string(base_dict):
"""
Converts a dictionary to a string. {'C': 12, 'A':4} gets converted to C:12;A:4
:param base_dict: Dictionary of bases and counts created by find_if_multibase
:return: String representing that dictionary.
"""
outstr = ''
# First, sort base_dict so that major allele always comes first - makes output report nicer to look at.
base_list = sorted(base_dict.items(), key=lambda kv: kv[1], reverse=True)
for base in base_list:
outstr += '{}:{};'.format(base[0], base[1])
return outstr[:-1] | [
"def",
"base_dict_to_string",
"(",
"base_dict",
")",
":",
"outstr",
"=",
"''",
"# First, sort base_dict so that major allele always comes first - makes output report nicer to look at.",
"base_list",
"=",
"sorted",
"(",
"base_dict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"kv",
":",
"kv",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"for",
"base",
"in",
"base_list",
":",
"outstr",
"+=",
"'{}:{};'",
".",
"format",
"(",
"base",
"[",
"0",
"]",
",",
"base",
"[",
"1",
"]",
")",
"return",
"outstr",
"[",
":",
"-",
"1",
"]"
] | 46.583333 | 22.416667 |
def add_ptr_records(self, device, records):
"""
Adds one or more PTR records to the specified device.
"""
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
if not isinstance(records, (list, tuple)):
records = [records]
body = {"recordsList": {
"records": records},
"link": {
"content": "",
"href": href,
"rel": svc_name,
}}
uri = "/rdns"
# This is a necessary hack, so here's why: if you attempt to add
# PTR records to device, and you don't have rights to either the device
# or the IP address, the DNS API will return a 401 - Unauthorized.
# Unfortunately, the pyrax client interprets this as a bad auth token,
# and there is no way to distinguish this from an actual authentication
# failure. The client will attempt to re-authenticate as a result, and
# will fail, due to the DNS API not having regional endpoints. The net
# result is that an EndpointNotFound exception will be raised, which
# we catch here and then raise a more meaningful exception.
# The Rackspace DNS team is working on changing this to return a 403
# instead; when that happens this kludge can go away.
try:
resp, resp_body = self._async_call(uri, body=body, method="POST",
error_class=exc.PTRRecordCreationFailed)
except exc.EndpointNotFound:
raise exc.InvalidPTRRecord("The domain/IP address information is not "
"valid for this device.")
return resp_body.get("records")
records = [CloudDNSPTRRecord(rec, device)
for rec in resp_body.get("records", [])]
return records | [
"def",
"add_ptr_records",
"(",
"self",
",",
"device",
",",
"records",
")",
":",
"device_type",
"=",
"self",
".",
"_resolve_device_type",
"(",
"device",
")",
"href",
",",
"svc_name",
"=",
"self",
".",
"_get_ptr_details",
"(",
"device",
",",
"device_type",
")",
"if",
"not",
"isinstance",
"(",
"records",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"records",
"=",
"[",
"records",
"]",
"body",
"=",
"{",
"\"recordsList\"",
":",
"{",
"\"records\"",
":",
"records",
"}",
",",
"\"link\"",
":",
"{",
"\"content\"",
":",
"\"\"",
",",
"\"href\"",
":",
"href",
",",
"\"rel\"",
":",
"svc_name",
",",
"}",
"}",
"uri",
"=",
"\"/rdns\"",
"# This is a necessary hack, so here's why: if you attempt to add",
"# PTR records to device, and you don't have rights to either the device",
"# or the IP address, the DNS API will return a 401 - Unauthorized.",
"# Unfortunately, the pyrax client interprets this as a bad auth token,",
"# and there is no way to distinguish this from an actual authentication",
"# failure. The client will attempt to re-authenticate as a result, and",
"# will fail, due to the DNS API not having regional endpoints. The net",
"# result is that an EndpointNotFound exception will be raised, which",
"# we catch here and then raise a more meaningful exception.",
"# The Rackspace DNS team is working on changing this to return a 403",
"# instead; when that happens this kludge can go away.",
"try",
":",
"resp",
",",
"resp_body",
"=",
"self",
".",
"_async_call",
"(",
"uri",
",",
"body",
"=",
"body",
",",
"method",
"=",
"\"POST\"",
",",
"error_class",
"=",
"exc",
".",
"PTRRecordCreationFailed",
")",
"except",
"exc",
".",
"EndpointNotFound",
":",
"raise",
"exc",
".",
"InvalidPTRRecord",
"(",
"\"The domain/IP address information is not \"",
"\"valid for this device.\"",
")",
"return",
"resp_body",
".",
"get",
"(",
"\"records\"",
")",
"records",
"=",
"[",
"CloudDNSPTRRecord",
"(",
"rec",
",",
"device",
")",
"for",
"rec",
"in",
"resp_body",
".",
"get",
"(",
"\"records\"",
",",
"[",
"]",
")",
"]",
"return",
"records"
] | 50.378378 | 19.567568 |
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.'''
chain_pair = self.get_annotated_chain_sequence_string(chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = raise_Exception_if_not_found)
if chain_pair:
return chain_pair[1]
return None | [
"def",
"get_chain_sequence_string",
"(",
"self",
",",
"chain_id",
",",
"use_seqres_sequences_if_possible",
",",
"raise_Exception_if_not_found",
"=",
"True",
")",
":",
"chain_pair",
"=",
"self",
".",
"get_annotated_chain_sequence_string",
"(",
"chain_id",
",",
"use_seqres_sequences_if_possible",
",",
"raise_Exception_if_not_found",
"=",
"raise_Exception_if_not_found",
")",
"if",
"chain_pair",
":",
"return",
"chain_pair",
"[",
"1",
"]",
"return",
"None"
] | 82.666667 | 58.333333 |
def frame_from_firmware(self, firmware):
'''extract information from firmware, return pretty string to user'''
# see Tools/scripts/generate-manifest for this map:
frame_to_mavlink_dict = {
"quad": "QUADROTOR",
"hexa": "HEXAROTOR",
"y6": "ARDUPILOT_Y6",
"tri": "TRICOPTER",
"octa": "OCTOROTOR",
"octa-quad": "ARDUPILOT_OCTAQUAD",
"heli": "HELICOPTER",
"Plane": "FIXED_WING",
"Tracker": "ANTENNA_TRACKER",
"Rover": "GROUND_ROVER",
"PX4IO": "ARDUPILOT_PX4IO",
}
mavlink_to_frame_dict = { v : k for k,v in frame_to_mavlink_dict.items() }
x = firmware["mav-type"]
if firmware["mav-autopilot"] != "ARDUPILOTMEGA":
return x
if x in mavlink_to_frame_dict:
return mavlink_to_frame_dict[x]
return x | [
"def",
"frame_from_firmware",
"(",
"self",
",",
"firmware",
")",
":",
"# see Tools/scripts/generate-manifest for this map:",
"frame_to_mavlink_dict",
"=",
"{",
"\"quad\"",
":",
"\"QUADROTOR\"",
",",
"\"hexa\"",
":",
"\"HEXAROTOR\"",
",",
"\"y6\"",
":",
"\"ARDUPILOT_Y6\"",
",",
"\"tri\"",
":",
"\"TRICOPTER\"",
",",
"\"octa\"",
":",
"\"OCTOROTOR\"",
",",
"\"octa-quad\"",
":",
"\"ARDUPILOT_OCTAQUAD\"",
",",
"\"heli\"",
":",
"\"HELICOPTER\"",
",",
"\"Plane\"",
":",
"\"FIXED_WING\"",
",",
"\"Tracker\"",
":",
"\"ANTENNA_TRACKER\"",
",",
"\"Rover\"",
":",
"\"GROUND_ROVER\"",
",",
"\"PX4IO\"",
":",
"\"ARDUPILOT_PX4IO\"",
",",
"}",
"mavlink_to_frame_dict",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"frame_to_mavlink_dict",
".",
"items",
"(",
")",
"}",
"x",
"=",
"firmware",
"[",
"\"mav-type\"",
"]",
"if",
"firmware",
"[",
"\"mav-autopilot\"",
"]",
"!=",
"\"ARDUPILOTMEGA\"",
":",
"return",
"x",
"if",
"x",
"in",
"mavlink_to_frame_dict",
":",
"return",
"mavlink_to_frame_dict",
"[",
"x",
"]",
"return",
"x"
] | 37.291667 | 13.125 |
def send_request(self, method, url, headers=None,
json_data=None, retry=True):
"""Send requests to Skybell."""
if not self.cache(CONST.ACCESS_TOKEN) and url != CONST.LOGIN_URL:
self.login()
if not headers:
headers = {}
if self.cache(CONST.ACCESS_TOKEN):
headers['Authorization'] = 'Bearer ' + \
self.cache(CONST.ACCESS_TOKEN)
headers['user-agent'] = (
'SkyBell/3.4.1 (iPhone9,2; iOS 11.0; loc=en_US; lang=en-US) '
'com.skybell.doorbell/1')
headers['content-type'] = 'application/json'
headers['accepts'] = '*/*'
headers['x-skybell-app-id'] = self.cache(CONST.APP_ID)
headers['x-skybell-client-id'] = self.cache(CONST.CLIENT_ID)
_LOGGER.debug("HTTP %s %s Request with headers: %s",
method, url, headers)
try:
response = getattr(self._session, method)(
url, headers=headers, json=json_data)
_LOGGER.debug("%s %s", response, response.text)
if response and response.status_code < 400:
return response
except RequestException as exc:
_LOGGER.warning("Skybell request exception: %s", exc)
if retry:
self.login()
return self.send_request(method, url, headers, json_data, False)
raise SkybellException(ERROR.REQUEST, "Retry failed") | [
"def",
"send_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
"=",
"None",
",",
"json_data",
"=",
"None",
",",
"retry",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"cache",
"(",
"CONST",
".",
"ACCESS_TOKEN",
")",
"and",
"url",
"!=",
"CONST",
".",
"LOGIN_URL",
":",
"self",
".",
"login",
"(",
")",
"if",
"not",
"headers",
":",
"headers",
"=",
"{",
"}",
"if",
"self",
".",
"cache",
"(",
"CONST",
".",
"ACCESS_TOKEN",
")",
":",
"headers",
"[",
"'Authorization'",
"]",
"=",
"'Bearer '",
"+",
"self",
".",
"cache",
"(",
"CONST",
".",
"ACCESS_TOKEN",
")",
"headers",
"[",
"'user-agent'",
"]",
"=",
"(",
"'SkyBell/3.4.1 (iPhone9,2; iOS 11.0; loc=en_US; lang=en-US) '",
"'com.skybell.doorbell/1'",
")",
"headers",
"[",
"'content-type'",
"]",
"=",
"'application/json'",
"headers",
"[",
"'accepts'",
"]",
"=",
"'*/*'",
"headers",
"[",
"'x-skybell-app-id'",
"]",
"=",
"self",
".",
"cache",
"(",
"CONST",
".",
"APP_ID",
")",
"headers",
"[",
"'x-skybell-client-id'",
"]",
"=",
"self",
".",
"cache",
"(",
"CONST",
".",
"CLIENT_ID",
")",
"_LOGGER",
".",
"debug",
"(",
"\"HTTP %s %s Request with headers: %s\"",
",",
"method",
",",
"url",
",",
"headers",
")",
"try",
":",
"response",
"=",
"getattr",
"(",
"self",
".",
"_session",
",",
"method",
")",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"json",
"=",
"json_data",
")",
"_LOGGER",
".",
"debug",
"(",
"\"%s %s\"",
",",
"response",
",",
"response",
".",
"text",
")",
"if",
"response",
"and",
"response",
".",
"status_code",
"<",
"400",
":",
"return",
"response",
"except",
"RequestException",
"as",
"exc",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Skybell request exception: %s\"",
",",
"exc",
")",
"if",
"retry",
":",
"self",
".",
"login",
"(",
")",
"return",
"self",
".",
"send_request",
"(",
"method",
",",
"url",
",",
"headers",
",",
"json_data",
",",
"False",
")",
"raise",
"SkybellException",
"(",
"ERROR",
".",
"REQUEST",
",",
"\"Retry failed\"",
")"
] | 35.725 | 20.85 |
def bullseye_position(self) -> typing.Tuple[float, float]:
"""
Returns: bullseye position
"""
return self.bullseye_x, self.bullseye_y | [
"def",
"bullseye_position",
"(",
"self",
")",
"->",
"typing",
".",
"Tuple",
"[",
"float",
",",
"float",
"]",
":",
"return",
"self",
".",
"bullseye_x",
",",
"self",
".",
"bullseye_y"
] | 32.2 | 6.2 |
def generate_add_sub(self):
''' Generates prefixes/suffixes in a short form to parse and remove some redundancy '''
# Prefix or Suffix
affix_type = 'p:' if self.opt == "PFX" else 's:'
remove_char = '-' + self.char_to_strip if self.char_to_strip != '' else ''
return affix_type + remove_char + '+' + self.affix | [
"def",
"generate_add_sub",
"(",
"self",
")",
":",
"# Prefix or Suffix",
"affix_type",
"=",
"'p:'",
"if",
"self",
".",
"opt",
"==",
"\"PFX\"",
"else",
"'s:'",
"remove_char",
"=",
"'-'",
"+",
"self",
".",
"char_to_strip",
"if",
"self",
".",
"char_to_strip",
"!=",
"''",
"else",
"''",
"return",
"affix_type",
"+",
"remove_char",
"+",
"'+'",
"+",
"self",
".",
"affix"
] | 49.142857 | 28.285714 |
def add_holiday(self, start_dt, holiday_name, end_dt = None, calendar_id = 'notices'):
'''Adds a holiday event to the calendar. start_dt and end_dt (if supplied) should be date objects. Returns True if the event was added.'''
assert(calendar_id in self.configured_calendar_ids.keys())
calendarId = self.configured_calendar_ids[calendar_id]
# Note: end_date is one day ahead e.g. for the New Years' holiday Dec 31-Jan 1st, we specify the end_date as Jan 2nd. This is what the calendar expects.
if not end_dt:
end_dt = start_dt
start_date = date(year=start_dt.year, month=start_dt.month, day=start_dt.day)#, tzinfo=self.timezone)
end_date = date(year=end_dt.year, month=end_dt.month, day=end_dt.day) + timedelta(days = 1) #, tzinfo=self.timezone)
start_time = datetime(year=start_dt.year, month=start_dt.month, day=start_dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)
end_time = datetime(year=end_dt.year, month=end_dt.month, day=end_dt.day, hour=23, minute=59, second=59, tzinfo=self.timezone) + timedelta(days = 2)
# Do not add the quarter multiple times
events = self.get_events((start_time + timedelta(days = -1)).isoformat(), (end_time + timedelta(days = 1)).isoformat(), ignore_cancelled = True)
for event in events:
if event.summary.find(holiday_name) != -1:
return False
event_body = {
'summary' : holiday_name,
'description' : holiday_name,
'start' : {'date' : start_date.isoformat(), 'timeZone' : self.timezone_string},
'end' : {'date' : end_date.isoformat(), 'timeZone' : self.timezone_string},
'status' : 'confirmed',
'extendedProperties' : {
'shared' : {
'event_type' : 'Holiday'
}
}
}
if abs((end_date - start_date).days) > 7:
raise Exception('The range of dates from {0} to {1} is greater than expected. Please check to make sure that the dates are correct.'.format(start_date, end_date))
elif end_date < start_date:
raise Exception('Error: The end date {1} occurs before the start date ({0}).'.format(start_date, end_date))
created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()
return True | [
"def",
"add_holiday",
"(",
"self",
",",
"start_dt",
",",
"holiday_name",
",",
"end_dt",
"=",
"None",
",",
"calendar_id",
"=",
"'notices'",
")",
":",
"assert",
"(",
"calendar_id",
"in",
"self",
".",
"configured_calendar_ids",
".",
"keys",
"(",
")",
")",
"calendarId",
"=",
"self",
".",
"configured_calendar_ids",
"[",
"calendar_id",
"]",
"# Note: end_date is one day ahead e.g. for the New Years' holiday Dec 31-Jan 1st, we specify the end_date as Jan 2nd. This is what the calendar expects.",
"if",
"not",
"end_dt",
":",
"end_dt",
"=",
"start_dt",
"start_date",
"=",
"date",
"(",
"year",
"=",
"start_dt",
".",
"year",
",",
"month",
"=",
"start_dt",
".",
"month",
",",
"day",
"=",
"start_dt",
".",
"day",
")",
"#, tzinfo=self.timezone)",
"end_date",
"=",
"date",
"(",
"year",
"=",
"end_dt",
".",
"year",
",",
"month",
"=",
"end_dt",
".",
"month",
",",
"day",
"=",
"end_dt",
".",
"day",
")",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
"#, tzinfo=self.timezone)",
"start_time",
"=",
"datetime",
"(",
"year",
"=",
"start_dt",
".",
"year",
",",
"month",
"=",
"start_dt",
".",
"month",
",",
"day",
"=",
"start_dt",
".",
"day",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"tzinfo",
"=",
"self",
".",
"timezone",
")",
"+",
"timedelta",
"(",
"days",
"=",
"-",
"1",
")",
"end_time",
"=",
"datetime",
"(",
"year",
"=",
"end_dt",
".",
"year",
",",
"month",
"=",
"end_dt",
".",
"month",
",",
"day",
"=",
"end_dt",
".",
"day",
",",
"hour",
"=",
"23",
",",
"minute",
"=",
"59",
",",
"second",
"=",
"59",
",",
"tzinfo",
"=",
"self",
".",
"timezone",
")",
"+",
"timedelta",
"(",
"days",
"=",
"2",
")",
"# Do not add the quarter multiple times",
"events",
"=",
"self",
".",
"get_events",
"(",
"(",
"start_time",
"+",
"timedelta",
"(",
"days",
"=",
"-",
"1",
")",
")",
".",
"isoformat",
"(",
")",
",",
"(",
"end_time",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
".",
"isoformat",
"(",
")",
",",
"ignore_cancelled",
"=",
"True",
")",
"for",
"event",
"in",
"events",
":",
"if",
"event",
".",
"summary",
".",
"find",
"(",
"holiday_name",
")",
"!=",
"-",
"1",
":",
"return",
"False",
"event_body",
"=",
"{",
"'summary'",
":",
"holiday_name",
",",
"'description'",
":",
"holiday_name",
",",
"'start'",
":",
"{",
"'date'",
":",
"start_date",
".",
"isoformat",
"(",
")",
",",
"'timeZone'",
":",
"self",
".",
"timezone_string",
"}",
",",
"'end'",
":",
"{",
"'date'",
":",
"end_date",
".",
"isoformat",
"(",
")",
",",
"'timeZone'",
":",
"self",
".",
"timezone_string",
"}",
",",
"'status'",
":",
"'confirmed'",
",",
"'extendedProperties'",
":",
"{",
"'shared'",
":",
"{",
"'event_type'",
":",
"'Holiday'",
"}",
"}",
"}",
"if",
"abs",
"(",
"(",
"end_date",
"-",
"start_date",
")",
".",
"days",
")",
">",
"7",
":",
"raise",
"Exception",
"(",
"'The range of dates from {0} to {1} is greater than expected. Please check to make sure that the dates are correct.'",
".",
"format",
"(",
"start_date",
",",
"end_date",
")",
")",
"elif",
"end_date",
"<",
"start_date",
":",
"raise",
"Exception",
"(",
"'Error: The end date {1} occurs before the start date ({0}).'",
".",
"format",
"(",
"start_date",
",",
"end_date",
")",
")",
"created_event",
"=",
"self",
".",
"service",
".",
"events",
"(",
")",
".",
"insert",
"(",
"calendarId",
"=",
"self",
".",
"configured_calendar_ids",
"[",
"calendar_id",
"]",
",",
"body",
"=",
"event_body",
")",
".",
"execute",
"(",
")",
"return",
"True"
] | 62.153846 | 42.769231 |
def localize_date(date, city):
""" Localize date into city
Date: datetime
City: timezone city definitio. Example: 'Asia/Qatar', 'America/New York'..
"""
local = pytz.timezone(city)
local_dt = local.localize(date, is_dst=None)
return local_dt | [
"def",
"localize_date",
"(",
"date",
",",
"city",
")",
":",
"local",
"=",
"pytz",
".",
"timezone",
"(",
"city",
")",
"local_dt",
"=",
"local",
".",
"localize",
"(",
"date",
",",
"is_dst",
"=",
"None",
")",
"return",
"local_dt"
] | 29.111111 | 16.444444 |
def _bracket_complete_sig(self, symbol, fullsymbol):
"""Returns the call signature and docstring for the executable
immediately preceding a bracket '(' that was typed."""
if symbol != fullsymbol:
#We have a sym%sym%... chain and the completion just needs to
#be the signature of the member method.
target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol)
if symbol in target.executables:
child = target.executables[symbol]
return self._compile_signature(child.target, child.name)
elif symbol in target.members:
#We are dealing with a dimension request on an array that
#is a member of the type.
child = target.members[symbol]
return self._bracket_dim_suggest(child)
else:
return {}
else:
#We must be dealing with a regular executable or builtin fxn
#or a regular variable dimension.
iexec = self._bracket_exact_exec(symbol)
if iexec is not None:
#It is indeed a function we are completing for.
return self._compile_signature(iexec, iexec.name)
else:
#We need to look at local and global variables to find the
#variable declaration and dimensionality.
ivar = self._bracket_exact_var(symbol)
return self._bracket_dim_suggest(ivar) | [
"def",
"_bracket_complete_sig",
"(",
"self",
",",
"symbol",
",",
"fullsymbol",
")",
":",
"if",
"symbol",
"!=",
"fullsymbol",
":",
"#We have a sym%sym%... chain and the completion just needs to",
"#be the signature of the member method.",
"target",
",",
"targmod",
"=",
"self",
".",
"_get_chain_parent_symbol",
"(",
"symbol",
",",
"fullsymbol",
")",
"if",
"symbol",
"in",
"target",
".",
"executables",
":",
"child",
"=",
"target",
".",
"executables",
"[",
"symbol",
"]",
"return",
"self",
".",
"_compile_signature",
"(",
"child",
".",
"target",
",",
"child",
".",
"name",
")",
"elif",
"symbol",
"in",
"target",
".",
"members",
":",
"#We are dealing with a dimension request on an array that",
"#is a member of the type.",
"child",
"=",
"target",
".",
"members",
"[",
"symbol",
"]",
"return",
"self",
".",
"_bracket_dim_suggest",
"(",
"child",
")",
"else",
":",
"return",
"{",
"}",
"else",
":",
"#We must be dealing with a regular executable or builtin fxn",
"#or a regular variable dimension.",
"iexec",
"=",
"self",
".",
"_bracket_exact_exec",
"(",
"symbol",
")",
"if",
"iexec",
"is",
"not",
"None",
":",
"#It is indeed a function we are completing for.",
"return",
"self",
".",
"_compile_signature",
"(",
"iexec",
",",
"iexec",
".",
"name",
")",
"else",
":",
"#We need to look at local and global variables to find the",
"#variable declaration and dimensionality.",
"ivar",
"=",
"self",
".",
"_bracket_exact_var",
"(",
"symbol",
")",
"return",
"self",
".",
"_bracket_dim_suggest",
"(",
"ivar",
")"
] | 49.966667 | 16.7 |
def _msgmerge(po_path, pot_file, backup):
"""Merge an existing .po file with new translations.
:arg po_path: path to the .po file
:arg pot_file: a file-like object for the related templates
:arg backup: whether or not to create backup .po files
"""
pot_file.seek(0)
command = [
'msgmerge',
'--update',
'--width=200',
'--backup=%s' % ('simple' if backup else 'off'),
po_path,
'-'
]
p3 = Popen(command, stdin=pot_file)
p3.communicate() | [
"def",
"_msgmerge",
"(",
"po_path",
",",
"pot_file",
",",
"backup",
")",
":",
"pot_file",
".",
"seek",
"(",
"0",
")",
"command",
"=",
"[",
"'msgmerge'",
",",
"'--update'",
",",
"'--width=200'",
",",
"'--backup=%s'",
"%",
"(",
"'simple'",
"if",
"backup",
"else",
"'off'",
")",
",",
"po_path",
",",
"'-'",
"]",
"p3",
"=",
"Popen",
"(",
"command",
",",
"stdin",
"=",
"pot_file",
")",
"p3",
".",
"communicate",
"(",
")"
] | 28.055556 | 17.444444 |
def _collect(self, lines):
""" This routine reads the following from the Siesta file:
- atomic positions
- cell_parameters
- atomic_species
"""
for tag,value,unit in re.findall(
'([\.A-Za-z]+)\s+%s\s+([A-Za-z]+)?' %
self._num_regex,lines):
tag = tag.lower()
unit = unit.lower()
if tag == "latticeconstant":
self._tags['latticeconstantunit'] = unit.capitalize()
if unit == 'ang':
self._tags[tag] = float(value) / Bohr
elif unit == 'bohr':
self._tags[tag] = float(value)
else:
raise ValueError('Unknown LatticeConstant unit: {}'.format(unit))
for tag,value in re.findall('([\.A-Za-z]+)[ \t]+([a-zA-Z]+)',lines):
tag = tag.replace('_','').lower()
if tag == "atomiccoordinatesformat":
self._tags[tag] = value.strip().lower()
#check if the necessary tags are present
self.check_present('atomiccoordinatesformat')
acell = self._tags['latticeconstant']
#capture the blocks
blocks = re.findall(
'%block\s+([A-Za-z_]+)\s((?:.+\n)+?(?=(?:\s+)?%endblock))',
lines, re.MULTILINE)
for tag,block in blocks:
tag = tag.replace('_','').lower()
if tag == "chemicalspecieslabel":
lines = block.split('\n')[:-1]
self._tags["atomicnumbers"] = dict([map(int,species.split()[:2])
for species in lines])
self._tags[tag] = dict(
[(lambda x: (x[2],int(x[0])))(species.split())
for species in lines])
elif tag == "latticevectors":
self._tags[tag] = [[ float(v)*acell for v in vector.split()]
for vector in block.split('\n')[:3]]
elif tag == "atomiccoordinatesandatomicspecies":
lines = block.split('\n')[:-1]
self._tags["atomiccoordinates"] = [
[float(x) for x in atom.split()[:3]] for atom in lines]
self._tags["atomicspecies"] = [int(atom.split()[3])
for atom in lines]
#check if the block are present
self.check_present("atomicspecies")
self.check_present("atomiccoordinates")
self.check_present("latticevectors")
self.check_present("chemicalspecieslabel")
#translate the atomicspecies to atomic numbers
self._tags["atomicnumbers"] = [self._tags["atomicnumbers"][atype]
for atype in self._tags["atomicspecies"]] | [
"def",
"_collect",
"(",
"self",
",",
"lines",
")",
":",
"for",
"tag",
",",
"value",
",",
"unit",
"in",
"re",
".",
"findall",
"(",
"'([\\.A-Za-z]+)\\s+%s\\s+([A-Za-z]+)?'",
"%",
"self",
".",
"_num_regex",
",",
"lines",
")",
":",
"tag",
"=",
"tag",
".",
"lower",
"(",
")",
"unit",
"=",
"unit",
".",
"lower",
"(",
")",
"if",
"tag",
"==",
"\"latticeconstant\"",
":",
"self",
".",
"_tags",
"[",
"'latticeconstantunit'",
"]",
"=",
"unit",
".",
"capitalize",
"(",
")",
"if",
"unit",
"==",
"'ang'",
":",
"self",
".",
"_tags",
"[",
"tag",
"]",
"=",
"float",
"(",
"value",
")",
"/",
"Bohr",
"elif",
"unit",
"==",
"'bohr'",
":",
"self",
".",
"_tags",
"[",
"tag",
"]",
"=",
"float",
"(",
"value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown LatticeConstant unit: {}'",
".",
"format",
"(",
"unit",
")",
")",
"for",
"tag",
",",
"value",
"in",
"re",
".",
"findall",
"(",
"'([\\.A-Za-z]+)[ \\t]+([a-zA-Z]+)'",
",",
"lines",
")",
":",
"tag",
"=",
"tag",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
".",
"lower",
"(",
")",
"if",
"tag",
"==",
"\"atomiccoordinatesformat\"",
":",
"self",
".",
"_tags",
"[",
"tag",
"]",
"=",
"value",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"#check if the necessary tags are present",
"self",
".",
"check_present",
"(",
"'atomiccoordinatesformat'",
")",
"acell",
"=",
"self",
".",
"_tags",
"[",
"'latticeconstant'",
"]",
"#capture the blocks",
"blocks",
"=",
"re",
".",
"findall",
"(",
"'%block\\s+([A-Za-z_]+)\\s((?:.+\\n)+?(?=(?:\\s+)?%endblock))'",
",",
"lines",
",",
"re",
".",
"MULTILINE",
")",
"for",
"tag",
",",
"block",
"in",
"blocks",
":",
"tag",
"=",
"tag",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
".",
"lower",
"(",
")",
"if",
"tag",
"==",
"\"chemicalspecieslabel\"",
":",
"lines",
"=",
"block",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
"self",
".",
"_tags",
"[",
"\"atomicnumbers\"",
"]",
"=",
"dict",
"(",
"[",
"map",
"(",
"int",
",",
"species",
".",
"split",
"(",
")",
"[",
":",
"2",
"]",
")",
"for",
"species",
"in",
"lines",
"]",
")",
"self",
".",
"_tags",
"[",
"tag",
"]",
"=",
"dict",
"(",
"[",
"(",
"lambda",
"x",
":",
"(",
"x",
"[",
"2",
"]",
",",
"int",
"(",
"x",
"[",
"0",
"]",
")",
")",
")",
"(",
"species",
".",
"split",
"(",
")",
")",
"for",
"species",
"in",
"lines",
"]",
")",
"elif",
"tag",
"==",
"\"latticevectors\"",
":",
"self",
".",
"_tags",
"[",
"tag",
"]",
"=",
"[",
"[",
"float",
"(",
"v",
")",
"*",
"acell",
"for",
"v",
"in",
"vector",
".",
"split",
"(",
")",
"]",
"for",
"vector",
"in",
"block",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"3",
"]",
"]",
"elif",
"tag",
"==",
"\"atomiccoordinatesandatomicspecies\"",
":",
"lines",
"=",
"block",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
"self",
".",
"_tags",
"[",
"\"atomiccoordinates\"",
"]",
"=",
"[",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"atom",
".",
"split",
"(",
")",
"[",
":",
"3",
"]",
"]",
"for",
"atom",
"in",
"lines",
"]",
"self",
".",
"_tags",
"[",
"\"atomicspecies\"",
"]",
"=",
"[",
"int",
"(",
"atom",
".",
"split",
"(",
")",
"[",
"3",
"]",
")",
"for",
"atom",
"in",
"lines",
"]",
"#check if the block are present",
"self",
".",
"check_present",
"(",
"\"atomicspecies\"",
")",
"self",
".",
"check_present",
"(",
"\"atomiccoordinates\"",
")",
"self",
".",
"check_present",
"(",
"\"latticevectors\"",
")",
"self",
".",
"check_present",
"(",
"\"chemicalspecieslabel\"",
")",
"#translate the atomicspecies to atomic numbers",
"self",
".",
"_tags",
"[",
"\"atomicnumbers\"",
"]",
"=",
"[",
"self",
".",
"_tags",
"[",
"\"atomicnumbers\"",
"]",
"[",
"atype",
"]",
"for",
"atype",
"in",
"self",
".",
"_tags",
"[",
"\"atomicspecies\"",
"]",
"]"
] | 45.52459 | 16.131148 |
def marketShortInterestDF(date=None, token='', version=''):
'''The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.
The report data will be published daily at 4:00pm ET.
https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev
Args:
date (datetime); Effective Datetime
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.DataFrame(marketShortInterest(date, token, version))
_toDatetime(df)
return df | [
"def",
"marketShortInterestDF",
"(",
"date",
"=",
"None",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"marketShortInterest",
"(",
"date",
",",
"token",
",",
"version",
")",
")",
"_toDatetime",
"(",
"df",
")",
"return",
"df"
] | 32.166667 | 27.944444 |
def create_healthcheck(ip_addr=None, fqdn=None, region=None, key=None, keyid=None, profile=None,
port=53, hc_type='TCP', resource_path='', string_match=None, request_interval=30,
failure_threshold=3, retry_on_errors=True, error_retries=5):
'''
Create a Route53 healthcheck
.. versionadded:: 2018.3.0
ip_addr
IP address to check. ip_addr or fqdn is required.
fqdn
Domain name of the endpoint to check. ip_addr or fqdn is required
port
Port to check
hc_type
Healthcheck type. HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
resource_path
Path to check
string_match
If hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the
response body from the specified resource
request_interval
The number of seconds between the time that Amazon Route 53 gets a response from
your endpoint and the time that it sends the next health-check request.
failure_threshold
The number of consecutive health checks that an endpoint must pass or fail for
Amazon Route 53 to change the current status of the endpoint from unhealthy to
healthy or vice versa.
region
Region endpoint to connect to
key
AWS key
keyid
AWS keyid
profile
AWS pillar profile
CLI Example::
salt myminion boto_route53.create_healthcheck 192.168.0.1
salt myminion boto_route53.create_healthcheck 192.168.0.1 port=443 hc_type=HTTPS \
resource_path=/ fqdn=blog.saltstack.furniture
'''
if fqdn is None and ip_addr is None:
msg = 'One of the following must be specified: fqdn or ip_addr'
log.error(msg)
return {'error': msg}
hc_ = boto.route53.healthcheck.HealthCheck(ip_addr,
port,
hc_type,
resource_path,
fqdn=fqdn,
string_match=string_match,
request_interval=request_interval,
failure_threshold=failure_threshold)
if region is None:
region = 'universal'
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while error_retries > 0:
try:
return {'result': conn.create_health_check(hc_)}
except DNSServerError as exc:
log.debug(exc)
if retry_on_errors:
if 'Throttling' == exc.code:
log.debug('Throttled by AWS API.')
elif 'PriorRequestNotComplete' == exc.code:
log.debug('The request was rejected by AWS API.\
Route 53 was still processing a prior request')
time.sleep(3)
error_retries -= 1
continue
return {'error': __utils__['boto.get_error'](exc)}
return False | [
"def",
"create_healthcheck",
"(",
"ip_addr",
"=",
"None",
",",
"fqdn",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"port",
"=",
"53",
",",
"hc_type",
"=",
"'TCP'",
",",
"resource_path",
"=",
"''",
",",
"string_match",
"=",
"None",
",",
"request_interval",
"=",
"30",
",",
"failure_threshold",
"=",
"3",
",",
"retry_on_errors",
"=",
"True",
",",
"error_retries",
"=",
"5",
")",
":",
"if",
"fqdn",
"is",
"None",
"and",
"ip_addr",
"is",
"None",
":",
"msg",
"=",
"'One of the following must be specified: fqdn or ip_addr'",
"log",
".",
"error",
"(",
"msg",
")",
"return",
"{",
"'error'",
":",
"msg",
"}",
"hc_",
"=",
"boto",
".",
"route53",
".",
"healthcheck",
".",
"HealthCheck",
"(",
"ip_addr",
",",
"port",
",",
"hc_type",
",",
"resource_path",
",",
"fqdn",
"=",
"fqdn",
",",
"string_match",
"=",
"string_match",
",",
"request_interval",
"=",
"request_interval",
",",
"failure_threshold",
"=",
"failure_threshold",
")",
"if",
"region",
"is",
"None",
":",
"region",
"=",
"'universal'",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"while",
"error_retries",
">",
"0",
":",
"try",
":",
"return",
"{",
"'result'",
":",
"conn",
".",
"create_health_check",
"(",
"hc_",
")",
"}",
"except",
"DNSServerError",
"as",
"exc",
":",
"log",
".",
"debug",
"(",
"exc",
")",
"if",
"retry_on_errors",
":",
"if",
"'Throttling'",
"==",
"exc",
".",
"code",
":",
"log",
".",
"debug",
"(",
"'Throttled by AWS API.'",
")",
"elif",
"'PriorRequestNotComplete'",
"==",
"exc",
".",
"code",
":",
"log",
".",
"debug",
"(",
"'The request was rejected by AWS API.\\\n Route 53 was still processing a prior request'",
")",
"time",
".",
"sleep",
"(",
"3",
")",
"error_retries",
"-=",
"1",
"continue",
"return",
"{",
"'error'",
":",
"__utils__",
"[",
"'boto.get_error'",
"]",
"(",
"exc",
")",
"}",
"return",
"False"
] | 30.96 | 29.62 |
def inputAnalysis(allSequences, model, numColumns):
"""
Calculates the overlap score of each SDR used as input to the temporal memory. Generates
an overlap matrix with entries (i,j) = overlapScore(i, j)
@param allSequences (array) sequences using during the experiment
@param model (string) string specifying whether the experiment used random or
periodic data
@param numColumns (int) number of columns in the temporal memory
@return overlapMatrix (array) matrix whose entries (i, j) contain the overlap score
between SDRs i and j
"""
records = np.shape(allSequences)[0]
symbols = np.shape(allSequences)[1]
totalItems = records * symbols
overlapMatrix = np.zeros((totalItems, totalItems))
for i in range(totalItems):
if i % 500 == 0:
print str(i) + " rows processed"
for j in range(totalItems):
if model == "random":
overlapMatrix[i, j] = percentOverlap(allSequences[int(i/symbols)][i%symbols],
allSequences[int(j/symbols)][j%symbols],
numColumns)
elif model == "periodic":
overlapMatrix[i, j] = percentOverlap(allSequences[i][0], allSequences[j][0],
numColumns)
print "***All rows processed!***"
# substract diagonal from correlation matrix
overlapMatrix = np.subtract(overlapMatrix, np.identity(totalItems))
return overlapMatrix | [
"def",
"inputAnalysis",
"(",
"allSequences",
",",
"model",
",",
"numColumns",
")",
":",
"records",
"=",
"np",
".",
"shape",
"(",
"allSequences",
")",
"[",
"0",
"]",
"symbols",
"=",
"np",
".",
"shape",
"(",
"allSequences",
")",
"[",
"1",
"]",
"totalItems",
"=",
"records",
"*",
"symbols",
"overlapMatrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"totalItems",
",",
"totalItems",
")",
")",
"for",
"i",
"in",
"range",
"(",
"totalItems",
")",
":",
"if",
"i",
"%",
"500",
"==",
"0",
":",
"print",
"str",
"(",
"i",
")",
"+",
"\" rows processed\"",
"for",
"j",
"in",
"range",
"(",
"totalItems",
")",
":",
"if",
"model",
"==",
"\"random\"",
":",
"overlapMatrix",
"[",
"i",
",",
"j",
"]",
"=",
"percentOverlap",
"(",
"allSequences",
"[",
"int",
"(",
"i",
"/",
"symbols",
")",
"]",
"[",
"i",
"%",
"symbols",
"]",
",",
"allSequences",
"[",
"int",
"(",
"j",
"/",
"symbols",
")",
"]",
"[",
"j",
"%",
"symbols",
"]",
",",
"numColumns",
")",
"elif",
"model",
"==",
"\"periodic\"",
":",
"overlapMatrix",
"[",
"i",
",",
"j",
"]",
"=",
"percentOverlap",
"(",
"allSequences",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"allSequences",
"[",
"j",
"]",
"[",
"0",
"]",
",",
"numColumns",
")",
"print",
"\"***All rows processed!***\"",
"# substract diagonal from correlation matrix",
"overlapMatrix",
"=",
"np",
".",
"subtract",
"(",
"overlapMatrix",
",",
"np",
".",
"identity",
"(",
"totalItems",
")",
")",
"return",
"overlapMatrix"
] | 41.371429 | 21.371429 |
def _next_move_direction(self):
"""
pick a move at random from the list of moves
"""
nmoves = len(self.moves)
move = np.random.randint(1, nmoves+1)
while self.prev_move == (move + 3) % nmoves:
move = np.random.randint(1, nmoves+1)
self.prev_move = move
return np.array(self.moves[move]) | [
"def",
"_next_move_direction",
"(",
"self",
")",
":",
"nmoves",
"=",
"len",
"(",
"self",
".",
"moves",
")",
"move",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"1",
",",
"nmoves",
"+",
"1",
")",
"while",
"self",
".",
"prev_move",
"==",
"(",
"move",
"+",
"3",
")",
"%",
"nmoves",
":",
"move",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"1",
",",
"nmoves",
"+",
"1",
")",
"self",
".",
"prev_move",
"=",
"move",
"return",
"np",
".",
"array",
"(",
"self",
".",
"moves",
"[",
"move",
"]",
")"
] | 35.3 | 6.7 |
def his_from_sql(self, db_name, point):
"""
Retrive point histories from SQL database
"""
his = self._read_from_sql('select * from "%s"' % "history", db_name)
his.index = his["index"].apply(Timestamp)
return his.set_index("index")[point] | [
"def",
"his_from_sql",
"(",
"self",
",",
"db_name",
",",
"point",
")",
":",
"his",
"=",
"self",
".",
"_read_from_sql",
"(",
"'select * from \"%s\"'",
"%",
"\"history\"",
",",
"db_name",
")",
"his",
".",
"index",
"=",
"his",
"[",
"\"index\"",
"]",
".",
"apply",
"(",
"Timestamp",
")",
"return",
"his",
".",
"set_index",
"(",
"\"index\"",
")",
"[",
"point",
"]"
] | 39.857143 | 8.428571 |
def _find_server(account, servername=None):
""" Find and return a PlexServer object. """
servers = servers = [s for s in account.resources() if 'server' in s.provides]
# If servername specified find and return it
if servername is not None:
for server in servers:
if server.name == servername:
return server.connect()
raise SystemExit('Unknown server name: %s' % servername)
# If servername not specified; allow user to choose
return utils.choose('Choose a Server', servers, 'name').connect() | [
"def",
"_find_server",
"(",
"account",
",",
"servername",
"=",
"None",
")",
":",
"servers",
"=",
"servers",
"=",
"[",
"s",
"for",
"s",
"in",
"account",
".",
"resources",
"(",
")",
"if",
"'server'",
"in",
"s",
".",
"provides",
"]",
"# If servername specified find and return it",
"if",
"servername",
"is",
"not",
"None",
":",
"for",
"server",
"in",
"servers",
":",
"if",
"server",
".",
"name",
"==",
"servername",
":",
"return",
"server",
".",
"connect",
"(",
")",
"raise",
"SystemExit",
"(",
"'Unknown server name: %s'",
"%",
"servername",
")",
"# If servername not specified; allow user to choose",
"return",
"utils",
".",
"choose",
"(",
"'Choose a Server'",
",",
"servers",
",",
"'name'",
")",
".",
"connect",
"(",
")"
] | 49.909091 | 13 |
def mine_blocks(num_blocks: int, chain: MiningChain) -> MiningChain:
"""
Variadic argument version of :func:`~eth.tools.builder.chain.mine_block`
"""
if not isinstance(chain, MiningChain):
raise ValidationError('`mine_block` may only be used on MiningChain instances')
for _ in range(num_blocks):
chain.mine_block()
return chain | [
"def",
"mine_blocks",
"(",
"num_blocks",
":",
"int",
",",
"chain",
":",
"MiningChain",
")",
"->",
"MiningChain",
":",
"if",
"not",
"isinstance",
"(",
"chain",
",",
"MiningChain",
")",
":",
"raise",
"ValidationError",
"(",
"'`mine_block` may only be used on MiningChain instances'",
")",
"for",
"_",
"in",
"range",
"(",
"num_blocks",
")",
":",
"chain",
".",
"mine_block",
"(",
")",
"return",
"chain"
] | 40 | 17.777778 |
def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None):
""" 2nd moment matrix exploiting zero input columns """
C = np.zeros((len(mask_X), len(mask_Y)))
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
return C | [
"def",
"_M2_sparse",
"(",
"Xvar",
",",
"mask_X",
",",
"Yvar",
",",
"mask_Y",
",",
"weights",
"=",
"None",
")",
":",
"C",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"mask_X",
")",
",",
"len",
"(",
"mask_Y",
")",
")",
")",
"C",
"[",
"np",
".",
"ix_",
"(",
"mask_X",
",",
"mask_Y",
")",
"]",
"=",
"_M2_dense",
"(",
"Xvar",
",",
"Yvar",
",",
"weights",
"=",
"weights",
")",
"return",
"C"
] | 48.4 | 15.8 |
def events_client(self, client):
"""
Get a client's events. Uses GET to /events/clients/<client> interface.
:Args:
* *client*: (str) Client's ID
:Returns: (list) Events
"""
# TODO Add paging to this
client = self._client_id(client)
response = self._get(url.events_clients_id.format(id=client))
self._check_response(response, 200)
return self._create_response(response).get("events") | [
"def",
"events_client",
"(",
"self",
",",
"client",
")",
":",
"# TODO Add paging to this",
"client",
"=",
"self",
".",
"_client_id",
"(",
"client",
")",
"response",
"=",
"self",
".",
"_get",
"(",
"url",
".",
"events_clients_id",
".",
"format",
"(",
"id",
"=",
"client",
")",
")",
"self",
".",
"_check_response",
"(",
"response",
",",
"200",
")",
"return",
"self",
".",
"_create_response",
"(",
"response",
")",
".",
"get",
"(",
"\"events\"",
")"
] | 33 | 15.857143 |
def format_modes(modes, full_modes=False, current_mode=None):
""" Creates a nice readily printable Table for a list of modes.
Used in `displays list' and the candidates list
in `displays set'. """
t = table.Table(((
'*' if mode == current_mode else '', # 0
str(Q.CGDisplayModeGetWidth(mode)), # 1
str(Q.CGDisplayModeGetHeight(mode)), # 2
'@'+shorter_float_str(Q.CGDisplayModeGetRefreshRate(mode)), # 3
format_pixelEncoding(
Q.CGDisplayModeCopyPixelEncoding(mode))) # 4
for mode in modes))
t.set_key(2, 'height')
t.set_key(3, 'rate')
t.set_key(4, 'depth')
t.set_alignment('height', 'l')
t.set_alignment('rate', 'l')
t.set_separator('height', ' x ')
created_flags_col = False
if full_modes:
t.append_col(tuple((' '.join(get_flags_of_mode(mode))
for mode in modes)), key='flags')
created_flags_col = True
else:
# Remove refresh rate and bit depth if they are all the same
if len(frozenset(t.get_col('rate'))) == 1:
t.del_col('rate')
if len(frozenset(t.get_col('depth'))) == 1:
t.del_col('depth')
# Show distinct IO flags when several modes appear the same
lut = {}
for i, row in enumerate(t):
row = tuple(row)
if row not in lut:
lut[row] = []
elif not created_flags_col:
t.append_col(('',) * len(modes), key='flags')
lut[row].append(i)
for rw, indices in lut.iteritems():
if len(indices) == 1:
continue
flags = {}
for i in indices:
flags[i] = get_flags_of_mode(modes[i])
common_flags = reduce(lambda x, y: x.intersection(y),
map(frozenset, flags.itervalues()))
for i in indices:
t[i, 'flags'] = ' '.join(frozenset(flags[i])
- common_flags)
if created_flags_col:
t.set_alignment('flags', 'l')
return t | [
"def",
"format_modes",
"(",
"modes",
",",
"full_modes",
"=",
"False",
",",
"current_mode",
"=",
"None",
")",
":",
"t",
"=",
"table",
".",
"Table",
"(",
"(",
"(",
"'*'",
"if",
"mode",
"==",
"current_mode",
"else",
"''",
",",
"# 0",
"str",
"(",
"Q",
".",
"CGDisplayModeGetWidth",
"(",
"mode",
")",
")",
",",
"# 1",
"str",
"(",
"Q",
".",
"CGDisplayModeGetHeight",
"(",
"mode",
")",
")",
",",
"# 2",
"'@'",
"+",
"shorter_float_str",
"(",
"Q",
".",
"CGDisplayModeGetRefreshRate",
"(",
"mode",
")",
")",
",",
"# 3",
"format_pixelEncoding",
"(",
"Q",
".",
"CGDisplayModeCopyPixelEncoding",
"(",
"mode",
")",
")",
")",
"# 4",
"for",
"mode",
"in",
"modes",
")",
")",
"t",
".",
"set_key",
"(",
"2",
",",
"'height'",
")",
"t",
".",
"set_key",
"(",
"3",
",",
"'rate'",
")",
"t",
".",
"set_key",
"(",
"4",
",",
"'depth'",
")",
"t",
".",
"set_alignment",
"(",
"'height'",
",",
"'l'",
")",
"t",
".",
"set_alignment",
"(",
"'rate'",
",",
"'l'",
")",
"t",
".",
"set_separator",
"(",
"'height'",
",",
"' x '",
")",
"created_flags_col",
"=",
"False",
"if",
"full_modes",
":",
"t",
".",
"append_col",
"(",
"tuple",
"(",
"(",
"' '",
".",
"join",
"(",
"get_flags_of_mode",
"(",
"mode",
")",
")",
"for",
"mode",
"in",
"modes",
")",
")",
",",
"key",
"=",
"'flags'",
")",
"created_flags_col",
"=",
"True",
"else",
":",
"# Remove refresh rate and bit depth if they are all the same",
"if",
"len",
"(",
"frozenset",
"(",
"t",
".",
"get_col",
"(",
"'rate'",
")",
")",
")",
"==",
"1",
":",
"t",
".",
"del_col",
"(",
"'rate'",
")",
"if",
"len",
"(",
"frozenset",
"(",
"t",
".",
"get_col",
"(",
"'depth'",
")",
")",
")",
"==",
"1",
":",
"t",
".",
"del_col",
"(",
"'depth'",
")",
"# Show distinct IO flags when several modes appear the same",
"lut",
"=",
"{",
"}",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"t",
")",
":",
"row",
"=",
"tuple",
"(",
"row",
")",
"if",
"row",
"not",
"in",
"lut",
":",
"lut",
"[",
"row",
"]",
"=",
"[",
"]",
"elif",
"not",
"created_flags_col",
":",
"t",
".",
"append_col",
"(",
"(",
"''",
",",
")",
"*",
"len",
"(",
"modes",
")",
",",
"key",
"=",
"'flags'",
")",
"lut",
"[",
"row",
"]",
".",
"append",
"(",
"i",
")",
"for",
"rw",
",",
"indices",
"in",
"lut",
".",
"iteritems",
"(",
")",
":",
"if",
"len",
"(",
"indices",
")",
"==",
"1",
":",
"continue",
"flags",
"=",
"{",
"}",
"for",
"i",
"in",
"indices",
":",
"flags",
"[",
"i",
"]",
"=",
"get_flags_of_mode",
"(",
"modes",
"[",
"i",
"]",
")",
"common_flags",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
".",
"intersection",
"(",
"y",
")",
",",
"map",
"(",
"frozenset",
",",
"flags",
".",
"itervalues",
"(",
")",
")",
")",
"for",
"i",
"in",
"indices",
":",
"t",
"[",
"i",
",",
"'flags'",
"]",
"=",
"' '",
".",
"join",
"(",
"frozenset",
"(",
"flags",
"[",
"i",
"]",
")",
"-",
"common_flags",
")",
"if",
"created_flags_col",
":",
"t",
".",
"set_alignment",
"(",
"'flags'",
",",
"'l'",
")",
"return",
"t"
] | 49.207547 | 16.471698 |
def corr(self):
'''The correlation matrix'''
cov = self.cov()
N = cov.shape[0]
corr = ndarray((N,N))
for r in range(N):
for c in range(r):
corr[r,c] = corr[c,r] = cov[r,c]/sqrt(cov[r,r]*cov[c,c])
corr[r,r] = 1.
return corr | [
"def",
"corr",
"(",
"self",
")",
":",
"cov",
"=",
"self",
".",
"cov",
"(",
")",
"N",
"=",
"cov",
".",
"shape",
"[",
"0",
"]",
"corr",
"=",
"ndarray",
"(",
"(",
"N",
",",
"N",
")",
")",
"for",
"r",
"in",
"range",
"(",
"N",
")",
":",
"for",
"c",
"in",
"range",
"(",
"r",
")",
":",
"corr",
"[",
"r",
",",
"c",
"]",
"=",
"corr",
"[",
"c",
",",
"r",
"]",
"=",
"cov",
"[",
"r",
",",
"c",
"]",
"/",
"sqrt",
"(",
"cov",
"[",
"r",
",",
"r",
"]",
"*",
"cov",
"[",
"c",
",",
"c",
"]",
")",
"corr",
"[",
"r",
",",
"r",
"]",
"=",
"1.",
"return",
"corr"
] | 31 | 15.6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.