text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def aggregationDivide(dividend, divisor):
"""
Return the result from dividing two dicts that represent date and time.
Both dividend and divisor are dicts that contain one or more of the following
keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',
'milliseconds', 'microseconds'.
For example:
::
aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
:param dividend: (dict) The numerator, as a dict representing a date and time
:param divisor: (dict) the denominator, as a dict representing a date and time
:returns: (float) number of times divisor goes into dividend
"""
# Convert each into microseconds
dividendMonthSec = aggregationToMonthsSeconds(dividend)
divisorMonthSec = aggregationToMonthsSeconds(divisor)
# It is a usage error to mix both months and seconds in the same operation
if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
raise RuntimeError("Aggregation dicts with months/years can only be "
"inter-operated with other aggregation dicts that contain "
"months/years")
if dividendMonthSec['months'] > 0:
return float(dividendMonthSec['months']) / divisor['months']
else:
return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds'] | [
"def",
"aggregationDivide",
"(",
"dividend",
",",
"divisor",
")",
":",
"# Convert each into microseconds",
"dividendMonthSec",
"=",
"aggregationToMonthsSeconds",
"(",
"dividend",
")",
"divisorMonthSec",
"=",
"aggregationToMonthsSeconds",
"(",
"divisor",
")",
"# It is a usage error to mix both months and seconds in the same operation",
"if",
"(",
"dividendMonthSec",
"[",
"'months'",
"]",
"!=",
"0",
"and",
"divisorMonthSec",
"[",
"'seconds'",
"]",
"!=",
"0",
")",
"or",
"(",
"dividendMonthSec",
"[",
"'seconds'",
"]",
"!=",
"0",
"and",
"divisorMonthSec",
"[",
"'months'",
"]",
"!=",
"0",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Aggregation dicts with months/years can only be \"",
"\"inter-operated with other aggregation dicts that contain \"",
"\"months/years\"",
")",
"if",
"dividendMonthSec",
"[",
"'months'",
"]",
">",
"0",
":",
"return",
"float",
"(",
"dividendMonthSec",
"[",
"'months'",
"]",
")",
"/",
"divisor",
"[",
"'months'",
"]",
"else",
":",
"return",
"float",
"(",
"dividendMonthSec",
"[",
"'seconds'",
"]",
")",
"/",
"divisorMonthSec",
"[",
"'seconds'",
"]"
] | 35.702703 | 28.459459 |
def _assign_work_unit(self, node):
"""Assign a work unit to a node."""
assert self.workqueue
# Grab a unit of work
scope, work_unit = self.workqueue.popitem(last=False)
# Keep track of the assigned work
assigned_to_node = self.assigned_work.setdefault(node, default=OrderedDict())
assigned_to_node[scope] = work_unit
# Ask the node to execute the workload
worker_collection = self.registered_collections[node]
nodeids_indexes = [
worker_collection.index(nodeid)
for nodeid, completed in work_unit.items()
if not completed
]
node.send_runtest_some(nodeids_indexes) | [
"def",
"_assign_work_unit",
"(",
"self",
",",
"node",
")",
":",
"assert",
"self",
".",
"workqueue",
"# Grab a unit of work",
"scope",
",",
"work_unit",
"=",
"self",
".",
"workqueue",
".",
"popitem",
"(",
"last",
"=",
"False",
")",
"# Keep track of the assigned work",
"assigned_to_node",
"=",
"self",
".",
"assigned_work",
".",
"setdefault",
"(",
"node",
",",
"default",
"=",
"OrderedDict",
"(",
")",
")",
"assigned_to_node",
"[",
"scope",
"]",
"=",
"work_unit",
"# Ask the node to execute the workload",
"worker_collection",
"=",
"self",
".",
"registered_collections",
"[",
"node",
"]",
"nodeids_indexes",
"=",
"[",
"worker_collection",
".",
"index",
"(",
"nodeid",
")",
"for",
"nodeid",
",",
"completed",
"in",
"work_unit",
".",
"items",
"(",
")",
"if",
"not",
"completed",
"]",
"node",
".",
"send_runtest_some",
"(",
"nodeids_indexes",
")"
] | 34 | 18.25 |
def register(func=None, name=None):
"""
Expose compiler to factory.
:param func: the callable to expose
:type func: callable
:param name: name of format
:type name: str
It can be used as a decorator::
@register(name='my:validator')
def my_validator(obj):
if obj is True:
return obj
raise ValidationError('obj is not true')
or as a function::
def my_validator(obj):
if obj is True:
return obj
raise ValidationError('obj is not true')
@register(name='my:validator')
"""
if not name:
raise CompilationError('Name is required')
if not func:
return partial(register, name=name)
return FormatRegistry.register(name, func) | [
"def",
"register",
"(",
"func",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"raise",
"CompilationError",
"(",
"'Name is required'",
")",
"if",
"not",
"func",
":",
"return",
"partial",
"(",
"register",
",",
"name",
"=",
"name",
")",
"return",
"FormatRegistry",
".",
"register",
"(",
"name",
",",
"func",
")"
] | 23.96875 | 16.65625 |
def _connected(client):
"""
Connected to AMP server, start listening locally, and give the AMP
client a reference to the local listening factory.
"""
log.msg("Connected to AMP server, starting to listen locally...")
localFactory = multiplexing.ProxyingFactory(client, "hello")
return listeningEndpoint.listen(localFactory) | [
"def",
"_connected",
"(",
"client",
")",
":",
"log",
".",
"msg",
"(",
"\"Connected to AMP server, starting to listen locally...\"",
")",
"localFactory",
"=",
"multiplexing",
".",
"ProxyingFactory",
"(",
"client",
",",
"\"hello\"",
")",
"return",
"listeningEndpoint",
".",
"listen",
"(",
"localFactory",
")"
] | 42.875 | 15.375 |
def get_global_rate_limit(self):
"""Get the global rate limit per client.
:rtype: int
:returns: The global rate limit for each client.
"""
r = urllib.request.urlopen('https://archive.org/metadata/iamine-rate-limiter')
j = json.loads(r.read().decode('utf-8'))
return int(j.get('metadata', {}).get('rate_per_second', 300)) | [
"def",
"get_global_rate_limit",
"(",
"self",
")",
":",
"r",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"'https://archive.org/metadata/iamine-rate-limiter'",
")",
"j",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"return",
"int",
"(",
"j",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'rate_per_second'",
",",
"300",
")",
")"
] | 41 | 18.666667 |
def power(self, n):
"""The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
PTM: the matrix power of the SuperOp converted to a PTM channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
"""
if n > 0:
return super().power(n)
return PTM(SuperOp(self).power(n)) | [
"def",
"power",
"(",
"self",
",",
"n",
")",
":",
"if",
"n",
">",
"0",
":",
"return",
"super",
"(",
")",
".",
"power",
"(",
"n",
")",
"return",
"PTM",
"(",
"SuperOp",
"(",
"self",
")",
".",
"power",
"(",
"n",
")",
")"
] | 31.25 | 23.5 |
def sanitize(name):
"""
Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"<" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace.
"""
return name.replace(
"<", "<"
).replace(
">", ">"
).replace(
"&", "&"
).replace(
"< ", "<"
).replace(
" >", ">"
).replace(
" &", "&"
).replace(
"& ", "&"
) | [
"def",
"sanitize",
"(",
"name",
")",
":",
"return",
"name",
".",
"replace",
"(",
"\"<\"",
",",
"\"<\"",
")",
".",
"replace",
"(",
"\">\"",
",",
"\">\"",
")",
".",
"replace",
"(",
"\"&\"",
",",
"\"&\"",
")",
".",
"replace",
"(",
"\"< \"",
",",
"\"<\"",
")",
".",
"replace",
"(",
"\" >\"",
",",
"\">\"",
")",
".",
"replace",
"(",
"\" &\"",
",",
"\"&\"",
")",
".",
"replace",
"(",
"\"& \"",
",",
"\"&\"",
")"
] | 24.25 | 25.0625 |
def route(
self,
uri,
methods=frozenset({"GET"}),
host=None,
strict_slashes=None,
stream=False,
version=None,
name=None,
):
"""Create a blueprint route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host: IP Address of FQDN for the sanic server to use.
:param strict_slashes: Enforce the API urls are requested with a
training */*
:param stream: If the route should provide a streaming support
:param version: Blueprint Version
:param name: Unique name to identify the Route
:return a decorated method that when invoked will return an object
of type :class:`FutureRoute`
"""
if strict_slashes is None:
strict_slashes = self.strict_slashes
def decorator(handler):
route = FutureRoute(
handler,
uri,
methods,
host,
strict_slashes,
stream,
version,
name,
)
self.routes.append(route)
return handler
return decorator | [
"def",
"route",
"(",
"self",
",",
"uri",
",",
"methods",
"=",
"frozenset",
"(",
"{",
"\"GET\"",
"}",
")",
",",
"host",
"=",
"None",
",",
"strict_slashes",
"=",
"None",
",",
"stream",
"=",
"False",
",",
"version",
"=",
"None",
",",
"name",
"=",
"None",
",",
")",
":",
"if",
"strict_slashes",
"is",
"None",
":",
"strict_slashes",
"=",
"self",
".",
"strict_slashes",
"def",
"decorator",
"(",
"handler",
")",
":",
"route",
"=",
"FutureRoute",
"(",
"handler",
",",
"uri",
",",
"methods",
",",
"host",
",",
"strict_slashes",
",",
"stream",
",",
"version",
",",
"name",
",",
")",
"self",
".",
"routes",
".",
"append",
"(",
"route",
")",
"return",
"handler",
"return",
"decorator"
] | 29.857143 | 19.02381 |
def health_check(self):
""" Pull health and alarm information from the device.
Purpose: Grab the cpu/mem usage, system/chassis alarms, top 5
| processes, and states if the primary/backup partitions are on
| different versions.
@returns: The output that should be shown to the user.
@rtype: str
"""
output = 'Chassis Alarms:\n\t'
# Grab chassis alarms, system alarms, show chassis routing-engine,
# 'show system processes extensive', and also xpath to the
# relevant nodes on each.
chassis_alarms = self._session.command("show chassis alarms")
chassis_alarms = chassis_alarms.xpath('//alarm-detail')
system_alarms = self._session.command("show system alarms")
system_alarms = system_alarms.xpath('//alarm-detail')
chass = self._session.command(command="show chassis routing-engine",
format='text').xpath('//output')[0].text
proc = self._session.command("show system processes extensive")
proc = proc.xpath('output')[0].text.split('\n')
if chassis_alarms == []: # Chassis Alarms
output += 'No chassis alarms active.\n'
else:
for i in chassis_alarms:
output += (i.xpath('alarm-class')[0].text.strip() + ' Alarm \t'
'\t' + i.xpath('alarm-time')[0].text.strip() +
'\n\t' +
i.xpath('alarm-description')[0].text.strip() + '\n')
output += '\nSystem Alarms: \n\t'
if system_alarms == []: # System Alarms
output += 'No system alarms active.\n'
else:
for i in system_alarms:
output += (i.xpath('alarm-class')[0].text.strip() + ' Alarm '
'\t\t' + i.xpath('alarm-time')[0].text.strip() +
'\n\t' +
i.xpath('alarm-description')[0].text.strip() + '\n')
# add the output of the show chassis routing-engine to the command.
output += '\n' + chass
# Grabs the top 5 processes and the header line.
output += ('\n\nTop 5 busiest processes (high mgd values likely from '
'script execution):\n')
for line_number in range(8, 14):
output += proc[line_number] + '\n'
return output | [
"def",
"health_check",
"(",
"self",
")",
":",
"output",
"=",
"'Chassis Alarms:\\n\\t'",
"# Grab chassis alarms, system alarms, show chassis routing-engine,",
"# 'show system processes extensive', and also xpath to the",
"# relevant nodes on each.",
"chassis_alarms",
"=",
"self",
".",
"_session",
".",
"command",
"(",
"\"show chassis alarms\"",
")",
"chassis_alarms",
"=",
"chassis_alarms",
".",
"xpath",
"(",
"'//alarm-detail'",
")",
"system_alarms",
"=",
"self",
".",
"_session",
".",
"command",
"(",
"\"show system alarms\"",
")",
"system_alarms",
"=",
"system_alarms",
".",
"xpath",
"(",
"'//alarm-detail'",
")",
"chass",
"=",
"self",
".",
"_session",
".",
"command",
"(",
"command",
"=",
"\"show chassis routing-engine\"",
",",
"format",
"=",
"'text'",
")",
".",
"xpath",
"(",
"'//output'",
")",
"[",
"0",
"]",
".",
"text",
"proc",
"=",
"self",
".",
"_session",
".",
"command",
"(",
"\"show system processes extensive\"",
")",
"proc",
"=",
"proc",
".",
"xpath",
"(",
"'output'",
")",
"[",
"0",
"]",
".",
"text",
".",
"split",
"(",
"'\\n'",
")",
"if",
"chassis_alarms",
"==",
"[",
"]",
":",
"# Chassis Alarms",
"output",
"+=",
"'No chassis alarms active.\\n'",
"else",
":",
"for",
"i",
"in",
"chassis_alarms",
":",
"output",
"+=",
"(",
"i",
".",
"xpath",
"(",
"'alarm-class'",
")",
"[",
"0",
"]",
".",
"text",
".",
"strip",
"(",
")",
"+",
"' Alarm \\t'",
"'\\t'",
"+",
"i",
".",
"xpath",
"(",
"'alarm-time'",
")",
"[",
"0",
"]",
".",
"text",
".",
"strip",
"(",
")",
"+",
"'\\n\\t'",
"+",
"i",
".",
"xpath",
"(",
"'alarm-description'",
")",
"[",
"0",
"]",
".",
"text",
".",
"strip",
"(",
")",
"+",
"'\\n'",
")",
"output",
"+=",
"'\\nSystem Alarms: \\n\\t'",
"if",
"system_alarms",
"==",
"[",
"]",
":",
"# System Alarms",
"output",
"+=",
"'No system alarms active.\\n'",
"else",
":",
"for",
"i",
"in",
"system_alarms",
":",
"output",
"+=",
"(",
"i",
".",
"xpath",
"(",
"'alarm-class'",
")",
"[",
"0",
"]",
".",
"text",
".",
"strip",
"(",
")",
"+",
"' Alarm '",
"'\\t\\t'",
"+",
"i",
".",
"xpath",
"(",
"'alarm-time'",
")",
"[",
"0",
"]",
".",
"text",
".",
"strip",
"(",
")",
"+",
"'\\n\\t'",
"+",
"i",
".",
"xpath",
"(",
"'alarm-description'",
")",
"[",
"0",
"]",
".",
"text",
".",
"strip",
"(",
")",
"+",
"'\\n'",
")",
"# add the output of the show chassis routing-engine to the command.",
"output",
"+=",
"'\\n'",
"+",
"chass",
"# Grabs the top 5 processes and the header line.",
"output",
"+=",
"(",
"'\\n\\nTop 5 busiest processes (high mgd values likely from '",
"'script execution):\\n'",
")",
"for",
"line_number",
"in",
"range",
"(",
"8",
",",
"14",
")",
":",
"output",
"+=",
"proc",
"[",
"line_number",
"]",
"+",
"'\\n'",
"return",
"output"
] | 50.382979 | 20.446809 |
def pseudo_peripheral_node(A):
"""Find a pseudo peripheral node.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
x : int
Locaiton of the node
order : array
BFS ordering
level : array
BFS levels
Notes
-----
Algorithm in Saad
"""
from pyamg.graph import breadth_first_search
n = A.shape[0]
valence = np.diff(A.indptr)
# select an initial node x, set delta = 0
x = int(np.random.rand() * n)
delta = 0
while True:
# do a level-set traversal from x
order, level = breadth_first_search(A, x)
# select a node y in the last level with min degree
maxlevel = level.max()
lastnodes = np.where(level == maxlevel)[0]
lastnodesvalence = valence[lastnodes]
minlastnodesvalence = lastnodesvalence.min()
y = np.where(lastnodesvalence == minlastnodesvalence)[0][0]
y = lastnodes[y]
# if d(x,y)>delta, set, and go to bfs above
if level[y] > delta:
x = y
delta = level[y]
else:
return x, order, level | [
"def",
"pseudo_peripheral_node",
"(",
"A",
")",
":",
"from",
"pyamg",
".",
"graph",
"import",
"breadth_first_search",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"valence",
"=",
"np",
".",
"diff",
"(",
"A",
".",
"indptr",
")",
"# select an initial node x, set delta = 0",
"x",
"=",
"int",
"(",
"np",
".",
"random",
".",
"rand",
"(",
")",
"*",
"n",
")",
"delta",
"=",
"0",
"while",
"True",
":",
"# do a level-set traversal from x",
"order",
",",
"level",
"=",
"breadth_first_search",
"(",
"A",
",",
"x",
")",
"# select a node y in the last level with min degree",
"maxlevel",
"=",
"level",
".",
"max",
"(",
")",
"lastnodes",
"=",
"np",
".",
"where",
"(",
"level",
"==",
"maxlevel",
")",
"[",
"0",
"]",
"lastnodesvalence",
"=",
"valence",
"[",
"lastnodes",
"]",
"minlastnodesvalence",
"=",
"lastnodesvalence",
".",
"min",
"(",
")",
"y",
"=",
"np",
".",
"where",
"(",
"lastnodesvalence",
"==",
"minlastnodesvalence",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"y",
"=",
"lastnodes",
"[",
"y",
"]",
"# if d(x,y)>delta, set, and go to bfs above",
"if",
"level",
"[",
"y",
"]",
">",
"delta",
":",
"x",
"=",
"y",
"delta",
"=",
"level",
"[",
"y",
"]",
"else",
":",
"return",
"x",
",",
"order",
",",
"level"
] | 22.591837 | 21.040816 |
def build_additional_match(self, ident, node_set):
"""
handle additional matches supplied by 'has()' calls
"""
source_ident = ident
for key, value in node_set.must_match.items():
if isinstance(value, dict):
label = ':' + value['node_class'].__label__
stmt = _rel_helper(lhs=source_ident, rhs=label, ident='', **value)
self._ast['where'].append(stmt)
else:
raise ValueError("Expecting dict got: " + repr(value))
for key, val in node_set.dont_match.items():
if isinstance(val, dict):
label = ':' + val['node_class'].__label__
stmt = _rel_helper(lhs=source_ident, rhs=label, ident='', **val)
self._ast['where'].append('NOT ' + stmt)
else:
raise ValueError("Expecting dict got: " + repr(val)) | [
"def",
"build_additional_match",
"(",
"self",
",",
"ident",
",",
"node_set",
")",
":",
"source_ident",
"=",
"ident",
"for",
"key",
",",
"value",
"in",
"node_set",
".",
"must_match",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"label",
"=",
"':'",
"+",
"value",
"[",
"'node_class'",
"]",
".",
"__label__",
"stmt",
"=",
"_rel_helper",
"(",
"lhs",
"=",
"source_ident",
",",
"rhs",
"=",
"label",
",",
"ident",
"=",
"''",
",",
"*",
"*",
"value",
")",
"self",
".",
"_ast",
"[",
"'where'",
"]",
".",
"append",
"(",
"stmt",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Expecting dict got: \"",
"+",
"repr",
"(",
"value",
")",
")",
"for",
"key",
",",
"val",
"in",
"node_set",
".",
"dont_match",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"label",
"=",
"':'",
"+",
"val",
"[",
"'node_class'",
"]",
".",
"__label__",
"stmt",
"=",
"_rel_helper",
"(",
"lhs",
"=",
"source_ident",
",",
"rhs",
"=",
"label",
",",
"ident",
"=",
"''",
",",
"*",
"*",
"val",
")",
"self",
".",
"_ast",
"[",
"'where'",
"]",
".",
"append",
"(",
"'NOT '",
"+",
"stmt",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Expecting dict got: \"",
"+",
"repr",
"(",
"val",
")",
")"
] | 42.761905 | 19.047619 |
def bigquery_type(o, timestamp_parser=default_timestamp_parser):
"""Given a value, return the matching BigQuery type of that value. Must be
one of str/unicode/int/float/datetime/record, where record is a dict
containing value which have matching BigQuery types.
Parameters
----------
o : object
A Python object
time_stamp_parser : function, optional
Unary function taking a ``str`` and returning and ``bool`` that is
True if the string represents a date
Returns
-------
Union[str, None]
Name of the corresponding BigQuery type for `o`, or None if no type
could be found
Examples
--------
>>> bigquery_type("abc")
"string"
>>> bigquery_type(123)
"integer"
"""
t = type(o)
if t in six.integer_types:
return "integer"
elif (t == six.binary_type and six.PY2) or t == six.text_type:
if timestamp_parser and timestamp_parser(o):
return "timestamp"
else:
return "string"
elif t == float:
return "float"
elif t == bool:
return "boolean"
elif t == dict:
return "record"
elif t == datetime:
return "timestamp"
else:
return None | [
"def",
"bigquery_type",
"(",
"o",
",",
"timestamp_parser",
"=",
"default_timestamp_parser",
")",
":",
"t",
"=",
"type",
"(",
"o",
")",
"if",
"t",
"in",
"six",
".",
"integer_types",
":",
"return",
"\"integer\"",
"elif",
"(",
"t",
"==",
"six",
".",
"binary_type",
"and",
"six",
".",
"PY2",
")",
"or",
"t",
"==",
"six",
".",
"text_type",
":",
"if",
"timestamp_parser",
"and",
"timestamp_parser",
"(",
"o",
")",
":",
"return",
"\"timestamp\"",
"else",
":",
"return",
"\"string\"",
"elif",
"t",
"==",
"float",
":",
"return",
"\"float\"",
"elif",
"t",
"==",
"bool",
":",
"return",
"\"boolean\"",
"elif",
"t",
"==",
"dict",
":",
"return",
"\"record\"",
"elif",
"t",
"==",
"datetime",
":",
"return",
"\"timestamp\"",
"else",
":",
"return",
"None"
] | 26.777778 | 21.555556 |
def _replace_keyword(self, keyword, replacement, count=0):
"""
replace_keyword(keyword, replacement[, count])
Walk through the element and its children
and look for Str() objects that contains
exactly the keyword. Then, replace it.
Usually applied to an entire document (a :class:`.Doc` element)
Note: If the replacement is a block, it cannot be put in place of
a Str element. As a solution, the closest ancestor (e.g. the parent)
will be replaced instead, but only if possible
(if the parent only has one child).
Example:
>>> from panflute import *
>>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
>>> p2 = Para(Str('eggs'))
>>> p3 = Plain(Emph(Str('eggs')))
>>> doc = Doc(p1, p2, p3)
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
>>> doc.replace_keyword('eggs', Str('ham'))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
>>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
:param keyword: string that will be searched (cannot have spaces!)
:type keyword: :class:`str`
:param replacement: element that will be placed in turn of the ``Str``
element that contains the keyword.
:type replacement: :class:`.Element`
:param count: number of occurrences that will be replaced.
If count is not given or is set to zero, all occurrences
will be replaced.
:type count: :class:`int`
"""
def replace_with_inline(e, doc):
if type(e) == Str and e.text == keyword:
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
def replace_with_block(e, doc):
if hasattr(e, 'content') and len(e.content) == 1:
ee = e.content[0]
if type(ee) == Str and ee.text == keyword:
if isinstance(e, Block):
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
elif isinstance(e, Inline):
return Str(keyword)
doc = self.doc
if doc is None:
raise Exception('No root document')
doc.num_matches = 0
if isinstance(replacement, Inline):
return self.walk(replace_with_inline, doc)
elif isinstance(replacement, Block):
return self.walk(replace_with_block, doc)
else:
raise NotImplementedError(type(replacement)) | [
"def",
"_replace_keyword",
"(",
"self",
",",
"keyword",
",",
"replacement",
",",
"count",
"=",
"0",
")",
":",
"def",
"replace_with_inline",
"(",
"e",
",",
"doc",
")",
":",
"if",
"type",
"(",
"e",
")",
"==",
"Str",
"and",
"e",
".",
"text",
"==",
"keyword",
":",
"doc",
".",
"num_matches",
"+=",
"1",
"if",
"not",
"count",
"or",
"doc",
".",
"num_matches",
"<=",
"count",
":",
"return",
"replacement",
"def",
"replace_with_block",
"(",
"e",
",",
"doc",
")",
":",
"if",
"hasattr",
"(",
"e",
",",
"'content'",
")",
"and",
"len",
"(",
"e",
".",
"content",
")",
"==",
"1",
":",
"ee",
"=",
"e",
".",
"content",
"[",
"0",
"]",
"if",
"type",
"(",
"ee",
")",
"==",
"Str",
"and",
"ee",
".",
"text",
"==",
"keyword",
":",
"if",
"isinstance",
"(",
"e",
",",
"Block",
")",
":",
"doc",
".",
"num_matches",
"+=",
"1",
"if",
"not",
"count",
"or",
"doc",
".",
"num_matches",
"<=",
"count",
":",
"return",
"replacement",
"elif",
"isinstance",
"(",
"e",
",",
"Inline",
")",
":",
"return",
"Str",
"(",
"keyword",
")",
"doc",
"=",
"self",
".",
"doc",
"if",
"doc",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'No root document'",
")",
"doc",
".",
"num_matches",
"=",
"0",
"if",
"isinstance",
"(",
"replacement",
",",
"Inline",
")",
":",
"return",
"self",
".",
"walk",
"(",
"replace_with_inline",
",",
"doc",
")",
"elif",
"isinstance",
"(",
"replacement",
",",
"Block",
")",
":",
"return",
"self",
".",
"walk",
"(",
"replace_with_block",
",",
"doc",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"type",
"(",
"replacement",
")",
")"
] | 38.724638 | 18.57971 |
def predict_local(self, X, batch_size = -1):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result.
"""
jresults = callBigDlFunc(self.bigdl_type,
"predictLocal",
self.value,
self._to_jtensors(X),
batch_size)
return np.stack([j.to_ndarray()for j in jresults]) | [
"def",
"predict_local",
"(",
"self",
",",
"X",
",",
"batch_size",
"=",
"-",
"1",
")",
":",
"jresults",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"predictLocal\"",
",",
"self",
".",
"value",
",",
"self",
".",
"_to_jtensors",
"(",
"X",
")",
",",
"batch_size",
")",
"return",
"np",
".",
"stack",
"(",
"[",
"j",
".",
"to_ndarray",
"(",
")",
"for",
"j",
"in",
"jresults",
"]",
")"
] | 40.733333 | 15.266667 |
def reference_pix_from_wcs(frames, pixref, origin=1):
"""Compute reference pixels between frames using WCS information.
The sky world coordinates are computed on *pixref* using
the WCS of the first frame in the sequence. Then, the
pixel coordinates of the reference sky world-coordinates
are computed for the rest of the frames.
The results is a list with the position of the reference pixel
in each image
"""
result = []
with frames[0].open() as hdulist:
wcsh = wcs.WCS(hdulist[0].header)
skyref = wcsh.wcs_pix2world([pixref], origin)
result.append(pixref)
for idx, frame in enumerate(frames[1:]):
with frame.open() as hdulist:
wcsh = wcs.WCS(hdulist[0].header)
pixval = wcsh.wcs_world2pix(skyref, origin)
result.append(tuple(pixval[0]))
return result | [
"def",
"reference_pix_from_wcs",
"(",
"frames",
",",
"pixref",
",",
"origin",
"=",
"1",
")",
":",
"result",
"=",
"[",
"]",
"with",
"frames",
"[",
"0",
"]",
".",
"open",
"(",
")",
"as",
"hdulist",
":",
"wcsh",
"=",
"wcs",
".",
"WCS",
"(",
"hdulist",
"[",
"0",
"]",
".",
"header",
")",
"skyref",
"=",
"wcsh",
".",
"wcs_pix2world",
"(",
"[",
"pixref",
"]",
",",
"origin",
")",
"result",
".",
"append",
"(",
"pixref",
")",
"for",
"idx",
",",
"frame",
"in",
"enumerate",
"(",
"frames",
"[",
"1",
":",
"]",
")",
":",
"with",
"frame",
".",
"open",
"(",
")",
"as",
"hdulist",
":",
"wcsh",
"=",
"wcs",
".",
"WCS",
"(",
"hdulist",
"[",
"0",
"]",
".",
"header",
")",
"pixval",
"=",
"wcsh",
".",
"wcs_world2pix",
"(",
"skyref",
",",
"origin",
")",
"result",
".",
"append",
"(",
"tuple",
"(",
"pixval",
"[",
"0",
"]",
")",
")",
"return",
"result"
] | 31.444444 | 18.851852 |
def get_all(self, api_method, collection_name, **kwargs):
"""
Return all objects in an api_method, handle pagination, and pass
kwargs on to the method being called.
For example, "users.list" returns an object like:
{
"members": [{<member_obj>}, {<member_obj_2>}],
"response_metadata": {
"next_cursor": "cursor_id"
}
}
so if you call `get_all("users.list", "members")`, this function
will return all member objects to you while handling pagination
"""
objs = []
limit = 250
# if you don't provide a limit, the slack API won't return a cursor to you
page = json.loads(self.api_call(api_method, limit=limit, **kwargs))
while 1:
try:
for obj in page[collection_name]:
objs.append(obj)
except KeyError:
LOG.error("Unable to find key %s in page object: \n"
"%s", collection_name, page)
return objs
cursor = dig(page, "response_metadata", "next_cursor")
if cursor:
# In general we allow applications that integrate with Slack to send
# no more than one message per second
# https://api.slack.com/docs/rate-limits
time.sleep(1)
page = json.loads(
self.api_call(
api_method, cursor=cursor, limit=limit, **kwargs))
else:
break
return objs | [
"def",
"get_all",
"(",
"self",
",",
"api_method",
",",
"collection_name",
",",
"*",
"*",
"kwargs",
")",
":",
"objs",
"=",
"[",
"]",
"limit",
"=",
"250",
"# if you don't provide a limit, the slack API won't return a cursor to you",
"page",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"api_call",
"(",
"api_method",
",",
"limit",
"=",
"limit",
",",
"*",
"*",
"kwargs",
")",
")",
"while",
"1",
":",
"try",
":",
"for",
"obj",
"in",
"page",
"[",
"collection_name",
"]",
":",
"objs",
".",
"append",
"(",
"obj",
")",
"except",
"KeyError",
":",
"LOG",
".",
"error",
"(",
"\"Unable to find key %s in page object: \\n\"",
"\"%s\"",
",",
"collection_name",
",",
"page",
")",
"return",
"objs",
"cursor",
"=",
"dig",
"(",
"page",
",",
"\"response_metadata\"",
",",
"\"next_cursor\"",
")",
"if",
"cursor",
":",
"# In general we allow applications that integrate with Slack to send",
"# no more than one message per second",
"# https://api.slack.com/docs/rate-limits",
"time",
".",
"sleep",
"(",
"1",
")",
"page",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"api_call",
"(",
"api_method",
",",
"cursor",
"=",
"cursor",
",",
"limit",
"=",
"limit",
",",
"*",
"*",
"kwargs",
")",
")",
"else",
":",
"break",
"return",
"objs"
] | 35.386364 | 22.159091 |
def items(self):
"""
:return: a list of name/value attribute pairs sorted by attribute name.
"""
sorted_keys = sorted(self.keys())
return [(k, self[k]) for k in sorted_keys] | [
"def",
"items",
"(",
"self",
")",
":",
"sorted_keys",
"=",
"sorted",
"(",
"self",
".",
"keys",
"(",
")",
")",
"return",
"[",
"(",
"k",
",",
"self",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted_keys",
"]"
] | 34.666667 | 12.333333 |
def send_request(self, request):
'''Send a Request. Return a (message, event) pair.
The message is an unframed message to send over the network.
Wait on the event for the response; which will be in the
"result" attribute.
Raises: ProtocolError if the request violates the protocol
in some way..
'''
request_id = next(self._id_counter)
message = self._protocol.request_message(request, request_id)
return message, self._event(request, request_id) | [
"def",
"send_request",
"(",
"self",
",",
"request",
")",
":",
"request_id",
"=",
"next",
"(",
"self",
".",
"_id_counter",
")",
"message",
"=",
"self",
".",
"_protocol",
".",
"request_message",
"(",
"request",
",",
"request_id",
")",
"return",
"message",
",",
"self",
".",
"_event",
"(",
"request",
",",
"request_id",
")"
] | 39.692308 | 22.615385 |
def execute_ssh(cls, command, *args, **kwargs):
"""execute_ssh(command, arguments..., pty=False, echo=False)
Execute `command` on a remote server. It first calls
:meth:`Flow.connect_ssh` using all positional and keyword
arguments, then calls :meth:`SSHClient.execute` with the command
and pty / echo options.
Args:
command(str): The command to execute on the remote server.
arguments...: The options for the SSH connection.
pty(bool): Request a pseudo-terminal from the server.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the SSH channel.
"""
pty = kwargs.pop('pty', False)
echo = kwargs.pop('echo', False)
client = cls.connect_ssh(*args, **kwargs)
f = client.execute(command, pty=pty, echo=echo)
f.client = client
return f | [
"def",
"execute_ssh",
"(",
"cls",
",",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"pty",
"=",
"kwargs",
".",
"pop",
"(",
"'pty'",
",",
"False",
")",
"echo",
"=",
"kwargs",
".",
"pop",
"(",
"'echo'",
",",
"False",
")",
"client",
"=",
"cls",
".",
"connect_ssh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"f",
"=",
"client",
".",
"execute",
"(",
"command",
",",
"pty",
"=",
"pty",
",",
"echo",
"=",
"echo",
")",
"f",
".",
"client",
"=",
"client",
"return",
"f"
] | 39.875 | 21.666667 |
def one_of(inners, arg):
"""At least one of the inner validators must pass"""
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'Arg passes neither of the following rules: {}'.format(rules_formatted)
) | [
"def",
"one_of",
"(",
"inners",
",",
"arg",
")",
":",
"for",
"inner",
"in",
"inners",
":",
"with",
"suppress",
"(",
"com",
".",
"IbisTypeError",
",",
"ValueError",
")",
":",
"return",
"inner",
"(",
"arg",
")",
"rules_formatted",
"=",
"', '",
".",
"join",
"(",
"map",
"(",
"repr",
",",
"inners",
")",
")",
"raise",
"com",
".",
"IbisTypeError",
"(",
"'Arg passes neither of the following rules: {}'",
".",
"format",
"(",
"rules_formatted",
")",
")"
] | 34.8 | 19.2 |
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None):
"""
Convert conllu files into JSON format for use with train cli.
use_morphology parameter enables appending morphology to tags, which is
useful for languages such as Spanish, where UD tags are not so rich.
Extract NER tags if available and convert them so that they follow
BILUO and the Wikipedia scheme
"""
# by @dvsrepo, via #11 explosion/spacy-dev-resources
# by @katarkor
docs = []
sentences = []
conll_tuples = read_conllx(input_data, use_morphology=use_morphology)
checked_for_ner = False
has_ner_tags = False
for i, (raw_text, tokens) in enumerate(conll_tuples):
sentence, brackets = tokens[0]
if not checked_for_ner:
has_ner_tags = is_ner(sentence[5][0])
checked_for_ner = True
sentences.append(generate_sentence(sentence, has_ner_tags))
# Real-sized documents could be extracted using the comments on the
# conluu document
if len(sentences) % n_sents == 0:
doc = create_doc(sentences, i)
docs.append(doc)
sentences = []
return docs | [
"def",
"conllu2json",
"(",
"input_data",
",",
"n_sents",
"=",
"10",
",",
"use_morphology",
"=",
"False",
",",
"lang",
"=",
"None",
")",
":",
"# by @dvsrepo, via #11 explosion/spacy-dev-resources",
"# by @katarkor",
"docs",
"=",
"[",
"]",
"sentences",
"=",
"[",
"]",
"conll_tuples",
"=",
"read_conllx",
"(",
"input_data",
",",
"use_morphology",
"=",
"use_morphology",
")",
"checked_for_ner",
"=",
"False",
"has_ner_tags",
"=",
"False",
"for",
"i",
",",
"(",
"raw_text",
",",
"tokens",
")",
"in",
"enumerate",
"(",
"conll_tuples",
")",
":",
"sentence",
",",
"brackets",
"=",
"tokens",
"[",
"0",
"]",
"if",
"not",
"checked_for_ner",
":",
"has_ner_tags",
"=",
"is_ner",
"(",
"sentence",
"[",
"5",
"]",
"[",
"0",
"]",
")",
"checked_for_ner",
"=",
"True",
"sentences",
".",
"append",
"(",
"generate_sentence",
"(",
"sentence",
",",
"has_ner_tags",
")",
")",
"# Real-sized documents could be extracted using the comments on the",
"# conluu document",
"if",
"len",
"(",
"sentences",
")",
"%",
"n_sents",
"==",
"0",
":",
"doc",
"=",
"create_doc",
"(",
"sentences",
",",
"i",
")",
"docs",
".",
"append",
"(",
"doc",
")",
"sentences",
"=",
"[",
"]",
"return",
"docs"
] | 40 | 18.068966 |
def update_offer(self, offer_id, offer_dict):
"""
Updates an offer
:param offer_id: the offer id
:param offer_dict: dict
:return: dict
"""
return self._create_put_request(resource=OFFERS, billomat_id=offer_id, send_data=offer_dict) | [
"def",
"update_offer",
"(",
"self",
",",
"offer_id",
",",
"offer_dict",
")",
":",
"return",
"self",
".",
"_create_put_request",
"(",
"resource",
"=",
"OFFERS",
",",
"billomat_id",
"=",
"offer_id",
",",
"send_data",
"=",
"offer_dict",
")"
] | 31.111111 | 16.888889 |
def load(self):
""" Return the model from the store """
if self.rid and not self.is_loaded:
store = goldman.sess.store
self._is_loaded = True
self.model = store.find(self.rtype, self.field, self.rid)
return self.model | [
"def",
"load",
"(",
"self",
")",
":",
"if",
"self",
".",
"rid",
"and",
"not",
"self",
".",
"is_loaded",
":",
"store",
"=",
"goldman",
".",
"sess",
".",
"store",
"self",
".",
"_is_loaded",
"=",
"True",
"self",
".",
"model",
"=",
"store",
".",
"find",
"(",
"self",
".",
"rtype",
",",
"self",
".",
"field",
",",
"self",
".",
"rid",
")",
"return",
"self",
".",
"model"
] | 27.1 | 20 |
def remove_child(self, rhs):
"""Remove a given child element, specified by name or as element."""
if type(rhs) is XMLElement:
lib.lsl_remove_child(self.e, rhs.e)
else:
lib.lsl_remove_child_n(self.e, rhs) | [
"def",
"remove_child",
"(",
"self",
",",
"rhs",
")",
":",
"if",
"type",
"(",
"rhs",
")",
"is",
"XMLElement",
":",
"lib",
".",
"lsl_remove_child",
"(",
"self",
".",
"e",
",",
"rhs",
".",
"e",
")",
"else",
":",
"lib",
".",
"lsl_remove_child_n",
"(",
"self",
".",
"e",
",",
"rhs",
")"
] | 41 | 9.666667 |
def _random_mutation_operator(self, individual, allow_shrink=True):
"""Perform a replacement, insertion, or shrink mutation on an individual.
Parameters
----------
individual: DEAP individual
A list of pipeline operators and model parameters that can be
compiled by DEAP into a callable function
allow_shrink: bool (True)
If True the `mutShrink` operator, which randomly shrinks the pipeline,
is allowed to be chosen as one of the random mutation operators.
If False, `mutShrink` will never be chosen as a mutation operator.
Returns
-------
mut_ind: DEAP individual
Returns the individual with one of the mutations applied to it
"""
if self.tree_structure:
mutation_techniques = [
partial(gp.mutInsert, pset=self._pset),
partial(mutNodeReplacement, pset=self._pset)
]
# We can't shrink pipelines with only one primitive, so we only add it if we find more primitives.
number_of_primitives = sum([isinstance(node, deap.gp.Primitive) for node in individual])
if number_of_primitives > 1 and allow_shrink:
mutation_techniques.append(partial(gp.mutShrink))
else:
mutation_techniques = [partial(mutNodeReplacement, pset=self._pset)]
mutator = np.random.choice(mutation_techniques)
unsuccesful_mutations = 0
for _ in range(self._max_mut_loops):
# We have to clone the individual because mutator operators work in-place.
ind = self._toolbox.clone(individual)
offspring, = mutator(ind)
if str(offspring) not in self.evaluated_individuals_:
# Update statistics
# crossover_count is kept the same as for the predecessor
# mutation count is increased by 1
# predecessor is set to the string representation of the individual before mutation
# generation is set to 'INVALID' such that we can recognize that it should be updated accordingly
offspring.statistics['crossover_count'] = individual.statistics['crossover_count']
offspring.statistics['mutation_count'] = individual.statistics['mutation_count'] + 1
offspring.statistics['predecessor'] = (str(individual),)
offspring.statistics['generation'] = 'INVALID'
break
else:
unsuccesful_mutations += 1
# Sometimes you have pipelines for which every shrunk version has already been explored too.
# To still mutate the individual, one of the two other mutators should be applied instead.
if ((unsuccesful_mutations == 50) and
(type(mutator) is partial and mutator.func is gp.mutShrink)):
offspring, = self._random_mutation_operator(individual, allow_shrink=False)
return offspring, | [
"def",
"_random_mutation_operator",
"(",
"self",
",",
"individual",
",",
"allow_shrink",
"=",
"True",
")",
":",
"if",
"self",
".",
"tree_structure",
":",
"mutation_techniques",
"=",
"[",
"partial",
"(",
"gp",
".",
"mutInsert",
",",
"pset",
"=",
"self",
".",
"_pset",
")",
",",
"partial",
"(",
"mutNodeReplacement",
",",
"pset",
"=",
"self",
".",
"_pset",
")",
"]",
"# We can't shrink pipelines with only one primitive, so we only add it if we find more primitives.",
"number_of_primitives",
"=",
"sum",
"(",
"[",
"isinstance",
"(",
"node",
",",
"deap",
".",
"gp",
".",
"Primitive",
")",
"for",
"node",
"in",
"individual",
"]",
")",
"if",
"number_of_primitives",
">",
"1",
"and",
"allow_shrink",
":",
"mutation_techniques",
".",
"append",
"(",
"partial",
"(",
"gp",
".",
"mutShrink",
")",
")",
"else",
":",
"mutation_techniques",
"=",
"[",
"partial",
"(",
"mutNodeReplacement",
",",
"pset",
"=",
"self",
".",
"_pset",
")",
"]",
"mutator",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"mutation_techniques",
")",
"unsuccesful_mutations",
"=",
"0",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"_max_mut_loops",
")",
":",
"# We have to clone the individual because mutator operators work in-place.",
"ind",
"=",
"self",
".",
"_toolbox",
".",
"clone",
"(",
"individual",
")",
"offspring",
",",
"=",
"mutator",
"(",
"ind",
")",
"if",
"str",
"(",
"offspring",
")",
"not",
"in",
"self",
".",
"evaluated_individuals_",
":",
"# Update statistics",
"# crossover_count is kept the same as for the predecessor",
"# mutation count is increased by 1",
"# predecessor is set to the string representation of the individual before mutation",
"# generation is set to 'INVALID' such that we can recognize that it should be updated accordingly",
"offspring",
".",
"statistics",
"[",
"'crossover_count'",
"]",
"=",
"individual",
".",
"statistics",
"[",
"'crossover_count'",
"]",
"offspring",
".",
"statistics",
"[",
"'mutation_count'",
"]",
"=",
"individual",
".",
"statistics",
"[",
"'mutation_count'",
"]",
"+",
"1",
"offspring",
".",
"statistics",
"[",
"'predecessor'",
"]",
"=",
"(",
"str",
"(",
"individual",
")",
",",
")",
"offspring",
".",
"statistics",
"[",
"'generation'",
"]",
"=",
"'INVALID'",
"break",
"else",
":",
"unsuccesful_mutations",
"+=",
"1",
"# Sometimes you have pipelines for which every shrunk version has already been explored too.",
"# To still mutate the individual, one of the two other mutators should be applied instead.",
"if",
"(",
"(",
"unsuccesful_mutations",
"==",
"50",
")",
"and",
"(",
"type",
"(",
"mutator",
")",
"is",
"partial",
"and",
"mutator",
".",
"func",
"is",
"gp",
".",
"mutShrink",
")",
")",
":",
"offspring",
",",
"=",
"self",
".",
"_random_mutation_operator",
"(",
"individual",
",",
"allow_shrink",
"=",
"False",
")",
"return",
"offspring",
","
] | 49.316667 | 28.25 |
def is_cell_empty(self, cell):
"""Checks if the cell is empty."""
if cell is None:
return True
elif self._is_cell_empty:
return self._is_cell_empty(cell)
else:
return cell is None | [
"def",
"is_cell_empty",
"(",
"self",
",",
"cell",
")",
":",
"if",
"cell",
"is",
"None",
":",
"return",
"True",
"elif",
"self",
".",
"_is_cell_empty",
":",
"return",
"self",
".",
"_is_cell_empty",
"(",
"cell",
")",
"else",
":",
"return",
"cell",
"is",
"None"
] | 25.75 | 14 |
def create_filebase_name(self, group_info, extension='gz', file_name=None):
"""
Return tuple of resolved destination folder name and file name
"""
dirname = self.filebase.formatted_dirname(groups=group_info)
if not file_name:
file_name = self.filebase.prefix_template + '.' + extension
return dirname, file_name | [
"def",
"create_filebase_name",
"(",
"self",
",",
"group_info",
",",
"extension",
"=",
"'gz'",
",",
"file_name",
"=",
"None",
")",
":",
"dirname",
"=",
"self",
".",
"filebase",
".",
"formatted_dirname",
"(",
"groups",
"=",
"group_info",
")",
"if",
"not",
"file_name",
":",
"file_name",
"=",
"self",
".",
"filebase",
".",
"prefix_template",
"+",
"'.'",
"+",
"extension",
"return",
"dirname",
",",
"file_name"
] | 45.5 | 18.25 |
def start_update(self, draw=None, queues=None, update_shared=True):
"""
Conduct the registered plot updates
This method starts the updates from what has been registered by the
:meth:`update` method. You can call this method if you did not set the
`auto_update` parameter to True when calling the :meth:`update` method
and when the :attr:`no_auto_update` attribute is True.
Parameters
----------
%(InteractiveBase.start_update.parameters)s
Returns
-------
%(InteractiveBase.start_update.returns)s
See Also
--------
:attr:`no_auto_update`, update"""
def update_the_others():
for fmto in fmtos:
for other_fmto in fmto.shared:
if not other_fmto.plotter._updating:
other_fmto.plotter._register_update(
force=[other_fmto.key])
for fmto in fmtos:
for other_fmto in fmto.shared:
if not other_fmto.plotter._updating:
other_draw = other_fmto.plotter.start_update(
draw=False, update_shared=False)
if other_draw:
self._figs2draw.add(
other_fmto.plotter.ax.get_figure())
if self.disabled:
return False
if queues is not None:
queues[0].get()
self.logger.debug("Starting update of %r",
self._registered_updates.keys())
# update the formatoptions
self._save_state()
try:
# get the formatoptions. We sort them here by key to make sure that
# the order always stays the same (easier for debugging)
fmtos = sorted(self._set_and_filter(), key=lambda fmto: fmto.key)
except Exception:
# restore last (working) state
last_state = self._old_fmt.pop(-1)
with self.no_validation:
for key in self:
self[key] = last_state.get(key, getattr(self, key).default)
if queues is not None:
queues[0].task_done()
self._release_all(queue=None if queues is None else queues[1])
# raise the error
raise
for fmto in fmtos:
for fmto2 in fmto.shared:
fmto2.plotter._to_update[fmto2] = self
if queues is not None:
self._updating = True
queues[0].task_done()
# wait for the other tasks to finish
queues[0].join()
queues[1].get()
fmtos.extend([fmto for fmto in self._insert_additionals(list(
self._to_update)) if fmto not in fmtos])
self._to_update.clear()
fmto_groups = self._grouped_fmtos(self._sorted_by_priority(fmtos[:]))
# if any formatoption requires a clearing of the axes is updated,
# we reinitialize the plot
if self.cleared:
self.reinit(draw=draw)
update_the_others()
self._release_all(queue=None if queues is None else queues[1])
return True
# otherwise we update it
arr_draw = False
try:
for priority, grouper in fmto_groups:
arr_draw = True
self._plot_by_priority(priority, grouper)
update_the_others()
except Exception:
raise
finally:
# make sure that all locks are released
self._release_all(finish=True,
queue=None if queues is None else queues[1])
if draw is None:
draw = rcParams['auto_draw']
if draw and arr_draw:
self.draw()
if rcParams['auto_show']:
self.show()
self.replot = False
return arr_draw | [
"def",
"start_update",
"(",
"self",
",",
"draw",
"=",
"None",
",",
"queues",
"=",
"None",
",",
"update_shared",
"=",
"True",
")",
":",
"def",
"update_the_others",
"(",
")",
":",
"for",
"fmto",
"in",
"fmtos",
":",
"for",
"other_fmto",
"in",
"fmto",
".",
"shared",
":",
"if",
"not",
"other_fmto",
".",
"plotter",
".",
"_updating",
":",
"other_fmto",
".",
"plotter",
".",
"_register_update",
"(",
"force",
"=",
"[",
"other_fmto",
".",
"key",
"]",
")",
"for",
"fmto",
"in",
"fmtos",
":",
"for",
"other_fmto",
"in",
"fmto",
".",
"shared",
":",
"if",
"not",
"other_fmto",
".",
"plotter",
".",
"_updating",
":",
"other_draw",
"=",
"other_fmto",
".",
"plotter",
".",
"start_update",
"(",
"draw",
"=",
"False",
",",
"update_shared",
"=",
"False",
")",
"if",
"other_draw",
":",
"self",
".",
"_figs2draw",
".",
"add",
"(",
"other_fmto",
".",
"plotter",
".",
"ax",
".",
"get_figure",
"(",
")",
")",
"if",
"self",
".",
"disabled",
":",
"return",
"False",
"if",
"queues",
"is",
"not",
"None",
":",
"queues",
"[",
"0",
"]",
".",
"get",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Starting update of %r\"",
",",
"self",
".",
"_registered_updates",
".",
"keys",
"(",
")",
")",
"# update the formatoptions",
"self",
".",
"_save_state",
"(",
")",
"try",
":",
"# get the formatoptions. We sort them here by key to make sure that",
"# the order always stays the same (easier for debugging)",
"fmtos",
"=",
"sorted",
"(",
"self",
".",
"_set_and_filter",
"(",
")",
",",
"key",
"=",
"lambda",
"fmto",
":",
"fmto",
".",
"key",
")",
"except",
"Exception",
":",
"# restore last (working) state",
"last_state",
"=",
"self",
".",
"_old_fmt",
".",
"pop",
"(",
"-",
"1",
")",
"with",
"self",
".",
"no_validation",
":",
"for",
"key",
"in",
"self",
":",
"self",
"[",
"key",
"]",
"=",
"last_state",
".",
"get",
"(",
"key",
",",
"getattr",
"(",
"self",
",",
"key",
")",
".",
"default",
")",
"if",
"queues",
"is",
"not",
"None",
":",
"queues",
"[",
"0",
"]",
".",
"task_done",
"(",
")",
"self",
".",
"_release_all",
"(",
"queue",
"=",
"None",
"if",
"queues",
"is",
"None",
"else",
"queues",
"[",
"1",
"]",
")",
"# raise the error",
"raise",
"for",
"fmto",
"in",
"fmtos",
":",
"for",
"fmto2",
"in",
"fmto",
".",
"shared",
":",
"fmto2",
".",
"plotter",
".",
"_to_update",
"[",
"fmto2",
"]",
"=",
"self",
"if",
"queues",
"is",
"not",
"None",
":",
"self",
".",
"_updating",
"=",
"True",
"queues",
"[",
"0",
"]",
".",
"task_done",
"(",
")",
"# wait for the other tasks to finish",
"queues",
"[",
"0",
"]",
".",
"join",
"(",
")",
"queues",
"[",
"1",
"]",
".",
"get",
"(",
")",
"fmtos",
".",
"extend",
"(",
"[",
"fmto",
"for",
"fmto",
"in",
"self",
".",
"_insert_additionals",
"(",
"list",
"(",
"self",
".",
"_to_update",
")",
")",
"if",
"fmto",
"not",
"in",
"fmtos",
"]",
")",
"self",
".",
"_to_update",
".",
"clear",
"(",
")",
"fmto_groups",
"=",
"self",
".",
"_grouped_fmtos",
"(",
"self",
".",
"_sorted_by_priority",
"(",
"fmtos",
"[",
":",
"]",
")",
")",
"# if any formatoption requires a clearing of the axes is updated,",
"# we reinitialize the plot",
"if",
"self",
".",
"cleared",
":",
"self",
".",
"reinit",
"(",
"draw",
"=",
"draw",
")",
"update_the_others",
"(",
")",
"self",
".",
"_release_all",
"(",
"queue",
"=",
"None",
"if",
"queues",
"is",
"None",
"else",
"queues",
"[",
"1",
"]",
")",
"return",
"True",
"# otherwise we update it",
"arr_draw",
"=",
"False",
"try",
":",
"for",
"priority",
",",
"grouper",
"in",
"fmto_groups",
":",
"arr_draw",
"=",
"True",
"self",
".",
"_plot_by_priority",
"(",
"priority",
",",
"grouper",
")",
"update_the_others",
"(",
")",
"except",
"Exception",
":",
"raise",
"finally",
":",
"# make sure that all locks are released",
"self",
".",
"_release_all",
"(",
"finish",
"=",
"True",
",",
"queue",
"=",
"None",
"if",
"queues",
"is",
"None",
"else",
"queues",
"[",
"1",
"]",
")",
"if",
"draw",
"is",
"None",
":",
"draw",
"=",
"rcParams",
"[",
"'auto_draw'",
"]",
"if",
"draw",
"and",
"arr_draw",
":",
"self",
".",
"draw",
"(",
")",
"if",
"rcParams",
"[",
"'auto_show'",
"]",
":",
"self",
".",
"show",
"(",
")",
"self",
".",
"replot",
"=",
"False",
"return",
"arr_draw"
] | 38.31 | 17.29 |
def generate(self, blueprint, context, interactive=True):
"""Generate a blueprint within this application."""
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError('%s is not a valid blueprint' % blueprint)
blueprint = bp
self.stdout.write(
style.format_command(
'Generating',
blueprint.full_name
)
)
generator = Generator(
self,
blueprint,
context,
interactive=interactive
)
result = generator.generate()
if blueprint.name == 'init':
# try re-setting the name
self.refresh()
return result | [
"def",
"generate",
"(",
"self",
",",
"blueprint",
",",
"context",
",",
"interactive",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"blueprint",
",",
"Blueprint",
")",
":",
"bp",
"=",
"self",
".",
"blueprints",
".",
"get",
"(",
"blueprint",
")",
"if",
"not",
"bp",
":",
"raise",
"ValueError",
"(",
"'%s is not a valid blueprint'",
"%",
"blueprint",
")",
"blueprint",
"=",
"bp",
"self",
".",
"stdout",
".",
"write",
"(",
"style",
".",
"format_command",
"(",
"'Generating'",
",",
"blueprint",
".",
"full_name",
")",
")",
"generator",
"=",
"Generator",
"(",
"self",
",",
"blueprint",
",",
"context",
",",
"interactive",
"=",
"interactive",
")",
"result",
"=",
"generator",
".",
"generate",
"(",
")",
"if",
"blueprint",
".",
"name",
"==",
"'init'",
":",
"# try re-setting the name",
"self",
".",
"refresh",
"(",
")",
"return",
"result"
] | 30.76 | 15.36 |
def create_storage_policy(policy_name, policy_dict, service_instance=None):
'''
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
'''
log.trace('create storage policy \'%s\', dict = %s', policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
# Hardcode the storage profile resource type
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
# Set name argument
policy_dict['name'] = policy_name
log.trace('Setting policy values in policy_update_spec')
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {'create_storage_policy': True} | [
"def",
"create_storage_policy",
"(",
"policy_name",
",",
"policy_dict",
",",
"service_instance",
"=",
"None",
")",
":",
"log",
".",
"trace",
"(",
"'create storage policy \\'%s\\', dict = %s'",
",",
"policy_name",
",",
"policy_dict",
")",
"profile_manager",
"=",
"salt",
".",
"utils",
".",
"pbm",
".",
"get_profile_manager",
"(",
"service_instance",
")",
"policy_create_spec",
"=",
"pbm",
".",
"profile",
".",
"CapabilityBasedProfileCreateSpec",
"(",
")",
"# Hardcode the storage profile resource type",
"policy_create_spec",
".",
"resourceType",
"=",
"pbm",
".",
"profile",
".",
"ResourceType",
"(",
"resourceType",
"=",
"pbm",
".",
"profile",
".",
"ResourceTypeEnum",
".",
"STORAGE",
")",
"# Set name argument",
"policy_dict",
"[",
"'name'",
"]",
"=",
"policy_name",
"log",
".",
"trace",
"(",
"'Setting policy values in policy_update_spec'",
")",
"_apply_policy_config",
"(",
"policy_create_spec",
",",
"policy_dict",
")",
"salt",
".",
"utils",
".",
"pbm",
".",
"create_storage_policy",
"(",
"profile_manager",
",",
"policy_create_spec",
")",
"return",
"{",
"'create_storage_policy'",
":",
"True",
"}"
] | 37.805556 | 23.583333 |
def get_as_dataframe(worksheet,
evaluate_formulas=False,
**options):
"""
Returns the worksheet contents as a DataFrame.
:param worksheet: the worksheet.
:param evaluate_formulas: if True, get the value of a cell after
formula evaluation; otherwise get the formula itself if present.
Defaults to False.
:param \*\*options: all the options for pandas.io.parsers.TextParser,
according to the version of pandas that is installed.
(Note: TextParser supports only the default 'python' parser engine,
not the C engine.)
:returns: pandas.DataFrame
"""
all_values = _get_all_values(worksheet, evaluate_formulas)
return TextParser(all_values, **options).read() | [
"def",
"get_as_dataframe",
"(",
"worksheet",
",",
"evaluate_formulas",
"=",
"False",
",",
"*",
"*",
"options",
")",
":",
"all_values",
"=",
"_get_all_values",
"(",
"worksheet",
",",
"evaluate_formulas",
")",
"return",
"TextParser",
"(",
"all_values",
",",
"*",
"*",
"options",
")",
".",
"read",
"(",
")"
] | 42.888889 | 16.666667 |
def get_tile(self, codepoint: int) -> np.array:
"""Return a copy of a tile for the given codepoint.
If the tile does not exist yet then a blank array will be returned.
The tile will have a shape of (height, width, rgba) and a dtype of
uint8. Note that most grey-scale tiles will only use the alpha
channel and will usually have a solid white color channel.
"""
tile = np.zeros(self.tile_shape + (4,), dtype=np.uint8)
lib.TCOD_tileset_get_tile_(
self._tileset_p,
codepoint,
ffi.cast("struct TCOD_ColorRGBA*", tile.ctypes.data),
)
return tile | [
"def",
"get_tile",
"(",
"self",
",",
"codepoint",
":",
"int",
")",
"->",
"np",
".",
"array",
":",
"tile",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"tile_shape",
"+",
"(",
"4",
",",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"lib",
".",
"TCOD_tileset_get_tile_",
"(",
"self",
".",
"_tileset_p",
",",
"codepoint",
",",
"ffi",
".",
"cast",
"(",
"\"struct TCOD_ColorRGBA*\"",
",",
"tile",
".",
"ctypes",
".",
"data",
")",
",",
")",
"return",
"tile"
] | 40.25 | 21.75 |
def verify_any(df, check, *args, **kwargs):
"""
Verify that any of the entries in ``check(df, *args, **kwargs)``
is true
"""
result = check(df, *args, **kwargs)
try:
assert np.any(result)
except AssertionError as e:
msg = '{} not true for any'.format(check.__name__)
e.args = (msg, df)
raise
return df | [
"def",
"verify_any",
"(",
"df",
",",
"check",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"check",
"(",
"df",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"assert",
"np",
".",
"any",
"(",
"result",
")",
"except",
"AssertionError",
"as",
"e",
":",
"msg",
"=",
"'{} not true for any'",
".",
"format",
"(",
"check",
".",
"__name__",
")",
"e",
".",
"args",
"=",
"(",
"msg",
",",
"df",
")",
"raise",
"return",
"df"
] | 27.153846 | 15.307692 |
def extract_flask_settings(self):
"""
Copies SCOUT_* settings in the app into Scout's config lookup
"""
configs = {}
configs["application_root"] = self.app.instance_path
for name in current_app.config:
if name.startswith("SCOUT_"):
value = current_app.config[name]
clean_name = name.replace("SCOUT_", "").lower()
configs[clean_name] = value
ScoutConfig.set(**configs) | [
"def",
"extract_flask_settings",
"(",
"self",
")",
":",
"configs",
"=",
"{",
"}",
"configs",
"[",
"\"application_root\"",
"]",
"=",
"self",
".",
"app",
".",
"instance_path",
"for",
"name",
"in",
"current_app",
".",
"config",
":",
"if",
"name",
".",
"startswith",
"(",
"\"SCOUT_\"",
")",
":",
"value",
"=",
"current_app",
".",
"config",
"[",
"name",
"]",
"clean_name",
"=",
"name",
".",
"replace",
"(",
"\"SCOUT_\"",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
"configs",
"[",
"clean_name",
"]",
"=",
"value",
"ScoutConfig",
".",
"set",
"(",
"*",
"*",
"configs",
")"
] | 39.333333 | 9.833333 |
def super_kls(self):
"""
Determine what kls this group inherits from
If default kls should be used, then None is returned
"""
if not self.kls and self.parent and self.parent.name:
return self.parent.kls_name
return self.kls | [
"def",
"super_kls",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"kls",
"and",
"self",
".",
"parent",
"and",
"self",
".",
"parent",
".",
"name",
":",
"return",
"self",
".",
"parent",
".",
"kls_name",
"return",
"self",
".",
"kls"
] | 35.5 | 12.25 |
def __init(self):
""" initializes the service """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json = json.dumps(json_dict)
self._json_dict = json_dict
self.loadAttributes(json_dict=json_dict) | [
"def",
"__init",
"(",
"self",
")",
":",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"}",
"json_dict",
"=",
"self",
".",
"_get",
"(",
"self",
".",
"_url",
",",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
")",
"self",
".",
"_json",
"=",
"json",
".",
"dumps",
"(",
"json_dict",
")",
"self",
".",
"_json_dict",
"=",
"json_dict",
"self",
".",
"loadAttributes",
"(",
"json_dict",
"=",
"json_dict",
")"
] | 39.333333 | 15.416667 |
def resolve_resource_id_refs(self, input_dict, supported_resource_id_refs):
"""
Resolve resource references within a GetAtt dict.
Example:
{ "Fn::GetAtt": ["LogicalId", "Arn"] } => {"Fn::GetAtt": ["ResolvedLogicalId", "Arn"]}
Theoretically, only the first element of the array can contain reference to SAM resources. The second element
is name of an attribute (like Arn) of the resource.
However tools like AWS CLI apply the assumption that first element of the array is a LogicalId and cannot
contain a 'dot'. So they break at the first dot to convert YAML tag to JSON map like this:
`!GetAtt LogicalId.Arn` => {"Fn::GetAtt": [ "LogicalId", "Arn" ] }
Therefore to resolve the reference, we join the array into a string, break it back up to check if it contains
a known reference, and resolve it if we can.
:param input_dict: Dictionary to be resolved
:param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones.
:return: Resolved dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
value = input_dict[key]
# Value must be an array with *at least* two elements. If not, this is invalid GetAtt syntax. We just pass along
# the input to CFN for it to do the "official" validation.
if not isinstance(value, list) or len(value) < 2:
return input_dict
value_str = self._resource_ref_separator.join(value)
splits = value_str.split(self._resource_ref_separator)
logical_id = splits[0]
remaining = splits[1:] # if any
resolved_value = supported_resource_id_refs.get(logical_id)
return self._get_resolved_dictionary(input_dict, key, resolved_value, remaining) | [
"def",
"resolve_resource_id_refs",
"(",
"self",
",",
"input_dict",
",",
"supported_resource_id_refs",
")",
":",
"if",
"not",
"self",
".",
"can_handle",
"(",
"input_dict",
")",
":",
"return",
"input_dict",
"key",
"=",
"self",
".",
"intrinsic_name",
"value",
"=",
"input_dict",
"[",
"key",
"]",
"# Value must be an array with *at least* two elements. If not, this is invalid GetAtt syntax. We just pass along",
"# the input to CFN for it to do the \"official\" validation.",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
"or",
"len",
"(",
"value",
")",
"<",
"2",
":",
"return",
"input_dict",
"value_str",
"=",
"self",
".",
"_resource_ref_separator",
".",
"join",
"(",
"value",
")",
"splits",
"=",
"value_str",
".",
"split",
"(",
"self",
".",
"_resource_ref_separator",
")",
"logical_id",
"=",
"splits",
"[",
"0",
"]",
"remaining",
"=",
"splits",
"[",
"1",
":",
"]",
"# if any",
"resolved_value",
"=",
"supported_resource_id_refs",
".",
"get",
"(",
"logical_id",
")",
"return",
"self",
".",
"_get_resolved_dictionary",
"(",
"input_dict",
",",
"key",
",",
"resolved_value",
",",
"remaining",
")"
] | 43.952381 | 31.809524 |
def _map_order_to_ticks(start, end, order, reverse=False):
"""Map elements from given `order` array to bins ranging from `start`
to `end`.
"""
size = len(order)
bounds = np.linspace(start, end, size + 1)
if reverse:
bounds = bounds[::-1]
mapping = list(zip(bounds[:-1]%(np.pi*2), order))
return mapping | [
"def",
"_map_order_to_ticks",
"(",
"start",
",",
"end",
",",
"order",
",",
"reverse",
"=",
"False",
")",
":",
"size",
"=",
"len",
"(",
"order",
")",
"bounds",
"=",
"np",
".",
"linspace",
"(",
"start",
",",
"end",
",",
"size",
"+",
"1",
")",
"if",
"reverse",
":",
"bounds",
"=",
"bounds",
"[",
":",
":",
"-",
"1",
"]",
"mapping",
"=",
"list",
"(",
"zip",
"(",
"bounds",
"[",
":",
"-",
"1",
"]",
"%",
"(",
"np",
".",
"pi",
"*",
"2",
")",
",",
"order",
")",
")",
"return",
"mapping"
] | 36.9 | 12.9 |
def validate_get_dbs(connection):
"""
validates the connection object is capable of read access to rethink
should be at least one test database by default
:param connection: <rethinkdb.net.DefaultConnection>
:return: <set> list of databases
:raises: ReqlDriverError AssertionError
"""
remote_dbs = set(rethinkdb.db_list().run(connection))
assert remote_dbs
return remote_dbs | [
"def",
"validate_get_dbs",
"(",
"connection",
")",
":",
"remote_dbs",
"=",
"set",
"(",
"rethinkdb",
".",
"db_list",
"(",
")",
".",
"run",
"(",
"connection",
")",
")",
"assert",
"remote_dbs",
"return",
"remote_dbs"
] | 31.076923 | 16 |
def authenticate_credentials(self, payload):
"""
Returns an active user that matches the payload's user id and email.
"""
User = get_user_model()
username = jwt_get_username_from_payload(payload)
if not username:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get_by_natural_key(username)
except User.DoesNotExist:
msg = _('Invalid signature.')
raise exceptions.AuthenticationFailed(msg)
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.AuthenticationFailed(msg)
return user | [
"def",
"authenticate_credentials",
"(",
"self",
",",
"payload",
")",
":",
"User",
"=",
"get_user_model",
"(",
")",
"username",
"=",
"jwt_get_username_from_payload",
"(",
"payload",
")",
"if",
"not",
"username",
":",
"msg",
"=",
"_",
"(",
"'Invalid payload.'",
")",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"msg",
")",
"try",
":",
"user",
"=",
"User",
".",
"objects",
".",
"get_by_natural_key",
"(",
"username",
")",
"except",
"User",
".",
"DoesNotExist",
":",
"msg",
"=",
"_",
"(",
"'Invalid signature.'",
")",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"msg",
")",
"if",
"not",
"user",
".",
"is_active",
":",
"msg",
"=",
"_",
"(",
"'User account is disabled.'",
")",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"msg",
")",
"return",
"user"
] | 31.727273 | 17.272727 |
def spawn(self, parameters=None, arguments=None, stderr=None, timeout=None, short_option_prefix="-", long_option_prefix="--"):
"""
Spawn the process defined in `cmd`
parameters is converted to options the short and long option prefixes
if a list is given as the value, the parameter is repeated with each
value
If timeout is set the spawn will block until the process returns or
the timeout expires.
:param parameters: optional parameters
:param arguments: positional arguments
:param stderr: where to redirect stderr to
:param timeout: timeout for short lived process
:param long_option_prefix: option prefix, default -
:param short_option_prefix: long option prefix, default --
:return: spawned process
"""
stderr = stderr or self.stderr
cmd = self.bake(self._check_cmd(), parameters, arguments, short_option_prefix, long_option_prefix)
log.debug("Spawning command: {0}", subprocess.list2cmdline(cmd))
try:
process = subprocess.Popen(cmd, stderr=stderr, stdout=subprocess.PIPE)
except (OSError, IOError) as err:
raise StreamError("Failed to start process: {0} ({1})".format(self._check_cmd(), str(err)))
if timeout:
elapsed = 0
while elapsed < timeout and not process.poll():
time.sleep(0.25)
elapsed += 0.25
# kill after the timeout has expired and the process still hasn't ended
if not process.poll():
try:
log.debug("Process timeout expired ({0}s), killing process".format(timeout))
process.kill()
except Exception:
pass
process.wait()
return process | [
"def",
"spawn",
"(",
"self",
",",
"parameters",
"=",
"None",
",",
"arguments",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"short_option_prefix",
"=",
"\"-\"",
",",
"long_option_prefix",
"=",
"\"--\"",
")",
":",
"stderr",
"=",
"stderr",
"or",
"self",
".",
"stderr",
"cmd",
"=",
"self",
".",
"bake",
"(",
"self",
".",
"_check_cmd",
"(",
")",
",",
"parameters",
",",
"arguments",
",",
"short_option_prefix",
",",
"long_option_prefix",
")",
"log",
".",
"debug",
"(",
"\"Spawning command: {0}\"",
",",
"subprocess",
".",
"list2cmdline",
"(",
"cmd",
")",
")",
"try",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stderr",
"=",
"stderr",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
"as",
"err",
":",
"raise",
"StreamError",
"(",
"\"Failed to start process: {0} ({1})\"",
".",
"format",
"(",
"self",
".",
"_check_cmd",
"(",
")",
",",
"str",
"(",
"err",
")",
")",
")",
"if",
"timeout",
":",
"elapsed",
"=",
"0",
"while",
"elapsed",
"<",
"timeout",
"and",
"not",
"process",
".",
"poll",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"0.25",
")",
"elapsed",
"+=",
"0.25",
"# kill after the timeout has expired and the process still hasn't ended",
"if",
"not",
"process",
".",
"poll",
"(",
")",
":",
"try",
":",
"log",
".",
"debug",
"(",
"\"Process timeout expired ({0}s), killing process\"",
".",
"format",
"(",
"timeout",
")",
")",
"process",
".",
"kill",
"(",
")",
"except",
"Exception",
":",
"pass",
"process",
".",
"wait",
"(",
")",
"return",
"process"
] | 40.066667 | 25.311111 |
def sim(self, src, tar, qval=2):
r"""Return the cosine similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Cosine similarity
Examples
--------
>>> cmp = Cosine()
>>> cmp.sim('cat', 'hat')
0.5
>>> cmp.sim('Niall', 'Neil')
0.3651483716701107
>>> cmp.sim('aluminum', 'Catalan')
0.11785113019775793
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if src == tar:
return 1.0
if not src or not tar:
return 0.0
q_src, q_tar = self._get_qgrams(src, tar, qval)
q_src_mag = sum(q_src.values())
q_tar_mag = sum(q_tar.values())
q_intersection_mag = sum((q_src & q_tar).values())
return q_intersection_mag / sqrt(q_src_mag * q_tar_mag) | [
"def",
"sim",
"(",
"self",
",",
"src",
",",
"tar",
",",
"qval",
"=",
"2",
")",
":",
"if",
"src",
"==",
"tar",
":",
"return",
"1.0",
"if",
"not",
"src",
"or",
"not",
"tar",
":",
"return",
"0.0",
"q_src",
",",
"q_tar",
"=",
"self",
".",
"_get_qgrams",
"(",
"src",
",",
"tar",
",",
"qval",
")",
"q_src_mag",
"=",
"sum",
"(",
"q_src",
".",
"values",
"(",
")",
")",
"q_tar_mag",
"=",
"sum",
"(",
"q_tar",
".",
"values",
"(",
")",
")",
"q_intersection_mag",
"=",
"sum",
"(",
"(",
"q_src",
"&",
"q_tar",
")",
".",
"values",
"(",
")",
")",
"return",
"q_intersection_mag",
"/",
"sqrt",
"(",
"q_src_mag",
"*",
"q_tar_mag",
")"
] | 26.512195 | 19.853659 |
def bind(self, extension: Extension) -> 'DictMentor':
"""
Add any predefined or custom extension.
Args:
extension: Extension to add to the processor.
Returns:
The DictMentor itself for chaining.
"""
if not Extension.is_valid_extension(extension):
raise ValueError("Cannot bind extension due to missing interface requirements")
self._extensions.append(extension)
return self | [
"def",
"bind",
"(",
"self",
",",
"extension",
":",
"Extension",
")",
"->",
"'DictMentor'",
":",
"if",
"not",
"Extension",
".",
"is_valid_extension",
"(",
"extension",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot bind extension due to missing interface requirements\"",
")",
"self",
".",
"_extensions",
".",
"append",
"(",
"extension",
")",
"return",
"self"
] | 30.8 | 20.266667 |
def _add_section_default(self, section, parameters):
''' Add the given section with the given paramters to the config. The
parameters must be a dictionary with all the keys to add. Each key
must be specified as an other dictionary with the following
parameters: default, description, type
@param section: The section to add
@param parameters: The paramters dictionary
'''
section = section.lower()
if not self.has_section(section):
self.add_section(section)
if not section in self.config_description:
self.config_description[section] = {}
for key, value in parameters.items():
key = key.lower()
if not ('default' in value and 'type' in value and
'description' in value):
raise AppConfigValueException('For the given key not all '
'required values are defined.')
if not self.has_option(section, key):
self.set(section, key, value['default'])
vtype = _get_type(value['type'])
self.config_description[section][key] = (value['description'],
vtype, value['default']) | [
"def",
"_add_section_default",
"(",
"self",
",",
"section",
",",
"parameters",
")",
":",
"section",
"=",
"section",
".",
"lower",
"(",
")",
"if",
"not",
"self",
".",
"has_section",
"(",
"section",
")",
":",
"self",
".",
"add_section",
"(",
"section",
")",
"if",
"not",
"section",
"in",
"self",
".",
"config_description",
":",
"self",
".",
"config_description",
"[",
"section",
"]",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"parameters",
".",
"items",
"(",
")",
":",
"key",
"=",
"key",
".",
"lower",
"(",
")",
"if",
"not",
"(",
"'default'",
"in",
"value",
"and",
"'type'",
"in",
"value",
"and",
"'description'",
"in",
"value",
")",
":",
"raise",
"AppConfigValueException",
"(",
"'For the given key not all '",
"'required values are defined.'",
")",
"if",
"not",
"self",
".",
"has_option",
"(",
"section",
",",
"key",
")",
":",
"self",
".",
"set",
"(",
"section",
",",
"key",
",",
"value",
"[",
"'default'",
"]",
")",
"vtype",
"=",
"_get_type",
"(",
"value",
"[",
"'type'",
"]",
")",
"self",
".",
"config_description",
"[",
"section",
"]",
"[",
"key",
"]",
"=",
"(",
"value",
"[",
"'description'",
"]",
",",
"vtype",
",",
"value",
"[",
"'default'",
"]",
")"
] | 45.407407 | 18 |
def accessible_organisms(user, orgs):
"""Get the list of organisms accessible to a user, filtered by `orgs`"""
permission_map = {
x['organism']: x['permissions']
for x in user.organismPermissions
if 'WRITE' in x['permissions'] or
'READ' in x['permissions'] or
'ADMINISTRATE' in x['permissions'] or
user.role == 'ADMIN'
}
if 'error' in orgs:
raise Exception("Error received from Apollo server: \"%s\"" % orgs['error'])
return [
(org['commonName'], org['id'], False)
for org in sorted(orgs, key=lambda x: x['commonName'])
if org['commonName'] in permission_map
] | [
"def",
"accessible_organisms",
"(",
"user",
",",
"orgs",
")",
":",
"permission_map",
"=",
"{",
"x",
"[",
"'organism'",
"]",
":",
"x",
"[",
"'permissions'",
"]",
"for",
"x",
"in",
"user",
".",
"organismPermissions",
"if",
"'WRITE'",
"in",
"x",
"[",
"'permissions'",
"]",
"or",
"'READ'",
"in",
"x",
"[",
"'permissions'",
"]",
"or",
"'ADMINISTRATE'",
"in",
"x",
"[",
"'permissions'",
"]",
"or",
"user",
".",
"role",
"==",
"'ADMIN'",
"}",
"if",
"'error'",
"in",
"orgs",
":",
"raise",
"Exception",
"(",
"\"Error received from Apollo server: \\\"%s\\\"\"",
"%",
"orgs",
"[",
"'error'",
"]",
")",
"return",
"[",
"(",
"org",
"[",
"'commonName'",
"]",
",",
"org",
"[",
"'id'",
"]",
",",
"False",
")",
"for",
"org",
"in",
"sorted",
"(",
"orgs",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'commonName'",
"]",
")",
"if",
"org",
"[",
"'commonName'",
"]",
"in",
"permission_map",
"]"
] | 34.105263 | 16.631579 |
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Return the trace (last by default).
Input:
- burn (int): The number of transient steps to skip.
- thin (int): Keep one in thin.
- chain (int): The index of the chain to fetch. If None, return all
chains. By default, the last chain is returned.
- slicing: A slice, overriding burn and thin assignement.
"""
# warnings.warn('Use Sampler.trace method instead.',
# DeprecationWarning)
if not slicing:
slicing = slice(burn, None, thin)
# If chain is None, get the data from all chains.
if chain is None:
self.db.cur.execute('SELECT * FROM [%s]' % self.name)
trace = self.db.cur.fetchall()
else:
# Deal with negative chains (starting from the end)
if chain < 0:
chain = range(self.db.chains)[chain]
self.db.cur.execute(
'SELECT * FROM [%s] WHERE trace=%s' %
(self.name, chain))
trace = self.db.cur.fetchall()
trace = np.array(trace)[:, 2:]
if len(self._shape) > 1:
trace = trace.reshape(-1, *self._shape)
return squeeze(trace[slicing]) | [
"def",
"gettrace",
"(",
"self",
",",
"burn",
"=",
"0",
",",
"thin",
"=",
"1",
",",
"chain",
"=",
"-",
"1",
",",
"slicing",
"=",
"None",
")",
":",
"# warnings.warn('Use Sampler.trace method instead.',",
"# DeprecationWarning)",
"if",
"not",
"slicing",
":",
"slicing",
"=",
"slice",
"(",
"burn",
",",
"None",
",",
"thin",
")",
"# If chain is None, get the data from all chains.",
"if",
"chain",
"is",
"None",
":",
"self",
".",
"db",
".",
"cur",
".",
"execute",
"(",
"'SELECT * FROM [%s]'",
"%",
"self",
".",
"name",
")",
"trace",
"=",
"self",
".",
"db",
".",
"cur",
".",
"fetchall",
"(",
")",
"else",
":",
"# Deal with negative chains (starting from the end)",
"if",
"chain",
"<",
"0",
":",
"chain",
"=",
"range",
"(",
"self",
".",
"db",
".",
"chains",
")",
"[",
"chain",
"]",
"self",
".",
"db",
".",
"cur",
".",
"execute",
"(",
"'SELECT * FROM [%s] WHERE trace=%s'",
"%",
"(",
"self",
".",
"name",
",",
"chain",
")",
")",
"trace",
"=",
"self",
".",
"db",
".",
"cur",
".",
"fetchall",
"(",
")",
"trace",
"=",
"np",
".",
"array",
"(",
"trace",
")",
"[",
":",
",",
"2",
":",
"]",
"if",
"len",
"(",
"self",
".",
"_shape",
")",
">",
"1",
":",
"trace",
"=",
"trace",
".",
"reshape",
"(",
"-",
"1",
",",
"*",
"self",
".",
"_shape",
")",
"return",
"squeeze",
"(",
"trace",
"[",
"slicing",
"]",
")"
] | 40.516129 | 15.193548 |
def clean():
""" Remove all of the files contained in workdir.options.path """
if os.path.isdir(options.path):
logger.info('cleaning working directory: ' + options.path)
for filename in os.listdir(options.path):
filepath = os.path.join(options.path, filename)
if os.path.isdir(filepath):
shutil.rmtree(os.path.join(options.path, filename))
else:
os.remove(filepath) | [
"def",
"clean",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"options",
".",
"path",
")",
":",
"logger",
".",
"info",
"(",
"'cleaning working directory: '",
"+",
"options",
".",
"path",
")",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"options",
".",
"path",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"path",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filepath",
")",
":",
"shutil",
".",
"rmtree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"path",
",",
"filename",
")",
")",
"else",
":",
"os",
".",
"remove",
"(",
"filepath",
")"
] | 44.8 | 14.3 |
def get_as_integer_with_default(self, index, default_value):
"""
Converts array element into an integer or returns default value if conversion is not possible.
:param index: an index of element to get.
:param default_value: the default value
:return: integer value ot the element or default value if conversion is not supported.
"""
value = self[index]
return IntegerConverter.to_integer_with_default(value, default_value) | [
"def",
"get_as_integer_with_default",
"(",
"self",
",",
"index",
",",
"default_value",
")",
":",
"value",
"=",
"self",
"[",
"index",
"]",
"return",
"IntegerConverter",
".",
"to_integer_with_default",
"(",
"value",
",",
"default_value",
")"
] | 39.833333 | 26.833333 |
def translate(self):
"""Compile the variable lookup."""
ident = self.ident
expr = ex_rvalue(VARIABLE_PREFIX + ident)
return [expr], set([ident]), set() | [
"def",
"translate",
"(",
"self",
")",
":",
"ident",
"=",
"self",
".",
"ident",
"expr",
"=",
"ex_rvalue",
"(",
"VARIABLE_PREFIX",
"+",
"ident",
")",
"return",
"[",
"expr",
"]",
",",
"set",
"(",
"[",
"ident",
"]",
")",
",",
"set",
"(",
")"
] | 35.8 | 9 |
def main():
"""Install or upgrade setuptools and EasyInstall."""
options = _parse_args()
archive = download_setuptools(**_download_args(options))
return _install(archive, _build_install_args(options)) | [
"def",
"main",
"(",
")",
":",
"options",
"=",
"_parse_args",
"(",
")",
"archive",
"=",
"download_setuptools",
"(",
"*",
"*",
"_download_args",
"(",
"options",
")",
")",
"return",
"_install",
"(",
"archive",
",",
"_build_install_args",
"(",
"options",
")",
")"
] | 42.4 | 16 |
def _ParseKey(self, knowledge_base, registry_key, value_name):
"""Parses a Windows Registry key for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
value_name (str): name of the Windows Registry value.
Raises:
PreProcessFail: if the preprocessing fails.
"""
try:
registry_value = registry_key.GetValueByName(value_name)
except IOError as exception:
raise errors.PreProcessFail((
'Unable to retrieve Windows Registry key: {0:s} value: {1:s} '
'with error: {2!s}').format(
registry_key.path, value_name, exception))
if registry_value:
value_object = registry_value.GetDataAsObject()
if value_object:
self._ParseValueData(knowledge_base, value_object) | [
"def",
"_ParseKey",
"(",
"self",
",",
"knowledge_base",
",",
"registry_key",
",",
"value_name",
")",
":",
"try",
":",
"registry_value",
"=",
"registry_key",
".",
"GetValueByName",
"(",
"value_name",
")",
"except",
"IOError",
"as",
"exception",
":",
"raise",
"errors",
".",
"PreProcessFail",
"(",
"(",
"'Unable to retrieve Windows Registry key: {0:s} value: {1:s} '",
"'with error: {2!s}'",
")",
".",
"format",
"(",
"registry_key",
".",
"path",
",",
"value_name",
",",
"exception",
")",
")",
"if",
"registry_value",
":",
"value_object",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"if",
"value_object",
":",
"self",
".",
"_ParseValueData",
"(",
"knowledge_base",
",",
"value_object",
")"
] | 37.652174 | 20.782609 |
def has_hints(self):
"""
True if self provides hints on the cutoff energy.
"""
for acc in ["low", "normal", "high"]:
try:
if self.hint_for_accuracy(acc) is None:
return False
except KeyError:
return False
return True | [
"def",
"has_hints",
"(",
"self",
")",
":",
"for",
"acc",
"in",
"[",
"\"low\"",
",",
"\"normal\"",
",",
"\"high\"",
"]",
":",
"try",
":",
"if",
"self",
".",
"hint_for_accuracy",
"(",
"acc",
")",
"is",
"None",
":",
"return",
"False",
"except",
"KeyError",
":",
"return",
"False",
"return",
"True"
] | 29.272727 | 12.181818 |
def fromfilenames(filenames, coltype = int):
"""
Return a segmentlist describing the intervals spanned by the files
whose names are given in the list filenames. The segmentlist is
constructed by parsing the file names, and the boundaries of each
segment are coerced to type coltype.
The file names are parsed using a generalization of the format
described in Technical Note LIGO-T010150-00-E, which allows the
start time and duration appearing in the file name to be
non-integers.
NOTE: the output is a segmentlist as described by the file names;
if the file names are not in time order, or describe overlaping
segments, then thusly shall be the output of this function. It is
recommended that this function's output be coalesced before use.
"""
pattern = re.compile(r"-([\d.]+)-([\d.]+)\.[\w_+#]+\Z")
l = segments.segmentlist()
for name in filenames:
[(s, d)] = pattern.findall(name.strip().rstrip(".gz"))
s = coltype(s)
d = coltype(d)
l.append(segments.segment(s, s + d))
return l | [
"def",
"fromfilenames",
"(",
"filenames",
",",
"coltype",
"=",
"int",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r\"-([\\d.]+)-([\\d.]+)\\.[\\w_+#]+\\Z\"",
")",
"l",
"=",
"segments",
".",
"segmentlist",
"(",
")",
"for",
"name",
"in",
"filenames",
":",
"[",
"(",
"s",
",",
"d",
")",
"]",
"=",
"pattern",
".",
"findall",
"(",
"name",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
"\".gz\"",
")",
")",
"s",
"=",
"coltype",
"(",
"s",
")",
"d",
"=",
"coltype",
"(",
"d",
")",
"l",
".",
"append",
"(",
"segments",
".",
"segment",
"(",
"s",
",",
"s",
"+",
"d",
")",
")",
"return",
"l"
] | 39.56 | 20.04 |
def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None,
numPartitions=None, filterFunc=None):
"""
Return a new DStream by applying incremental `reduceByKey` over a sliding window.
The reduced value of over a new window is calculated using the old window's reduce value :
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
`invFunc` can be None, then it will reduce all the RDDs in window, could be slower
than having `invFunc`.
@param func: associative and commutative reduce function
@param invFunc: inverse function of `reduceFunc`
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
@param numPartitions: number of partitions of each RDD in the new DStream.
@param filterFunc: function to filter expired key-value pairs;
only pairs that satisfy the function are retained
set this to null if you do not want to filter
"""
self._validate_window_param(windowDuration, slideDuration)
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
reduced = self.reduceByKey(func, numPartitions)
if invFunc:
def reduceFunc(t, a, b):
b = b.reduceByKey(func, numPartitions)
r = a.union(b).reduceByKey(func, numPartitions) if a else b
if filterFunc:
r = r.filter(filterFunc)
return r
def invReduceFunc(t, a, b):
b = b.reduceByKey(func, numPartitions)
joined = a.leftOuterJoin(b, numPartitions)
return joined.mapValues(lambda kv: invFunc(kv[0], kv[1])
if kv[1] is not None else kv[0])
jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer)
jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer)
if slideDuration is None:
slideDuration = self._slideDuration
dstream = self._sc._jvm.PythonReducedWindowedDStream(
reduced._jdstream.dstream(),
jreduceFunc, jinvReduceFunc,
self._ssc._jduration(windowDuration),
self._ssc._jduration(slideDuration))
return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
else:
return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions) | [
"def",
"reduceByKeyAndWindow",
"(",
"self",
",",
"func",
",",
"invFunc",
",",
"windowDuration",
",",
"slideDuration",
"=",
"None",
",",
"numPartitions",
"=",
"None",
",",
"filterFunc",
"=",
"None",
")",
":",
"self",
".",
"_validate_window_param",
"(",
"windowDuration",
",",
"slideDuration",
")",
"if",
"numPartitions",
"is",
"None",
":",
"numPartitions",
"=",
"self",
".",
"_sc",
".",
"defaultParallelism",
"reduced",
"=",
"self",
".",
"reduceByKey",
"(",
"func",
",",
"numPartitions",
")",
"if",
"invFunc",
":",
"def",
"reduceFunc",
"(",
"t",
",",
"a",
",",
"b",
")",
":",
"b",
"=",
"b",
".",
"reduceByKey",
"(",
"func",
",",
"numPartitions",
")",
"r",
"=",
"a",
".",
"union",
"(",
"b",
")",
".",
"reduceByKey",
"(",
"func",
",",
"numPartitions",
")",
"if",
"a",
"else",
"b",
"if",
"filterFunc",
":",
"r",
"=",
"r",
".",
"filter",
"(",
"filterFunc",
")",
"return",
"r",
"def",
"invReduceFunc",
"(",
"t",
",",
"a",
",",
"b",
")",
":",
"b",
"=",
"b",
".",
"reduceByKey",
"(",
"func",
",",
"numPartitions",
")",
"joined",
"=",
"a",
".",
"leftOuterJoin",
"(",
"b",
",",
"numPartitions",
")",
"return",
"joined",
".",
"mapValues",
"(",
"lambda",
"kv",
":",
"invFunc",
"(",
"kv",
"[",
"0",
"]",
",",
"kv",
"[",
"1",
"]",
")",
"if",
"kv",
"[",
"1",
"]",
"is",
"not",
"None",
"else",
"kv",
"[",
"0",
"]",
")",
"jreduceFunc",
"=",
"TransformFunction",
"(",
"self",
".",
"_sc",
",",
"reduceFunc",
",",
"reduced",
".",
"_jrdd_deserializer",
")",
"jinvReduceFunc",
"=",
"TransformFunction",
"(",
"self",
".",
"_sc",
",",
"invReduceFunc",
",",
"reduced",
".",
"_jrdd_deserializer",
")",
"if",
"slideDuration",
"is",
"None",
":",
"slideDuration",
"=",
"self",
".",
"_slideDuration",
"dstream",
"=",
"self",
".",
"_sc",
".",
"_jvm",
".",
"PythonReducedWindowedDStream",
"(",
"reduced",
".",
"_jdstream",
".",
"dstream",
"(",
")",
",",
"jreduceFunc",
",",
"jinvReduceFunc",
",",
"self",
".",
"_ssc",
".",
"_jduration",
"(",
"windowDuration",
")",
",",
"self",
".",
"_ssc",
".",
"_jduration",
"(",
"slideDuration",
")",
")",
"return",
"DStream",
"(",
"dstream",
".",
"asJavaDStream",
"(",
")",
",",
"self",
".",
"_ssc",
",",
"self",
".",
"_sc",
".",
"serializer",
")",
"else",
":",
"return",
"reduced",
".",
"window",
"(",
"windowDuration",
",",
"slideDuration",
")",
".",
"reduceByKey",
"(",
"func",
",",
"numPartitions",
")"
] | 53.517857 | 28.089286 |
def IsValidLanguageCode(lang):
"""
Checks the validity of a language code value:
- checks whether the code, as lower case, is well formed and valid BCP47
using the pybcp47 module
"""
bcp47_obj = parser.ParseLanguage(str(lang.lower()))
return bcp47_obj.IsWellformed() and bcp47_obj.IsValid() | [
"def",
"IsValidLanguageCode",
"(",
"lang",
")",
":",
"bcp47_obj",
"=",
"parser",
".",
"ParseLanguage",
"(",
"str",
"(",
"lang",
".",
"lower",
"(",
")",
")",
")",
"return",
"bcp47_obj",
".",
"IsWellformed",
"(",
")",
"and",
"bcp47_obj",
".",
"IsValid",
"(",
")"
] | 37.875 | 11.625 |
def update_build(self, build, project, build_id, retry=None):
"""UpdateBuild.
Updates a build.
:param :class:`<Build> <azure.devops.v5_0.build.models.Build>` build: The build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param bool retry:
:rtype: :class:`<Build> <azure.devops.v5_0.build.models.Build>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if retry is not None:
query_parameters['retry'] = self._serialize.query('retry', retry, 'bool')
content = self._serialize.body(build, 'Build')
response = self._send(http_method='PATCH',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('Build', response) | [
"def",
"update_build",
"(",
"self",
",",
"build",
",",
"project",
",",
"build_id",
",",
"retry",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"build_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'buildId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'build_id'",
",",
"build_id",
",",
"'int'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"retry",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'retry'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'retry'",
",",
"retry",
",",
"'bool'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"build",
",",
"'Build'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'PATCH'",
",",
"location_id",
"=",
"'0cd358e1-9217-4d94-8269-1c1ee6f93dcf'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'Build'",
",",
"response",
")"
] | 50.16 | 18.72 |
def fletcher_checksum(data, offset):
"""
Fletcher Checksum -- Refer to RFC1008
calling with offset == _FLETCHER_CHECKSUM_VALIDATE will validate the
checksum without modifying the buffer; a valid checksum returns 0.
"""
c0 = 0
c1 = 0
pos = 0
length = len(data)
data = bytearray(data)
data[offset:offset + 2] = [0] * 2
while pos < length:
tlen = min(length - pos, _MODX)
for d in data[pos:pos + tlen]:
c0 += d
c1 += c0
c0 %= 255
c1 %= 255
pos += tlen
x = ((length - offset - 1) * c0 - c1) % 255
if x <= 0:
x += 255
y = 510 - c0 - x
if y > 255:
y -= 255
data[offset] = x
data[offset + 1] = y
return (x << 8) | (y & 0xff) | [
"def",
"fletcher_checksum",
"(",
"data",
",",
"offset",
")",
":",
"c0",
"=",
"0",
"c1",
"=",
"0",
"pos",
"=",
"0",
"length",
"=",
"len",
"(",
"data",
")",
"data",
"=",
"bytearray",
"(",
"data",
")",
"data",
"[",
"offset",
":",
"offset",
"+",
"2",
"]",
"=",
"[",
"0",
"]",
"*",
"2",
"while",
"pos",
"<",
"length",
":",
"tlen",
"=",
"min",
"(",
"length",
"-",
"pos",
",",
"_MODX",
")",
"for",
"d",
"in",
"data",
"[",
"pos",
":",
"pos",
"+",
"tlen",
"]",
":",
"c0",
"+=",
"d",
"c1",
"+=",
"c0",
"c0",
"%=",
"255",
"c1",
"%=",
"255",
"pos",
"+=",
"tlen",
"x",
"=",
"(",
"(",
"length",
"-",
"offset",
"-",
"1",
")",
"*",
"c0",
"-",
"c1",
")",
"%",
"255",
"if",
"x",
"<=",
"0",
":",
"x",
"+=",
"255",
"y",
"=",
"510",
"-",
"c0",
"-",
"x",
"if",
"y",
">",
"255",
":",
"y",
"-=",
"255",
"data",
"[",
"offset",
"]",
"=",
"x",
"data",
"[",
"offset",
"+",
"1",
"]",
"=",
"y",
"return",
"(",
"x",
"<<",
"8",
")",
"|",
"(",
"y",
"&",
"0xff",
")"
] | 22.575758 | 19.666667 |
def consultar_sat(retorno):
"""Constrói uma :class:`RespostaSAT` para o retorno (unicode) da função
:meth:`~satcfe.base.FuncoesSAT.consultar_sat`.
"""
resposta = analisar_retorno(forcar_unicode(retorno),
funcao='ConsultarSAT')
if resposta.EEEEE not in ('08000',):
raise ExcecaoRespostaSAT(resposta)
return resposta | [
"def",
"consultar_sat",
"(",
"retorno",
")",
":",
"resposta",
"=",
"analisar_retorno",
"(",
"forcar_unicode",
"(",
"retorno",
")",
",",
"funcao",
"=",
"'ConsultarSAT'",
")",
"if",
"resposta",
".",
"EEEEE",
"not",
"in",
"(",
"'08000'",
",",
")",
":",
"raise",
"ExcecaoRespostaSAT",
"(",
"resposta",
")",
"return",
"resposta"
] | 42.444444 | 8.444444 |
def _handleBulletWidth(bulletText, style, maxWidths):
"""
work out bullet width and adjust maxWidths[0] if neccessary
"""
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
#it's a list of fragments
bulletWidth = 0
for f in bulletText:
bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
indent = style.leftIndent + style.firstLineIndent
if bulletRight > indent:
#..then it overruns, and we have less space available on line 1
maxWidths[0] -= (bulletRight - indent) | [
"def",
"_handleBulletWidth",
"(",
"bulletText",
",",
"style",
",",
"maxWidths",
")",
":",
"if",
"bulletText",
":",
"if",
"isinstance",
"(",
"bulletText",
",",
"basestring",
")",
":",
"bulletWidth",
"=",
"stringWidth",
"(",
"bulletText",
",",
"style",
".",
"bulletFontName",
",",
"style",
".",
"bulletFontSize",
")",
"else",
":",
"#it's a list of fragments",
"bulletWidth",
"=",
"0",
"for",
"f",
"in",
"bulletText",
":",
"bulletWidth",
"=",
"bulletWidth",
"+",
"stringWidth",
"(",
"f",
".",
"text",
",",
"f",
".",
"fontName",
",",
"f",
".",
"fontSize",
")",
"bulletRight",
"=",
"style",
".",
"bulletIndent",
"+",
"bulletWidth",
"+",
"0.6",
"*",
"style",
".",
"bulletFontSize",
"indent",
"=",
"style",
".",
"leftIndent",
"+",
"style",
".",
"firstLineIndent",
"if",
"bulletRight",
">",
"indent",
":",
"#..then it overruns, and we have less space available on line 1",
"maxWidths",
"[",
"0",
"]",
"-=",
"(",
"bulletRight",
"-",
"indent",
")"
] | 45.882353 | 19.294118 |
def add_controller(self, key, controller):
"""Add child controller
The passed controller is registered as child of self. The register_actions method of the child controller is
called, allowing the child controller to register shortcut callbacks.
:param key: Name of the controller (unique within self), to later access it again
:param ExtendedController controller: Controller to be added as child
"""
assert isinstance(controller, ExtendedController)
controller.parent = self
self.__child_controllers[key] = controller
if self.__shortcut_manager is not None and controller not in self.__action_registered_controllers:
controller.register_actions(self.__shortcut_manager)
self.__action_registered_controllers.append(controller) | [
"def",
"add_controller",
"(",
"self",
",",
"key",
",",
"controller",
")",
":",
"assert",
"isinstance",
"(",
"controller",
",",
"ExtendedController",
")",
"controller",
".",
"parent",
"=",
"self",
"self",
".",
"__child_controllers",
"[",
"key",
"]",
"=",
"controller",
"if",
"self",
".",
"__shortcut_manager",
"is",
"not",
"None",
"and",
"controller",
"not",
"in",
"self",
".",
"__action_registered_controllers",
":",
"controller",
".",
"register_actions",
"(",
"self",
".",
"__shortcut_manager",
")",
"self",
".",
"__action_registered_controllers",
".",
"append",
"(",
"controller",
")"
] | 54.6 | 28.866667 |
def _scalar_power(self, f, p, out):
"""Compute ``p``-th power of ``f`` for ``p`` scalar."""
# Avoid infinite recursions by making a copy of the function
f_copy = f.copy()
def pow_posint(x, n):
"""Power function for positive integer ``n``, out-of-place."""
if isinstance(x, np.ndarray):
y = x.copy()
return ipow_posint(y, n)
else:
return x ** n
def ipow_posint(x, n):
"""Power function for positive integer ``n``, in-place."""
if n == 1:
return x
elif n % 2 == 0:
x *= x
return ipow_posint(x, n // 2)
else:
tmp = x.copy()
x *= x
ipow_posint(x, n // 2)
x *= tmp
return x
def power_oop(x, **kwargs):
"""Power out-of-place evaluation function."""
if p == 0:
return self.one()
elif p == int(p) and p >= 1:
return np.asarray(pow_posint(f_copy(x, **kwargs), int(p)),
dtype=self.scalar_out_dtype)
else:
result = np.power(f_copy(x, **kwargs), p)
return result.astype(self.scalar_out_dtype)
out._call_out_of_place = power_oop
decorator = preload_first_arg(out, 'in-place')
out._call_in_place = decorator(_default_in_place)
out._call_has_out = out._call_out_optional = False
return out | [
"def",
"_scalar_power",
"(",
"self",
",",
"f",
",",
"p",
",",
"out",
")",
":",
"# Avoid infinite recursions by making a copy of the function",
"f_copy",
"=",
"f",
".",
"copy",
"(",
")",
"def",
"pow_posint",
"(",
"x",
",",
"n",
")",
":",
"\"\"\"Power function for positive integer ``n``, out-of-place.\"\"\"",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"y",
"=",
"x",
".",
"copy",
"(",
")",
"return",
"ipow_posint",
"(",
"y",
",",
"n",
")",
"else",
":",
"return",
"x",
"**",
"n",
"def",
"ipow_posint",
"(",
"x",
",",
"n",
")",
":",
"\"\"\"Power function for positive integer ``n``, in-place.\"\"\"",
"if",
"n",
"==",
"1",
":",
"return",
"x",
"elif",
"n",
"%",
"2",
"==",
"0",
":",
"x",
"*=",
"x",
"return",
"ipow_posint",
"(",
"x",
",",
"n",
"//",
"2",
")",
"else",
":",
"tmp",
"=",
"x",
".",
"copy",
"(",
")",
"x",
"*=",
"x",
"ipow_posint",
"(",
"x",
",",
"n",
"//",
"2",
")",
"x",
"*=",
"tmp",
"return",
"x",
"def",
"power_oop",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Power out-of-place evaluation function.\"\"\"",
"if",
"p",
"==",
"0",
":",
"return",
"self",
".",
"one",
"(",
")",
"elif",
"p",
"==",
"int",
"(",
"p",
")",
"and",
"p",
">=",
"1",
":",
"return",
"np",
".",
"asarray",
"(",
"pow_posint",
"(",
"f_copy",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
",",
"int",
"(",
"p",
")",
")",
",",
"dtype",
"=",
"self",
".",
"scalar_out_dtype",
")",
"else",
":",
"result",
"=",
"np",
".",
"power",
"(",
"f_copy",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
",",
"p",
")",
"return",
"result",
".",
"astype",
"(",
"self",
".",
"scalar_out_dtype",
")",
"out",
".",
"_call_out_of_place",
"=",
"power_oop",
"decorator",
"=",
"preload_first_arg",
"(",
"out",
",",
"'in-place'",
")",
"out",
".",
"_call_in_place",
"=",
"decorator",
"(",
"_default_in_place",
")",
"out",
".",
"_call_has_out",
"=",
"out",
".",
"_call_out_optional",
"=",
"False",
"return",
"out"
] | 35.581395 | 15.069767 |
def numberize(string):
'''Turns a string into a number (``int`` or ``float``) if it's only a number (ignoring spaces), otherwise returns the string.
For example, ``"5 "`` becomes ``5`` and ``"2 ton"`` remains ``"2 ton"``'''
if not isinstance(string,basestring):
return string
just_int = r'^\s*[-+]?\d+\s*$'
just_float = r'^\s*[-+]?\d+\.(\d+)?\s*$'
if re.match(just_int,string):
return int(string)
if re.match(just_float,string):
return float(string)
return string | [
"def",
"numberize",
"(",
"string",
")",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"basestring",
")",
":",
"return",
"string",
"just_int",
"=",
"r'^\\s*[-+]?\\d+\\s*$'",
"just_float",
"=",
"r'^\\s*[-+]?\\d+\\.(\\d+)?\\s*$'",
"if",
"re",
".",
"match",
"(",
"just_int",
",",
"string",
")",
":",
"return",
"int",
"(",
"string",
")",
"if",
"re",
".",
"match",
"(",
"just_float",
",",
"string",
")",
":",
"return",
"float",
"(",
"string",
")",
"return",
"string"
] | 42.333333 | 19.666667 |
def get_rgb_from_xy_and_brightness(self, x, y, bri=1):
"""Inverse of `get_xy_point_from_rgb`. Returns (r, g, b) for given x, y values.
Implementation of the instructions found on the Philips Hue iOS SDK docs: http://goo.gl/kWKXKl
"""
# The xy to color conversion is almost the same, but in reverse order.
# Check if the xy value is within the color gamut of the lamp.
# If not continue with step 2, otherwise step 3.
# We do this to calculate the most accurate color the given light can actually do.
xy_point = XYPoint(x, y)
if not self.check_point_in_lamps_reach(xy_point):
# Calculate the closest point on the color gamut triangle
# and use that as xy value See step 6 of color to xy.
xy_point = self.get_closest_point_to_point(xy_point)
# Calculate XYZ values Convert using the following formulas:
Y = bri
X = (Y / xy_point.y) * xy_point.x
Z = (Y / xy_point.y) * (1 - xy_point.x - xy_point.y)
# Convert to RGB using Wide RGB D65 conversion
r = X * 1.656492 - Y * 0.354851 - Z * 0.255038
g = -X * 0.707196 + Y * 1.655397 + Z * 0.036152
b = X * 0.051713 - Y * 0.121364 + Z * 1.011530
# Apply reverse gamma correction
r, g, b = map(
lambda x: (12.92 * x) if (x <= 0.0031308) else ((1.0 + 0.055) * pow(x, (1.0 / 2.4)) - 0.055),
[r, g, b]
)
# Bring all negative components to zero
r, g, b = map(lambda x: max(0, x), [r, g, b])
# If one component is greater than 1, weight components by that value.
max_component = max(r, g, b)
if max_component > 1:
r, g, b = map(lambda x: x / max_component, [r, g, b])
r, g, b = map(lambda x: int(x * 255), [r, g, b])
# Convert the RGB values to your color object The rgb values from the above formulas are between 0.0 and 1.0.
return (r, g, b) | [
"def",
"get_rgb_from_xy_and_brightness",
"(",
"self",
",",
"x",
",",
"y",
",",
"bri",
"=",
"1",
")",
":",
"# The xy to color conversion is almost the same, but in reverse order.",
"# Check if the xy value is within the color gamut of the lamp.",
"# If not continue with step 2, otherwise step 3.",
"# We do this to calculate the most accurate color the given light can actually do.",
"xy_point",
"=",
"XYPoint",
"(",
"x",
",",
"y",
")",
"if",
"not",
"self",
".",
"check_point_in_lamps_reach",
"(",
"xy_point",
")",
":",
"# Calculate the closest point on the color gamut triangle",
"# and use that as xy value See step 6 of color to xy.",
"xy_point",
"=",
"self",
".",
"get_closest_point_to_point",
"(",
"xy_point",
")",
"# Calculate XYZ values Convert using the following formulas:",
"Y",
"=",
"bri",
"X",
"=",
"(",
"Y",
"/",
"xy_point",
".",
"y",
")",
"*",
"xy_point",
".",
"x",
"Z",
"=",
"(",
"Y",
"/",
"xy_point",
".",
"y",
")",
"*",
"(",
"1",
"-",
"xy_point",
".",
"x",
"-",
"xy_point",
".",
"y",
")",
"# Convert to RGB using Wide RGB D65 conversion",
"r",
"=",
"X",
"*",
"1.656492",
"-",
"Y",
"*",
"0.354851",
"-",
"Z",
"*",
"0.255038",
"g",
"=",
"-",
"X",
"*",
"0.707196",
"+",
"Y",
"*",
"1.655397",
"+",
"Z",
"*",
"0.036152",
"b",
"=",
"X",
"*",
"0.051713",
"-",
"Y",
"*",
"0.121364",
"+",
"Z",
"*",
"1.011530",
"# Apply reverse gamma correction",
"r",
",",
"g",
",",
"b",
"=",
"map",
"(",
"lambda",
"x",
":",
"(",
"12.92",
"*",
"x",
")",
"if",
"(",
"x",
"<=",
"0.0031308",
")",
"else",
"(",
"(",
"1.0",
"+",
"0.055",
")",
"*",
"pow",
"(",
"x",
",",
"(",
"1.0",
"/",
"2.4",
")",
")",
"-",
"0.055",
")",
",",
"[",
"r",
",",
"g",
",",
"b",
"]",
")",
"# Bring all negative components to zero",
"r",
",",
"g",
",",
"b",
"=",
"map",
"(",
"lambda",
"x",
":",
"max",
"(",
"0",
",",
"x",
")",
",",
"[",
"r",
",",
"g",
",",
"b",
"]",
")",
"# If one component is greater than 1, weight components by that value.",
"max_component",
"=",
"max",
"(",
"r",
",",
"g",
",",
"b",
")",
"if",
"max_component",
">",
"1",
":",
"r",
",",
"g",
",",
"b",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
"/",
"max_component",
",",
"[",
"r",
",",
"g",
",",
"b",
"]",
")",
"r",
",",
"g",
",",
"b",
"=",
"map",
"(",
"lambda",
"x",
":",
"int",
"(",
"x",
"*",
"255",
")",
",",
"[",
"r",
",",
"g",
",",
"b",
"]",
")",
"# Convert the RGB values to your color object The rgb values from the above formulas are between 0.0 and 1.0.",
"return",
"(",
"r",
",",
"g",
",",
"b",
")"
] | 45.069767 | 25.674419 |
def notebook_to_md(notebook):
"""Convert a notebook to its Markdown representation, using Pandoc"""
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(ipynb_writes(notebook).encode('utf-8'))
tmp_file.close()
pandoc(u'--from ipynb --to markdown -s --atx-headers --wrap=preserve --preserve-tabs', tmp_file.name, tmp_file.name)
with open(tmp_file.name, encoding='utf-8') as opened_file:
text = opened_file.read()
os.unlink(tmp_file.name)
return '\n'.join(text.splitlines()) | [
"def",
"notebook_to_md",
"(",
"notebook",
")",
":",
"tmp_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"tmp_file",
".",
"write",
"(",
"ipynb_writes",
"(",
"notebook",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"tmp_file",
".",
"close",
"(",
")",
"pandoc",
"(",
"u'--from ipynb --to markdown -s --atx-headers --wrap=preserve --preserve-tabs'",
",",
"tmp_file",
".",
"name",
",",
"tmp_file",
".",
"name",
")",
"with",
"open",
"(",
"tmp_file",
".",
"name",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"opened_file",
":",
"text",
"=",
"opened_file",
".",
"read",
"(",
")",
"os",
".",
"unlink",
"(",
"tmp_file",
".",
"name",
")",
"return",
"'\\n'",
".",
"join",
"(",
"text",
".",
"splitlines",
"(",
")",
")"
] | 39.846154 | 23.615385 |
def ts_to_dt_str(ts, dt_format='%Y-%m-%d %H:%M:%S'):
"""
时间戳转换为日期字符串
Args:
ts: 待转换的时间戳
dt_format: 目标日期字符串格式
Returns: 日期字符串
"""
return datetime.datetime.fromtimestamp(int(ts)).strftime(dt_format) | [
"def",
"ts_to_dt_str",
"(",
"ts",
",",
"dt_format",
"=",
"'%Y-%m-%d %H:%M:%S'",
")",
":",
"return",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"ts",
")",
")",
".",
"strftime",
"(",
"dt_format",
")"
] | 20.545455 | 21.272727 |
def classify_intersection9(s, curve1, curve2):
"""Image for :func:`._surface_helpers.classify_intersection` docstring."""
if NO_IMAGES:
return
surface1 = bezier.Surface.from_nodes(
np.asfortranarray(
[
[0.0, 20.0, 40.0, 10.0, 30.0, 20.0],
[0.0, 40.0, 0.0, 25.0, 25.0, 50.0],
]
)
)
surface2 = bezier.Surface.from_nodes(
np.asfortranarray(
[
[40.0, 20.0, 0.0, 30.0, 10.0, 20.0],
[40.0, 0.0, 40.0, 15.0, 15.0, -10.0],
]
)
)
figure, (ax1, ax2) = plt.subplots(1, 2)
classify_help(s, curve1, surface1, curve2, surface2, 0, ax=ax1)
classify_help(s, curve1, surface1, curve2, surface2, 1, ax=ax2)
# Remove the alpha from the color
color1 = ax1.patches[0].get_facecolor()[:3]
color2 = ax1.patches[1].get_facecolor()[:3]
# Now add the "degenerate" intersection polygons.
cp_edges1, cp_edges2 = _edges_classify_intersection9()
curved_polygon1 = bezier.CurvedPolygon(*cp_edges1)
curved_polygon1.plot(256, ax=ax1)
curved_polygon2 = bezier.CurvedPolygon(*cp_edges2)
curved_polygon2.plot(256, ax=ax2)
(int_x,), (int_y,) = curve1.evaluate(s)
ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o")
for ax in (ax1, ax2):
ax.axis("scaled")
ax.set_xlim(-2.0, 42.0)
ax.set_ylim(-12.0, 52.0)
plt.setp(ax2.get_yticklabels(), visible=False)
figure.tight_layout(w_pad=1.0)
save_image(figure, "classify_intersection9.png") | [
"def",
"classify_intersection9",
"(",
"s",
",",
"curve1",
",",
"curve2",
")",
":",
"if",
"NO_IMAGES",
":",
"return",
"surface1",
"=",
"bezier",
".",
"Surface",
".",
"from_nodes",
"(",
"np",
".",
"asfortranarray",
"(",
"[",
"[",
"0.0",
",",
"20.0",
",",
"40.0",
",",
"10.0",
",",
"30.0",
",",
"20.0",
"]",
",",
"[",
"0.0",
",",
"40.0",
",",
"0.0",
",",
"25.0",
",",
"25.0",
",",
"50.0",
"]",
",",
"]",
")",
")",
"surface2",
"=",
"bezier",
".",
"Surface",
".",
"from_nodes",
"(",
"np",
".",
"asfortranarray",
"(",
"[",
"[",
"40.0",
",",
"20.0",
",",
"0.0",
",",
"30.0",
",",
"10.0",
",",
"20.0",
"]",
",",
"[",
"40.0",
",",
"0.0",
",",
"40.0",
",",
"15.0",
",",
"15.0",
",",
"-",
"10.0",
"]",
",",
"]",
")",
")",
"figure",
",",
"(",
"ax1",
",",
"ax2",
")",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"2",
")",
"classify_help",
"(",
"s",
",",
"curve1",
",",
"surface1",
",",
"curve2",
",",
"surface2",
",",
"0",
",",
"ax",
"=",
"ax1",
")",
"classify_help",
"(",
"s",
",",
"curve1",
",",
"surface1",
",",
"curve2",
",",
"surface2",
",",
"1",
",",
"ax",
"=",
"ax2",
")",
"# Remove the alpha from the color",
"color1",
"=",
"ax1",
".",
"patches",
"[",
"0",
"]",
".",
"get_facecolor",
"(",
")",
"[",
":",
"3",
"]",
"color2",
"=",
"ax1",
".",
"patches",
"[",
"1",
"]",
".",
"get_facecolor",
"(",
")",
"[",
":",
"3",
"]",
"# Now add the \"degenerate\" intersection polygons.",
"cp_edges1",
",",
"cp_edges2",
"=",
"_edges_classify_intersection9",
"(",
")",
"curved_polygon1",
"=",
"bezier",
".",
"CurvedPolygon",
"(",
"*",
"cp_edges1",
")",
"curved_polygon1",
".",
"plot",
"(",
"256",
",",
"ax",
"=",
"ax1",
")",
"curved_polygon2",
"=",
"bezier",
".",
"CurvedPolygon",
"(",
"*",
"cp_edges2",
")",
"curved_polygon2",
".",
"plot",
"(",
"256",
",",
"ax",
"=",
"ax2",
")",
"(",
"int_x",
",",
")",
",",
"(",
"int_y",
",",
")",
"=",
"curve1",
".",
"evaluate",
"(",
"s",
")",
"ax1",
".",
"plot",
"(",
"[",
"int_x",
"]",
",",
"[",
"int_y",
"]",
",",
"color",
"=",
"color1",
",",
"linestyle",
"=",
"\"None\"",
",",
"marker",
"=",
"\"o\"",
")",
"ax2",
".",
"plot",
"(",
"[",
"int_x",
"]",
",",
"[",
"int_y",
"]",
",",
"color",
"=",
"color2",
",",
"linestyle",
"=",
"\"None\"",
",",
"marker",
"=",
"\"o\"",
")",
"for",
"ax",
"in",
"(",
"ax1",
",",
"ax2",
")",
":",
"ax",
".",
"axis",
"(",
"\"scaled\"",
")",
"ax",
".",
"set_xlim",
"(",
"-",
"2.0",
",",
"42.0",
")",
"ax",
".",
"set_ylim",
"(",
"-",
"12.0",
",",
"52.0",
")",
"plt",
".",
"setp",
"(",
"ax2",
".",
"get_yticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"figure",
".",
"tight_layout",
"(",
"w_pad",
"=",
"1.0",
")",
"save_image",
"(",
"figure",
",",
"\"classify_intersection9.png\"",
")"
] | 37.627907 | 16.232558 |
def augment(self, dct: NonAugmentedDict,
document: Optional[YamlDocument] = None) -> AugmentedDict:
"""
Augments the given dictionary by using all the bound extensions.
Args:
dct: Dictionary to augment.
document: The document the dictionary was loaded from.
Returns:
The augmented dictionary.
"""
Validator.instance_of(dict, raise_ex=True, dct=dct)
# Apply any configured loader
for instance in self._extensions:
nodes = list(dict_find_pattern(dct, **instance.config()))
for parent, k, val in nodes:
parent.pop(k)
fragment = instance.apply(
ExtensionContext(
mentor=self,
document=document or dct,
dct=dct,
parent_node=parent,
node=(k, val)
)
)
if fragment is not None:
parent.update(fragment)
return dct | [
"def",
"augment",
"(",
"self",
",",
"dct",
":",
"NonAugmentedDict",
",",
"document",
":",
"Optional",
"[",
"YamlDocument",
"]",
"=",
"None",
")",
"->",
"AugmentedDict",
":",
"Validator",
".",
"instance_of",
"(",
"dict",
",",
"raise_ex",
"=",
"True",
",",
"dct",
"=",
"dct",
")",
"# Apply any configured loader",
"for",
"instance",
"in",
"self",
".",
"_extensions",
":",
"nodes",
"=",
"list",
"(",
"dict_find_pattern",
"(",
"dct",
",",
"*",
"*",
"instance",
".",
"config",
"(",
")",
")",
")",
"for",
"parent",
",",
"k",
",",
"val",
"in",
"nodes",
":",
"parent",
".",
"pop",
"(",
"k",
")",
"fragment",
"=",
"instance",
".",
"apply",
"(",
"ExtensionContext",
"(",
"mentor",
"=",
"self",
",",
"document",
"=",
"document",
"or",
"dct",
",",
"dct",
"=",
"dct",
",",
"parent_node",
"=",
"parent",
",",
"node",
"=",
"(",
"k",
",",
"val",
")",
")",
")",
"if",
"fragment",
"is",
"not",
"None",
":",
"parent",
".",
"update",
"(",
"fragment",
")",
"return",
"dct"
] | 33.40625 | 14.65625 |
def has_annotation(self, annotation: str) -> bool:
"""Check if this annotation is defined."""
return (
self.has_enumerated_annotation(annotation) or
self.has_regex_annotation(annotation) or
self.has_local_annotation(annotation)
) | [
"def",
"has_annotation",
"(",
"self",
",",
"annotation",
":",
"str",
")",
"->",
"bool",
":",
"return",
"(",
"self",
".",
"has_enumerated_annotation",
"(",
"annotation",
")",
"or",
"self",
".",
"has_regex_annotation",
"(",
"annotation",
")",
"or",
"self",
".",
"has_local_annotation",
"(",
"annotation",
")",
")"
] | 40.428571 | 14.714286 |
def has_all_changes_covered(self):
"""
Return `True` if all changes have been covered, `False` otherwise.
"""
for filename in self.files():
for hunk in self.file_source_hunks(filename):
for line in hunk:
if line.reason is None:
continue # line untouched
if line.status is False:
return False # line not covered
return True | [
"def",
"has_all_changes_covered",
"(",
"self",
")",
":",
"for",
"filename",
"in",
"self",
".",
"files",
"(",
")",
":",
"for",
"hunk",
"in",
"self",
".",
"file_source_hunks",
"(",
"filename",
")",
":",
"for",
"line",
"in",
"hunk",
":",
"if",
"line",
".",
"reason",
"is",
"None",
":",
"continue",
"# line untouched",
"if",
"line",
".",
"status",
"is",
"False",
":",
"return",
"False",
"# line not covered",
"return",
"True"
] | 39.083333 | 10.083333 |
def strip_querystring(url):
"""Remove the querystring from the end of a URL."""
p = six.moves.urllib.parse.urlparse(url)
return p.scheme + "://" + p.netloc + p.path | [
"def",
"strip_querystring",
"(",
"url",
")",
":",
"p",
"=",
"six",
".",
"moves",
".",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"return",
"p",
".",
"scheme",
"+",
"\"://\"",
"+",
"p",
".",
"netloc",
"+",
"p",
".",
"path"
] | 43.25 | 6 |
def R(self, value):
""" measurement uncertainty"""
self._R = value
self._R1_2 = cholesky(self._R, lower=True) | [
"def",
"R",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_R",
"=",
"value",
"self",
".",
"_R1_2",
"=",
"cholesky",
"(",
"self",
".",
"_R",
",",
"lower",
"=",
"True",
")"
] | 32.5 | 12 |
def matches_video_filename(self, video):
"""
Detect whether the filename of videofile matches with this SubtitleFile.
:param video: VideoFile instance
:return: True if match
"""
vid_fn = video.get_filename()
vid_base, _ = os.path.splitext(vid_fn)
vid_base = vid_base.lower()
sub_fn = self.get_filename()
sub_base, _ = os.path.splitext(sub_fn)
sub_base = sub_base.lower()
log.debug('matches_filename(subtitle="{sub_filename}", video="{vid_filename}") ...'.format(
sub_filename=sub_fn, vid_filename=vid_fn))
matches = sub_base == vid_base
lang = None
if not matches:
if sub_base.startswith(vid_base):
sub_rest = sub_base[len(vid_base):]
while len(sub_rest) > 0:
if sub_rest[0].isalnum():
break
sub_rest = sub_rest[1:]
try:
lang = Language.from_unknown(sub_rest, xx=True, xxx=True)
matches = True
except NotALanguageException:
matches = False
if matches:
log.debug('... matches (language={language})'.format(language=lang))
else:
log.debug('... does not match')
return matches | [
"def",
"matches_video_filename",
"(",
"self",
",",
"video",
")",
":",
"vid_fn",
"=",
"video",
".",
"get_filename",
"(",
")",
"vid_base",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"vid_fn",
")",
"vid_base",
"=",
"vid_base",
".",
"lower",
"(",
")",
"sub_fn",
"=",
"self",
".",
"get_filename",
"(",
")",
"sub_base",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sub_fn",
")",
"sub_base",
"=",
"sub_base",
".",
"lower",
"(",
")",
"log",
".",
"debug",
"(",
"'matches_filename(subtitle=\"{sub_filename}\", video=\"{vid_filename}\") ...'",
".",
"format",
"(",
"sub_filename",
"=",
"sub_fn",
",",
"vid_filename",
"=",
"vid_fn",
")",
")",
"matches",
"=",
"sub_base",
"==",
"vid_base",
"lang",
"=",
"None",
"if",
"not",
"matches",
":",
"if",
"sub_base",
".",
"startswith",
"(",
"vid_base",
")",
":",
"sub_rest",
"=",
"sub_base",
"[",
"len",
"(",
"vid_base",
")",
":",
"]",
"while",
"len",
"(",
"sub_rest",
")",
">",
"0",
":",
"if",
"sub_rest",
"[",
"0",
"]",
".",
"isalnum",
"(",
")",
":",
"break",
"sub_rest",
"=",
"sub_rest",
"[",
"1",
":",
"]",
"try",
":",
"lang",
"=",
"Language",
".",
"from_unknown",
"(",
"sub_rest",
",",
"xx",
"=",
"True",
",",
"xxx",
"=",
"True",
")",
"matches",
"=",
"True",
"except",
"NotALanguageException",
":",
"matches",
"=",
"False",
"if",
"matches",
":",
"log",
".",
"debug",
"(",
"'... matches (language={language})'",
".",
"format",
"(",
"language",
"=",
"lang",
")",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'... does not match'",
")",
"return",
"matches"
] | 33.871795 | 16.641026 |
def slice_reStructuredText(input, output):
"""
Slices given reStructuredText file.
:param input: ReStructuredText file to slice.
:type input: unicode
:param output: Directory to output sliced reStructuredText files.
:type output: unicode
:return: Definition success.
:rtype: bool
"""
LOGGER.info("{0} | Slicing '{1}' file!".format(slice_reStructuredText.__name__, input))
file = File(input)
file.cache()
slices = OrderedDict()
for i, line in enumerate(file.content):
search = re.search(r"^\.\. \.(\w+)", line)
if search:
slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT
index = 0
for slice, slice_start in slices.iteritems():
slice_file = File(os.path.join(output, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
LOGGER.info("{0} | Outputing '{1}' file!".format(slice_reStructuredText.__name__, slice_file.path))
slice_end = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
len(file.content)
for i in range(slice_start, slice_end):
skip_line = False
for item in CONTENT_DELETION:
if re.search(item, file.content[i]):
LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(slice_reStructuredText.__name__,
i,
item))
skip_line = True
break
if skip_line:
continue
line = file.content[i]
for pattern, value in STATEMENT_SUBSTITUTE.iteritems():
line = re.sub(pattern, value, line)
search = re.search(r"- `[\w ]+`_ \(([\w\.]+)\)", line)
if search:
LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(slice_reStructuredText.__name__,
i,
search.groups()[0]))
line = "- :ref:`{0}`\n".format(search.groups()[0])
slice_file.content.append(line)
slice_file.write()
index += 1
return True | [
"def",
"slice_reStructuredText",
"(",
"input",
",",
"output",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"{0} | Slicing '{1}' file!\"",
".",
"format",
"(",
"slice_reStructuredText",
".",
"__name__",
",",
"input",
")",
")",
"file",
"=",
"File",
"(",
"input",
")",
"file",
".",
"cache",
"(",
")",
"slices",
"=",
"OrderedDict",
"(",
")",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"file",
".",
"content",
")",
":",
"search",
"=",
"re",
".",
"search",
"(",
"r\"^\\.\\. \\.(\\w+)\"",
",",
"line",
")",
"if",
"search",
":",
"slices",
"[",
"search",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"]",
"=",
"i",
"+",
"SLICE_ATTRIBUTE_INDENT",
"index",
"=",
"0",
"for",
"slice",
",",
"slice_start",
"in",
"slices",
".",
"iteritems",
"(",
")",
":",
"slice_file",
"=",
"File",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output",
",",
"\"{0}.{1}\"",
".",
"format",
"(",
"slice",
",",
"OUTPUT_FILES_EXTENSION",
")",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"{0} | Outputing '{1}' file!\"",
".",
"format",
"(",
"slice_reStructuredText",
".",
"__name__",
",",
"slice_file",
".",
"path",
")",
")",
"slice_end",
"=",
"index",
"<",
"(",
"len",
"(",
"slices",
".",
"values",
"(",
")",
")",
"-",
"1",
")",
"and",
"slices",
".",
"values",
"(",
")",
"[",
"index",
"+",
"1",
"]",
"-",
"SLICE_ATTRIBUTE_INDENT",
"or",
"len",
"(",
"file",
".",
"content",
")",
"for",
"i",
"in",
"range",
"(",
"slice_start",
",",
"slice_end",
")",
":",
"skip_line",
"=",
"False",
"for",
"item",
"in",
"CONTENT_DELETION",
":",
"if",
"re",
".",
"search",
"(",
"item",
",",
"file",
".",
"content",
"[",
"i",
"]",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"{0} | Skipping Line '{1}' with '{2}' content!\"",
".",
"format",
"(",
"slice_reStructuredText",
".",
"__name__",
",",
"i",
",",
"item",
")",
")",
"skip_line",
"=",
"True",
"break",
"if",
"skip_line",
":",
"continue",
"line",
"=",
"file",
".",
"content",
"[",
"i",
"]",
"for",
"pattern",
",",
"value",
"in",
"STATEMENT_SUBSTITUTE",
".",
"iteritems",
"(",
")",
":",
"line",
"=",
"re",
".",
"sub",
"(",
"pattern",
",",
"value",
",",
"line",
")",
"search",
"=",
"re",
".",
"search",
"(",
"r\"- `[\\w ]+`_ \\(([\\w\\.]+)\\)\"",
",",
"line",
")",
"if",
"search",
":",
"LOGGER",
".",
"info",
"(",
"\"{0} | Updating Line '{1}' link: '{2}'!\"",
".",
"format",
"(",
"slice_reStructuredText",
".",
"__name__",
",",
"i",
",",
"search",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
")",
"line",
"=",
"\"- :ref:`{0}`\\n\"",
".",
"format",
"(",
"search",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"slice_file",
".",
"content",
".",
"append",
"(",
"line",
")",
"slice_file",
".",
"write",
"(",
")",
"index",
"+=",
"1",
"return",
"True"
] | 40.206897 | 26.103448 |
async def verify_chain_of_trust(chain):
"""Build and verify the chain of trust.
Args:
chain (ChainOfTrust): the chain we're operating on
Raises:
CoTError: on failure
"""
log_path = os.path.join(chain.context.config["task_log_dir"], "chain_of_trust.log")
scriptworker_log = logging.getLogger('scriptworker')
with contextual_log_handler(
chain.context, path=log_path, log_obj=scriptworker_log,
formatter=AuditLogFormatter(
fmt=chain.context.config['log_fmt'],
datefmt=chain.context.config['log_datefmt'],
)
):
try:
# build LinkOfTrust objects
await build_task_dependencies(chain, chain.task, chain.name, chain.task_id)
# download the signed chain of trust artifacts
await download_cot(chain)
# verify the signatures and populate the ``link.cot``s
verify_cot_signatures(chain)
# download all other artifacts needed to verify chain of trust
await download_cot_artifacts(chain)
# verify the task types, e.g. decision
task_count = await verify_task_types(chain)
check_num_tasks(chain, task_count)
# verify the worker_impls, e.g. docker-worker
await verify_worker_impls(chain)
await trace_back_to_tree(chain)
except (BaseDownloadError, KeyError, AttributeError) as exc:
log.critical("Chain of Trust verification error!", exc_info=True)
if isinstance(exc, CoTError):
raise
else:
raise CoTError(str(exc))
log.info("Good.") | [
"async",
"def",
"verify_chain_of_trust",
"(",
"chain",
")",
":",
"log_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"chain",
".",
"context",
".",
"config",
"[",
"\"task_log_dir\"",
"]",
",",
"\"chain_of_trust.log\"",
")",
"scriptworker_log",
"=",
"logging",
".",
"getLogger",
"(",
"'scriptworker'",
")",
"with",
"contextual_log_handler",
"(",
"chain",
".",
"context",
",",
"path",
"=",
"log_path",
",",
"log_obj",
"=",
"scriptworker_log",
",",
"formatter",
"=",
"AuditLogFormatter",
"(",
"fmt",
"=",
"chain",
".",
"context",
".",
"config",
"[",
"'log_fmt'",
"]",
",",
"datefmt",
"=",
"chain",
".",
"context",
".",
"config",
"[",
"'log_datefmt'",
"]",
",",
")",
")",
":",
"try",
":",
"# build LinkOfTrust objects",
"await",
"build_task_dependencies",
"(",
"chain",
",",
"chain",
".",
"task",
",",
"chain",
".",
"name",
",",
"chain",
".",
"task_id",
")",
"# download the signed chain of trust artifacts",
"await",
"download_cot",
"(",
"chain",
")",
"# verify the signatures and populate the ``link.cot``s",
"verify_cot_signatures",
"(",
"chain",
")",
"# download all other artifacts needed to verify chain of trust",
"await",
"download_cot_artifacts",
"(",
"chain",
")",
"# verify the task types, e.g. decision",
"task_count",
"=",
"await",
"verify_task_types",
"(",
"chain",
")",
"check_num_tasks",
"(",
"chain",
",",
"task_count",
")",
"# verify the worker_impls, e.g. docker-worker",
"await",
"verify_worker_impls",
"(",
"chain",
")",
"await",
"trace_back_to_tree",
"(",
"chain",
")",
"except",
"(",
"BaseDownloadError",
",",
"KeyError",
",",
"AttributeError",
")",
"as",
"exc",
":",
"log",
".",
"critical",
"(",
"\"Chain of Trust verification error!\"",
",",
"exc_info",
"=",
"True",
")",
"if",
"isinstance",
"(",
"exc",
",",
"CoTError",
")",
":",
"raise",
"else",
":",
"raise",
"CoTError",
"(",
"str",
"(",
"exc",
")",
")",
"log",
".",
"info",
"(",
"\"Good.\"",
")"
] | 39.804878 | 18.04878 |
def count(self):
""" Compute count of group, excluding missing values """
from pandas.core.dtypes.missing import _isna_ndarraylike as _isna
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~_isna(np.atleast_2d(blk.get_values())))
for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(
lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk)) | [
"def",
"count",
"(",
"self",
")",
":",
"from",
"pandas",
".",
"core",
".",
"dtypes",
".",
"missing",
"import",
"_isna_ndarraylike",
"as",
"_isna",
"data",
",",
"_",
"=",
"self",
".",
"_get_data_to_aggregate",
"(",
")",
"ids",
",",
"_",
",",
"ngroups",
"=",
"self",
".",
"grouper",
".",
"group_info",
"mask",
"=",
"ids",
"!=",
"-",
"1",
"val",
"=",
"(",
"(",
"mask",
"&",
"~",
"_isna",
"(",
"np",
".",
"atleast_2d",
"(",
"blk",
".",
"get_values",
"(",
")",
")",
")",
")",
"for",
"blk",
"in",
"data",
".",
"blocks",
")",
"loc",
"=",
"(",
"blk",
".",
"mgr_locs",
"for",
"blk",
"in",
"data",
".",
"blocks",
")",
"counter",
"=",
"partial",
"(",
"lib",
".",
"count_level_2d",
",",
"labels",
"=",
"ids",
",",
"max_bin",
"=",
"ngroups",
",",
"axis",
"=",
"1",
")",
"blk",
"=",
"map",
"(",
"make_block",
",",
"map",
"(",
"counter",
",",
"val",
")",
",",
"loc",
")",
"return",
"self",
".",
"_wrap_agged_blocks",
"(",
"data",
".",
"items",
",",
"list",
"(",
"blk",
")",
")"
] | 37.235294 | 21.235294 |
def iter_regions(self):
"""
Return an iterable list of all region files. Use this function if you only
want to loop through each region files once, and do not want to cache the results.
"""
# TODO: Implement BoundingBox
# TODO: Implement sort order
for x,z in self.regionfiles.keys():
close_after_use = False
if (x,z) in self.regions:
regionfile = self.regions[(x,z)]
else:
# It is not yet cached.
# Get file, but do not cache later.
regionfile = region.RegionFile(self.regionfiles[(x,z)], chunkclass = self.chunkclass)
regionfile.loc = Location(x=x,z=z)
close_after_use = True
try:
yield regionfile
finally:
if close_after_use:
regionfile.close() | [
"def",
"iter_regions",
"(",
"self",
")",
":",
"# TODO: Implement BoundingBox",
"# TODO: Implement sort order",
"for",
"x",
",",
"z",
"in",
"self",
".",
"regionfiles",
".",
"keys",
"(",
")",
":",
"close_after_use",
"=",
"False",
"if",
"(",
"x",
",",
"z",
")",
"in",
"self",
".",
"regions",
":",
"regionfile",
"=",
"self",
".",
"regions",
"[",
"(",
"x",
",",
"z",
")",
"]",
"else",
":",
"# It is not yet cached.",
"# Get file, but do not cache later.",
"regionfile",
"=",
"region",
".",
"RegionFile",
"(",
"self",
".",
"regionfiles",
"[",
"(",
"x",
",",
"z",
")",
"]",
",",
"chunkclass",
"=",
"self",
".",
"chunkclass",
")",
"regionfile",
".",
"loc",
"=",
"Location",
"(",
"x",
"=",
"x",
",",
"z",
"=",
"z",
")",
"close_after_use",
"=",
"True",
"try",
":",
"yield",
"regionfile",
"finally",
":",
"if",
"close_after_use",
":",
"regionfile",
".",
"close",
"(",
")"
] | 40.454545 | 13.727273 |
def base_url(klass, space_id, parent_resource_id, resource_url='entries', resource_id=None, environment_id=None):
"""
Returns the URI for the snapshot.
"""
return "spaces/{0}{1}/{2}/{3}/snapshots/{4}".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
resource_url,
parent_resource_id,
resource_id if resource_id is not None else ''
) | [
"def",
"base_url",
"(",
"klass",
",",
"space_id",
",",
"parent_resource_id",
",",
"resource_url",
"=",
"'entries'",
",",
"resource_id",
"=",
"None",
",",
"environment_id",
"=",
"None",
")",
":",
"return",
"\"spaces/{0}{1}/{2}/{3}/snapshots/{4}\"",
".",
"format",
"(",
"space_id",
",",
"'/environments/{0}'",
".",
"format",
"(",
"environment_id",
")",
"if",
"environment_id",
"is",
"not",
"None",
"else",
"''",
",",
"resource_url",
",",
"parent_resource_id",
",",
"resource_id",
"if",
"resource_id",
"is",
"not",
"None",
"else",
"''",
")"
] | 39.416667 | 23.25 |
def impersonate_user(self, username, password):
"""delegate to personate_user method
"""
if self.personate_user:
self.personate_user.impersonate_user(username, password) | [
"def",
"impersonate_user",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"if",
"self",
".",
"personate_user",
":",
"self",
".",
"personate_user",
".",
"impersonate_user",
"(",
"username",
",",
"password",
")"
] | 40.2 | 8.8 |
def get_target_transcript(self,min_intron=1):
  """Get the mapping of to the target strand

  Collapses the per-fragment alignment ranges into exons: consecutive
  target ranges separated by fewer than min_intron bases are merged into
  one exon, while larger gaps are kept as introns.

  :param min_intron: smallest gap (in bases) treated as an intron; must be >= 1
  :type min_intron: int
  :returns: Transcript mapped to target
  :rtype: Transcript
  """
  if min_intron < 1:
    sys.stderr.write("ERROR minimum intron should be 1 base or longer\n")
    sys.exit()
  #tx = Transcript()
  # Seed with a copy of the first target range so the stored ranges are not mutated.
  rngs = [self.alignment_ranges[0][0].copy()]
  #rngs[0].set_direction(None)
  for i in range(len(self.alignment_ranges)-1):
    # Gap between the end of the exon built so far and the next target range.
    dist = self.alignment_ranges[i+1][0].start - rngs[-1].end-1
    #print 'dist '+str(dist)
    if dist >= min_intron:
      # Gap is big enough to be an intron: start a new exon.
      rngs.append(self.alignment_ranges[i+1][0].copy())
      #rngs[-1].set_direction(None)
    else:
      # Small gap: merge into the previous exon by extending its end.
      rngs[-1].end = self.alignment_ranges[i+1][0].end
  # Transcript/gene names come from the chr of the second member of each
  # alignment-range pair — presumably the query side; TODO confirm.
  tx = Transcript(rngs,options=Transcript.Options(
      direction=self.strand,
      name = self.alignment_ranges[0][1].chr,
      gene_name = self.alignment_ranges[0][1].chr
  ))
  #tx.set_exons_and_junctions_from_ranges(rngs)
  #tx.set_range()
  #tx.set_strand(self.get_strand())
  #tx.set_transcript_name(self.get_alignment_ranges()[0][1].chr)
  #tx.set_gene_name(self.get_alignment_ranges()[0][1].chr)
  return tx
"def",
"get_target_transcript",
"(",
"self",
",",
"min_intron",
"=",
"1",
")",
":",
"if",
"min_intron",
"<",
"1",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR minimum intron should be 1 base or longer\\n\"",
")",
"sys",
".",
"exit",
"(",
")",
"#tx = Transcript()",
"rngs",
"=",
"[",
"self",
".",
"alignment_ranges",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"copy",
"(",
")",
"]",
"#rngs[0].set_direction(None)",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"alignment_ranges",
")",
"-",
"1",
")",
":",
"dist",
"=",
"self",
".",
"alignment_ranges",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
".",
"start",
"-",
"rngs",
"[",
"-",
"1",
"]",
".",
"end",
"-",
"1",
"#print 'dist '+str(dist)",
"if",
"dist",
">=",
"min_intron",
":",
"rngs",
".",
"append",
"(",
"self",
".",
"alignment_ranges",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
".",
"copy",
"(",
")",
")",
"#rngs[-1].set_direction(None)",
"else",
":",
"rngs",
"[",
"-",
"1",
"]",
".",
"end",
"=",
"self",
".",
"alignment_ranges",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
".",
"end",
"tx",
"=",
"Transcript",
"(",
"rngs",
",",
"options",
"=",
"Transcript",
".",
"Options",
"(",
"direction",
"=",
"self",
".",
"strand",
",",
"name",
"=",
"self",
".",
"alignment_ranges",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"chr",
",",
"gene_name",
"=",
"self",
".",
"alignment_ranges",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"chr",
")",
")",
"#tx.set_exons_and_junctions_from_ranges(rngs)",
"#tx.set_range()",
"#tx.set_strand(self.get_strand())",
"#tx.set_transcript_name(self.get_alignment_ranges()[0][1].chr)",
"#tx.set_gene_name(self.get_alignment_ranges()[0][1].chr)",
"return",
"tx"
] | 37.125 | 15.40625 |
def exit(self, signal=None, frame=None):
    """
    Properly close the AMQP connections and terminate the process.

    The signature matches the handler form expected by ``signal.signal``,
    so this method can be registered directly as a SIGINT/SIGTERM handler.

    :param signal: signal number (unused; present for handler compatibility)
    :param frame: current stack frame (unused; present for handler compatibility)
    """
    # Close the channels before the underlying connection.
    self.input_channel.close()
    self.client_queue.close()
    self.connection.close()
    log.info("Worker exiting")
    sys.exit(0)
"def",
"exit",
"(",
"self",
",",
"signal",
"=",
"None",
",",
"frame",
"=",
"None",
")",
":",
"self",
".",
"input_channel",
".",
"close",
"(",
")",
"self",
".",
"client_queue",
".",
"close",
"(",
")",
"self",
".",
"connection",
".",
"close",
"(",
")",
"log",
".",
"info",
"(",
"\"Worker exiting\"",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | 28.444444 | 5.777778 |
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type  wd:              integer
    @param wd:              day-of-week value for the current day
    @type  wkdy:            integer
    @param wkdy:            day-of-week value for the parsed day
    @type  offset:          integer
    @param offset:          offset direction for any modifiers (-1, 0, 1);
                            the sentinel 2 means "no modifier present"
    @type  style:           integer
    @param style:           normally the value
                            set in C{Constants.DOWParseStyle}
    @type  currentDayStyle: integer
    @param currentDayStyle: normally the value
                            set in C{Constants.CurrentDOWParseStyle}

    @rtype:  integer
    @return: calculated day-of-week delta in days
    """
    diffBase = wkdy - wd
    origOffset = offset
    if offset == 2:
        # no modifier is present.
        # i.e. string to be parsed is just DOW
        # NOTE: by operator precedence this condition reads as
        # (wkdy * style > wd * style) or (currentDayStyle and wkdy == wd)
        if wkdy * style > wd * style or \
                currentDayStyle and wkdy == wd:
            # wkdy located in current week
            offset = 0
        elif style in (-1, 1):
            # wkdy located in last (-1) or next (1) week
            offset = style
        else:
            # invalid style, or should raise error?
            offset = 0
    # offset = -1 means last week
    # offset = 0 means current week
    # offset = 1 means next week
    diff = diffBase + 7 * offset
    # Pull the result back when it overshoots the target direction by
    # more than a week.
    if style == 1 and diff < -7:
        diff += 7
    elif style == -1 and diff > 7:
        diff -= 7
    debug and log.debug("wd %s, wkdy %s, offset %d, "
                        "style %d, currentDayStyle %d",
                        wd, wkdy, origOffset, style, currentDayStyle)
    return diff
"def",
"_CalculateDOWDelta",
"(",
"self",
",",
"wd",
",",
"wkdy",
",",
"offset",
",",
"style",
",",
"currentDayStyle",
")",
":",
"diffBase",
"=",
"wkdy",
"-",
"wd",
"origOffset",
"=",
"offset",
"if",
"offset",
"==",
"2",
":",
"# no modifier is present.",
"# i.e. string to be parsed is just DOW",
"if",
"wkdy",
"*",
"style",
">",
"wd",
"*",
"style",
"or",
"currentDayStyle",
"and",
"wkdy",
"==",
"wd",
":",
"# wkdy located in current week",
"offset",
"=",
"0",
"elif",
"style",
"in",
"(",
"-",
"1",
",",
"1",
")",
":",
"# wkdy located in last (-1) or next (1) week",
"offset",
"=",
"style",
"else",
":",
"# invalid style, or should raise error?",
"offset",
"=",
"0",
"# offset = -1 means last week",
"# offset = 0 means current week",
"# offset = 1 means next week",
"diff",
"=",
"diffBase",
"+",
"7",
"*",
"offset",
"if",
"style",
"==",
"1",
"and",
"diff",
"<",
"-",
"7",
":",
"diff",
"+=",
"7",
"elif",
"style",
"==",
"-",
"1",
"and",
"diff",
">",
"7",
":",
"diff",
"-=",
"7",
"debug",
"and",
"log",
".",
"debug",
"(",
"\"wd %s, wkdy %s, offset %d, \"",
"\"style %d, currentDayStyle %d\"",
",",
"wd",
",",
"wkdy",
",",
"origOffset",
",",
"style",
",",
"currentDayStyle",
")",
"return",
"diff"
] | 37.307692 | 15.769231 |
def delete_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, **kwargs):
    """Delete FixedRateShipping

    Delete an instance of FixedRateShipping by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_fixed_rate_shipping_by_id(fixed_rate_shipping_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str fixed_rate_shipping_id: ID of fixedRateShipping to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want the payload only, never the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    request = cls._delete_fixed_rate_shipping_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand back the request thread untouched.
        return request(fixed_rate_shipping_id, **kwargs)
    # Synchronous: the call returns the data directly.
    return request(fixed_rate_shipping_id, **kwargs)
"def",
"delete_fixed_rate_shipping_by_id",
"(",
"cls",
",",
"fixed_rate_shipping_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_delete_fixed_rate_shipping_by_id_with_http_info",
"(",
"fixed_rate_shipping_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_delete_fixed_rate_shipping_by_id_with_http_info",
"(",
"fixed_rate_shipping_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 47.52381 | 25.714286 |
def add_data_point(self, x, y):
    """Adds a data point to the series, keeping the data ordered by x.

    :param x: The numerical x value to be added.
    :param y: The numerical y value to be added."""
    if not is_numeric(x):
        raise TypeError("x value must be numeric, not '%s'" % str(x))
    if not is_numeric(y):
        raise TypeError("y value must be numeric, not '%s'" % str(y))
    previous_last_x = self._data[-1][0]
    self._data.append((x, y))
    # Only re-sort when the new point lands before the previous last x.
    if x < previous_last_x:
        self._data = sorted(self._data, key=lambda point: point[0])
"def",
"add_data_point",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"if",
"not",
"is_numeric",
"(",
"x",
")",
":",
"raise",
"TypeError",
"(",
"\"x value must be numeric, not '%s'\"",
"%",
"str",
"(",
"x",
")",
")",
"if",
"not",
"is_numeric",
"(",
"y",
")",
":",
"raise",
"TypeError",
"(",
"\"y value must be numeric, not '%s'\"",
"%",
"str",
"(",
"y",
")",
")",
"current_last_x",
"=",
"self",
".",
"_data",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"self",
".",
"_data",
".",
"append",
"(",
"(",
"x",
",",
"y",
")",
")",
"if",
"x",
"<",
"current_last_x",
":",
"self",
".",
"_data",
"=",
"sorted",
"(",
"self",
".",
"_data",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"0",
"]",
")"
] | 39.5 | 16.5 |
def _sendmsg(self,method,url,headers):
    '''Build and send one RTSP request message over the socket.'''
    msg = '%s %s %s'%(method,url,RTSP_VERSION)
    headers['User-Agent'] = DEFAULT_USERAGENT
    # Each request gets a fresh sequence number; remember which method it
    # belongs to so the matching response can be dispatched later.
    cseq = self._next_seq()
    self._cseq_map[cseq] = method
    headers['CSeq'] = str(cseq)
    if self._session_id: headers['Session'] = self._session_id
    for (k,v) in headers.items():
        msg += LINE_SPLIT_STR + '%s: %s'%(k,str(v))
    msg += HEADER_END_STR # End headers
    # GET_PARAMETER requests are only printed when they carry x-RetransSeq —
    # presumably to keep keep-alive polling out of the log; TODO confirm.
    if method != 'GET_PARAMETER' or 'x-RetransSeq' in headers:
        PRINT(self._get_time_str() + LINE_SPLIT_STR + msg)
    try:
        self._sock.send(msg)
    except socket.error, e:  # Python 2 except syntax
        PRINT('Send msg error: %s'%e, RED)
"def",
"_sendmsg",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
")",
":",
"msg",
"=",
"'%s %s %s'",
"%",
"(",
"method",
",",
"url",
",",
"RTSP_VERSION",
")",
"headers",
"[",
"'User-Agent'",
"]",
"=",
"DEFAULT_USERAGENT",
"cseq",
"=",
"self",
".",
"_next_seq",
"(",
")",
"self",
".",
"_cseq_map",
"[",
"cseq",
"]",
"=",
"method",
"headers",
"[",
"'CSeq'",
"]",
"=",
"str",
"(",
"cseq",
")",
"if",
"self",
".",
"_session_id",
":",
"headers",
"[",
"'Session'",
"]",
"=",
"self",
".",
"_session_id",
"for",
"(",
"k",
",",
"v",
")",
"in",
"headers",
".",
"items",
"(",
")",
":",
"msg",
"+=",
"LINE_SPLIT_STR",
"+",
"'%s: %s'",
"%",
"(",
"k",
",",
"str",
"(",
"v",
")",
")",
"msg",
"+=",
"HEADER_END_STR",
"# End headers",
"if",
"method",
"!=",
"'GET_PARAMETER'",
"or",
"'x-RetransSeq'",
"in",
"headers",
":",
"PRINT",
"(",
"self",
".",
"_get_time_str",
"(",
")",
"+",
"LINE_SPLIT_STR",
"+",
"msg",
")",
"try",
":",
"self",
".",
"_sock",
".",
"send",
"(",
"msg",
")",
"except",
"socket",
".",
"error",
",",
"e",
":",
"PRINT",
"(",
"'Send msg error: %s'",
"%",
"e",
",",
"RED",
")"
] | 41.647059 | 12.117647 |
def pprint(walker):
    """Pretty printer for tree walkers

    Takes a TreeWalker instance and pretty prints the output of walking the
    tree: tags are indented two spaces per depth level, attributes one level
    deeper than their tag, and text, comments, and doctypes each on their
    own line.

    :arg walker: a TreeWalker instance

    :returns: the pretty-printed tree as a single newline-joined string

    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(walker):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name, prefixed with its namespace when it is not plain HTML
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing tags produce no EndTag token, so undo the indent here
            if type == "EmptyTag":
                indent -= 2
        elif type == "EndTag":
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            # emit whichever of publicId/systemId are present, in spec order
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        elif type == "SpaceCharacters":
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
        else:
            raise ValueError("Unknown token type, %s" % type)
    return "\n".join(output)
"def",
"pprint",
"(",
"walker",
")",
":",
"output",
"=",
"[",
"]",
"indent",
"=",
"0",
"for",
"token",
"in",
"concatenateCharacterTokens",
"(",
"walker",
")",
":",
"type",
"=",
"token",
"[",
"\"type\"",
"]",
"if",
"type",
"in",
"(",
"\"StartTag\"",
",",
"\"EmptyTag\"",
")",
":",
"# tag name",
"if",
"token",
"[",
"\"namespace\"",
"]",
"and",
"token",
"[",
"\"namespace\"",
"]",
"!=",
"constants",
".",
"namespaces",
"[",
"\"html\"",
"]",
":",
"if",
"token",
"[",
"\"namespace\"",
"]",
"in",
"constants",
".",
"prefixes",
":",
"ns",
"=",
"constants",
".",
"prefixes",
"[",
"token",
"[",
"\"namespace\"",
"]",
"]",
"else",
":",
"ns",
"=",
"token",
"[",
"\"namespace\"",
"]",
"name",
"=",
"\"%s %s\"",
"%",
"(",
"ns",
",",
"token",
"[",
"\"name\"",
"]",
")",
"else",
":",
"name",
"=",
"token",
"[",
"\"name\"",
"]",
"output",
".",
"append",
"(",
"\"%s<%s>\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"name",
")",
")",
"indent",
"+=",
"2",
"# attributes (sorted for consistent ordering)",
"attrs",
"=",
"token",
"[",
"\"data\"",
"]",
"for",
"(",
"namespace",
",",
"localname",
")",
",",
"value",
"in",
"sorted",
"(",
"attrs",
".",
"items",
"(",
")",
")",
":",
"if",
"namespace",
":",
"if",
"namespace",
"in",
"constants",
".",
"prefixes",
":",
"ns",
"=",
"constants",
".",
"prefixes",
"[",
"namespace",
"]",
"else",
":",
"ns",
"=",
"namespace",
"name",
"=",
"\"%s %s\"",
"%",
"(",
"ns",
",",
"localname",
")",
"else",
":",
"name",
"=",
"localname",
"output",
".",
"append",
"(",
"\"%s%s=\\\"%s\\\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"name",
",",
"value",
")",
")",
"# self-closing",
"if",
"type",
"==",
"\"EmptyTag\"",
":",
"indent",
"-=",
"2",
"elif",
"type",
"==",
"\"EndTag\"",
":",
"indent",
"-=",
"2",
"elif",
"type",
"==",
"\"Comment\"",
":",
"output",
".",
"append",
"(",
"\"%s<!-- %s -->\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"data\"",
"]",
")",
")",
"elif",
"type",
"==",
"\"Doctype\"",
":",
"if",
"token",
"[",
"\"name\"",
"]",
":",
"if",
"token",
"[",
"\"publicId\"",
"]",
":",
"output",
".",
"append",
"(",
"\"\"\"%s<!DOCTYPE %s \"%s\" \"%s\">\"\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"name\"",
"]",
",",
"token",
"[",
"\"publicId\"",
"]",
",",
"token",
"[",
"\"systemId\"",
"]",
"if",
"token",
"[",
"\"systemId\"",
"]",
"else",
"\"\"",
")",
")",
"elif",
"token",
"[",
"\"systemId\"",
"]",
":",
"output",
".",
"append",
"(",
"\"\"\"%s<!DOCTYPE %s \"\" \"%s\">\"\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"name\"",
"]",
",",
"token",
"[",
"\"systemId\"",
"]",
")",
")",
"else",
":",
"output",
".",
"append",
"(",
"\"%s<!DOCTYPE %s>\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"name\"",
"]",
")",
")",
"else",
":",
"output",
".",
"append",
"(",
"\"%s<!DOCTYPE >\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
")",
")",
"elif",
"type",
"==",
"\"Characters\"",
":",
"output",
".",
"append",
"(",
"\"%s\\\"%s\\\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"data\"",
"]",
")",
")",
"elif",
"type",
"==",
"\"SpaceCharacters\"",
":",
"assert",
"False",
",",
"\"concatenateCharacterTokens should have got rid of all Space tokens\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown token type, %s\"",
"%",
"type",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"output",
")"
] | 37.92 | 19.186667 |
def set_description(self, id, **kwargs):  # noqa: E501
    """Set description associated with a specific source # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.set_description(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param str body:
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the payload, not the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    request = self.set_description_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: hand back the request thread untouched.
        return request(id, **kwargs)  # noqa: E501
    # Synchronous: the call returns the data directly.
    return request(id, **kwargs)  # noqa: E501
"def",
"set_description",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"set_description_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"set_description_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 40.636364 | 18.045455 |
def calculate_euc_distance(a, b):
    """Calculates the Euclidean distance between two points a and b.

    Args
    ----
    a : (:obj:`float`, :obj:`float`)
        Two-dimension tuple (x1,y1)
    b : (:obj:`float`, :obj:`float`)
        Two-dimension tuple (x2,y2)

    Returns
    -------
    int
        the distance, rounded to the nearest integer.
    """
    x1, y1 = a
    x2, y2 = b
    # math.hypot is numerically more robust than sqrt(dx**2 + dy**2)
    # (avoids overflow/underflow of the intermediate squares).
    return int(round(math.hypot(x1 - x2, y1 - y2)))
"def",
"calculate_euc_distance",
"(",
"a",
",",
"b",
")",
":",
"x1",
",",
"y1",
"=",
"a",
"x2",
",",
"y2",
"=",
"b",
"return",
"int",
"(",
"round",
"(",
"math",
".",
"sqrt",
"(",
"(",
"(",
"x1",
"-",
"x2",
")",
"**",
"2",
")",
"+",
"(",
"(",
"(",
"y1",
"-",
"y2",
")",
"**",
"2",
")",
")",
")",
")",
")"
] | 21.736842 | 20.842105 |
def pg_dsn(settings: Settings) -> str:
    """
    Build a postgres DSN from connection settings.

    :param settings: settings including connection settings
    :return: DSN url suitable for sqlalchemy and aiopg.
    """
    url = URL(
        drivername='postgres',
        username=settings.DB_USER,
        password=settings.DB_PASSWORD,
        host=settings.DB_HOST,
        port=settings.DB_PORT,
        database=settings.DB_NAME,
    )
    return str(url)
"def",
"pg_dsn",
"(",
"settings",
":",
"Settings",
")",
"->",
"str",
":",
"return",
"str",
"(",
"URL",
"(",
"database",
"=",
"settings",
".",
"DB_NAME",
",",
"password",
"=",
"settings",
".",
"DB_PASSWORD",
",",
"host",
"=",
"settings",
".",
"DB_HOST",
",",
"port",
"=",
"settings",
".",
"DB_PORT",
",",
"username",
"=",
"settings",
".",
"DB_USER",
",",
"drivername",
"=",
"'postgres'",
",",
")",
")"
] | 29.769231 | 10.384615 |
def register(self, name):
    """Return a decorator that registers the decorated item under *name*."""
    def _store(func):
        """Record *func* in the registry and hand it back unchanged."""
        self[name] = func
        return func
    return _store
"def",
"register",
"(",
"self",
",",
"name",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"\"\"\"Register decorated function.\"\"\"",
"self",
"[",
"name",
"]",
"=",
"func",
"return",
"func",
"return",
"decorator"
] | 30.5 | 13.875 |
def deserialize(encoded, **kwargs):
    '''Construct a muda transformation from a JSON encoded string.

    Parameters
    ----------
    encoded : str
        JSON encoding of the transformation or pipeline

    kwargs
        Additional keyword arguments to `jsonpickle.decode()`

    Returns
    -------
    obj
        The transformation

    See Also
    --------
    serialize

    Examples
    --------
    >>> D = muda.deformers.TimeStretch(rate=1.5)
    >>> D_serial = muda.serialize(D)
    >>> D2 = muda.deserialize(D_serial)
    >>> D2
    TimeStretch(rate=1.5)
    '''
    return __reconstruct(jsonpickle.decode(encoded, **kwargs))
"def",
"deserialize",
"(",
"encoded",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"jsonpickle",
".",
"decode",
"(",
"encoded",
",",
"*",
"*",
"kwargs",
")",
"return",
"__reconstruct",
"(",
"params",
")"
] | 19.8125 | 25.125 |
def be_array_from_bytes(fmt, data):
    """
    Read an array of type code ``fmt`` from a bytestring holding
    big-endian data, then pass it to ``fix_byteorder`` to adjust
    for the host byte order.
    """
    result = array.array(str(fmt), data)
    return fix_byteorder(result)
"def",
"be_array_from_bytes",
"(",
"fmt",
",",
"data",
")",
":",
"arr",
"=",
"array",
".",
"array",
"(",
"str",
"(",
"fmt",
")",
",",
"data",
")",
"return",
"fix_byteorder",
"(",
"arr",
")"
] | 28.5 | 5.833333 |
def initialize_repo(self):
    """
    Clones repository & sets up usernames.

    Runs ``git clone`` of ``self.git_url`` into ``self.repo_dir``
    (shallow when ``self.depth`` > 0, restricted to ``self.branch_name``),
    then sets a local user.name/user.email so later automated commits in
    this checkout succeed.

    Implemented as a generator: each git invocation is delegated to
    ``execute_cmd`` via ``yield from``.
    """
    logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir))
    clone_args = ['git', 'clone']
    if self.depth and self.depth > 0:
        # Shallow clone: only fetch the most recent `depth` commits.
        clone_args.extend(['--depth', str(self.depth)])
    clone_args.extend(['--branch', self.branch_name])
    clone_args.extend([self.git_url, self.repo_dir])
    yield from execute_cmd(clone_args)
    # Identity used for any commits made by nbgitpuller in this checkout.
    yield from execute_cmd(['git', 'config', 'user.email', 'nbgitpuller@example.com'], cwd=self.repo_dir)
    yield from execute_cmd(['git', 'config', 'user.name', 'nbgitpuller'], cwd=self.repo_dir)
    logging.info('Repo {} initialized'.format(self.repo_dir))
"def",
"initialize_repo",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Repo {} doesn\\'t exist. Cloning...'",
".",
"format",
"(",
"self",
".",
"repo_dir",
")",
")",
"clone_args",
"=",
"[",
"'git'",
",",
"'clone'",
"]",
"if",
"self",
".",
"depth",
"and",
"self",
".",
"depth",
">",
"0",
":",
"clone_args",
".",
"extend",
"(",
"[",
"'--depth'",
",",
"str",
"(",
"self",
".",
"depth",
")",
"]",
")",
"clone_args",
".",
"extend",
"(",
"[",
"'--branch'",
",",
"self",
".",
"branch_name",
"]",
")",
"clone_args",
".",
"extend",
"(",
"[",
"self",
".",
"git_url",
",",
"self",
".",
"repo_dir",
"]",
")",
"yield",
"from",
"execute_cmd",
"(",
"clone_args",
")",
"yield",
"from",
"execute_cmd",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'user.email'",
",",
"'nbgitpuller@example.com'",
"]",
",",
"cwd",
"=",
"self",
".",
"repo_dir",
")",
"yield",
"from",
"execute_cmd",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'user.name'",
",",
"'nbgitpuller'",
"]",
",",
"cwd",
"=",
"self",
".",
"repo_dir",
")",
"logging",
".",
"info",
"(",
"'Repo {} initialized'",
".",
"format",
"(",
"self",
".",
"repo_dir",
")",
")"
] | 49.066667 | 20.533333 |
def api_key_from_file(url):
    """ Check bugzillarc for an API key for this Bugzilla URL.

    Returns the key as a string, or None when the config file has no
    section for this host or no api_key option in it.
    """
    cfg_path = os.path.expanduser('~/.config/python-bugzilla/bugzillarc')
    cfg = SafeConfigParser()
    cfg.read(cfg_path)
    host = urlparse(url)[1]
    if host in cfg.sections() and cfg.has_option(host, 'api_key'):
        return cfg.get(host, 'api_key')
    return None
"def",
"api_key_from_file",
"(",
"url",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.config/python-bugzilla/bugzillarc'",
")",
"cfg",
"=",
"SafeConfigParser",
"(",
")",
"cfg",
".",
"read",
"(",
"path",
")",
"domain",
"=",
"urlparse",
"(",
"url",
")",
"[",
"1",
"]",
"if",
"domain",
"not",
"in",
"cfg",
".",
"sections",
"(",
")",
":",
"return",
"None",
"if",
"not",
"cfg",
".",
"has_option",
"(",
"domain",
",",
"'api_key'",
")",
":",
"return",
"None",
"return",
"cfg",
".",
"get",
"(",
"domain",
",",
"'api_key'",
")"
] | 35.727273 | 12.818182 |
def add_shapes(self,**kwargs):
    """
    Add a shape to the QuantFigure.

    kwargs :
        hline : int, list or dict
            Draws a horizontal line at the
            indicated y position(s)
            Extra parameters can be passed in
            the form of a dictionary (see shapes)
        vline : int, list or dict
            Draws a vertical line at the
            indicated x position(s)
            Extra parameters can be passed in
            the form of a dictionary (see shapes)
        hspan : (y0,y1)
            Draws a horizontal rectangle at the
            indicated (y0,y1) positions.
            Extra parameters can be passed in
            the form of a dictionary (see shapes)
        vspan : (x0,x1)
            Draws a vertical rectangle at the
            indicated (x0,x1) positions.
            Extra parameters can be passed in
            the form of a dictionary (see shapes)
        shapes : dict or list(dict)
            List of dictionaries with the
            specifications of a given shape.
            See help(cufflinks.tools.get_shape)
            for more information
    """
    cleaned = utils.check_kwargs(kwargs, get_shapes_kwargs(), {}, clean_origin=True)
    shapes = self.layout['shapes']
    for key, value in list(cleaned.items()):
        if key not in shapes:
            # First shape of this kind: always store as a list.
            shapes[key] = utils.make_list(value)
        elif utils.is_list(value):
            shapes[key].extend(value)
        else:
            shapes[key].append(value)
"def",
"add_shapes",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"utils",
".",
"check_kwargs",
"(",
"kwargs",
",",
"get_shapes_kwargs",
"(",
")",
",",
"{",
"}",
",",
"clean_origin",
"=",
"True",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
":",
"if",
"k",
"in",
"self",
".",
"layout",
"[",
"'shapes'",
"]",
":",
"if",
"utils",
".",
"is_list",
"(",
"v",
")",
":",
"self",
".",
"layout",
"[",
"'shapes'",
"]",
"[",
"k",
"]",
".",
"extend",
"(",
"v",
")",
"else",
":",
"self",
".",
"layout",
"[",
"'shapes'",
"]",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"else",
":",
"self",
".",
"layout",
"[",
"'shapes'",
"]",
"[",
"k",
"]",
"=",
"utils",
".",
"make_list",
"(",
"v",
")"
] | 29.853659 | 10.731707 |
def rename(self, **kwargs):
    '''Rename series in the group.

    Keyword arguments map old series names to new names; names that are
    not present in the group are silently ignored.
    '''
    for old_name, new_name in kwargs.iteritems():  # Python 2 dict API
        if old_name in self.groups:
            series = self.groups[old_name]
            self.groups[new_name] = series
            del self.groups[old_name]
"def",
"rename",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"old",
",",
"new",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"old",
"in",
"self",
".",
"groups",
":",
"self",
".",
"groups",
"[",
"new",
"]",
"=",
"self",
".",
"groups",
"[",
"old",
"]",
"del",
"self",
".",
"groups",
"[",
"old",
"]"
] | 38.666667 | 6.333333 |
def N50(arr):
  """N50 often used in assessing denovo assembly.

  Walks the lengths in ascending order and returns the length at which
  the running total first exceeds half of the overall sum.

  Writes to stderr and exits the process on an empty input, or if no
  length crosses the halfway point (only possible with non-positive
  values in the input).

  :param arr: list of numbers
  :type arr: number[] a number array
  :return: N50
  :rtype: float
  """
  if len(arr) == 0:
    sys.stderr.write("ERROR: no content in array to take N50\n")
    sys.exit()
  total = sum(arr)
  half = float(total)/float(2)
  cumulative = 0
  for length in sorted(arr):
    cumulative += length
    if float(cumulative) > half:
      return length
  # Fixed: failure message previously said "M50" instead of "N50".
  sys.stderr.write("ERROR: problem finding N50\n")
  sys.exit()
"def",
"N50",
"(",
"arr",
")",
":",
"if",
"len",
"(",
"arr",
")",
"==",
"0",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR: no content in array to take N50\\n\"",
")",
"sys",
".",
"exit",
"(",
")",
"tot",
"=",
"sum",
"(",
"arr",
")",
"half",
"=",
"float",
"(",
"tot",
")",
"/",
"float",
"(",
"2",
")",
"cummulative",
"=",
"0",
"for",
"l",
"in",
"sorted",
"(",
"arr",
")",
":",
"cummulative",
"+=",
"l",
"if",
"float",
"(",
"cummulative",
")",
">",
"half",
":",
"return",
"l",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR: problem finding M50\\n\"",
")",
"sys",
".",
"exit",
"(",
")"
] | 22.47619 | 19.52381 |
def member_add(self, stream_id, user_id):
    ''' add a user to a stream '''
    endpoint = 'pod/v1/room/' + str(stream_id) + '/membership/add'
    payload = '{ "id": %s }' % user_id
    status_code, response = self.__rest__.POST_query(endpoint, payload)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
"def",
"member_add",
"(",
"self",
",",
"stream_id",
",",
"user_id",
")",
":",
"req_hook",
"=",
"'pod/v1/room/'",
"+",
"str",
"(",
"stream_id",
")",
"+",
"'/membership/add'",
"req_args",
"=",
"'{ \"id\": %s }'",
"%",
"user_id",
"status_code",
",",
"response",
"=",
"self",
".",
"__rest__",
".",
"POST_query",
"(",
"req_hook",
",",
"req_args",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'%s: %s'",
"%",
"(",
"status_code",
",",
"response",
")",
")",
"return",
"status_code",
",",
"response"
] | 52.142857 | 13.857143 |
def Lines(startPoints, endPoints=None, scale=1, lw=1, c=None, alpha=1, dotted=False):
    """
    Build the line segments between two lists of points `startPoints` and `endPoints`.

    `startPoints` can be also passed in the form ``[[point1, point2], ...]``.

    :param float scale: apply a rescaling factor to the length
    :param int lw: line width of the segments
    :param c: color, passed through to the Actor
    :param float alpha: opacity, passed through to the Actor
    :param bool dotted: draw the segments with a stipple (dashed) pattern

    |lines|

    .. hint:: |fitspheres2.py|_
    """
    # Normalize the two-list form into the [[p1, p2], ...] pair form.
    if endPoints is not None:
        startPoints = list(zip(startPoints, endPoints))
    polylns = vtk.vtkAppendPolyData()
    for twopts in startPoints:
        lineSource = vtk.vtkLineSource()
        lineSource.SetPoint1(twopts[0])
        if scale != 1:
            # Rescale the segment about its start point.
            vers = (np.array(twopts[1]) - twopts[0]) * scale
            pt2 = np.array(twopts[0]) + vers
        else:
            pt2 = twopts[1]
        lineSource.SetPoint2(pt2)
        polylns.AddInputConnection(lineSource.GetOutputPort())
    polylns.Update()
    actor = Actor(polylns.GetOutput(), c, alpha)
    actor.GetProperty().SetLineWidth(lw)
    if dotted:
        # 0xF0F0 stipple: alternating on/off runs of four pixels.
        actor.GetProperty().SetLineStipplePattern(0xF0F0)
        actor.GetProperty().SetLineStippleRepeatFactor(1)
    settings.collectable_actors.append(actor)
    return actor
"def",
"Lines",
"(",
"startPoints",
",",
"endPoints",
"=",
"None",
",",
"scale",
"=",
"1",
",",
"lw",
"=",
"1",
",",
"c",
"=",
"None",
",",
"alpha",
"=",
"1",
",",
"dotted",
"=",
"False",
")",
":",
"if",
"endPoints",
"is",
"not",
"None",
":",
"startPoints",
"=",
"list",
"(",
"zip",
"(",
"startPoints",
",",
"endPoints",
")",
")",
"polylns",
"=",
"vtk",
".",
"vtkAppendPolyData",
"(",
")",
"for",
"twopts",
"in",
"startPoints",
":",
"lineSource",
"=",
"vtk",
".",
"vtkLineSource",
"(",
")",
"lineSource",
".",
"SetPoint1",
"(",
"twopts",
"[",
"0",
"]",
")",
"if",
"scale",
"!=",
"1",
":",
"vers",
"=",
"(",
"np",
".",
"array",
"(",
"twopts",
"[",
"1",
"]",
")",
"-",
"twopts",
"[",
"0",
"]",
")",
"*",
"scale",
"pt2",
"=",
"np",
".",
"array",
"(",
"twopts",
"[",
"0",
"]",
")",
"+",
"vers",
"else",
":",
"pt2",
"=",
"twopts",
"[",
"1",
"]",
"lineSource",
".",
"SetPoint2",
"(",
"pt2",
")",
"polylns",
".",
"AddInputConnection",
"(",
"lineSource",
".",
"GetOutputPort",
"(",
")",
")",
"polylns",
".",
"Update",
"(",
")",
"actor",
"=",
"Actor",
"(",
"polylns",
".",
"GetOutput",
"(",
")",
",",
"c",
",",
"alpha",
")",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetLineWidth",
"(",
"lw",
")",
"if",
"dotted",
":",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetLineStipplePattern",
"(",
"0xF0F0",
")",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetLineStippleRepeatFactor",
"(",
"1",
")",
"settings",
".",
"collectable_actors",
".",
"append",
"(",
"actor",
")",
"return",
"actor"
] | 31.216216 | 21 |
def get_assessment_ids(self):
    """Gets the Ids of any assessments associated with this activity.

    return: (osid.id.IdList) - list of assessment Ids
    raise:  IllegalState - is_assessment_based_activity() is false
    compliance: mandatory - This method must be implemented.
    """
    if not self.is_assessment_based_activity():
        raise IllegalState()
    return [Id(ident) for ident in self._my_map['assessmentIds']]
"def",
"get_assessment_ids",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_assessment_based_activity",
"(",
")",
":",
"raise",
"IllegalState",
"(",
")",
"else",
":",
"return",
"[",
"Id",
"(",
"a",
")",
"for",
"a",
"in",
"self",
".",
"_my_map",
"[",
"'assessmentIds'",
"]",
"]"
] | 38.75 | 19.416667 |
def get_readable_string(integer):
    r"""
    Convert an integer byte value to a readable 2-character representation.

    This is useful for reversing / hexdump-style output.
    Examples: 65 == "A.", 10 == "\n", 32 (space) == "__"; any other
    non-printable value falls back to a zero-padded hex pair.

    Returns a readable 2-char representation of an int.
    """
    # Fixed: 10/13 were previously swapped — ASCII 10 is LF ('\n') and
    # ASCII 13 is CR ('\r').
    if integer == 9:  # tab
        readable_string = "\\t"
    elif integer == 10:  # line feed
        readable_string = "\\n"
    elif integer == 13:  # carriage return
        readable_string = "\\r"
    elif integer == 32:  # space
        readable_string = '__'
    elif 33 <= integer <= 126:  # printable ascii
        readable_string = ''.join([chr(integer), '.'])
    else:  # everything else: padded hex byte
        readable_string = int_to_padded_hex_byte(integer)
    return readable_string
"def",
"get_readable_string",
"(",
"integer",
")",
":",
"if",
"integer",
"==",
"9",
":",
"#\\t",
"readable_string",
"=",
"\"\\\\t\"",
"elif",
"integer",
"==",
"10",
":",
"#\\r",
"readable_string",
"=",
"\"\\\\r\"",
"elif",
"integer",
"==",
"13",
":",
"#\\n",
"readable_string",
"=",
"\"\\\\n\"",
"elif",
"integer",
"==",
"32",
":",
"# space",
"readable_string",
"=",
"'__'",
"elif",
"integer",
">=",
"33",
"and",
"integer",
"<=",
"126",
":",
"# Readable ascii",
"readable_string",
"=",
"''",
".",
"join",
"(",
"[",
"chr",
"(",
"integer",
")",
",",
"'.'",
"]",
")",
"else",
":",
"# rest",
"readable_string",
"=",
"int_to_padded_hex_byte",
"(",
"integer",
")",
"return",
"readable_string"
] | 35.75 | 15.7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.